path (stringlengths 7-265) | concatenated_notebook (stringlengths 46-17M)
---|---|
dsp-session4-pandas1.ipynb | ###Markdown
basic import
###Code
import pandas as pd
obj = pd.Series([1,2,3,4])
obj
obj.values
obj.index
###Output
_____no_output_____
###Markdown
self-defined index/label
###Code
obj = pd.Series([1,2,3,4], index=list('ABCD'))
obj
obj.index
###Output
_____no_output_____
###Markdown
Indexing and slicing
###Code
obj[0]
obj[0:3]
obj[[0,2]]
obj['A']
obj['A':'D']
obj[['A','C']]
obj.loc['C':]
obj.iloc[1:3]
obj
obj > 2
obj[obj>2]
obj + 2
obj % 2
obj+obj
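# Worth noting for the slices above: positional slicing (obj[0:3], obj.iloc[1:3])
# excludes the stop position, while label-based slicing (obj['A':'D'], obj.loc['C':])
# includes the end label.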
###Output
_____no_output_____
###Markdown
Series and Dict
###Code
obj = pd.Series({'B':2,'C':1, 'D':3})
obj
obj2 = pd.Series({'B':2,'C':1, 'D':3}, index=['A','B','C'])
obj2
import numpy as np
np.nan
obj3=obj+obj2
obj3
pd.isnull(obj3)
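# The NaNs come from index alignment: a label missing from either Series yields NaN.
# Sketch: obj.add(obj2, fill_value=0) treats a missing label as 0, though a NaN that is
# already stored inside a Series (like obj2['A'] here) still propagates.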
'A' in obj3
'X' in obj3
obj3['D'] = 8
obj3
obj3['E'] = 10
obj3
obj = pd.Series([1,2,3,4])
obj
obj.index = ['One','Two','Three','Four']
obj
obj3.index = ['One','Two','Three','Four','Five']
obj3
obj.index = ['One','Two','Three']
###Output
_____no_output_____
###Markdown
Series with mixed data types
###Code
obj4 = pd.Series({'A':[1,2],'B':3,'C':4})
obj4
obj4.values
###Output
_____no_output_____
###Markdown
DataFrame construction
###Code
# list of list
import numpy as np
df = pd.DataFrame([[1,2],[3,4]])
df
# 2d array
import numpy as np
df = pd.DataFrame(np.array([[1,2],[3,4]]))
df
data = {
'name':['Alice','Tom','Steven'],
'grade':[70,80,95],
'gender':['M','F','M']
}
df = pd.DataFrame(data)
df
df.head(2)
df = pd.DataFrame(data, columns=['name','grade','number'])
df
df = pd.DataFrame(data, columns=['name','grade','number'], index=['one','two','three'])
df
df.name
df['grade']
df.loc['one']
df.iloc[0]
df
df.grade = 100
df
df.number = range(3)
df
s = pd.Series([1001,1002], index=['one','two'])
s
df.number = s
df
data = {
'name':{1:'Alice',2:'Tom',3:'Steven'},
'grade':{1:70,2:80,3:95},
}
df = pd.DataFrame(data)
df
df = pd.DataFrame(np.arange(9).reshape(3,3), index=list('abc'), columns=list('ABC'))
df
df.reindex(['a','b','d'])
df.reindex(columns=['A','C','D'])
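# reindex introduces NaN for labels that did not exist before ('d' and column 'D' here);
# passing fill_value, e.g. df.reindex(['a','b','d'], fill_value=0), fills them instead.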
ps = pd.Series(np.arange(5), index=list('ABCDE'))
ps
ps.drop('A')
ps
ps.drop(['A','B'])
df = pd.DataFrame(np.arange(9).reshape(3,3), index=['a','b','c'], columns=['one','two','three'])
df
df.drop('a')
df.drop(['a','b'])
df.drop('one', axis=1)
df
df.drop('one', axis=1, inplace=True)
df
df = pd.DataFrame(np.arange(9).reshape(3,3), index=['a','b','c'], columns=['one','two','three'])
df
df['one']
df[['one','two']]
df[1:]
df[df['one']>0]
df[df>5] = 5
df
df = pd.DataFrame(np.arange(9).reshape(3,3), index=['a','b','c'], columns=['one','two','three'])
df
df.loc['a',['one','two']]
df.loc[['a','c'], ['one','two']]
df.loc[:'c','two']
df.iloc[0,[0,1]]
df.iloc[:2,2:]
df.iloc[:2,2:][df>3]
df.at['b','one']
df.iat[1,0]
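# at/iat are scalar-only accessors: the same idea as loc/iloc but restricted to a single
# cell, which makes them the faster choice for one-off lookups.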
###Output
_____no_output_____ |
Visualization-Copy1.ipynb | ###Markdown
Paper visualizations
###Code
!pip install --user neural_renderer_pytorch
import os
import imageio
import trimesh
import torch
import numpy as np
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
%matplotlib inline
import neural_renderer as nr
from scipy.spatial import cKDTree as KDTree
from datasets import make_data_instance_from_stl
from models import *
import pdb
def get_rotate_matrix(rotation_angle1):
cosval = np.cos(rotation_angle1)
sinval = np.sin(rotation_angle1)
rotation_matrix_x = np.array([[1, 0, 0, 0],
[0, cosval, -sinval, 0],
[0, sinval, cosval, 0],
[0, 0, 0, 1]])
rotation_matrix_y = np.array([[cosval, 0, sinval, 0],
[0, 1, 0, 0],
[-sinval, 0, cosval, 0],
[0, 0, 0, 1]])
rotation_matrix_z = np.array([[cosval, -sinval, 0, 0],
[sinval, cosval, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
scale_y_neg = np.array([
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
])
neg = np.array([
[-1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]
])
# y,z swap = x rotate -90, scale y -1
# new_pts0[:, 1] = new_pts[:, 2]
# new_pts0[:, 2] = new_pts[:, 1]
#
# x y swap + negative = z rotate -90, scale y -1
# new_pts0[:, 0] = - new_pts0[:, 1] = - new_pts[:, 2]
# new_pts0[:, 1] = - new_pts[:, 0]
# return np.linalg.multi_dot([rotation_matrix_z, rotation_matrix_y, rotation_matrix_y, scale_y_neg, rotation_matrix_z, scale_y_neg, rotation_matrix_x])
return np.linalg.multi_dot([neg, rotation_matrix_z, rotation_matrix_z, scale_y_neg, rotation_matrix_x])
def get_projection_matricies(az, el, distance_ratio, roll = 0, focal_length=35, img_w=137, img_h=137):
"""
Calculate 4x3 3D to 2D projection matrix given viewpoint parameters.
Code from "https://github.com/Xharlie/DISN"
"""
F_MM = focal_length # Focal length
SENSOR_SIZE_MM = 32.
PIXEL_ASPECT_RATIO = 1. # pixel_aspect_x / pixel_aspect_y
RESOLUTION_PCT = 100.
SKEW = 0.
CAM_MAX_DIST = 1.75
CAM_ROT = np.asarray([[1.910685676922942e-15, 4.371138828673793e-08, 1.0],
[1.0, -4.371138828673793e-08, -0.0],
[4.371138828673793e-08, 1.0, -4.371138828673793e-08]])
# Calculate intrinsic matrix.
scale = RESOLUTION_PCT / 100
# print('scale', scale)
f_u = F_MM * img_w * scale / SENSOR_SIZE_MM
f_v = F_MM * img_h * scale * PIXEL_ASPECT_RATIO / SENSOR_SIZE_MM
# print('f_u', f_u, 'f_v', f_v)
u_0 = img_w * scale / 2
v_0 = img_h * scale / 2
K = np.matrix(((f_u, SKEW, u_0), (0, f_v, v_0), (0, 0, 1)))
# Calculate rotation and translation matrices.
# Step 1: World coordinate to object coordinate.
sa = np.sin(np.radians(-az))
ca = np.cos(np.radians(-az))
se = np.sin(np.radians(-el))
ce = np.cos(np.radians(-el))
R_world2obj = np.transpose(np.matrix(((ca * ce, -sa, ca * se),
(sa * ce, ca, sa * se),
(-se, 0, ce))))
# Step 2: Object coordinate to camera coordinate.
R_obj2cam = np.transpose(np.matrix(CAM_ROT))
R_world2cam = R_obj2cam * R_world2obj
cam_location = np.transpose(np.matrix((distance_ratio * CAM_MAX_DIST,
0,
0)))
T_world2cam = -1 * R_obj2cam * cam_location
# Step 3: Fix blender camera's y and z axis direction.
R_camfix = np.matrix(((1, 0, 0), (0, -1, 0), (0, 0, -1)))
R_world2cam = R_camfix * R_world2cam
T_world2cam = R_camfix * T_world2cam
RT = np.hstack((R_world2cam, T_world2cam))
# finally, consider roll
cr = np.cos(np.radians(roll))
sr = np.sin(np.radians(roll))
R_z = np.matrix(((cr, -sr, 0),
(sr, cr, 0),
(0, 0, 1)))
rot_mat = get_rotate_matrix(-np.pi / 2)
return K, R_z@RT@rot_mat
def load_fld(fld_path):
'''
Takes a path to a generated fld file with the following columns: x,y,z,p,k,omega,nut
and returns the mesh together with the fields interpolated onto its vertices.
'''
fld = np.genfromtxt(fld_path, delimiter=',', skip_header=1)
np.random.shuffle(fld)
fld[fld > 10e5] = np.nan
fld = fld[~np.isnan(fld).any(axis=1)]
answers = fld[:, 3:]
"""
mean_values = [-2.06707869e+00, 1.04133005e-01, 2.17513919e+02, 6.04485806e-05]
std_values = [3.71674873e+00, 4.93675056e-02, 1.10871494e+02, 2.63155496e-05]
for f in range(answers.shape[1]):
answers[:, f] = (answers[:, f] - mean_values[f]) / std_values[f]
"""
stl_path = fld_path.replace('fld', 'stl', 1)[:-9] + '.stl'
mesh = trimesh.load(stl_path)
# reinterpolate features on mesh
fld_tree = KDTree(fld[:, :3])
distances, indeces = fld_tree.query(mesh.vertices, k=1)
interpolations = answers[indeces].squeeze()
return mesh, interpolations
def load_predicted(ply_path):
'''
Takes a path to a predicted mesh (.ply) file and loads the corresponding
predicted fields from the matching .npy file in the 'predictions' folder.
'''
answers_path = ply_path.replace('meshes', 'predictions', 1)[:-4] + '.npy'
answers = np.load(answers_path)
mesh = trimesh.load(ply_path)
return mesh, answers
def interpolate_on_faces(field, faces):
#TODO: no batch support for now
nv = field.shape[0]
nf = faces.shape[0]
field = field.reshape((nv, 1))
# pytorch only supports long and byte tensors for indexing
face_coordinates = field[faces.long()].squeeze(0)
centroids = 1.0/3 * torch.sum(face_coordinates, 1)
return centroids.squeeze(-1)
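# Shape sketch: with V vertices and F faces, `field` is [V] and `faces` is [F, 3];
# the returned tensor holds one averaged value per face, i.e. shape [F].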
def visualize(vertices, faces, fields, field_to_visualize = 0,
img_resolution = 1200, azimuth = 210, elevation=10, distance_ratio = 0.8, colormap=cm.jet,
color_blind=False):
"""
Interface to neural_renderer to produce nice visualizations. It requires a GPU.
Inputs:
vertices in [V,3]
faces in [F,3]
fields in [V,3]
(ideally you can substitute this with a torch_geometric.data.Data object.
I didn't because I don't have it installed)
Output:
Image in [img_resolution, img_resolution, 3]
"""
# first set up camera
intrinsic, extrinsic = get_projection_matricies(azimuth, elevation, distance_ratio, img_w=img_resolution, img_h=img_resolution)
K_cuda = torch.tensor(intrinsic[np.newaxis, :, :].copy()).float().cuda().unsqueeze(0)
R_cuda = torch.tensor(extrinsic[np.newaxis, 0:3, 0:3].copy()).float().cuda().unsqueeze(0)
t_cuda = torch.tensor(extrinsic[np.newaxis, np.newaxis, 0:3, 3].copy()).float().cuda().unsqueeze(0)
# initialize renderer
renderer = nr.Renderer(image_size = img_resolution, orig_size = img_resolution, K=K_cuda, R=R_cuda, t=t_cuda, anti_aliasing=True)
# now move vertices, faces to GPU
verts_dr = torch.tensor(vertices.copy(), dtype=torch.float32, requires_grad = False).cuda()
faces_dr = torch.tensor(faces.copy()).cuda()
field_dr = torch.tensor(fields[:, field_to_visualize].copy(),dtype=torch.float32, requires_grad = False).cuda()
# interpolate field on triangle centers
field_on_faces = interpolate_on_faces(field_dr, faces_dr)
#TODO: find good values here? Maybe across the dataset to make visualization consistent? or this is good enough? I am not sure...
norm = mpl.colors.Normalize(vmin= -6, vmax=6)
cmap = colormap
m = cm.ScalarMappable(norm=norm, cmap=cmap)
# field_on_faces = torch.clamp((field_on_faces-field_min)/(field_max-field_min),0,1)
textures_dr = torch.ones(faces_dr.shape[0], 1, 1, 1, 3, dtype=torch.float32).cuda()
# feel free to pick your favorite color map here, I used this one for Sanity check, maybe we can use another one here??
if not color_blind:
textures_dr[:,0,0,0, :] = torch.tensor(list(map(m.to_rgba, field_on_faces.cpu().detach())), dtype=torch.float32).cuda()[:, :3]
images_out, depth, alpha = renderer(verts_dr.unsqueeze(0), faces_dr.unsqueeze(0), textures_dr.unsqueeze(0))
images_out = torch.cat([images_out[0], alpha])
image_out_export = 255*images_out.detach().cpu().numpy().transpose((1, 2, 0))
return image_out_export
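# Hypothetical usage (the path below is an assumption, not taken from this notebook):
# mesh, fields = load_fld('/path/to/some_case.fld')
# img = visualize(mesh.vertices, mesh.faces, fields, field_to_visualize=0)
# imageio.imwrite('render.png', img.astype(np.uint8))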
def make_data_instance_from_ply(path):
mesh = trimesh.load(path)
edge_attr = [mesh.vertices[a] - mesh.vertices[b] for a, b in mesh.edges]
data = torch_geometric.data.Data(x = torch.tensor(mesh.vertices, dtype=torch.float),
pos= torch.tensor(mesh.vertices, dtype=torch.float),
face = torch.tensor(mesh.faces, dtype=torch.long).t(),
edge_attr = torch.tensor(edge_attr, dtype=torch.float),
edge_index= torch.tensor(mesh.edges, dtype=torch.long).t().contiguous())
return data
def process_mesh(path, suffix="", model=None, out_dir=None, take_from_fld=True, prefields=None,
norm_field=False, **kwargs):
FLD_PATH = path
if take_from_fld:
mesh, fields = load_fld(FLD_PATH)
else:
mesh, fields = load_predicted(FLD_PATH)
if out_dir is None:
out_dir = os.path.join(*FLD_PATH.split("/")[:-2], 'output')
if model is not None:
if suffix == "":
suffix = '_predicted'
if take_from_fld:
data_instance = make_data_instance_from_stl(path)
else:
data_instance = make_data_instance_from_ply(path)
fields = model(data_instance.to('cuda:0')).cpu().detach().numpy()
if prefields is not None:
fields = prefields
if norm_field:
fields = (fields - np.mean(fields[:, 0])) / np.std(fields[:, 0])
image = visualize(mesh.vertices, mesh.faces, fields, **kwargs)
image_filename = os.path.join(out_dir, FLD_PATH.split("/")[-1][:-4]) + suffix + ".png"
imageio.imwrite(image_filename, image.astype(np.uint8))
def process_dir(path):
files = os.listdir(path)
for name in files:
process_mesh(os.path.join(path, name))
model = SplineCNN8Residuals(3)
model.load_state_dict(torch.load("Expirements/Networks15/normilized_full_latest.nn"))
model.to('cuda:0')
model = model.eval()
print("done")
###Output
done
###Markdown
Optimization Plots
###Code
def visualize_mesh_opt(path, out, take_each=3, baseline=False, **kwargs):
if not os.path.exists(out):
os.makedirs(out)
path = os.path.join(path, 'meshes')
files = [os.path.join(path, name) for name in filter(lambda x: x[0] == '0' and x[-3:] == "ply", os.listdir(path))]
for idx in range(0, len(files), take_each): #(list(range(0, 30, take_each)) +
inp_path = files[idx]
data_instance = make_data_instance_from_ply(inp_path)
if baseline:
if idx == 0:
edge_attrs = data_instance.edge_attr
continue
else:
data_instance.edge_attr = edge_attrs
fields = model(data_instance.to('cuda:0')).cpu().detach().numpy()
# process_mesh(inp_path, suffix='_intr', prefields=fields,
# out_dir=out, norm_field=True, **kwargs,
# azimuth=240, elevation=5, take_from_fld=False)
# process_mesh(inp_path, suffix='_angl', prefields=fields,
# out_dir=out, norm_field=True, **kwargs,
# take_from_fld=False)
# process_mesh(inp_path, suffix='_pery', prefields=fields,
# out_dir=out, norm_field=True, **kwargs,
# azimuth=-270, elevation=90, take_from_fld=False)
process_mesh(inp_path, suffix='_perz', prefields=fields,
out_dir=out, norm_field=True, **kwargs,
azimuth=-270, elevation=0, take_from_fld=False)
# process_mesh(inp_path, suffix='_perx', prefields=fields,
# out_dir=out, norm_field=True, **kwargs,
# azimuth=180, elevation=0, take_from_fld=False)
# process_mesh(inp_path, suffix='_spoiler2', prefields=fields,
# out_dir=out, norm_field=True,
# azimuth=-45, elevation=0, take_from_fld=False)
# for idx in [535]:
# visualize_mesh_opt('Expirements/OptimizationPaper/AfterMeeting/FreeformDrag/%dminus/' % idx,
# 'Expirements/Visualizations/Paper/OptimizationDifferent/%dFreeFormMinus/' % idx,
# baseline=True, take_each=3)
# for idx in [69]:
# visualize_mesh_opt('Expirements/OptimizationPaper/AfterMeeting/UmetamiDrag2/%04d/' % idx,
# 'Expirements/Visualizations/Paper/OptimizationDifferent/%dUmetami/' % idx,
# baseline=True, take_each=3)
for idx in [535, 69, 32, 162, 61]:
visualize_mesh_opt('Expirements/OptimizationPaper/AfterMeeting/ScalingDrag/%d/' % idx,
'Expirements/OptimizationPaper/AfterMeeting/ScalingDrag/%d/frames' % idx, take_each=1, baseline=True)
for idx in [175]:
visualize_mesh_opt('Expirements/OptimizationPaper/AfterMeeting/FreeformDrag/%03d/' % idx,
'Expirements/Visualizations/Paper/HighRes/FreeForm%04d/' % idx, take_each=19)
for idx in [175]:
visualize_mesh_opt('Expirements/OptimizationPaper/AfterMeeting/DeepSDFDrag//%03d/' % idx,
'Expirements/Visualizations/Paper/HighRes/DeepSDF%04d/' % idx, take_each=100)
fields = np.load(a.replace('meshes', 'predictions').replace('ply', 'npy'))
fields
plya
visualize_mesh_opt('Expirements/OptimizationPaper/AfterMeeting/DeepSDFDrag/175/',
'Expirements/Visualizations/Paper/OptimizationDifferent/175SpoilerDisappear')
###Output
_____no_output_____
###Markdown
Heatmap Visualizations
###Code
out_dir = "Expirements/Visualizations/Paper/PredictionComparison/afmhotNormFull_1"
colormap = cm.afmhot
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0001_0015.fld',
out_dir=out_dir, norm_field=True, colormap=colormap)
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0001_0015.fld',
out_dir=out_dir, norm_field=True, model=model, colormap=colormap)
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0002_0015.fld',
out_dir=out_dir, norm_field=True, colormap=cm.afmhot)
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0002_0015.fld',
out_dir=out_dir, norm_field=True, model=model, colormap=colormap)
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0003_0015.fld',
out_dir=out_dir, norm_field=True)
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0003_0015.fld',
out_dir=out_dir, norm_field=True, model=model, colormap=colormap)
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0004_0015.fld',
out_dir=out_dir, norm_field=True, colormap=colormap)
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0004_0015.fld',
out_dir=out_dir, norm_field=True, model=model, colormap=colormap)
out_dir = "Expirements/Visualizations/Paper/PredictionComparison/hotNormFull_1"
colormap = cm.hot
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0001_0015.fld',
out_dir=out_dir, norm_field=True, colormap=colormap)
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0001_0015.fld',
out_dir=out_dir, norm_field=True, model=model, colormap=colormap)
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0002_0015.fld',
out_dir=out_dir, norm_field=True, colormap=cm.afmhot)
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0002_0015.fld',
out_dir=out_dir, norm_field=True, model=model, colormap=colormap)
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0003_0015.fld',
out_dir=out_dir, norm_field=True)
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0003_0015.fld',
out_dir=out_dir, norm_field=True, model=model, colormap=colormap)
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0004_0015.fld',
out_dir=out_dir, norm_field=True, colormap=colormap)
process_mesh('/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0004_0015.fld',
out_dir=out_dir, norm_field=True, model=model, colormap=colormap)
out_dir = "Expirements/Visualizations/Paper/HighRes"
colormap = cm.jet
for idx in [411]:
inp_path = '/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/%04d_0015.fld' % idx
if os.path.exists(inp_path):
process_mesh(inp_path, suffix='_spoilerHR_-120_10',
out_dir=out_dir, norm_field=True,
azimuth=-120, elevation=10)
# out_dir=out_dir, norm_field=True,
# azimuth=240, elevation=5, img_resolution=600)
# process_mesh(inp_path, suffix='_angl',
# out_dir=out_dir, norm_field=True, img_resolution=600)
# process_mesh(inp_path, suffix='_pery',
# out_dir=out_dir, norm_field=True,
# azimuth=270, elevation=90, img_resolution=600)
# process_mesh(inp_path, suffix='_perz',
# out_dir=out_dir, norm_field=True,
# azimuth=270, elevation=0, img_resolution=600)
# process_mesh(inp_path, suffix='_perx',
# out_dir=out_dir, norm_field=True,
# azimuth=180, elevation=0, img_resolution=600)
# process_mesh(inp_path, out_dir=out_dir, norm_field=True, model=model, colormap=colormap, img_resolution=600)
else:
print("No such file ", inp_path)
inp_path = 'Expirements/OptimizationPaper/AfterMeeting/DeepSDFDrag/175/meshes/00039.ply'
data_instance = make_data_instance_from_ply(inp_path)
fields = model(data_instance.to('cuda:0')).cpu().detach().numpy()
process_mesh (inp_path , prefields=fields,
out_dir="Expirements/Visualizations/Paper/OptimizationDifferent/175SpoilerDisappear",
norm_field=True, suffix='_spoiler', azimuth=-30, elevation=0, take_from_fld=False)
###Output
_____no_output_____
###Markdown
Display Distributions
###Code
mesh, fields = load_fld('/cvlabdata2/home/artem/Data/cars_refined/simulated/fld/0002_0005.fld')
print( np.min(fields[:, 0]), np.max(fields[:, 0]) )
norm_fields = (fields[:, 0] - np.mean(fields[:, 0])) / np.std(fields[:, 0])
print(np.min(norm_fields), np.max(norm_fields))
plt.hist(norm_fields, bins=100)
plt.show()
###Output
_____no_output_____
###Markdown
Draw Colormap
###Code
img = plt.imshow(np.array([[-6, 6]]), cmap="jet")
img.set_visible(False)
plt.colorbar(orientation="vertical")
import pylab as pl
import numpy as np
a = np.array([[-1,1]])
pl.figure(figsize=(1, 9))
img = pl.imshow(a, cmap="jet")
pl.gca().set_visible(False)
cb = pl.colorbar(orientation="vertical", cax=pl.axes([0.1, 0.2, 0.4, 0.6]), ticks=[-0.8, 0, 0.8])
lines = cb.ax.tick_params(size = 0, width = 5)
pl.savefig("Expirements/Visualizations/Paper/PredictionComparison/jetColorMapOld/colorbar.png")
len(lines[0].get_linewidths())
###Output
_____no_output_____
###Markdown
Optimisation Progress
###Code
root = '/cvlabdata2/home/artem/DeepSDF/Expirements/OptimizationPaper/CleanedDataBadDrag/'
for name in filter(lambda x: x[0] != '.' and x != 'DeepSDFDragFree', os.listdir(root)):
result = 0
num = 0
exp_dir = os.path.join(root, name)
for idx in filter(lambda x: x[0] != '.', os.listdir(exp_dir)):
for step_id in [0, 10, 20, 29]:
file_name = os.path.join(exp_dir, str(idx), 'meshes', str(step_id))
print(file_name)
###Output
_____no_output_____ |
Capturing-Feature-Flags/Commits Analysis.ipynb | ###Markdown
Analyze the commit histories from the scraper
In this notebook, we aim to identify *feature flagging* projects by analyzing commits that contain *feature flagging* phrases which we scraped from GitHub.
###Code
import json
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import statistics
from github import Github
%matplotlib inline
pd.set_option('display.max_colwidth', -1)
###Output
_____no_output_____
###Markdown
Phase 1: Initial Sampling
We collected all commits from GitHub that contain *feature flagging* identifiers.
###Code
df = pd.read_csv("feature_all_commits.csv", sep=';', header=0, encoding = "ISO-8859-1")
df = df.drop_duplicates()
df.fillna('', inplace=True)
counts = df['slug'].value_counts()
print("Number of projects: %d" % len(counts))
print("Number of commits: %d" % len(df))
###Output
Number of projects: 231223
Number of commits: 3918003
###Markdown
Phase 2: Data Cleaning
As there are too many commits, we first need to filter the collected data.
* 2.1, filter by number of *feature flagging* commits (>=10)
* 2.2, remove duplicate projects, i.e., projects that are clones (not forks)
2.1, filter by number of commits
###Code
min_nr_commits = 10
df = df[df['slug'].isin(df['slug'].value_counts()[df['slug'].value_counts()>=min_nr_commits].index)]
counts = df['slug'].value_counts()
print("Number of projects (commits >= %d): %d" % (min_nr_commits, len(counts)))
print("Number of commits: %d" % len(df))
###Output
Number of projects (commits >= 10): 48512
Number of commits: 3507783
###Markdown
Manually classified projects
In the following lists, we store the *manually* classified projects:
* **none_feature_flag_repos**: projects that do not use feature flagging
* **clones**: cloned projects that were not detected in our filtering
* **dont_know**: projects where we are not sure how and for what purpose they use flags
* **feature_flag_repos**: projects that use feature flags
###Code
none_feature_flag_repos = ['torvalds/linux', 'git-portage/git-portage', 'obache/xxxsrc', 'piyawad/test1', 'CTSRD-CHERI/cheribsd', 'marco-c/gecko-dev-wordified', 'witwall/icu46', 'sambler/myblender', 'jyin0813/OpenBSD-src','glk/freebsd-head', 'geogebra/geogebra', 'yaomeiguan/epiphany-sourceware', 'frida/v8',
'navsystem/gentoo-portage', 'virtbsd/virtbsd', 'aosp-mirror/platform_external_skia', 'davidswelt/aquamacs-emacs-pre2015', 'llvm-mirror/llvm', 'oyvindberg/ScalablyTyped', 'PuniCZ/soulcore', 'wzrdtales/tdb', 'chapel-lang/chapel', 'llvm-project/llvm-project-20170507', 'freebsd/freebsd-ports-gnome',
'openstack/openstack', 'apache/subversion', 'hisilicon/qemu', 'maldini03/redkancut', 'navsystem/gentoo-portage', 'virtbsd/virtbsd', 'aosp-mirror/platform_external_skia', 'davidswelt/aquamacs-emacs-pre2015', 'llvm-mirror/llvm', 'oyvindberg/ScalablyTyped', 'PuniCZ/soulcore', 'wzrdtales/tdb',
'chapel-lang/chapel', 'llvm-project/llvm-project-20170507', 'freebsd/freebsd-ports-gnome', 'openstack/openstack', 'apache/subversion', 'hisilicon/qemu', 'maldini03/redkancut', 'bminor/mesa-mesa', 'joker-eph/clangllvm', 'jmckaskill/subversion', 'guix-mirror/guix', 'mutoso-mirrors/linux-historical',
'scummvm/scummvm', 'CleverRaven/Cataclysm-DDA', 'twitter-forks/mysql', 'DragonFlyBSD/DragonFlyBSD', 'obache/nul', 'dspinellis/linux-history-repo', 'pf3gnuchains/pf3gnuchains4x', 'yedaoq/icu46', 'linux-mailinglist-archives/linux-kernel.vger.kernel.org.0', 'davidl-zend/zenddbi', 'SHIFTPHONES/android_kernel_shift_mt6797',
'svn2github/subversion', 'markphip/subversion', 'Distrotech/evolution', '1367485513/riscv-binutils', '0xAX/emacs', '1095811981/studyCode', '1500WK1500/u-boot-nexusS', '10sr/gnumeric-gda', 'IIJ-NetBSD/netbsd-src', '79man/asan', '1g4-linux/binutils-gdb', '2asoft/freebsd-ports-my', 'simonpcook/llvm-clang-submodulerepo',
'habitat-sh/habitat', 'BitconFeng/Deep-Feature-video',
'AOEpeople/TYPO3-Feature-Flag', #feature flag library
]
clones = ['0vert1m3/test', '0100111001000010/Homebrew', '0xfab-ri/ffmpeg', 'Havoc-OS/android_frameworks_base', 'AOSPSubstratum/frameworks_base', 'ekayy/eatlovegive', 'miju12/hardware_qcom-caf_sm8150', 'mitodl/reddit', 'Kazicata747/A.House',
'dhruv0018/intelligence-web', # actual is krossoverintelligence/intelligence-web
'tonado/openstack-dev', 'Alex-teach/Movoo', 'MilenMT/DistributedServerCache', 'ahmadgunn/android_kernel_whyred', 'XPerience-AOSP-Lollipop/android_kernel_leeco_msm8976', 'FanCooking/android_kernel_lk', 'project-draco-hr/neo4j', 'ParrotSec/linux-parrot',
]
dont_know = ['LibreOffice/core', 'gcc-mirror/gcc', 'mozilla/gecko-dev', 'webrtc-uwp/chromium-tools', 'bazelbuild/bazel', 'bloomberg/chromium.bb', 'tianocore/edk2', 'AidenFeng/EDKII', '10Dev/Blender3D', 'llvm-mirror/clang', '136060150/webrtc', 'llvm-mirror/compiler-rt', 'WordPress/gutenberg', 'AtomCommunity/hardware_qcom_audio-caf_sm8150',
'iomintz/thumbnail-finder', # somehow not indexed
'pavithracjs/atlassian-ui-library', # dont know how to scrape
'Enalean/tuleap', # lots of flags that are not feature toggles
'pavithracjs/atlassian-ui-library',
'alphagov/whitehall', # does not really use the flags in the project
'HeisenbergKernel/prima', # see https://github.com/HeisenbergKernel/prima/blob/4225852f6e7ed47819137b6c298093b57b588ad0/Kbuild
'SuperiorOS/android_external_e2fsprogs', 'halfline/rhel7', # they use flags but I dont know what they actually use them for
'Unity-Technologies/ScriptableRenderPipeline', 'Sravan-Devarapalli/Milestone-v0.920', 'openzfsonosx/zfs', 'alphagov/pay-connector', # unclear
'SalesforceFoundation/Cumulus', # I dont know
'eciis/web', # I dont know how they actually use toggles
'Augmint/augmint-web', # too few flags see: https://github.com/Augmint/augmint-web/tree/staging/src/containers/account/index.js
]
feature_flag_repos = ['chromium/chromium', 'nelsonomuto/test-complexity', 'instructure/canvas-lms', 'dimagi/commcare-hq', 'Automattic/wp-calypso', 'gitlabhq/gitlabhq','stonedpebbles/inventory-check', 'kubernetes/kubernetes', 'crdroidandroid/android_frameworks_base', 'live-clones/launchpad', 'CodeNow/runnable-angular',
'juju/juju', 'Factlink/factlink-core', 'hypothesis/h', 'emberjs/ember.js', 'SIGLUS/lmis-moz-mobile', 'edx/edx-platform', 'rogerwang/WebKit_trimmed', 'CartoDB/cartodb', 'rust-lang/rust', 'alphagov/govuk-puppet', 'ceph/ceph', 'wordpress-mobile/WordPress-iOS', 'hello/suripu', 'WebKit/webkit', '1480c1/aom', 'aosp-mirror/platform_frameworks_base',
'moneyadviceservice/frontend', 'Audiobay/audiobaymarketplace', 'department-of-veterans-affairs/vets-website', 'cfpb/cfgov-refresh', 'getsentry/sentry', 'dantehemerson/gitter-webap-mirror', 'sharetribe/sharetribe', 'ets-berkeley-edu/calcentral', 'department-of-veterans-affairs/caseflow', 'Aperta-project/Aperta', 'lupapiste/lupapiste',
'keybase/client', 'circleci/frontend', 'cloudfoundry/cloud_controller_ng', 'franbow/shopware', 'VisualComposer/builder', 'facebook/react', 'UniversityofWarwick/tabula', 'reddit-archive/reddit', 'KaurAvneet/Oculo', 'PIH/mirebalais-puppet', 'gocd/gocd', 'Bootleggers-BrokenLab/packages_apps_Launcher3', 'hmcts/probate-frontend',
'dotnet/roslyn', 'Yoast/wordpress-seo', 'education/classroom', 'smbc-digital/iag-webapp', 'signalapp/Signal-iOS', 'fabric8-ui/fabric8-ui', 'influxdata/influxdb', 'letsencrypt/boulder', 'DoSomething/phoenix', 'wordpress-mobile/WordPress-Android', 'rets-ci/wp-rets-client', 'neo4j/neo4j', 'bundler/bundler',
'uktrade/great-domestic-ui', 'vespa-engine/vespa', 'kangdroid-project/platform_cts', 'transcom/mymove', 'xapi-project/xen-api', 'ZeitOnline/vivi', 'carbon-design-system/carbon', 'alphagov/digitalmarketplace-supplier-frontend', 'kubernetes/kops', 'sonaproject/tempest', 'uktrade/data-hub-frontend', 'loomnetwork/loomchain',
'desktop/desktop', '4teamwork/opengever.core', 'newrelic/node-newrelic', 'emberjs/data', 'zalando/nakadi', 'all-of-us/workbench', 'DFE-Digital/schools-experience', 'matrix-org/matrix-react-sdk', 'spinnaker/deck', 'openstack/devstack', 'zooniverse/Panoptes', 'PIH/openmrs-module-mirebalais', 'travis-ci/travis-api',
'hmrc/mobile-help-to-save', 'dialogs/api-schema', 'tokio-rs/tracing', '18F/identity-idp', 'devgateway/ocvn', 'ministryofjustice/prison-visits-2', 'ccrpjournal/clinicalresearch', 'Yoast/javascript', 'rafaeljesus/newww', 'navikt/modiapersonoversikt', 'Opentrons/opentrons', 'woocommerce/woocommerce-ios', 'DFE-Digital/get-help-to-retrain',
'tokio-rs/tokio']
###Output
_____no_output_____
###Markdown
2.2, filter out projects that are clones (not forks)
* remove the projects that have a commit with the same *SHA* (keep the project with more commits)
* remove the projects that have a commit whose *title + line changes* are the same (keep the project with more commits)
###Code
known_roots = ["chromium/chromium", "torvalds/linux", "llvm-mirror/llvm", "WordPress/WordPress", 'aosp-mirror/platform_frameworks_base', 'instructure/canvas-lms', 'sharetribe/sharetribe']
'''removes clones whose commits share the same SHA'''
def remove_clones_sha(df, slug, inplace=True):
df_slug = df[df['slug'] == slug]
shas = df_slug['sha'].values
df_clones = df[df['sha'].isin(shas)]
df_clones = df_clones[df_clones['slug'] != slug]
if len(df_clones) > 0:
df = df[~df['slug'].isin(df_clones['slug'].values)]
# df.drop(df.loc[lambda df: df['slug'].isin(df_clones['slug'].values)].index, inplace=inplace)
return df, df_clones['slug'].unique().tolist()
''' removes clones whose commits share the same title + line changes.
Some clones have commits with new SHAs, and the commit message may have been changed as well.'''
def remove_clones(df, slug, inplace=True):
df_slug = df[df['slug'] == slug]
df_slug = df_slug[(df_slug['title'].str.len() > 10) & (df_slug['changes'].str.len() > 10)]
df_clones = df[(df['title_changes']).isin(df_slug['title_changes'])]
df_clones = df_clones[df_clones['slug'] != slug]
if len(df_clones) > 0:
df.drop(df.loc[lambda df: df['slug'].isin(df_clones['slug'].values)].index, inplace=inplace)
return df_clones['slug'].unique().tolist()
def get_slugs(df):
slugs = []
classified_slugs = known_roots + none_feature_flag_repos + feature_flag_repos + dont_know
slugs += classified_slugs
for slug in df['slug'].value_counts().keys():
if slug not in classified_slugs:
slugs.append(slug)
return slugs
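# Note: value_counts() sorts in descending order, so projects with more flagging commits
# are visited first and their duplicates get removed; this is what realizes the
# "keep the project with more commits" rule described above.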
def remove_all_copies(df):
# remove known clones
df = df[~df['slug'].isin(clones)]
# remove clones with same SHA
removed_slugs = []
for slug in tqdm(get_slugs(df)):
if slug not in removed_slugs:
df, new_removed_slugs = remove_clones_sha(df, slug)
removed_slugs += new_removed_slugs
# remove clones with same title + line change
df['title_changes'] = df['title'] + df['changes']
for slug in tqdm(get_slugs(df)):
if slug not in removed_slugs:
removed_slugs += remove_clones(df, slug)
df.drop(['title_changes'], axis=1)
return df
df = remove_all_copies(df)
df_candidate_counts = df['slug'].value_counts()
df_candidate_counts.to_csv('candidate_projects.csv', header=['Flagging Commits'])
print("Number of projects (No clones): %d" % len(df_candidate_counts))
print("Number of commits: %d" % len(df))
###Output
Number of projects (No clones): 3239
Number of commits: 126067
###Markdown
remove merge commits
###Code
df = df[~df['message'].str.match("Merge branch '[\w\-\/]+' into [\w\-\/]*")]
df = df[df['slug'].isin(df['slug'].value_counts()[df['slug'].value_counts()>=min_nr_commits].index)]
counts = df['slug'].value_counts()
print("Number of projects (No merges): %d" % len(counts))
print("Number of commits: %d" % len(df))
df.to_csv('commits_after_cleanup.csv', sep = ';', encoding = "ISO-8859-1")
###Output
_____no_output_____
###Markdown
Phase 3: Assembling a Dataset of Feature Flagging Projects
From the remaining projects, we need to manually (iteratively) identify likely feature flagging projects.
The following code is used for the improved heuristic that orders projects by their likelihood of using *feature flags*. We used this script to identify feature flagging projects.
###Code
'''Used to start the script directly from here.'''
df = pd.read_csv('commits_after_cleanup.csv', sep=';', header=0, encoding = "ISO-8859-1")
df_feature_flags = df[df['slug'].isin(feature_flag_repos)]
print("Number of classified feature flagging projects: %d" % len(df_feature_flags['slug'].value_counts()))
df_unclassified = df
df_unclassified = df_unclassified[~df_unclassified['slug'].isin(none_feature_flag_repos)]
df_unclassified = df_unclassified[~df_unclassified['slug'].isin(clones)]
df_unclassified = df_unclassified[~df_unclassified['slug'].isin(dont_know)]
df_unclassified = df_unclassified[~df_unclassified['slug'].isin(feature_flag_repos)]
print("Number of unclassified projects: %d" % len(df_unclassified['slug'].value_counts()))
###Output
Number of unclassified projects: 3002
###Markdown
Plot the distribution of query matches to feature flagging and non-feature flagging projects
###Code
def plot_query_distribution(repos, title=None):
df_repos = df[df['slug'].isin(repos)]
all_feature_flagging = ["feature flag", "feature toggle", "feature switch",
"feature flipper", "feature gate", "feature bit"]
all_removal = ["remove flag", "remove toggle", "cleanup flag",
"cleanup toggle", "delete flag", "delete toggle"]
query_strings = all_feature_flagging + all_removal
queries = {}
for query in query_strings:
if ' ' in query:
split = query.split(' ')
queries[split[0] + ".{0,50}" + split[1]] = []
queries['all feature flagging'] = []
queries['all removal'] = []
for query in queries:
queries[query] = []
for slug in repos:
df_slug = df_repos[df_repos['slug'] == slug]
totalCount = len(df_slug)
if totalCount == 0:
continue
prog_1 = re.compile(query, flags=re.IGNORECASE)
if query == 'all feature flagging':
flagging_query = ''
for q in all_feature_flagging:
flagging_query += '(' + q.split(' ')[0] + '.{0,50}' + q.split(' ')[1] + ')|'
prog_1 = re.compile(flagging_query[0:-1], flags=re.IGNORECASE)
if query == 'all removal':
removal_query = ''
for q in all_removal:
removal_query = removal_query + '(' + q.split(' ')[0] + '.{0,50}' + q.split(' ')[1] + ')|'
prog_1 = re.compile(removal_query[0:-1], flags=re.IGNORECASE)
count = 0
for message in df_slug.message:
if prog_1.search(message) is not None:
count = count + 1
queries[query].append(100 / totalCount * count)
df_queries = pd.DataFrame(queries)
plot = df_queries.plot.box(figsize=(5,2), showfliers = True, ylim=(-1,101))
plt.xticks(rotation=90)
return plot
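# Sketch of what the generated patterns look like (assumed example, not project data):
# "feature flag" becomes r"feature.{0,50}flag", so a commit message such as
# "Put the new search feature behind a flag" counts as a match.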
###Output
_____no_output_____
###Markdown
Non feature flagging projects
###Code
plot = plot_query_distribution(none_feature_flag_repos, 'non-flagging repos')
plt.gcf().subplots_adjust(bottom=0)
plot.get_figure().savefig('distribution_non_flagging.pdf', format='pdf', bbox_inches="tight")
###Output
_____no_output_____
###Markdown
Feature flagging projects
###Code
plot = plot_query_distribution(feature_flag_repos, 'flagging repos')
plt.gcf().subplots_adjust(bottom=0)
plot.get_figure().savefig('distribution_flagging.pdf', format='pdf', bbox_inches="tight")
###Output
_____no_output_____
###Markdown
Sort and filter the remaining projects based on their likelihood of using feature flags
###Code
# https://stackoverflow.com/questions/15325182/how-to-filter-rows-in-pandas-by-regex
df_filtered = df[df['slug'].isin(df['slug'].value_counts()[df['slug'].value_counts()>=10].index)]
df_filtered['message'].fillna('', inplace=True)
df_feature_flag_regexed = df_filtered[df_filtered['message'].str.contains(r'((feature.{0,50}flag)|(feature.{0,50}toggle)|(feature.{0,50}flipper))')]
# df_feature_flag_regexed = df_filtered[df_filtered['message'].str.contains(r'((feature flag)|(feature toggle)|(feature flipper))')]
series_counts_feature_flag_regexed = df_feature_flag_regexed['slug'].value_counts()
series_counts = df_filtered['slug'].value_counts()
df_counts_feature_flag_regexed = pd.DataFrame(list(series_counts_feature_flag_regexed.to_dict().items()), columns=['slug', 'regex_count'])
df_counts = pd.DataFrame(list(series_counts.to_dict().items()), columns=['slug', 'count_all'])
merged = pd.merge(df_counts_feature_flag_regexed,df_counts,on="slug")
merged = pd.DataFrame(zip(merged['slug'],100/merged['count_all']*merged['regex_count']), columns=['slug', 'percentage_regex'])
merged = pd.merge(merged, df_counts, on='slug')
merged = pd.merge(merged, df_counts_feature_flag_regexed, on='slug')
df_ff_projects = merged
df_ff_projects = df_ff_projects[~df_ff_projects['slug'].isin(feature_flag_repos)]
df_ff_projects = df_ff_projects[~df_ff_projects['slug'].isin(none_feature_flag_repos)]
df_ff_projects = df_ff_projects[~df_ff_projects['slug'].isin(clones)]
df_ff_projects = df_ff_projects[~df_ff_projects['slug'].isin(dont_know)]
df_ff_projects = df_ff_projects[df_ff_projects['percentage_regex'] > 30]
df_ff_projects = df_ff_projects[df_ff_projects['count_all'] > 25]
df_ff_projects.sort_values(by=['percentage_regex'], inplace=True, ascending=False)
df_ff_projects.head(20)
###Output
C:\Anaconda3\envs\feature flags\lib\site-packages\pandas\core\strings.py:1843: UserWarning: This pattern has match groups. To actually get the groups, use str.extract.
return func(self, *args, **kwargs)
###Markdown
Select the project with the highest probability
###Code
next_repo = df_ff_projects['slug'].values[0]
plot_query_distribution([next_repo], next_repo)
df_ff_projects.head(1)
###Output
_____no_output_____
###Markdown
Find the feature flagging file of the selected project
- files that appear often
- files that have very small changes (i.e., adding a single line)
###Code
changes = df[df['slug'] == next_repo]['changes'].values
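# Assumed layout of a 'changes' entry (illustrative, inferred from the parsing below):
# "((src/flags.py,12,3),(README.md,1,1))", where the last two fields per file are the
# numbers of added and deleted lines.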
file_dict = {}
for change in changes:
if len(change) == 2:
continue
split = change[2:-2].split('),(')
for file_change in split:
file_split = file_change.split(',')
file = file_split[0].strip()
if file not in file_dict:
file_dict[file] = []
additions = file_split[-2].strip()
deletions = file_split[-1].strip()
file_dict[file].append(int(additions) + int(deletions))
dict_data = {}
for f in file_dict:
dict_data[f] = [statistics.median(file_dict[f]), statistics.mean(file_dict[f]), len(file_dict[f])]
df_changes = pd.DataFrame(dict_data).T
df_changes.columns = ['median', 'mean', 'count']
df_changes[['median', 'count']].plot.scatter(x='median', y='count')
df_changes = df_changes[(df_changes['median'] < 10)&(df_changes['count'] > 1)]
df_changes.sort_values(by=['count'], inplace=True, ascending=False)
df_changes
gh = Github()
repo = gh.get_repo(next_repo)
master_name = repo.raw_data['default_branch']
###Output
_____no_output_____
###Markdown
The file with the most small changes
###Code
print('https://github.com/%s' % next_repo)
print('https://github.com/%s/tree/%s/%s' % (next_repo, master_name, df_changes.index[0]))
###Output
https://github.com/fecgov/fec-cms
https://github.com/fecgov/fec-cms/tree/develop/fec/fec/settings/base.py
###Markdown
The files ordered by their number of small changes
###Code
for file in df_changes.index:
print('https://github.com/%s/tree/%s/%s' % (next_repo,master_name,file))
###Output
https://github.com/fecgov/fec-cms/tree/develop/fec/fec/settings/base.py
https://github.com/fecgov/fec-cms/tree/develop/fec/fec/templates/base.html
https://github.com/fecgov/fec-cms/tree/develop/fec/legal/templates/legal/home.html
https://github.com/fecgov/fec-cms/tree/develop/fec/home/views.py
https://github.com/fecgov/fec-cms/tree/develop/fec/home/templates/home/latest_updates.html
https://github.com/fecgov/fec-cms/tree/develop/fec/data/templates/landing.jinja
https://github.com/fecgov/fec-cms/tree/develop/fec/legal/urls.py
https://github.com/fecgov/fec-cms/tree/develop/fec/data/api_caller.py
https://github.com/fecgov/fec-cms/tree/develop/manifest_prod.yml
https://github.com/fecgov/fec-cms/tree/develop/fec/home/templates/home/registration-and-reporting/landing_page.html
https://github.com/fecgov/fec-cms/tree/develop/fec/data/templates/partials/advanced/raising.jinja
https://github.com/fecgov/fec-cms/tree/develop/fec/data/templates/partials/advanced/spending.jinja
https://github.com/fecgov/fec-cms/tree/develop/fec/fec/static/js/pages/data-landing.js
https://github.com/fecgov/fec-cms/tree/develop/fec/data/templates/partials/browse-data/raising.jinja
https://github.com/fecgov/fec-cms/tree/develop/fec/data/templates/partials/browse-data/spending.jinja
###Markdown
Create a visualization of classified and potential projects (Figure 1)
###Code
df_filtered = df[df['slug'].isin(df['slug'].value_counts()[df['slug'].value_counts()>=10].index)]
df_filtered['message'].fillna('', inplace=True)
df_feature_flag_regexed = df_filtered[df_filtered['message'].str.contains(r'((feature.{0,50}flag)|(feature.{0,50}toggle)|(feature.{0,50}flipper))')]
#df_feature_flag_regexed = df_filtered[df_filtered['message'].str.contains(r'((feature flag)|(feature toggle)|(feature flipper))')]
series_counts_feature_flag_regexed = df_feature_flag_regexed['slug'].value_counts()
series_counts = df_filtered['slug'].value_counts()
df_counts_feature_flag_regexed = pd.DataFrame(list(series_counts_feature_flag_regexed.to_dict().items()), columns=['slug', 'regex_count'])
df_counts = pd.DataFrame(list(series_counts.to_dict().items()), columns=['slug', 'count_all'])
merged = pd.merge(df_counts_feature_flag_regexed,df_counts,on="slug")
merged = pd.DataFrame(zip(merged['slug'],(100/merged['count_all']*merged['regex_count'])), columns=['slug', 'percentage_regex'])
merged = pd.merge(merged, df_counts, on='slug')
merged = pd.merge(merged, df_counts_feature_flag_regexed, on='slug')
df_ff_projects = merged
plt.rcParams.update({'font.size': 20})
fig, ax = plt.subplots(figsize=(10,6))
plt.xscale('log')
ax.set_xlim(10,10000)
df_rest = df_ff_projects[~df_ff_projects['slug'].isin(none_feature_flag_repos)]
df_rest = df_rest[~df_rest['slug'].isin(feature_flag_repos)]
df_rest = df_rest[~df_rest['slug'].isin(clones)]
df_rest = df_rest[~df_rest['slug'].isin(dont_know)]
df_likely = df[df['slug'].isin(df_rest['slug'])]
df_likely = df_likely[df_likely['changes'].str.match(r'.*feature[\w\-_]*(flag|toggle).*')]
df_rest = df_rest[~df_rest['slug'].isin(df_likely['slug'])]
ax.scatter(df_rest['count_all'], df_rest['percentage_regex'], s=100,color='w',alpha=0.25,edgecolors='gray', label="Unclassified")
df_propable = df_ff_projects[df_ff_projects['slug'].isin(df_likely['slug'])]
print("Number of likely feature flagging projects: %d" % len(df_propable))
df_propable['slug'].to_csv('likely_projects.csv', index=False, header=['Slug'])
ax.scatter(df_propable['count_all'], df_propable['percentage_regex'], s=100,color='b',alpha=0.5,edgecolors='black', label="Likely")
df_feature_flag_repos = df_ff_projects[df_ff_projects['slug'].isin(feature_flag_repos)]
ax.scatter(df_feature_flag_repos['count_all'], df_feature_flag_repos['percentage_regex'], s=100,color='g',alpha=0.5,edgecolors='black', label="Confirmed", marker="P")
df_feature_flag_repos['slug'].to_csv('feature_flag_projects.csv', index=False, header=['Slug'])
df_dont_know = df_ff_projects[df_ff_projects['slug'].isin(dont_know)]
ax.scatter(df_dont_know['count_all'], df_dont_know['percentage_regex'], s=100, color='y',alpha=0.5,edgecolors='black', label="Unclear", marker='+')
df_none_feature_flag_repos = df_ff_projects[df_ff_projects['slug'].isin(none_feature_flag_repos)]
ax.scatter(df_none_feature_flag_repos['count_all'], df_none_feature_flag_repos['percentage_regex'], s=50,color='r',alpha=0.5,edgecolors='black', label="Denied", marker = 'x')
ax.legend()
ax.set_xlabel('number of commits')
ax.set_ylabel('percentage feature flagging')
fig.savefig('classified.pdf', format='pdf', bbox_inches="tight")
###Output
Number of likely feature flagging projects: 185
|
binder/bucket=prq49/a=all_stints_all_series_profiles+endeavor=16/genome_robustness.ipynb | ###Markdown
get data
###Code
s3_handle = boto3.resource(
's3',
region_name="us-east-2",
config=botocore.config.Config(
signature_version=botocore.UNSIGNED,
),
)
bucket_handle = s3_handle.Bucket('prq49')
series_profiles, = bucket_handle.objects.filter(
Prefix=f'endeavor=16/series-profiles/stage=8+what=elaborated/',
)
df = pd.read_csv(
f's3://prq49/{series_profiles.key}',
compression='xz',
)
dfdigest = '{:x}'.format( hash_pandas_object( df ).sum() )
dfdigest
for stint in df['Stint'].unique():
exec(f'df{stint} = df[ df["Stint"] == {stint} ]')
dfm10 = df[ df['Stint'] % 10 == 0 ]
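# The exec loop above creates one dataframe per stint (df0, df1, ...);
# dfm10 keeps only every 10th stint to thin the long series out for plotting.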
###Output
_____no_output_____
###Markdown
how does genome robustness change over time?
###Code
tp.tee(
sns.lineplot,
data=dfm10,
x='Stint',
y='Fraction Mutations that are Deleterious',
hue='Series',
legend=False,
teeplot_outattrs={
**{
'bucket' : ib.dub( df['Bucket'] ),
'endeavor' : ib.dub( df['Endeavor'].astype(int) ),
'transform' : 'identity',
'_dfdigest' : dfdigest,
},
**make_outattr_metadata(),
},
)
def swarmplot_boxplot(*args, **kwargs):
sns.swarmplot(
*args,
**kwargs,
edgecolor='w',
linewidth=0.5,
s=4,
)
sns.boxplot(
*args,
**kwargs,
)
tp.tee(
swarmplot_boxplot,
data=dfm10,
x='Stint',
y='Fraction Mutations that are Deleterious',
teeplot_outattrs={
**{
'bucket' : ib.dub( df['Bucket'] ),
'endeavor' : ib.dub( df['Endeavor'].astype(int) ),
'transform' : 'filter-Stint-mod10',
'_dfdigest' : dfdigest,
},
**make_outattr_metadata(),
},
)
tp.tee(
sns.barplot,
data=dfm10,
x='Stint',
y='Fraction Mutations that are Deleterious',
teeplot_outattrs={
**{
'bucket' : ib.dub( df['Bucket'] ),
'endeavor' : ib.dub( df['Endeavor'].astype(int) ),
'transform' : 'filter-Stint-mod10',
'_dfdigest' : dfdigest,
},
**make_outattr_metadata(),
},
)
###Output
_____no_output_____
###Markdown
how does fraction deleterious mutating mutants change over time?
###Code
tp.tee(
sns.lineplot,
data=dfm10,
x='Stint',
y='Fraction Mutating Mutations that are Deleterious',
hue='Series',
legend=False,
teeplot_outattrs={
**{
'bucket' : ib.dub( df['Bucket'] ),
'endeavor' : ib.dub( df['Endeavor'].astype(int) ),
'transform' : 'filter-Stint-mod10',
'_dfdigest' : dfdigest,
},
**make_outattr_metadata(),
},
)
def swarmplot_boxplot(*args, **kwargs):
sns.swarmplot(
*args,
**kwargs,
edgecolor='w',
linewidth=0.5,
s=4,
)
sns.boxplot(
*args,
**kwargs,
)
tp.tee(
swarmplot_boxplot,
data=dfm10,
x='Stint',
y='Fraction Mutating Mutations that are Deleterious',
teeplot_outattrs={
**{
'bucket' : ib.dub( df['Bucket'] ),
'endeavor' : ib.dub( df['Endeavor'].astype(int) ),
'transform' : 'filter-Stint-mod10',
'_dfdigest' : dfdigest,
},
**make_outattr_metadata(),
},
)
tp.tee(
sns.barplot,
data=dfm10,
x='Stint',
y='Fraction Mutating Mutations that are Deleterious',
teeplot_outattrs={
**{
'bucket' : ib.dub( df['Bucket'] ),
'endeavor' : ib.dub( df['Endeavor'].astype(int) ),
'transform' : 'filter-Stint-mod10',
'_dfdigest' : dfdigest,
},
**make_outattr_metadata(),
},
)
###Output
_____no_output_____
###Markdown
how does fraction advantageous mutating mutants change over time?
###Code
tp.tee(
sns.lineplot,
data=dfm10,
x='Stint',
y='Fraction Mutating Mutations that are Advantageous',
hue='Series',
legend=False,
teeplot_outattrs={
**{
'bucket' : ib.dub( df['Bucket'] ),
'endeavor' : ib.dub( df['Endeavor'].astype(int) ),
'transform' : 'filter-Stint-mod10',
'_dfdigest' : dfdigest,
},
**make_outattr_metadata(),
},
)
def swarmplot_boxplot(*args, **kwargs):
sns.swarmplot(
*args,
**kwargs,
edgecolor='w',
linewidth=0.5,
s=4,
)
sns.boxplot(
*args,
**kwargs,
)
tp.tee(
swarmplot_boxplot,
data=dfm10,
x='Stint',
y='Fraction Mutating Mutations that are Advantageous',
teeplot_outattrs={
**{
'bucket' : ib.dub( df['Bucket'] ),
'endeavor' : ib.dub( df['Endeavor'].astype(int) ),
'transform' : 'filter-Stint-mod10',
'_dfdigest' : dfdigest,
},
**make_outattr_metadata(),
},
)
tp.tee(
sns.barplot,
data=dfm10,
x='Stint',
y='Fraction Mutating Mutations that are Advantageous',
teeplot_outattrs={
**{
'bucket' : ib.dub( df['Bucket'] ),
'endeavor' : ib.dub( df['Endeavor'].astype(int) ),
'transform' : 'filter-Stint-mod10',
'_dfdigest' : dfdigest,
},
**make_outattr_metadata(),
},
)
###Output
_____no_output_____
###Markdown
how does median mutating mutant fitness change over time?
###Code
tp.tee(
sns.lineplot,
data=dfm10,
x='Stint',
y='Median Mutating Mutant Fitness Differential',
hue='Series',
legend=False,
teeplot_outattrs={
**{
'bucket' : ib.dub( df['Bucket'] ),
'endeavor' : ib.dub( df['Endeavor'].astype(int) ),
'transform' : 'filter-Stint-mod10',
'_dfdigest' : dfdigest,
},
**make_outattr_metadata(),
},
)
def swarmplot_boxplot(*args, **kwargs):
sns.swarmplot(
*args,
**kwargs,
edgecolor='w',
linewidth=0.5,
s=4,
)
sns.boxplot(
*args,
**kwargs,
)
tp.tee(
swarmplot_boxplot,
data=dfm10,
x='Stint',
y='Median Mutating Mutant Fitness Differential',
teeplot_outattrs={
**{
'bucket' : ib.dub( df['Bucket'] ),
'endeavor' : ib.dub( df['Endeavor'].astype(int) ),
'transform' : 'filter-Stint-mod10',
'_dfdigest' : dfdigest,
},
**make_outattr_metadata(),
},
)
tp.tee(
sns.barplot,
data=dfm10,
x='Stint',
y='Median Mutating Mutant Fitness Differential',
teeplot_outattrs={
**{
'bucket' : ib.dub( df['Bucket'] ),
'endeavor' : ib.dub( df['Endeavor'].astype(int) ),
'transform' : 'filter-Stint-mod10',
'_dfdigest' : dfdigest,
},
**make_outattr_metadata(),
},
)
###Output
_____no_output_____ |
MachineLearning-master/MNIST_TSNE.ipynb | ###Markdown
I like the t-SNE classifier, so let's implement that!
This notebook needs to be further fleshed out...
###Code
%matplotlib notebook
import numpy as np
np.random.seed(123)
from matplotlib import pyplot as plt
import matplotlib.patheffects as PathEffects
import os
from keras.datasets import mnist
import sklearn
print(f"scikit-learn version: {sklearn.__version__}")
from sklearn import metrics
from sklearn.manifold import TSNE
from sklearn.preprocessing import scale
from sklearn.metrics.pairwise import paired_distances
###Output
Using TensorFlow backend.
###Markdown
Load data
###Code
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print("Training data shape")
print(X_train.shape)
print("Testing data shape")
print(X_test.shape)
# scale data
X_train = X_train.reshape(X_train.shape[0], -1).astype('float32')
# X_train = sklearn.preprocessing.StandardScaler().fit_transform(X_train)
print(X_train.shape)
from sklearn import datasets
digits = datasets.load_digits(n_class=6)
# digits = datasets.load_digits()
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
X_train = X
# TSNE
# model = TSNE(random_state=42, verbose=2, n_components=2,
# learning_rate=500, perplexity=20, n_iter=5000)
model = TSNE(random_state=42, verbose=2, n_components=2, early_exaggeration=50,
learning_rate=500, perplexity=20, n_iter=1000,
init='pca')
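# Rough intuition for the settings used here: perplexity acts like an effective
# neighbourhood size, early_exaggeration pushes clusters apart during the first
# iterations, and init='pca' tends to give more reproducible layouts than random init.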
reduced_data = model.fit_transform(X_train)
# reduced_data = model.fit_transform(X_train[:1000])
print(reduced_data.shape)
# plot
fig = plt.figure()
ax = plt.subplot(111, aspect="equal")
# ax.scatter(reduced_data[:,0], reduced_data[:,1])
for i in range(6):
# for i in range(10):
l_r = reduced_data[y == i]
ax.scatter(l_r[:,0], l_r[:,1], label=str(i))
x_text, y_text = np.median(l_r, axis=0)
txt = ax.text(x_text, y_text, str(i))
txt.set_path_effects([PathEffects.Stroke(linewidth=5, foreground='w'), PathEffects.Normal()])
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print("Training data shape")
print(X_train.shape)
print("Testing data shape")
print(X_test.shape)
# scale data
X_train = X_train.reshape(X_train.shape[0], -1).astype('float32')
# X_train = sklearn.preprocessing.StandardScaler().fit_transform(X_train)
print(X_train.shape)
from sklearn import datasets
digits = datasets.load_digits()
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
X_train = X
# TSNE
# model = TSNE(random_state=42, verbose=2, n_components=2,
# learning_rate=500, perplexity=20, n_iter=5000)
model = TSNE(random_state=42, verbose=2, n_components=2, early_exaggeration=50,
learning_rate=500, perplexity=20, n_iter=1000,
init='pca')
reduced_data = model.fit_transform(X_train)
# reduced_data = model.fit_transform(X_train[:1000])
print(reduced_data.shape)
# plot
fig = plt.figure()
ax = plt.subplot(111, aspect="equal")
# ax.scatter(reduced_data[:,0], reduced_data[:,1])
for i in range(10):
l_r = reduced_data[y == i]
ax.scatter(l_r[:,0], l_r[:,1], label=str(i))
x_text, y_text = np.median(l_r, axis=0)
txt = ax.text(x_text, y_text, str(i))
txt.set_path_effects([PathEffects.Stroke(linewidth=5, foreground='w'), PathEffects.Normal()])
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print("Training data shape")
print(X_train.shape)
print("Testing data shape")
print(X_test.shape)
# scale data
X_train = X_train.reshape(X_train.shape[0], -1).astype('float32')
# X_train = sklearn.preprocessing.StandardScaler().fit_transform(X_train)
print(X_train.shape)
# TSNE
# model = TSNE(random_state=42, verbose=2, n_components=2,
# learning_rate=500, perplexity=20, n_iter=5000)
model = TSNE(random_state=2050, verbose=2, n_components=2, early_exaggeration=10,
learning_rate=200, perplexity=20, n_iter=2000)
# reduced_data = model.fit_transform(X_train)
reduced_data = model.fit_transform(X_train[:1000])
print(reduced_data.shape)
# plot
fig = plt.figure()
ax = plt.subplot(111, aspect="equal")
# ax.scatter(reduced_data[:,0], reduced_data[:,1])
for i in range(10):
l_r = reduced_data[y_train[:1000] == i]
ax.scatter(l_r[:,0], l_r[:,1], label=str(i))
x_text, y_text = np.median(l_r, axis=0)
txt = ax.text(x_text, y_text, str(i))
txt.set_path_effects([PathEffects.Stroke(linewidth=5, foreground='w'), PathEffects.Normal()])
###Output
_____no_output_____ |
GPU_computing/CUDA_lab2_TODO.ipynb | ###Markdown
▶️ CUDA setup
###Code
!nvcc --version
!nvidia-smi
###Output
_____no_output_____
###Markdown
NVCC Plugin for Jupyter notebook
*Usage*:
* Load the extension: `%load_ext nvcc_plugin`
* Mark a cell to be treated as a CUDA cell: `%%cuda --name example.cu --compile false`

**NOTE**: The cell must contain either code or comments to be run successfully. It accepts 2 arguments. `-n | --name` is the name of either the CUDA source or header; the name parameter must have the extension `.cu` or `.h`. The second argument, `-c | --compile`, has a default value of false; it is a flag that specifies whether the cell will be compiled and run right away. It might be useful if you're playing in the main function.

We are ready to run CUDA C/C++ code right in your Notebook. For this we need to explicitly tell the interpreter that we want to use the extension by adding `%%cu` at the beginning of each cell with CUDA code.
###Code
!pip install git+https://github.com/andreinechaev/nvcc4jupyter.git
%load_ext nvcc_plugin
#@title Bash setup
%%writefile /root/.bashrc
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# don't put duplicate lines in the history. See bash(1) for more options
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoredups:ignorespace
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=10000
HISTFILESIZE=20000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
PS1='\[\033[01;34m\]\w\[\033[00m\]\$ '
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# some more ls aliases
alias ll='ls -lF'
alias la='ls -A'
alias l='ls -CF'
# path setup
export PATH="/usr/local/cuda/bin:$PATH"
###Output
_____no_output_____
###Markdown
▶️ VS Code on Colab
###Code
#@title Colab-ssh tunnel
#@markdown Execute this cell to open the ssh tunnel. Check [colab-ssh documentation](https://github.com/WassimBenzarti/colab-ssh) for more details.
# Install colab_ssh on google colab
!pip install colab_ssh --upgrade
from colab_ssh import launch_ssh_cloudflared, init_git_cloudflared
ssh_tunnel_password = "gpu" #@param {type: "string"}
launch_ssh_cloudflared(password=ssh_tunnel_password)
# Optional: if you want to clone a Github or Gitlab repository
repository_url="https://github.com/giulianogrossi/GPUcomputing" #@param {type: "string"}
init_git_cloudflared(repository_url)
###Output
_____no_output_____
###Markdown
✅ Image flip - CPU (multithreading)
###Code
%%writefile /content/src/ImageStuff.h
struct ImgProp {
int Hpixels;
int Vpixels;
unsigned char HeaderInfo[54];
unsigned long int Hbytes;
};
struct Pixel {
unsigned char R;
unsigned char G;
unsigned char B;
};
typedef unsigned char pel; // pixel element
pel** ReadBMP(char*); // Load a BMP image
void WriteBMP(pel**, char*); // Store a BMP image
extern struct ImgProp ip;
%%writefile /content/src/ImageStuff.c
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include "ImageStuff.h"
/*
* Load a BMP image
*/
pel** ReadBMP(char* filename) {
FILE* f = fopen(filename, "rb");
if (f == NULL) {
printf("\n\n%s NOT FOUND\n\n", filename);
exit(1);
}
pel HeaderInfo[54];
fread(HeaderInfo, sizeof(pel), 54, f); // read the 54-byte header
// extract image height and width from header
int width = *(int*) &HeaderInfo[18];
int height = *(int*) &HeaderInfo[22];
//copy header for re-use
for (unsigned int i = 0; i < 54; i++)
ip.HeaderInfo[i] = HeaderInfo[i];
ip.Vpixels = height;
ip.Hpixels = width;
int RowBytes = (width * 3 + 3) & (~3);
ip.Hbytes = RowBytes;
printf("\n Input BMP File name: %20s (%u x %u)", filename, ip.Hpixels, ip.Vpixels);
pel tmp;
pel **TheImage = (pel **) malloc(height * sizeof(pel*));
for (unsigned int i = 0; i < height; i++)
TheImage[i] = (pel *) malloc(RowBytes * sizeof(pel));
for (unsigned int i = 0; i < height; i++)
fread(TheImage[i], sizeof(unsigned char), RowBytes, f);
fclose(f);
return TheImage; // remember to free() it in caller!
}
/*
* Store a BMP image
*/
void WriteBMP(pel** img, char* filename) {
FILE* f = fopen(filename, "wb");
if (f == NULL) {
printf("\n\nFILE CREATION ERROR: %s\n\n", filename);
exit(1);
}
//write header
for (unsigned int x = 0; x < 54; x++)
fputc(ip.HeaderInfo[x], f);
//write data
for (unsigned int x = 0; x < ip.Vpixels; x++)
for (unsigned int y = 0; y < ip.Hbytes; y++) {
char temp = img[x][y];
fputc(temp, f);
}
printf("\n Output BMP File name: %20s (%u x %u)", filename, ip.Hpixels,
ip.Vpixels);
fclose(f);
}
%%writefile /content/src/Imflip.c
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include "ImageStuff.h"
struct ImgProp ip;
pel** FlipImageV(pel** img) {
struct Pixel pix; //temp swap pixel
int row, col;
//vertical flip
for (col = 0; col < ip.Hbytes; col += 3) {
row = 0;
while (row < ip.Vpixels / 2) {
pix.B = img[row][col];
pix.G = img[row][col + 1];
pix.R = img[row][col + 2];
img[row][col] = img[ip.Vpixels - (row + 1)][col];
img[row][col + 1] = img[ip.Vpixels - (row + 1)][col + 1];
img[row][col + 2] = img[ip.Vpixels - (row + 1)][col + 2];
img[ip.Vpixels - (row + 1)][col] = pix.B;
img[ip.Vpixels - (row + 1)][col + 1] = pix.G;
img[ip.Vpixels - (row + 1)][col + 2] = pix.R;
row++;
}
}
return img;
}
pel** FlipImageH(pel** img) {
struct Pixel pix; //temp swap pixel
int row, col;
//horizontal flip
for (row = 0; row < ip.Vpixels; row++) {
col = 0;
while (col < (ip.Hpixels * 3) / 2) {
pix.B = img[row][col];
pix.G = img[row][col + 1];
pix.R = img[row][col + 2];
img[row][col] = img[row][ip.Hpixels * 3 - (col + 3)];
img[row][col + 1] = img[row][ip.Hpixels * 3 - (col + 2)];
img[row][col + 2] = img[row][ip.Hpixels * 3 - (col + 1)];
img[row][ip.Hpixels * 3 - (col + 3)] = pix.B;
img[row][ip.Hpixels * 3 - (col + 2)] = pix.G;
img[row][ip.Hpixels * 3 - (col + 1)] = pix.R;
col += 3;
}
}
return img;
}
int main(int argc, char** argv) {
if (argc != 4) {
printf("\n\nUsage: imflip [input] [output] [V | H]");
printf("\n\nExample: imflip square.bmp square_h.bmp h\n\n");
return 0;
}
pel** data = ReadBMP(argv[1]);
double timer;
unsigned int a;
clock_t start, stop;
start = clock();
switch (argv[3][0]) {
case 'v':
case 'V':
data = FlipImageV(data);
break;
case 'h':
case 'H':
data = FlipImageH(data);
break;
default:
printf("\nINVALID OPTION\n");
return 0;
}
stop = clock();
timer = ((double)(stop-start))/(double)CLOCKS_PER_SEC;
// merge with header and write to file
WriteBMP(data, argv[2]);
// free() the allocated memory for the image
for (int i = 0; i < ip.Vpixels; i++)
free(data[i]);
free(data);
printf("\n\nTotal execution time: %9.4f sec", timer);
printf(" (%7.3f ns per pixel)\n", 1000000 * timer / (double) (ip.Hpixels * ip.Vpixels));
return 0;
}
!gcc src/ImageStuff.c src/Imflip.c -o imflip
!./imflip /content/dog.bmp dogV.bmp V
!./imflip /content/dog.bmp dogH.bmp H
###Output
_____no_output_____
###Markdown
Python libraries for reading/writing image files and displaying them: [OpenCV](https://docs.opencv.org/master/index.html) and [matplotlib](https://matplotlib.org/). Images are represented as multidimensional arrays from [NumPy](https://numpy.org/), the fundamental library for scientific computing.
###Code
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
# reads as a NumPy array: row (height) x column (width) x color (3)
dog = cv.imread('/content/dog.bmp')
print('Image size: ', dog.shape)
# BGR is converted to RGB
dog = cv.cvtColor(dog, cv.COLOR_BGR2RGB)
dogV = cv.imread('dogV.bmp')
dogV = cv.cvtColor(dogV, cv.COLOR_BGR2RGB)
dogH = cv.imread('dogH.bmp')
dogH = cv.cvtColor(dogH, cv.COLOR_BGR2RGB)
plt.imshow(dog)
plt.show()
plt.imshow(dogV)
plt.show()
plt.imshow(dogH)
plt.show()
%%writefile /content/src/ImflipPth.c
#include <pthread.h>
#include <stdint.h>
#include <ctype.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include "ImageStuff.h"
#define MAXTHREADS 128
int NumThreads; // Total number of threads working in parallel
int ThParam[MAXTHREADS]; // Thread parameters ...
pthread_t ThHandle[MAXTHREADS]; // Thread handles
pthread_attr_t ThAttr; // Pthread attrributes
void (*FlipFunc)(pel** img); // Function pointer to flip the image
void* (*MTFlipFunc)(void *arg); // Function pointer to flip the image, multi-threaded version
pel** TheImage; // This is the main image
struct ImgProp ip;
// serial version
void FlipImageV(pel** img) {
struct Pixel pix; //temp swap pixel
int row, col;
//vertical flip
for (col = 0; col < ip.Hbytes; col += 3) {
row = 0;
while (row < ip.Vpixels / 2) {
pix.B = img[row][col];
pix.G = img[row][col + 1];
pix.R = img[row][col + 2];
img[row][col] = img[ip.Vpixels - (row + 1)][col];
img[row][col + 1] = img[ip.Vpixels - (row + 1)][col + 1];
img[row][col + 2] = img[ip.Vpixels - (row + 1)][col + 2];
img[ip.Vpixels - (row + 1)][col] = pix.B;
img[ip.Vpixels - (row + 1)][col + 1] = pix.G;
img[ip.Vpixels - (row + 1)][col + 2] = pix.R;
row++;
}
}
}
void FlipImageH(pel** img) {
// TODO
}
void *MTFlipV(void* tid) {
struct Pixel pix; //temp swap pixel
int row, col;
long ts = *((int *) tid); // My thread ID is stored here
ts *= ip.Hbytes / NumThreads; // start index
long te = ts + ip.Hbytes / NumThreads - 1; // end index
for (col = ts; col <= te; col += 3) {
row = 0;
while (row < ip.Vpixels / 2) {
pix.B = TheImage[row][col];
pix.G = TheImage[row][col + 1];
pix.R = TheImage[row][col + 2];
TheImage[row][col] = TheImage[ip.Vpixels - (row + 1)][col];
TheImage[row][col + 1] = TheImage[ip.Vpixels - (row + 1)][col + 1];
TheImage[row][col + 2] = TheImage[ip.Vpixels - (row + 1)][col + 2];
TheImage[ip.Vpixels - (row + 1)][col] = pix.B;
TheImage[ip.Vpixels - (row + 1)][col + 1] = pix.G;
TheImage[ip.Vpixels - (row + 1)][col + 2] = pix.R;
row++;
}
}
pthread_exit(0);
}
// multi-threaded version
void *MTFlipH(void* tid) {
struct Pixel pix; //temp swap pixel
int row, col;
long ts = *((int *) tid); // My thread ID is stored here
ts *= ip.Vpixels / NumThreads; // start index
long te = ts + ip.Vpixels / NumThreads - 1; // end index
for (row = ts; row <= te; row++) {
col = 0;
while (col < ip.Hpixels * 3 / 2) {
pix.B = TheImage[row][col];
pix.G = TheImage[row][col + 1];
pix.R = TheImage[row][col + 2];
TheImage[row][col] = TheImage[row][ip.Hpixels * 3 - (col + 3)];
TheImage[row][col + 1] = TheImage[row][ip.Hpixels * 3 - (col + 2)];
TheImage[row][col + 2] = TheImage[row][ip.Hpixels * 3 - (col + 1)];
TheImage[row][ip.Hpixels * 3 - (col + 3)] = pix.B;
TheImage[row][ip.Hpixels * 3 - (col + 2)] = pix.G;
TheImage[row][ip.Hpixels * 3 - (col + 1)] = pix.R;
col += 3;
}
}
pthread_exit(NULL);
}
int main(int argc, char** argv) {
char Flip;
int a, i, ThErr;
struct timeval t;
double StartTime, EndTime;
double TimeElapsed;
switch (argc) {
case 3:
NumThreads = 1;
Flip = 'V';
break;
case 4:
NumThreads = 1;
Flip = toupper(argv[3][0]);
break;
case 5:
NumThreads = atoi(argv[4]);
Flip = toupper(argv[3][0]);
break;
default:
printf("\n\nUsage: imflipP input output [v/h] [thread count]");
printf("\n\nExample: imflipP infilename.bmp outname.bmp h 8\n\n");
return 0;
}
if (NumThreads != 1) {
printf("\nExecuting the multi-threaded version with %d threads ...\n",NumThreads);
MTFlipFunc = (Flip == 'V') ? MTFlipV : MTFlipH;
} else {
printf("\nExecuting the serial version ...\n");
FlipFunc = (Flip == 'V') ? FlipImageV : FlipImageH;
}
// load image
TheImage = ReadBMP(argv[1]);
gettimeofday(&t, NULL);
StartTime = (double) t.tv_sec * 1000000.0 + ((double) t.tv_usec);
if (NumThreads > 1) {
pthread_attr_init(&ThAttr);
pthread_attr_setdetachstate(&ThAttr, PTHREAD_CREATE_JOINABLE);
for (i = 0; i < NumThreads; i++) {
ThParam[i] = i;
ThErr = pthread_create(&ThHandle[i], &ThAttr, MTFlipFunc, (void *) &ThParam[i]);
if (ThErr != 0) {
printf("\nThread Creation Error %d. Exiting abruptly... \n", ThErr);
exit(EXIT_FAILURE);
}
}
pthread_attr_destroy(&ThAttr);
for (i = 0; i < NumThreads; i++) {
pthread_join(ThHandle[i], NULL);
}
} else
(*FlipFunc)(TheImage);
gettimeofday(&t, NULL);
EndTime = (double) t.tv_sec * 1000000.0 + ((double) t.tv_usec);
TimeElapsed = (EndTime - StartTime) / 1000000.00;
//merge with header and write to file
WriteBMP(TheImage, argv[2]);
// free() the allocated memory for the image
for (i = 0; i < ip.Vpixels; i++) {
free(TheImage[i]);
}
free(TheImage);
printf("\n\nTotal execution time: %9.4f sec (%s flip)", TimeElapsed,
Flip == 'V' ? "Vertical" : "Horizontal");
printf(" (%6.3f ns/pixel)\n",
1000000 * TimeElapsed / (double) (ip.Hpixels * ip.Vpixels));
return (EXIT_SUCCESS);
}
!gcc -o imflip src/ImageStuff.c src/ImflipPth.c -pthread
!./imflip /content/julia_jet.bmp julia_jetV.bmp V 1
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
# reads as a NumPy array: row (height) x column (width) x color (3)
julia_jet = cv.imread('/content/drive/MyDrive/images/julia_jet.bmp')
print('Image size: ', julia_jet.shape)
# BGR is converted to RGB
julia_jet = cv.cvtColor(julia_jet, cv.COLOR_BGR2RGB)
julia_jetV = cv.imread('julia_jetV.bmp')
julia_jetV = cv.cvtColor(julia_jetV, cv.COLOR_BGR2RGB)
plt.imshow(julia_jet)
plt.show()
plt.imshow(julia_jetV)
plt.show()
###Output
_____no_output_____
###Markdown
🔴 TODO Find the number of pthreads that gives the best performance in terms of elapsed time (one possible timing sketch is shown below).```best pthread = ``` ✅ Blocks and grids
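Referring to the TODO above: one way to search for the best thread count is to time the compiled binary for several values from Python. This is only a rough sketch, assuming the `imflip` executable built above and `/content/julia_jet.bmp` are available; wall-clock timing includes file I/O, so the numbers are indicative only.
```python
# Hypothetical timing helper for the TODO above (not part of the original lab).
import subprocess, time

timings = {}
for n_threads in [1, 2, 4, 8, 16, 32]:
    start = time.perf_counter()
    subprocess.run(["./imflip", "/content/julia_jet.bmp", "out.bmp", "V", str(n_threads)],
                   check=True, capture_output=True)
    timings[n_threads] = time.perf_counter() - start

best = min(timings, key=timings.get)
print(f"best pthread count = {best} ({timings[best]:.3f} s wall time)")
```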
###Code
###Output
_____no_output_____
###Markdown
**Grid 1D**: print DIMs and IDs of grid, block and thread
###Code
%%cu
// %%writefile /content/grid.cu
#include <stdio.h>
__global__ void checkIndex(void) {
printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) "
"blockDim:(%d, %d, %d) gridDim:(%d, %d, %d)\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z,
gridDim.x,gridDim.y,gridDim.z);
}
int main(int argc, char **argv) {
// definisce grid e struttura dei blocchi
dim3 block(4);
dim3 grid(3);
// controlla dim. dal lato host
printf("CHECK lato host:\n");
printf("grid.x = %d\t grid.y = %d\t grid.z = %d\n", grid.x, grid.y, grid.z);
printf("block.x = %d\t block.y = %d\t block.z %d\n\n", block.x, block.y, block.z);
// controlla dim. dal lato device
printf("CHECK lato device:\n");
checkIndex<<<grid, block>>>();
// reset device
cudaDeviceReset();
return(0);
}
!nvcc -arch=sm_37 /content/grid.cu -o /content/grid
!./grid
###Output
_____no_output_____
###Markdown
🔴 TODO Define a kernel with a 2D block and a 2D grid and print only the threads whose sum of IDs (`threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y`) equals a number in the Fibonacci sequence ([Fibonacci-wikipedia](https://it.wikipedia.org/wiki/Successione_di_Fibonacci))$\begin{align}F_0 &= 0,\\F_1 &= 1,\\F_{n}&=F_{{n-1}}+F_{{n-2}},\quad \text{(for every $n>1$)}\end{align}$
###Code
%%cu
#include <stdio.h>
/*
* Show DIMs & IDs for grid, block and thread
*/
__global__ void checkIndex(void) {
int n = threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y;
int isfib = 0;
int a = 0;
int b = 1;
while(a <= n){
if (n == a){
isfib = 1;
}
int c = a+b;
a = b;
b = c;
}
if(isfib==1){
printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) "
"blockDim:(%d, %d, %d) gridDim:(%d, %d, %d), "
"the sum is %d\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z,
gridDim.x,gridDim.y,gridDim.z,
n);
}
}
int main(int argc, char **argv) {
// grid and block structure
dim3 block(10,10);
dim3 grid(1,1);
// check for host
printf("CHECK for host:\n");
printf("grid.x = %d\t grid.y = %d\t grid.z = %d\n", grid.x, grid.y, grid.z);
printf("block.x = %d\t block.y = %d\t block.z %d\n\n", block.x, block.y, block.z);
// check for device
printf("CHECK for device:\n");
checkIndex<<<grid, block>>>();
// reset device
cudaDeviceReset();
return (0);
}
###Output
_____no_output_____
###Markdown
✅ Image flip - GPU
###Code
%%writefile /content/src/bmpUtil.h
#ifndef _BPMUTIL_H
#define _BPMUTIL_H
struct imgBMP {
int width;
int height;
unsigned char headInfo[54];
unsigned long int rowByte;
} img;
#define WIDTHB img.rowByte
#define WIDTH img.width
#define HEIGHT img.height
#define IMAGESIZE (WIDTHB*HEIGHT)
struct pixel {
unsigned char R;
unsigned char G;
unsigned char B;
};
typedef unsigned long ulong;
typedef unsigned int uint;
typedef unsigned char pel; // pixel element
pel *ReadBMPlin(char*); // Load a BMP image
void WriteBMPlin(pel *, char*); // Store a BMP image
#endif
%%writefile /content/src/common.h
#include <sys/time.h>
#ifndef _COMMON_H
#define _COMMON_H
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
} \
}
inline double seconds() {
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp, &tzp);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
inline void device_name() {
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
}
#endif
###Output
_____no_output_____
###Markdown
🔴 TODO
###Code
%%writefile /content/src/ImgFlipCUDA.cu
#include <stdio.h>
#include <stdlib.h>
#include "bmpUtil.h"
#include "common.h"
/*
* Kernel 1D that flips the given image vertically
* each thread only flips a single pixel (R,G,B)
*/
__global__ void VflipGPU(pel *imgDst, const pel *imgSrc, const uint w, const uint h) {
	int row_block = (w + blockDim.x - 1) / blockDim.x; // number of blocks per row
	int thread_in_block = blockDim.x; // number of threads in each block
	// compute the x coordinate of the pixel handled by this thread
int x = (blockIdx.x % row_block) * thread_in_block + threadIdx.x;
if(x < w){
int y = (blockIdx.x / row_block);
int true_index = y * w * 3 + x*3;
int altro_y = h - 1 - y;
int altro_true_index = altro_y * 3 * w + x*3;
imgDst[true_index] = imgSrc[altro_true_index];
imgDst[true_index+1] = imgSrc[altro_true_index+1];
imgDst[true_index+2] = imgSrc[altro_true_index+2];
imgDst[altro_true_index] = imgSrc[true_index];
imgDst[altro_true_index+1] = imgSrc[true_index+1];
imgDst[altro_true_index+2] = imgSrc[true_index+2];
}
}
/*
* Kernel that flips the given image horizontally
* each thread only flips a single pixel (R,G,B)
*/
__global__ void HflipGPU(pel *ImgDst, pel *ImgSrc, uint width) {
	int row_block = (width + blockDim.x - 1) / blockDim.x; // number of blocks per row
int thread_in_block = blockDim.x;
int x = (blockIdx.x % row_block) * thread_in_block + threadIdx.x;
if(x < width / 2){
int y = blockIdx.x / row_block;
int altro_x = width - 1 - x;
int true_index = y * width *3 + x * 3;
int altro_true_index = y * width * 3 + altro_x * 3;
ImgDst[true_index] = ImgSrc[altro_true_index];
ImgDst[true_index+1] = ImgSrc[altro_true_index+1];
ImgDst[true_index+2] = ImgSrc[altro_true_index+2];
ImgDst[altro_true_index] = ImgSrc[true_index];
ImgDst[altro_true_index+1] = ImgSrc[true_index+1];
ImgDst[altro_true_index+2] = ImgSrc[true_index+2];
}
}
/*
* Read a 24-bit/pixel BMP file into a 1D linear array.
* Allocate memory to store the 1D image and return its pointer
*/
pel *ReadBMPlin(char* fn) {
static pel *Img;
FILE* f = fopen(fn, "rb");
if (f == NULL) {
printf("\n\n%s NOT FOUND\n\n", fn);
exit(EXIT_FAILURE);
}
pel HeaderInfo[54];
size_t nByte = fread(HeaderInfo, sizeof(pel), 54, f); // read the 54-byte header
// extract image height and width from header
int width = *(int*) &HeaderInfo[18];
img.width = width;
int height = *(int*) &HeaderInfo[22];
img.height = height;
int RowBytes = (width * 3 + 3) & (~3); // row is multiple of 4 pixel
img.rowByte = RowBytes;
//save header for re-use
memcpy(img.headInfo, HeaderInfo, 54);
printf("\n Input File name: %5s (%d x %d) File Size=%lu", fn, img.width,
img.height, IMAGESIZE);
// allocate memory to store the main image (1 Dimensional array)
Img = (pel *) malloc(IMAGESIZE);
if (Img == NULL)
return Img; // Cannot allocate memory
// read the image from disk
size_t out = fread(Img, sizeof(pel), IMAGESIZE, f);
fclose(f);
return Img;
}
/*
* Write the 1D linear-memory stored image into file
*/
void WriteBMPlin(pel *Img, char* fn) {
FILE* f = fopen(fn, "wb");
if (f == NULL) {
printf("\n\nFILE CREATION ERROR: %s\n\n", fn);
exit(1);
}
//write header
fwrite(img.headInfo, sizeof(pel), 54, f);
//write data
fwrite(Img, sizeof(pel), IMAGESIZE, f);
printf("\nOutput File name: %5s (%u x %u) File Size=%lu", fn, img.width,
img.height, IMAGESIZE);
fclose(f);
}
/*
* MAIN
*/
int main(int argc, char **argv) {
char flip = 'V';
uint dimBlock = 127, dimGrid;
pel *imgSrc, *imgDst; // Where images are stored in CPU
pel *imgSrcGPU, *imgDstGPU; // Where images are stored in GPU
if (argc > 4) {
dimBlock = atoi(argv[4]);
flip = argv[3][0];
}
else if (argc > 3) {
flip = argv[3][0];
}
else if (argc < 3) {
printf("\n\nUsage: imflipGPU InputFilename OutputFilename [V/H] [dimBlock]");
exit(EXIT_FAILURE);
}
if ((flip != 'V') && (flip != 'H')) {
printf("Invalid flip option '%c'. Must be 'V','H'... \n",flip);
exit(EXIT_FAILURE);
}
// Create CPU memory to store the input and output images
imgSrc = ReadBMPlin(argv[1]); // Read the input image if memory can be allocated
if (imgSrc == NULL) {
printf("Cannot allocate memory for the input image...\n");
exit(EXIT_FAILURE);
}
imgDst = (pel *) malloc(IMAGESIZE);
if (imgDst == NULL) {
free(imgSrc);
printf("Cannot allocate memory for the input image...\n");
exit(EXIT_FAILURE);
}
// Allocate GPU buffer for the input and output images
CHECK(cudaMalloc((void**) &imgSrcGPU, IMAGESIZE));
CHECK(cudaMalloc((void**) &imgDstGPU, IMAGESIZE));
// Copy input vectors from host memory to GPU buffers.
CHECK(cudaMemcpy(imgSrcGPU, imgSrc, IMAGESIZE, cudaMemcpyHostToDevice));
// invoke kernels (define grid and block sizes)
int rowBlock = (WIDTH + dimBlock - 1) / dimBlock;
	dimGrid = HEIGHT * rowBlock; // launch one row of blocks per image row
double start = seconds(); // start time
switch (flip) {
case 'H':
HflipGPU<<<dimGrid, dimBlock>>>(imgDstGPU, imgSrcGPU, WIDTH);
break;
case 'V':
VflipGPU<<<dimGrid, dimBlock>>>(imgDstGPU, imgSrcGPU, WIDTH, HEIGHT);
break;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CHECK(cudaDeviceSynchronize());
double stop = seconds(); // elapsed time
// Copy output (results) from GPU buffer to host (CPU) memory.
CHECK(cudaMemcpy(imgDst, imgDstGPU, IMAGESIZE, cudaMemcpyDeviceToHost));
// Write the flipped image back to disk
WriteBMPlin(imgDst, argv[2]);
printf("\nKernel elapsed time %f sec \n\n", stop - start);
// Deallocate CPU, GPU memory and destroy events.
cudaFree(imgSrcGPU);
cudaFree(imgDstGPU);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools spel as Parallel Nsight and Visual Profiler to show complete traces.
cudaError_t cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
free(imgSrc);
free(imgDst);
exit(EXIT_FAILURE);
}
free(imgSrc);
free(imgDst);
return (EXIT_SUCCESS);
}
!nvcc src/ImgFlipCUDA.cu -arch=sm_37 -o imflipGPU
!./imflipGPU /content/dog.bmp dog_v.bmp V
###Output
_____no_output_____ |
_notebooks/2021-08-18-Hamming_Codes.ipynb | ###Markdown
Hamming Codes> Notes and code after watching 3Blue1Brown's Hamming code videos. toc: true - branch: master- badges: true- comments: true- author: Eyad Mohamed Ali- categories: [python, personal projects] Youtube & CodesToday, while mindlessly looking at my Youtube home page, I stumbled upon a couple of videos by Grant Sanderson on his channel [3Blue1Brown](https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw). Sanderson introduces the concept in a way that lets the viewer rediscover Hamming's methodology. I highly recommend watching the videos yourself to understand the contents of this post.> youtube: https://youtu.be/X8jsijhllIA After watching both videos, I felt inspired to try out the code that Sanderson displayed in the [second video](https://youtu.be/b3NxrZOu_CE). I created a simple Python code that first produces a random message comprised of 1s and 0s. The code then takes the generated message and changes the appropriate parity bits. The resulting message will then become a no-error version of the original. Below is the final, commented-version of the code:
###Code
#collapse
from functools import reduce
# When running this locally, you will need numpy installed https://numpy.org/install/
import numpy as np
# Printing method that prints out the message in a square
def print_block(message):
row_size = len(message) ** 0.5
i = 0
for bit in message:
print(bit, end=" ")
if i == row_size - 1:
print()
i = (i + 1) % row_size
def apply_parity(message):
# Find which parity bits need to be changed
ans = reduce(lambda x,y: x^y, [pos for pos, bit in list(enumerate(message)) if bit])
# Removing the "0b" that python generates
bits = bin(ans)[2:]
print("Resulting binary from the XOR operation: ", bits)
i = 0
# Iterate through the binary in reverse to access bit 1, 2, 4, and so on...
for bit in bits[::-1]:
# If this bit was marked to be swapped by the XOR function, we change it
if int(bit):
print("Changing bit #", 2**i, " from ", message[2**i], " to ", int(not message[2**i]))
message[2**i] = int(not message[2**i])
# Using i to keep track of the parity bit we're at
i += 1
# Simple formula to use the 0th element to keep an even number of 1s
message[0] = (sum(message) - message[0]) % 2
return message
if __name__ == "__main__":
# For debugging purposes (Makes sure that the same numbers appear each time)
np.random.seed(42)
while True:
try:
size = int(input(
"""Pick size:
1. 4 bits
2. 16 bits
3. 64 bits
4. 256 bits
Choice: """))
# Makes sure option is within limits
if 1 <= size <= 4:
break
else:
print("Please limit yourself to options 1 to 4")
except Exception:
# In case non integer is entered into the terminal
print("Please try again!")
# Get a random list of 1s and 0s
message = np.random.randint(0, 2, 2**(size*2))
print("Initial message:")
print_block(message)
# Applying the right parity bits
message = apply_parity(message)
print("After parity applied:")
print_block(message)
###Output
Pick size:
1. 4 bits
2. 16 bits
3. 64 bits
4. 256 bits
Choice: 2
Initial message:
0 1 0 0
0 1 0 0
0 1 0 0
0 0 1 0
Resulting binary from the XOR operation: 11
Changing bit # 1 from 1 to 0
Changing bit # 2 from 0 to 1
After parity applied:
0 0 1 0
0 1 0 0
0 1 0 0
0 0 1 0
###Markdown
Next, I decided to add a function that would create an error in the message and another function to rectify that error. The former would randomly select a bit and change its value. The latter would use the same XOR method in "apply_parity" to identify the bit causing the error, then fix it. Below are the two additional functions:
###Code
# Randomly change one bit in the message
def rand_error(message):
error = np.random.randint(0, len(message))
print("Changing bit #", error, " from ", message[error], " to ", int( not message[error]))
message[error] = not message[error]
return message
# Fixing the faulty bit
def fix_error(message):
# Find the faulty bit
error = reduce(lambda x,y: x^y, [pos for pos, bit in list(enumerate(message)) if bit])
print("Changing bit #", error, " from ", message[error], " to ", int( not message[error]))
# Change the bit
message[error] = not message[error]
return message
if __name__ == "__main__":
# For debugging purposes (Makes sure that the same numbers appear each time)
np.random.seed(42)
while True:
try:
size = int(input(
"""Pick size:
1. 4 bits
2. 16 bits
3. 64 bits
4. 256 bits
Choice: """))
# Makes sure option is within limits
if 1 <= size <= 4:
break
else:
print("Please limit yourself to options 1 to 4")
except Exception:
# In case non integer is entered into the terminal
print("Please try again!")
# Get a random list of 1s and 0s
message = np.random.randint(0, 2, 2**(size*2))
# Applying the right parity bits
message = apply_parity(message)
print("Before Error:")
print_block(message)
# Adding an error into the message
print("Adding an error:")
message = rand_error(message)
print_block(message)
print("Fixing the error:")
message = fix_error(message)
print_block(message)
###Output
Pick size:
1. 4 bits
2. 16 bits
3. 64 bits
4. 256 bits
Choice: 2
Resulting binary from the XOR operation: 11
Changing bit # 1 from 1 to 0
Changing bit # 2 from 0 to 1
Before Error:
0 0 1 0
0 1 0 0
0 1 0 0
0 0 1 0
Adding an error:
Changing bit # 3 from 0 to 1
0 0 1 1
0 1 0 0
0 1 0 0
0 0 1 0
Fixing the error:
Changing bit # 3 from 1 to 0
0 0 1 0
0 1 0 0
0 1 0 0
0 0 1 0
|
notebooks/user_item_recommendations/2-filter-movies-for-training.ipynb | ###Markdown
Filter movies for trainingAs seen in the exploratory-analysis notebook, there are ~18,000 movies in the original dataset with less than 100 ratings. This notebook will filter out these movies, resulting in a dataset with ~8500 movies.
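The same filter can be sketched directly in pandas. The snippet below is only an illustration of the idea (the notebook itself uses the custom `MovieFilter` class), and it assumes the `movies` and `ratings` frames loaded in the cells further down.
```python
# Minimal pandas-only sketch: keep movies that have at least 100 ratings.
counts = ratings.groupby('movieId')['rating'].count()
keep_ids = counts[counts >= 100].index
movies_kept = movies[movies['movieId'].isin(keep_ids)]
ratings_kept = ratings[ratings['movieId'].isin(keep_ids)]
```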
###Code
import numpy as np
import pandas as pd
import os
import sys
import pickle
import time
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
from importlib import reload
%matplotlib inline
from IPython.core.display import display, HTML, clear_output
display(HTML("<style>.container { width:80% !important; }</style>"))
pd.options.display.max_rows = 999
pd.options.display.max_columns = 999
pd.options.display.max_colwidth = 999
###Output
_____no_output_____
###Markdown
Import custom module/class
###Code
cwd = os.getcwd()
path = os.path.join(cwd, '..', 'src')
if not path in sys.path:
sys.path.append(path)
del cwd, path
from MovieRecommender import MovieFilter
###Output
_____no_output_____
###Markdown
Load data
###Code
cwd = os.getcwd()
movies = pd.read_csv(os.path.join(cwd, "..", "data", "movies.csv"))
movies.shape
movies.head()
ratings = pd.read_csv(os.path.join(cwd, "..", "data", "ratings.csv"))
ratings.head()
###Output
_____no_output_____
###Markdown
Filter movies
###Code
freq = ratings.groupby('movieId')['rating'].count()
freq.head()
mf = MovieFilter(movies)
mf.filter_rating_freq(freq, threshold=100)
movies.shape
red_movies = mf.movies
red_movies.shape
red_movies.head()
ratings.head()
ratings.shape
red_ratings_data = pd.merge(ratings, red_movies[['movieId']], on='movieId')
red_ratings_data.head()
red_ratings_data.shape
###Output
_____no_output_____
###Markdown
Persist filtered ratings data
###Code
cwd = os.getcwd()
red_ratings_data.to_csv(os.path.join(cwd, "..", "data", "ratings_filtered.csv"), index=False)
###Output
_____no_output_____ |
notebooks/integrate_and_fire.ipynb | ###Markdown
Leaky Integrate-and-Fire NeuronThe integrate-and-fire neuron was first envisioned by the French neuroscientist [Louis Lapicque](https://en.wikipedia.org/wiki/Louis_Lapicque) in 1907. Although the neuron theory had already been established some years before, the nature of the action potential was still unknown [[1]](References). Therefore, the integrate-and-fire model tries to reproduce the electrical activity of a neuron while disregarding the mechanisms underlying it. In this notebook, we will deal with a modification of this model: the leaky integrate-and-fire neuron. A neuron can be modeled as an RC circuit. The figure below shows the analogy between a neuron and an RC circuit. The main idea behind the model is the following: the cell membrane separates the external and internal environments of a cell. The internal environment –named the [cytosol](https://en.wikipedia.org/wiki/Cytosol)– is rich in negatively charged molecules; the external environment, in contrast, mostly contains positively charged molecules. Hence, the cell membrane that divides both environments functions as an insulator. If an external current $I$ is applied to the neuron, it will charge the membrane. This current can be understood as the entrance of positive ions into the cytosol. Whenever the current is dropped, the cell membrane will slowly return to its original charge. Therefore, the cell membrane acts like a capacitor. These properties allow the neuron to be represented as an RC circuit. The cell membrane can be described by a first-order differential equation (ODE) [[2]](References): $$\begin{equation}I(t) = \frac{V(t) - V_R}{R} + C \frac{d V}{dt},\end{equation}$$where $V(t)$ is the internal potential over time, $V_R$ the resting membrane potential, $I(t)$ the injected current over time, $R$ the resistance of the membrane and $C$ its capacitance. We will assume that the current is kept constant, $I$. Bearing this in mind, we can rearrange the equation:$$\begin{equation}\tau\frac{dV}{dt} = - (V(t) - V_R) + RI,\end{equation}$$where $\tau = RC$. This ODE can be solved by separation of variables with $V(t_0) = V_R$:$$\begin{equation}\int^{V(t)}_{V_R} \frac{dV}{-(V-V_R)+RI} = \int^{t}_{0} \frac{1}{\tau}dt \\\end{equation}$$$$\begin{equation}-\Big [\log |-(V-V_R)+RI| \Big ]^{V(t)}_{V_R} = \frac{t}{\tau} \\\end{equation}$$$$\begin{equation}\log{\frac{-(V(t)-V_R)+RI}{RI}} = -\frac{t}{\tau} \\\end{equation}$$$$\begin{equation}\boxed{V(t) = V_R + RI(1 - e^{-\frac{t}{\tau}})}\end{equation}$$ Finally, we have to introduce the firing. Whenever the potential of the cell reaches a given threshold $\theta$, a spike is produced and the potential of the cell decreases back to its resting value. Mathematically, we can describe this behavior as a piecewise rule:$$V(t) = \begin{cases}V_R, &\quad\text{if } V(t) > \theta \\V_R + RI(1 - e^{-\frac{t}{\tau}}), &\quad\text{otherwise}\end{cases}$$ Note that $t$ is also reset after a spike is produced.
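Before turning to the packaged model, the sketch below shows how the rule above could be coded directly: an explicit Euler step of the membrane equation plus a reset to $V_R$ at threshold. It is only an illustration of the equations; the `neural_models` module imported next has its own implementation, which may differ in detail.
```python
import numpy as np

def lif_sketch(VR=-70.0, R=100.0, C=0.3, theta=-55.0, I=0.3, t_max=100.0, dt=0.1):
    """Explicit-Euler sketch of tau*dV/dt = -(V - VR) + R*I with reset at threshold."""
    tau = R * C                      # membrane time constant
    t = np.arange(0.0, t_max, dt)
    V = np.empty_like(t)
    V[0] = VR
    for k in range(1, len(t)):
        V[k] = V[k-1] + dt * (-(V[k-1] - VR) + R * I) / tau
        if V[k] > theta:             # spike: reset the membrane potential
            V[k] = VR
    return t, V

# Example use: t, V = lif_sketch(); then plot V against t.
```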
###Code
# Add temporary path to code
import sys
sys.path.append("..")
# Import the module with the LeakyIntegrateAndFire model
from neural_models import LeakyIntegrateAndFire
# Create the model
model = LeakyIntegrateAndFire(VR=-70, R=100, C=0.3, theta=-55)
# Print the model parameters
print(model)
# Run the model
model.run(current=0.3)
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# Plot the results
plt.figure(figsize=(8, 5))
plt.plot(model.tvec, model.V, color='royalblue')
plt.plot(model.tvec, np.repeat(model.theta, len(model.V)),
color='lightcoral', linestyle='--')
plt.xlabel('time [t]', fontsize=12)
plt.ylabel('Voltage [V]', fontsize=12)
plt.grid(alpha=0.3)
plt.show()
###Output
_____no_output_____
###Markdown
The plot shows how the voltage of the cell changes as a function of time. The red dashed line corresponds to $\theta$. For $t = 0$, the neuron is at its resting potential. At $t>0$ the current is applied. The membrane potential increases until it reaches the threshold. Afterward, the neuron spikes and its potential decreases back to $V_R$ (according to the equation above). In addition, it is interesting to compute the period and spiking frequency for a given constant input current. Analytically, the period is the time at which $V(t) = \theta$:$$\theta = V_R + RI(1 - e^{-\frac{T}{\tau}}),$$where $T$ is the spiking period. By isolating $T$: $$T = - \tau \log{\Big |1 - \frac{\theta - V_R}{RI} \Big|}$$The frequency is, by definition: $$f = \frac{1}{T}$$
###Code
period = - model.tau * np.log(1 - (model.theta - model.VR) / (model.R * model.current))
print(period)
###Output
20.79441541679836
###Markdown
For the example above, the period $T \approx 20.79$ ms implies the neuron fires approximately every 21 ms. If we want to observe a given number of spikes $N$, we can calculate the simulation time needed as follows: $$t = NT = \frac{N}{f}$$ For example, if we want to observe $N = 10$ spikes, we have to run the simulation for approximately 208 ms:
###Code
# Run and plot the model
model.run(t=208, current=0.3)
# Plot the results
plt.figure(figsize=(8, 5))
plt.plot(model.tvec, model.V, color='royalblue')
plt.plot(model.tvec, np.repeat(model.theta, len(model.V)),
color='lightcoral', linestyle='--')
plt.xlabel('time [t]', fontsize=12)
plt.ylabel('Voltage [V]', fontsize=12)
plt.grid(alpha=0.3)
plt.show()
###Output
_____no_output_____ |
ImportantScripts/PythonNotebook/primeapp_1_replica_vm comparison.ipynb | ###Markdown
Node Utilization (CPU and memory)
###Code
# NOTE: `df_pods_node`, the `dataset_pod_*` frames and `merged` are assumed to be
# loaded before this notebook is run; only the missing plotting/IO imports are added here.
import matplotlib.pyplot as plt
from pandas import read_csv
plt.figure()
plt.plot(df_pods_node[0]['node_cpu_util'], label='node_cpu_util')
plt.plot(df_pods_node[0]['node_mem_util'], label='node_mem_util')
plt.legend()
plt.show()
plt.figure()
plt.plot(df_pods_node[0]['pod_cpu_usage'], label='pod_cpu_usage')
plt.plot(df_pods_node[0]['pod_mem_usage'], label='pod_mem_usage')
plt.legend()
plt.show()
df_pods_node[0].fillna(0)
df_pods_node[0].corr()
dftemp_cpu = df_pods_node[0][['requests','node_cores','node_mem', 'node_cpu_util', 'pod_cpu_usage','pod_cpu_limit','pod_cpu_request','pod_mem_limit','pod_mem_request', 'requests_duration_mean', 'requests_duration_percentile_95']]
dftemp_mem = df_pods_node[0][['requests', 'node_cores','node_mem', 'node_mem_util','pod_cpu_limit','pod_cpu_request','pod_mem_usage','pod_mem_limit','pod_mem_request', 'requests_duration_mean', 'requests_duration_percentile_95']]
plt.plot( dftemp_cpu['node_cpu_util'], color='blue', linewidth=2)
plt.plot( dftemp_cpu['pod_cpu_usage'], color='red', linewidth=2)
plt.plot( dftemp_cpu['requests'], color='green', linewidth=2)
plt.plot(dftemp_cpu['requests_duration_percentile_95'], color='blue', linewidth=2)
import seaborn as sb
###Output
_____no_output_____
###Markdown
Linear Regression
###Code
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale
# Use only one feature
df_X = dftemp_cpu[['requests']].values
df_Y = dftemp_cpu[['node_cpu_util']].values
from numpy import *
from scipy.interpolate import *
df_X = df_X.flatten()
df_Y = df_Y.flatten()
p1=polyfit(df_X, df_Y, 1)
p2=polyfit(df_X, df_Y, 2)
p3=polyfit(df_X, df_Y, 3)
plt.plot(df_X, df_Y,'o')
#plt.plot(df_X, polyval(p1,df_X), 'b-')
#plt.plot(df_X, polyval(p2,df_X), 'g-')
plt.plot(df_X, polyval(p3,df_X), 'y-')
p3
# Use only one feature
df_X = dftemp_cpu[['pod_cpu_usage', 'pod_cpu_limit']].values
df_Y = dftemp_cpu[['requests']].values
X_train, X_test, y_train, y_test = train_test_split(df_X, df_Y, test_size=0.33, random_state=42)
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.pipeline import Pipeline
# Create linear regression object
model = linear_model.LinearRegression()
#model = Pipeline([('poly', PolynomialFeatures(degree=2)),
# ('linear', LinearRegression(fit_intercept=False))])
#regr = linear_model.Ridge (alpha = .01)
#regr = linear_model.Lasso(alpha = 0.1)
#regr = linear_model.LassoLars(alpha=.1)
#regr = make_pipeline(PolynomialFeatures(2), Ridge())
# Train the model using the training sets
model.fit(X_train, y_train)
# Make predictions using the testing set
y_pred = model.predict(X_test)
# The coefficients
print('Coefficients: \n', model.coef_)
print('intercept: \n', model.intercept_)
# The mean squared error
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(y_test, y_pred))
#print ('Train score %.2f', regr.score(X_train, y_train) )
#print ('Test score %.2f', regr.score(X_test, y_test) )
#print ('Pred score %.2f', regr.score(X_test, y_pred) )
# Plot outputs
plt.scatter(X_test[:,0], y_test, color='black')
#plt.plot(X_test[:,0], y_pred, color='blue')
plt.plot(X_test[:,0],y_pred,'-r')
plt.show()
model.predict([[0.5, 1]])
#pd.DataFrame(list(zip(y_pred,y_test)), columns = ['predict', 'test'])
###Output
_____no_output_____
###Markdown
dataset_pod_hello_world
###Code
dataset_pod_hello_world.index = pd.to_datetime(dataset_pod_hello_world.index)
merged.index = pd.to_datetime(merged.index)
newmergedhello = dataset_pod_hello_world.reindex(merged.index, method='nearest')
finalDFhello = pd.merge(newmergedhello, merged, left_index=True, right_index=True)
finalDFhello.to_csv('final_hello.csv')
dfhello = read_csv('final_hello.csv',index_col=0)
dfhello = dfhello.fillna(0)
dfhello = dfhello.sort_values(by=['aggregate.rps.mean'])
dfhello = dfhello.reset_index()
dfhello = dfhello[['aggregate.rps.mean', 'cpu', 'aggregate.scenarioDuration.median']]
plt.plot(dfhello['aggregate.rps.mean'], dfhello['cpu'], color='blue', linewidth=3)
def linear(dft):
# Use only one feature
df_X = dft[['aggregate.rps.mean']].values
df_Y = dft[['cpu']].values
X_train, X_test, y_train, y_test = train_test_split(df_X, df_Y, test_size=0.33, random_state=42)
# Create linear regression object
regr = linear_model.LinearRegression(normalize=True)
#regr = linear_model.Ridge (alpha = .5)
#regr = linear_model.Lasso(alpha = 0.1)
#regr = linear_model.LassoLars(alpha=.1)
#regr = make_pipeline(PolynomialFeatures(3), Ridge())
# Train the model using the training sets
regr.fit(X_train, y_train)
# Make predictions using the testing set
y_pred = regr.predict(X_test)
# The coefficients
print('Coefficients: \n', regr.coef_)
print('intercept: \n', regr.intercept_)
# The mean squared error
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(y_test, y_pred))
print ('Train score %.2f', regr.score(X_train, y_train) )
print ('Test score %.2f', regr.score(X_test, y_test) )
print ('Pred score %.2f', regr.score(X_test, y_pred) )
# Plot outputs
plt.scatter(X_test, y_test, color='black')
plt.plot(X_test, y_pred, color='blue')
plt.show()
linear(dfhello)
dataset_pod_pdescp.index = pd.to_datetime(dataset_pod_pdescp.index)
merged.index = pd.to_datetime(merged.index)
newmergedpdescp = dataset_pod_pdescp.reindex(merged.index, method='nearest')
finalDFpdescp = pd.merge(newmergedpdescp, merged, left_index=True, right_index=True)
finalDFpdescp.to_csv('final_pdescp.csv')
dfpdescp = read_csv('final_pdescp.csv',index_col=0)
dfpdescp = dfpdescp.fillna(0)
dfpdescp = dfpdescp.sort_values(by=['aggregate.rps.mean'])
dfpdescp = dfpdescp.reset_index()
dfpdescp = dfpdescp[['aggregate.rps.mean', 'cpu']]
plt.plot(dfpdescp['aggregate.rps.mean'], dfpdescp['cpu'], color='blue', linewidth=3)
linear(dfpdescp)
dataset_pod_server.index = pd.to_datetime(dataset_pod_server.index)
merged.index = pd.to_datetime(merged.index)
newmergedserver = dataset_pod_server.reindex(merged.index, method='nearest')
finalDFserver = pd.merge(newmergedserver, merged, left_index=True, right_index=True)
finalDFserver.to_csv('final_server.csv')
dfpserver = read_csv('final_server.csv',index_col=0)
dfpserver = dfpserver.fillna(0)
dfpserver = dfpserver.sort_values(by=['aggregate.rps.mean'])
dfpserver = dfpserver.reset_index()
dfpserver = dfpserver[['aggregate.rps.mean', 'cpu']]
plt.plot(dfpserver['aggregate.rps.mean'], dfpserver['cpu'], color='blue', linewidth=3)
linear(dfpserver)
###Output
_____no_output_____ |
Assignment_10_Palencia.ipynb | ###Markdown
Linear Algebra for CHE Laboratory 10 : Linear Combination and Vector Spaces ObjectivesAt the end of this activity you will be able to:1. Be familiar with representing linear combinations in the 2-dimensional plane.2. Visualize spans using vector fields in Python.3. Perform vector fields operations using scientific programming.
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Linear Combination
###Code
vectX = np.array([2,5])
vectY = np.array([7,9])
###Output
_____no_output_____
###Markdown
$$X = \begin{bmatrix} 2\\5 \\\end{bmatrix} , Y = \begin{bmatrix} 7\\9 \\\end{bmatrix} $$ Span of single vectors $$X = c\cdot \begin{bmatrix} 2\\5 \\\end{bmatrix} $$
###Code
c = np.arange(-10,10,0.125)
plt.scatter(c*vectX[0],c*vectX[1])
plt.xlim(-10,10)
plt.ylim(-10,10)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
$$Y = c\cdot \begin{bmatrix} 7\\9 \\\end{bmatrix} $$
###Code
c = np.arange(-15,15,0.5)
plt.scatter(c*vectY[0],c*vectY[1])
plt.xlim(-20,20)
plt.ylim(-20,20)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
Span of a linear combination of vectors $$S = \begin{Bmatrix} c_1 \cdot\begin{bmatrix} 1\\0 \\\end{bmatrix}, c_2 \cdot \begin{bmatrix} 1\\-1 \\\end{bmatrix}\end{Bmatrix} $$
###Code
vectA = np.array([1,0])
vectB = np.array([1,-1])
R = np.arange(-10,10,1)
c1, c2 = np.meshgrid(R,R)
vectR = vectA + vectB
spanRx = c1*vectA[0] + c2*vectB[0]
spanRy = c1*vectA[1] + c2*vectB[1]
plt.scatter(R*vectA[0],R*vectA[1])
plt.scatter(R*vectB[0],R*vectB[1])
plt.scatter(spanRx,spanRy, s=5, alpha=0.75)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
vectP = np.array([2,1])
vectQ = np.array([4,3])
R = np.arange(-10,10,1)
c1, c2 = np.meshgrid(R,R)
vectR = vectP + vectQ
spanRx = c1*vectP[0] + c2*vectQ[0]
spanRy = c1*vectP[1] + c2*vectQ[1]
plt.scatter(R*vectA[0],R*vectA[1])
plt.scatter(R*vectB[0],R*vectB[1])
plt.scatter(spanRx,spanRy, s=5, alpha=0.75)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
Activity : TASK 1 $$A = \begin{Bmatrix} A \cdot\begin{bmatrix} 6\\12 \\\end{bmatrix}, B \cdot \begin{bmatrix} 6\\24 \\\end{bmatrix}\end{Bmatrix} $$$$X = 6\hat{x} + 12\hat{y}$$$$Y = 6\hat{x} + 24\hat{y}$$
###Code
X = np.array([6,12])
Y = np.array([6,24])
#Y= np.array([-1,-3])
Z = np.arange(-17,17,1)
A, B = np.meshgrid(Z,Z)
vectZ = X + Y
spanZx = A*X[0] + B*Y[0]
spanZy = A*X[1] + B*Y[1]
plt.scatter(Z*X[0],Z*Y[1])
plt.scatter(Z*X[0],Z*Y[1])
plt.scatter(spanZx,spanZy, s=0.5, alpha=0.5)
plt.axhline(y=0, color='pink')
plt.axvline(x=0, color='pink')
plt.grid()
plt.show()
###Output
_____no_output_____ |
IC50 to pIC50.ipynb | ###Markdown
When building structure–activity relationship (QSAR) models, the IC50 data must first be converted to pIC50. Doing so encourages experimenters to design their experimental ranges and analyze their data on a logarithmic scale rather than a purely arithmetic one. For most biological response systems, differences in the response can only be observed when the inputs are spaced logarithmically; responses designed on an arithmetic scale carry a high risk of error, or of failing to reveal meaningful differences. Converting IC50 to pIC50 before building a QSAR model is therefore essential. pIC50 also helps experimenters judge their bioassay data correctly and, in particular, avoid exaggerating differences in IC50: 100 nM and 300 nM may look 200 nM apart, but their pIC50 values differ by only about 0.48, i.e. they belong to the same activity level. The conversion also improves readability: as the last few lines of Python below show, a simple classification lets the experimenter quickly label a compound as good or bad within a small interval, instead of having to discriminate values across the original IC50 range spanning from one to tens of thousands, which improves efficiency. Convert IC50 from nanomolar to molar. Here I assume all the input IC50s are in nanomolar, since this is most often the case. First we convert the IC50 from nanomolar to molar; the conversion factor is 10^9.
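For convenience, the whole conversion can be wrapped in a small helper function. This is just a sketch; the cells below perform the same steps one at a time.
```python
import math

def ic50_nm_to_pic50(ic50_nm):
    """Convert an IC50 given in nanomolar to pIC50 = -log10(IC50 in molar)."""
    return -math.log10(ic50_nm * 1e-9)

print(ic50_nm_to_pic50(300))   # ~6.52
print(ic50_nm_to_pic50(100))   # ~7.00, only ~0.48 above the 300 nM value
```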
###Code
IC50_NanoMolar = 300
IC50_Molar = IC50_NanoMolar / 1000000000
print("IC50_Molar: ")
print(IC50_Molar)
###Output
IC50_Molar:
3e-07
###Markdown
Then the next cell converts the IC50 in molar to its negative logarithm. We simply take the negative base-10 logarithm, but we first need to import the math module.
###Code
import math
pIC50 = -math.log (IC50_Molar,10)
print(pIC50)
###Output
6.5228787452803365
###Markdown
Now let's classify the bioactivity for this IC50 value. Matching the code below, I define pIC50 >= 6.5 (i.e. IC50 <= 300 nM) as good, pIC50 <= 5.7 (i.e. IC50 >= 2000 nM) as bad, and anything in between as medium.
###Code
if pIC50 <= 5.7:
print("The bioactivity is bad")
elif pIC50 >= 6.5:
print("The bioactivity is good")
else:
print("The bioactivity is medium")
###Output
The bioactivity is good
|
Problem 055 - Lychrel numbers.ipynb | ###Markdown
If we take 47, reverse and add, 47 + 74 = 121, which is palindromic.Not all numbers produce palindromes so quickly. For example, 349 + 943 = 1292, 1292 + 2921 = 4213 4213 + 3124 = 7337That is, 349 took three iterations to arrive at a palindrome.Although no one has proved it yet, it is thought that some numbers, like 196, never produce a palindrome. A number that never forms a palindrome through the reverse and add process is called a Lychrel number. Due to the theoretical nature of these numbers, and for the purpose of this problem, we shall assume that a number is Lychrel until proven otherwise. In addition you are given that for every number below ten-thousand, it will either (i) become a palindrome in less than fifty iterations, or, (ii) no one, with all the computing power that exists, has managed so far to map it to a palindrome. In fact, 10677 is the first number to be shown to require over fifty iterations before producing a palindrome: 4668731596684224866951378664 (53 iterations, 28-digits).Surprisingly, there are palindromic numbers that are themselves Lychrel numbers; the first example is 4994.How many Lychrel numbers are there below ten-thousand?NOTE: Wording was modified slightly on 24 April 2007 to emphasise the theoretical nature of Lychrel numbers.
###Code
open System
open System.Numerics
let rev (str:string) =
let strArrayRev = str.ToCharArray() |> Array.rev
String.Join("", strArrayRev)
let addRev n =
n + BigInteger.Parse(rev(string(n)))
let isPalindrome n = string(n) = rev(string(n))
let rec isLychrel' n i =
if i > 50I then true
else
if i > 0I && isPalindrome n then false
else isLychrel' (addRev n) (i + 1I)
let isLychrel n =
isLychrel' n 0I
seq { 1I..10000I }
|> Seq.filter isLychrel
|> Seq.length
###Output
_____no_output_____ |
sphinx/examples/clip_geom.ipynb | ###Markdown
Example - Clip
###Code
import rioxarray
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load in xarray datasetNotes: - `chunks=True` only works if you have dask installed. Otherwise, you can skip this. - `masked=True` will convert from integer to `float64` and fill with `NaN`. If this behavior is not desired, you can skip this.
###Code
xds = rioxarray.open_rasterio("../../test/test_data/compare/small_dem_3m_merged.tif", masked=True, chunks=True)
xds
xds.plot()
###Output
_____no_output_____
###Markdown
Clip using a geometry
###Code
geometries = [
{
'type': 'Polygon',
'coordinates': [[
[425499.18381405267, 4615331.540546387],
[425499.18381405267, 4615478.540546387],
[425526.18381405267, 4615478.540546387],
[425526.18381405267, 4615331.540546387],
[425499.18381405267, 4615331.540546387]
]]
}
]
clipped = xds.rio.clip(geometries, xds.rio.crs)
clipped
clipped.plot()
clipped.rio.to_raster("clipped.tif", compress='LZMA', tiled=True, dtype="int32")
###Output
_____no_output_____
###Markdown
Clip using a GeoDataFrame
###Code
import geopandas
from shapely.geometry import box, mapping
geodf = geopandas.GeoDataFrame(
geometry=[
box(425499.18381405267, 4615331.540546387, 425526.18381405267, 4615478.540546387)
],
crs=xds.rio.crs.to_dict()
)
clipped = xds.rio.clip(geodf.geometry.apply(mapping), geodf.crs, drop=False, invert=True)
clipped.plot()
clipped.rio.to_raster("clipped_invert.tif", compress='LZMA', tiled=True, dtype="int32")
###Output
_____no_output_____ |
notebooks_en/1_Linear_Regression.ipynb | ###Markdown
Content under Creative Commons Attribution license CC-BY 4.0, code under BSD 3-Clause License © 2021 Lorena A. Barba Linear regression by gradient descentThis module of _Engineering Computations_ takes a step-by-step approach to introduce you to the essential ideas of deep learning, an algorithmic technology that is taking the world by storm. It is at the core of the artificial intelligence boom, and we think every scientist and engineer should understand the basics, at least. Another term for deep learning is deep neural networks. In this module, you will learn how neural-network models are built, computationally. The inspiration for deep learning may have been how the brain works, but in practice what we have is a method to build models, using mostly linear algebra and a little bit of calculus. These models are not magical, or even "intelligent"—they are just about _optimization_, which every engineer knows about!In this lesson, we take the first step of model-building: linear regression. The very first module of the _Engineering Computations_ series discusses [linear regression with real data](http://go.gwu.edu/engcomp1lesson5), and there we found the model parameters (slope and $y$-intercept) analytically. Let's forget about that for this lesson. The key concept we introduce here will be _gradient descent_. Start your ride here! Gradient descentThis lesson is partly based on a tutorial at the 2019 SciPy Conference by Eric Ma [1]. He begins his tutorial by presenting the idea of _gradient descent_ with a simple quadratic function: the question is how do we find this function's minimum?$$f(w) = w^2 +3w -5$$We know from calculus that at the minimum, the derivative of the function is zero (the tangent to the function curve is horizontal), and the second derivative is positive (the curve slants _up_ on each side of the minimum). The analytical derivative of the function above is $f^\prime(w) = 2w + 3$ and the second derivative is $f^{\prime\prime}(w)=2>0$. Thus, we make $2w+3=0$ to find the minimum.Let's play with this function using SymPy. We'll later use NumPy, and make plots with Matplotlib, so we load all the libraries in one place.
###Code
import sympy
import numpy
from matplotlib import pyplot
%matplotlib inline
###Output
_____no_output_____
###Markdown
We run this SymPy method to get beautiful typeset symbols and equations (in the Jupyter notebook, it will use [MathJax](https://en.wikipedia.org/wiki/MathJax) by default):
###Code
sympy.init_printing()
###Output
_____no_output_____
###Markdown
Now we'll define the Python variable `w` to be a SymPy symbol, and create the expression `f` to match the mathematical function above, and plot it.
###Code
w = sympy.Symbol('w', real=True)
f = w**2 + 3*w - 5
f
sympy.plotting.plot(f);
###Output
_____no_output_____
###Markdown
A neat parabola. We can see from the plot that the minimum of $f(w)$ is reached somewhere between $w=-2.5$ and $w=0$. SymPy can tell us the derivative, and the value where it is zero:
###Code
fprime = f.diff(w)
fprime
sympy.solve(fprime, w)
###Output
_____no_output_____
###Markdown
That looks about right: $-3/2$ or $-1.5$. We could have also solved this by hand, because it's a simple function. But for more complicated functions, finding the minimum analytically could be more difficult. Instead, we can use the iterative method of gradient descent. The idea in gradient descent is to find the value of $w$ at the function minimum by starting with an initial guess, then iteratively taking small steps down the slope of the function, i.e., in the negative gradient direction. To illustrate the process, we turn the symbolic expression `fprime` into a Python function that we can call, and use it in a simple loop taking small steps:
###Code
fpnum = sympy.lambdify(w, fprime)
type(fpnum)
###Output
_____no_output_____
###Markdown
Yep. We got a Python function with the [`sympy.lambdify()`](https://docs.sympy.org/latest/modules/utilities/lambdify.html) method, whose return value is of type `function`. Now, you can pick any starting guess, say $w=10$, and advance in a loop taking steps of size $0.01$ (a choice we make; more on this later):
###Code
w = 10.0 # starting guess for the min
for i in range(1000):
w = w - fpnum(w)*0.01 # with 0.01 the step size
print(w)
###Output
-1.4999999806458753
###Markdown
That gave a result very close to the true value $-1.5$, and all we needed was a function for the derivative of $f(w)$. This is how you find the argument of the minimum of a function iteratively. Note> Implied in this method is that the function is differentiable, and that we can step *down* the slope, meaning its second derivative is positive, or the function is _convex_. Gradient descent steps in the direction of the negative slope to approach the minimum. Linear regressionSuppose you have data consisting of one independent variable and one dependent variable, and when you plot the data it seems to noisily follow a trend line. To build a model with this data, you assume the relationship is _linear_, and seek to find the line's slope and $y$-intercept (the model parameters) that best fit the data. Though this sounds straightforward, some key ideas of machine learning are contained:- we don't _know_ the true relationship between the variables, we _assume_ it is linear (and go for it!)- the model we chose (linear) has some parameters (slope, intercept) that are unknown- we will need some data (observational, experimental) of the dependent and independent variables- we find the model parameters by fitting the "best" line to the data- the model with its parameters can then be used to make _predictions_Let's make some synthetic data to play with, following the example in Eric Ma's tutorial [1].
###Code
# make sythetic data (from Eric's example)
x_data = numpy.linspace(-5, 5, 100)
w_true = 2
b_true = 20
y_data = w_true*x_data + b_true + numpy.random.normal(size=len(x_data))
pyplot.scatter(x_data,y_data);
###Output
_____no_output_____
###Markdown
This situation arises often. In **Module 1** of _Engineering Computations_, we used a real data set of Earth temperature over time and we fit an ominously sloped line. We derived analytical formulas for the model coefficients and wrote our own custom functions, and we also learned that NumPy has a built-in function that will do it for us: `numpy.polyfit(x, y, 1)` will return the two parameters $w, b$ for the line$$y = w x + b $$Here, we will instead use gradient descent to get the parameters of the linear model. The first step is to define a function that represents the _deviation_ of the data from the model. For linear regression, we use the sum (or the mean) of the square _errors_: the differences between each data point and the predicted value from the linear model (also called _residuals_). Each data point deviates from the linear regression: we aim to minimize the sum of squares of the residuals.Let's review our ingredients:1. observational data, in the form of two arrays: $x, y$2. our linear model: $y = wx + b$3. a function that measures the discrepancy between the data and the fitting line: $\frac{1}{N}\sum (y_i - f(x_i))^2$The last item is called a "loss function" (also sometimes "cost function"). Our method will be to step down the slope of the loss function, to find its minimum.As a first approach, let's again use SymPy, which can compute derivatives for us. Below, we define the loss function for a single data point, and make Python functions with its derivatives with respect to the model parameters. We will call these functions in a sequence of steps that start at an initial guess for the parameters (we choose zero), and step in the negative gradient multiplied by a step size (we choose $0.01$). After $1000$ steps, you see that the values of $w$ and $b$ are quite close to the true values from our synthetic data.
###Code
w, b, x, y = sympy.symbols('w b x y')
loss = (w*x + b - y)**2
loss
grad_b = sympy.lambdify([w,b,x,y], loss.diff(b), 'numpy')
grad_w = sympy.lambdify([w,b,x,y], loss.diff(w), 'numpy')
###Output
_____no_output_____
###Markdown
Be sure to read the documentation for [`sympy.lambdify()`](https://docs.sympy.org/latest/modules/utilities/lambdify.html), which explains the argument list.Now, we step down the slope. Note that we first compute the derivatives with respect to both parameters _at all the data points_ (thanks to NumPy array operations), and we take the average. Then we step both parameters (starting from an initial guess of zero).
###Code
w = 0
b = 0
for i in range(1000):
descent_b = numpy.sum(grad_b(w,b,x_data,y_data))/len(x_data)
descent_w = numpy.sum(grad_w(w,b,x_data,y_data))/len(x_data)
w = w - descent_w*0.01 # with 0.01 the step size
b = b - descent_b*0.01
print(w)
print(b)
pyplot.scatter(x_data,y_data)
pyplot.plot(x_data, w*x_data + b, '-r');
###Output
_____no_output_____
###Markdown
It works! That line looks to be fitting the data pretty well. Now we have a "best fit" line that represents the data, and that we can use to estimate the value of the dependent variable for any value of the independent variable, even if not present in the data. That is, _to make predictions_. Key idea> "Learning" means building a model by finding the parameters that best fit the data. We do it by minimizing a loss function (a.k.a. cost function), which involves computing derivatives with respect to the parameters in the model. Here, we used SymPy to help us out with the derivatives, but for more complex models (which may have many parameters), this could be a cumbersome approach. Instead, we will make use of the technique of _automatic differentiation_, which evaluates the derivative of a function written in code. You'll learn more about it in the next lesson, on **logistic regression**. What we've learned- Gradient descent can find a minimum of a function.- Linear regression starts by assuming a linear relationship between two variables.- A model includes the assumed relationship in the data and model parameters.- Observational data allows finding the parameters in the model (slope, intercept).- A loss function captures the deviation between the observed and the predicted values of the dependent variable.- We find the parameters by minimizing the loss function via gradient descent.- SymPy computes derivatives with `sympy.diff()` and returns numeric functions with `simpy.lambdify()`. References1. Eric Ma, "Deep Learning Fundamentals: Forward Model, Differentiable Loss Function & Optimization," SciPy 2019 tutorial. [video on YouTube](https://youtu.be/JPBz7-UCqRo) and [archive on GitHub](https://github.com/ericmjl/dl-workshop/releases/tag/scipy2019).
###Code
# Execute this cell to load the notebook's style sheet, then ignore it
from IPython.core.display import HTML
css_file = '../style/custom.css'
HTML(open(css_file, "r").read())
###Output
_____no_output_____ |
Final Project - News Headline Generation and Validation/.ipynb_checkpoints/data_preprocessing_title-checkpoint.ipynb | ###Markdown
1. Basic Feature Extraction
###Code
#reading csv
train = pd.read_csv('articles_small.csv', encoding='ISO-8859-1',low_memory=False)
train
train = train[train.notnull()]
train
train = train.dropna(how='any')
train
train['title'][0]
#print('------------------------------------------------------------')
train['content'][0]
heads = train['title']
heads
descs = train['content']
descs
list_title = []
for i in heads:
title = ftfy.fix_text(i)
list_title.append(title)
print(title)
print('---------------')
list_title
list_content = []
for i in descs:
descs = ftfy.fix_text(i)
list_content.append(descs)
print(descs)
print('---------------')
list_content
descs
list_content = list_content[:2]
list_title = list_title[:2]
new_dict = {}
# pair each headline with its corresponding article body
for title, content in zip(list_title, list_content):
    new_dict[title] = content
new_dict
#Number of stopwords
#stop = stopwords.words('english')
#train['stopwords'] = train['content'].apply(lambda x: len([x for x in x.split() if x in stop]))
#train[['content','stopwords']].head()
###Output
_____no_output_____
###Markdown
2. Basic Pre-processing
###Code
#transform data into lower case
train = train.apply(lambda col: col.astype(str).apply(lambda x: " ".join(x.lower().split())))  # lower-case and collapse whitespace in every text column
train.head()
#Removing Punctuation
train = train.apply(lambda col: col.str.replace(r'[^\w\s]', '', regex=True))  # strip punctuation from every text column
train
train.to_csv('output_cleaned_title.csv', index=False)
#Removal of Stop Words
#train['content'] = train['content'].apply(lambda x: " ".join(x for x in x.split() if x not in stop))
#train['content'].head()
#Common word removal
#freq = pd.Series(' '.join(train['content']).split()).value_counts()[:10]
#freq
#freq = list(freq.index)
#train['content'] = train['content'].apply(lambda x: " ".join(x for x in x.split() if x not in freq))
#train['content'].head()
#Rare words removal
#rare = pd.Series(' '.join(train['content']).split()).value_counts()[-10:]
#rare
#rare = list(rare.index)
#train['content'] = train['content'].apply(lambda x: " ".join(x for x in x.split() if x not in rare))
#train['content'].head()
#Tokenization - dividing the text into a sequence of words or sentences
#we have used the textblob library to first transform our data into a blob and then converted them into a series of words
tokenized_words=[]
#for i,x in enumerate(train['content']):
#if(len(x) > 1 ):
#tokenized_words = TextBlob(x).words
#Stemming - removal of suffices, like “ing”, “ly”, “s”
#st = PorterStemmer()
#train['content'].apply(lambda x: " ".join([st.stem(word) for word in x.split()]))
#Lemmatization - it converts the word into its root word
#train['content'] = train['content'].apply(lambda x: " ".join([Word(word).lemmatize() for word in x.split()]))
#train['content'].head()
###Output
_____no_output_____
###Markdown
3. Advanced Text Processing
###Code
#N-grams - combination of multiple words used together.
#j=[]
#for i,x in enumerate(train['content']):
#j = TextBlob(x).ngrams(2)
# Term frequency - ratio of the count of a word present in a sentence, to the length of the sentence
#tf1 = (train['content']).apply(lambda x: pd.value_counts(x.split(" "))).sum(axis = 0).reset_index()
#tf1.columns = ['words','tf']
#tf1
#Inverse Document Frequency - log of the ratio of the total number of rows to the number of rows in which that word is present
#import numpy as np
#for i,word in enumerate(tf1['words']):
#tf1.loc[i, 'idf'] = np.log(train.shape[0]/(len(train[train['content'].str.contains(word)])))
#tf1
#Term Frequency – Inverse Document Frequency (TF-IDF) - multiplication of the TF and IDF
#tf1['tfidf'] = tf1['tf'] * tf1['idf']
#tf1
#tfidf = TfidfVectorizer(max_features=1000, lowercase=True, analyzer='word',
#stop_words= 'english',ngram_range=(1,1))
#train_vect = tfidf.fit_transform(train['content'])
#train_vect
#Bag of Words - representation of text which describes the presence of words within the text data
#bow = CountVectorizer(max_features=1000, lowercase=True, ngram_range=(1,1),analyzer = "word")
#train_bow = bow.fit_transform(train['content'])
#train_bow
# Word Embeddings
#glove_input_file = 'glove.6B.100d.txt'
#word2vec_output_file = 'glove.6B.100d.txt.word2vec'
# convert it into the word2vec format
#glove2word2vec(glove_input_file, word2vec_output_file)
#load the above word2vec file as a model
#filename = 'glove.6B.100d.txt.word2vec'
#model = KeyedVectors.load_word2vec_format(filename, binary=False)
# take the average to represent the string ‘go away’ in the form of vectors having 100 dimensions
#(model['music'] + model['family'])/2
###Output
_____no_output_____ |
_downloads/4deabe4297e14641851ebc4109012fa2/plot_group_ica_tutorial.ipynb | ###Markdown
ICA: a tutorial. Author: Pierre Ablin. Group ICA extends the celebrated Independent Component Analysis to multiple datasets. Single view ICA decomposes a dataset $X$ as $X = S \times A^{\top}$, where $S$ are the independent sources (meaning that the columns of $S$ are independent), and $A$ is the mixing matrix. In group ICA, we have several views $Xs = [X_1, \dots, X_n]$. Each view is obtained as\begin{align}X_i \simeq S \times A_i^{\top}\end{align}so the views share the same sources $S$, but have different mixing matrices $A_i$. It is a powerful tool for group inference, as it allows us to extract signals that are common across views.
###Code
# License: MIT
import numpy as np
import matplotlib.pyplot as plt
from mvlearn.decomposition import GroupICA
###Output
_____no_output_____
###Markdown
Define a Function to Plot Sources
###Code
def plot_sources(S):
n_samples, n_sources = S.shape
fig, axes = plt.subplots(n_sources, 1, figsize=(6, 4), sharex=True)
for ax, sig in zip(axes, S.T):
ax.plot(sig)
###Output
_____no_output_____
###Markdown
Define Independent Sources and Generate Noisy Observations. Define independent sources. Next, generate some views, which are noisy observations of linear transforms of these sources.
###Code
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) * np.sin(40 * time)
s2 = np.sin(3 * time) ** 5
s3 = np.random.laplace(size=s1.shape)
S = np.c_[s1, s2, s3]
plot_sources(S)
n_views = 10
mixings = [np.random.randn(3, 3) for _ in range(n_views)]
Xs = [np.dot(S, A.T) + 0.3 * np.random.randn(n_samples, 3) for A in mixings]
# We can visualize one dataset: it looks quite messy.
plot_sources(Xs[0])
###Output
_____no_output_____
###Markdown
Apply Group ICA. Next, we can apply group ICA. The option `multiview_output=False` means that we want to recover the estimated sources when we do `.transform`. Here, we look at what the algorithm estimates as the sources from the multiview data.
###Code
groupica = GroupICA(multiview_output=False).fit(Xs)
estimated_sources = groupica.transform(Xs)
plot_sources(estimated_sources)
###Output
_____no_output_____
###Markdown
Inspect Estimated Mixings. We see they look pretty good! We can also check that it has correctly predicted each mixing matrix. The estimated mixing matrices are stored in the `.individual_mixing_` attribute. If $\tilde{A}$ is the estimated mixing matrix and $A$ is the true mixing matrix, we can look at $\tilde{A}^{-1}A$. It should be close to a scale and permutation matrix: in this case, the sources are correctly estimated, up to scale and permutation.
###Code
estimated_mixings = groupica.individual_mixing_
plt.matshow(np.dot(np.linalg.pinv(estimated_mixings[0]), mixings[0]))
###Output
_____no_output_____
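###Markdown
A rough numeric companion to the plot above (a sketch using only arrays already defined): after dividing each column of $\tilde{A}^{-1}A$ by its largest absolute entry, a clean scale-and-permutation matrix shows a single 1 per column and near-zero values elsewhere.
###Code
M = np.dot(np.linalg.pinv(estimated_mixings[0]), mixings[0])
print(np.round(np.abs(M) / np.abs(M).max(axis=0), 2))   # one dominant entry per column
###Output
_____no_output_____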
###Markdown
Group ICA on Only 2 Views. A great advantage of groupICA is that it leverages the multiple views to reduce noise. For instance, if we only had two views, we would have obtained the following.
###Code
estimated_sources = groupica.fit_transform(Xs[:2])
plot_sources(estimated_sources)
# Another important property of group ICA is that it can recover signals that
# are common to all datasets, and separate these signals from the rest.
# Imagine that we only have one common source across datasets:
common_source = S[:, 0]
mixings = np.random.randn(n_views, 3)
Xs = [a * common_source[:, None] + 0.3 * np.random.randn(n_samples, 3)
for a in mixings]
estimated_sources = groupica.fit_transform(Xs)
plot_sources(estimated_sources)
# It recovers the common source on one channel, and the other estimated
# sources are noise.
###Output
_____no_output_____ |
Mnist_TF_rasberry.ipynb | ###Markdown
MNIST inspired from https://github.com/gato/tensor-on-pi/blob/master/Convolutional%20Neural%20Network%20digit%20predictor.ipynb
###Code
import os
import shutil
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras.layers import Dense, Flatten, Softmax, Conv2D, Dropout, MaxPooling2D
print(tf.__version__)
mnist = tf.keras.datasets.mnist.load_data()
(x_train, y_train), (x_test, y_test) = mnist
HEIGHT, WIDTH = x_train[0].shape
NCLASSES = tf.size(tf.unique(y_train).y)
print("Image height x width is", HEIGHT, "x", WIDTH)
tf.print("There are", NCLASSES, "classes")
def get_model():
model = Sequential([
Conv2D(64, kernel_size=3,
activation='relu', input_shape=(WIDTH, HEIGHT, 1)),
MaxPooling2D(2),
Conv2D(32, kernel_size=3,
activation='relu'),
MaxPooling2D(2),
Flatten(),
Dense(400, activation='relu'),
Dense(100, activation='relu'),
Dropout(.25),
Dense(10),
Softmax()
])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
BUFFER_SIZE = 5000
BATCH_SIZE = 100
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255
image = tf.expand_dims(image, -1)
return image, label
def load_dataset(training=True):
"""Loads MNIST dataset into a tf.data.Dataset"""
(x_train, y_train), (x_test, y_test) = mnist
x = x_train if training else x_test
y = y_train if training else y_test
# One-hot encode the classes
y = tf.keras.utils.to_categorical(y, NCLASSES)
dataset = tf.data.Dataset.from_tensor_slices((x, y))
dataset = dataset.map(scale).batch(BATCH_SIZE)
if training:
dataset = dataset.shuffle(BUFFER_SIZE).repeat()
return dataset
NUM_EPOCHS = 10
STEPS_PER_EPOCH = 100
model = get_model()
train_data = load_dataset()
validation_data = load_dataset(training=False)
OUTDIR = "mnist_digits/"
checkpoint_callback = ModelCheckpoint(
OUTDIR, save_weights_only=True, verbose=1)
tensorboard_callback = TensorBoard(log_dir=OUTDIR)
t1 = time.perf_counter()
history = model.fit(
train_data,
validation_data=validation_data,
epochs=NUM_EPOCHS,
steps_per_epoch=STEPS_PER_EPOCH,
verbose=2,
callbacks=[checkpoint_callback, tensorboard_callback]
)
t2 = time.perf_counter()
print("training took: {:4.4f} secs.".format(t2 - t1))
###Output
_____no_output_____ |
notebooks_BQ simple query.ipynb | ###Markdown
Full scan on 10B rows of Wikipedia logs
###Code
%%sql
SELECT
LANGUAGE,
SUM(views) AS views
FROM
[bigquery-samples:wikipedia_benchmark.Wiki10B]
WHERE
REGEXP_MATCH(title,
"G.*o.*o.*g")
GROUP BY
LANGUAGE
ORDER BY
views DESC;
###Output
_____no_output_____ |
fastai_foundation_notes_3.ipynb | ###Markdown
When softmax is a bad idea (this is what many top researchers get wrong)Let's say you want the model to categorize the objects in the image (cat, dog, plane, fish, building).**Problem:** you could have two images: (1) probably a fish, definitely not the others; (2) no object at all: definitely not a cat, dog, plane or building, and not a fish either. Because softmax is just the relative weight of each object, the two images might produce identical softmax as the output. **Note:**- Softmax only works well when there is one and only one object out of the categories of interest in the image, i.e. there can't be more than one object in the same image, and no images without an object. Example: language modeling (what's the next word? The next word is definitely one and only one word) - So why do researchers always use softmax? Because the most famous dataset is ImageNet and in ImageNet all pictures fit the description above ^ (one and only one object)- Wait, what about creating a 6th category "no object" and trying to predict that? In practice, that has not worked very well because while there are distinct features for concrete objects such as fish, what does "not fish, not cat, not dog, not plane, not building" look like? - Wait, what about all these top papers that work well with softmax? Try reproducing the paper without the softmax. You'll probably get better results- So what do we use instead of softmax? We use good old **binomial**, where you go through each of the categories and ask "does this image have a cat?", "does this image have a dog?" etc. The output of one category is not relative to the other categories (the next cell sketches this multi-label setup). Use Exception in Control Flow and Learning Rate Finder ConvNet Import and Helper Functions
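###Markdown
A minimal sketch of the "binomial" (one-sigmoid-per-class) head described above; this illustration assumes nothing beyond plain PyTorch, and the notebook's own imports and helpers follow in the cell after it.
###Code
import torch
from torch import nn
logits = torch.randn(4, 5)                        # batch of 4 images, 5 candidate classes
targets = torch.tensor([[0., 0., 0., 0., 1.],     # probably a fish, nothing else
                        [0., 0., 0., 0., 0.],     # no object at all
                        [1., 1., 0., 0., 0.],     # a cat and a dog in the same image
                        [0., 0., 1., 0., 0.]])
loss = nn.BCEWithLogitsLoss()(logits, targets)    # independent binary loss per class
probs = torch.sigmoid(logits)                     # per-class probabilities, need not sum to 1
print(loss, probs[1])
###Output
_____no_output_____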
###Code
#export
from drive.MyDrive.fastai_foundation.exp.nb_fastai_foundation_notes_2 import *
###Output
_____no_output_____
###Markdown
Normalize Data
###Code
#export
# we want to normalize BOTH train and valid to the mean and std of train
def normalize_ds(train, valid):
m, s = train.mean(), train.std()
return normalize(train, m, s), normalize(valid, m, s)
x_train,y_train,x_valid,y_valid = get_data(); print("x_train: ", x_train.shape)
x_train, x_valid = normalize_ds(x_train, x_valid)
train_ds, valid_ds = Dataset(x_train, y_train), Dataset(x_valid, y_valid)
x_train.mean(), x_train.std()
# not_normalized_data = get_dl_group()
# I incorporated the normalize_ds function in get_dl_group
data = get_dl_group(normalize=True, batch_size=512)
??get_dl_group
x_train.shape == torch.Size([50000, 784])
###Output
_____no_output_____
###Markdown
CNN Model`nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros')``in_channels` is the number of channels of the previous layer. `out_channels` is the number of filters you want to have and thus the number of channels in the output.
###Code
# Lambda lets us pass functions into nn.Sequential, which take Module type of classes such as Linear
class Lambda(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x)
#export
def mnist_resize(x):
# x.view(batch_size, channels, img_width, img_height)
return x.view(-1, 1, 28, 28)
def flatten(x):
# x.view(batch_size, size when everything is flattened into 1 dimension)
return x.view(x.shape[0], -1)
def get_cnn_model(output_dim):
return nn.Sequential(
Lambda(mnist_resize),
nn.Conv2d(1, 8, 5, padding=2, stride=2), # 14 * 14 * 8channels
nn.ReLU(),
nn.Conv2d(8, 16, 3, padding=1, stride=2), # 7 * 7 * 16channels
nn.ReLU(),
nn.Conv2d(16, 32, 3, padding=1, stride=2), # 4 * 4 * 32channels
nn.ReLU(),
nn.Conv2d(32, 32, 3, padding=1, stride=2), # 2 * 2 * 32channels
nn.AdaptiveAvgPool2d(1), # average pool where you just specify output dim and pytorch calculates strides etc for you, trying to give as little overlapping as possible between pools
Lambda(flatten), # batch_size * 32
nn.Linear(32, output_dim)
)
n, m, c = attrgetter('n', 'm', 'c')(data); nh = 50
model = get_cnn_model(c)
loss_fn = F.cross_entropy
optimizer = optim.SGD(model.parameters(), lr=0.4)
callback_funcs = [Recorder, accuracy_cb_fn, hyperparam_scheduler_cb_fn]
learner = Learner(data, model, loss_fn, optimizer)
runner = Runner(cb_fns=callback_funcs)
%time runner.fit(learner, epochs=2)
###Output
before_fit
before_train_epoch!
before_validate_epoch!
train stats: tensor(0.1820)
valid stats: tensor(0.2009)
before_train_epoch!
before_validate_epoch!
train stats: tensor(0.5542)
valid stats: tensor(0.5373)
epochs fitted: 2
CPU times: user 8.8 s, sys: 576 ms, total: 9.37 s
Wall time: 9.35 s
###Markdown
Speed Up Training with `CudaCallback`
###Code
# export
# move model & data to device
class CudaCallback(Callback):
def __init__(self, device):
self.device = device
def before_fit(self):
self.learner.model.to(self.device)
def before_batch(self):
self.runner.x_batch = self.runner.x_batch.to(self.device)
self.runner.y_batch = self.runner.y_batch.to(self.device)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cuda_callback = partial(CudaCallback, device)
model = get_cnn_model(c)
loss_fn = F.cross_entropy
optimizer = optim.SGD(model.parameters(), lr=0.4)
callback_funcs = [Recorder, accuracy_cb_fn, hyperparam_scheduler_cb_fn, cuda_callback]
learner = Learner(data, model, loss_fn, optimizer)
runner = Runner(cb_fns=callback_funcs)
%time runner.fit(learner, epochs=2)
###Output
before_fit
before_train_epoch!
before_validate_epoch!
train stats: tensor(0.1899, device='cuda:0')
valid stats: tensor(0.1064, device='cuda:0')
before_train_epoch!
before_validate_epoch!
train stats: tensor(0.4111, device='cuda:0')
valid stats: tensor(0.4505, device='cuda:0')
epochs fitted: 2
CPU times: user 3.55 s, sys: 977 ms, total: 4.53 s
Wall time: 4.53 s
###Markdown
Generalize / Refactor with `BatchTransformXCallback`The get_cnn_model above can't be re-used for anything except for MNIST dataset. We need to generalize it. We replace `lambda(mnist_resize)` with a callback.
###Code
#export
class BatchTransformXCallback(Callback):
def __init__(self, transform_fn): self.transform_fn = transform_fn
def before_batch(self):
self.runner.x_batch = self.transform_fn(self.runner.x_batch)
mnist_batch_transform = partial(BatchTransformXCallback, mnist_resize)
###Output
_____no_output_____
###Markdown
Generalize / Refactor `nn.Sequential`
###Code
def conv2d(in_channels, out_channels, kernel_size, padding=1, stride=2):
return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size, padding=kernel_size//2, stride=stride), nn.ReLU())
out_channels = [8, 16, 32, 32]
def get_cnn_layers(out_channels, output_dim):
out_channels = [1] + out_channels
conv_layers = [ conv2d(out_channels[i], out_channels[i+1], 5 if i==0 else 3) for i in range(len(out_channels)-1)]
cnn_layers = conv_layers + [nn.AdaptiveAvgPool2d(1), Lambda(flatten), nn.Linear(out_channels[-1], output_dim)]
return cnn_layers
def get_cnn_model(output_dim):
return nn.Sequential(*get_cnn_layers(out_channels, output_dim))
###Output
_____no_output_____
###Markdown
Notice above that we set the kernel size to 5 for the first layer and 3 for the following. Here's why (Jeremy Howard's video is a bit confusing here -- need to keep in mind that he's only talking about a single convolution on a small part of the image):Let's say we choose 8 kernels/filters for the first layer (pretty reasonable for small images like MNIST). If we have a kernel size of 3 x 3, for each convolution, because we end up with a vector of 8 values (1 value for each of the 8 kernels), we go from 9 values to 8 values. This is pretty pointless: the whole point of neural networks is to extract/compress information; you barely extracted any information at all here. If we use a kernel size of 5 x 5, we still end up with a vector of 8 values (this is independent of kernel size -- there's always going to be 1 value for each of the 8 kernels with each convolution), but we go from 25 to 8. Now that's some real information extraction! Refactor Runner
###Code
def get_train(data, model, cb_fns=None, opt_fn=None, lr=0.6, loss_fn=F.cross_entropy):
if not opt_fn:
optimizer = optim.SGD(model.parameters(), lr=0.4)
else:
optimizer = opt_fn(model.parameters(), lr=lr)
learner = Learner(data, model, loss_fn, optimizer)
runner = Runner(cb_fns=cb_fns)
return runner, learner
callback_funcs = [Recorder, accuracy_cb_fn, hyperparam_scheduler_cb_fn, mnist_batch_transform, cuda_callback]
model = get_cnn_model(c)
runner, learner = get_train(data, model, lr=0.4, cb_fns=callback_funcs)
%time runner.fit(learner, epochs=3)
###Output
before_fit
before_train_epoch!
before_validate_epoch!
train stats: tensor(0.2191, device='cuda:0')
valid stats: tensor(0.2193, device='cuda:0')
before_train_epoch!
before_validate_epoch!
train stats: tensor(0.7778, device='cuda:0')
valid stats: tensor(0.5791, device='cuda:0')
before_train_epoch!
before_validate_epoch!
train stats: tensor(0.9494, device='cuda:0')
valid stats: tensor(0.7062, device='cuda:0')
epochs fitted: 3
CPU times: user 1.93 s, sys: 57.3 ms, total: 1.98 s
Wall time: 1.99 s
###Markdown
See in Between Layers Let's check the mean and std of each layer to see if there's exploding/vanishing gradient problem. We create `SequentialModel` to use instead of `nn.Sequential` in order to log this info.
###Code
class SequentialModel(nn.Module):
def __init__(self, *layers):
super().__init__()
self.layers = nn.ModuleList(layers)
self.means = [[] for layer in layers]
self.stds = [[] for layer in layers]
def __call__(self, x):
for i, layer in enumerate(self.layers):
x = layer(x)
self.means[i].append(x.data.mean())
self.stds[i].append(x.data.std())
return x
def __iter__(self): return iter(self.layers)
def test_sequential_model():
x, _ = data.train_ds[0]
x = mnist_resize(x)
torch.manual_seed(42)
nn_sequential_model = get_cnn_model(c)
torch.manual_seed(42)
our_sequential_model = SequentialModel(*get_cnn_layers(out_channels, c))
assert torch.all(torch.isclose(nn_sequential_model(x), our_sequential_model(x)))
test_sequential_model()
model = SequentialModel(*get_cnn_layers(out_channels, c))
runner, learner = get_train(data, model, lr=0.4, cb_fns=callback_funcs)
runner.fit(learner, epochs=2)
for layer_means in model.means: plt.plot(layer_means)
plt.legend(range(6)); # each color is the mean for a layer during each forward pass
for layer_stds in model.stds: plt.plot(layer_stds)
plt.legend(range(6));
for layer_means in model.means: plt.plot(layer_means[:100])
plt.legend(range(6))
for layer_stds in model.stds: plt.plot(layer_stds[:10])
plt.legend(range(6))
###Output
_____no_output_____
###Markdown
You see that the standard deviation of the activations is getting smaller with each layer. This is not good because when the mean is 0 (we want the mean to be 0 to avoid an exploding or vanishing gradient problem), small std --> activations are all 0 --> gradients are all 0 (gradients depend on activations). Hooks`SequentialModel` works, but we don't want to always be rewriting our models. Let's generalize/refactor it. To do that, we need callbacks. PyTorch names these layer-level callbacks "hooks". Think: these "hooks" let us hook into the space between layers. PyTorch Hook
###Code
model = get_cnn_model(c)
runner, learner = get_train(data, model, lr=0.5, cb_fns=callback_funcs)
means = [[] for _ in model]
stds = [[] for _ in model]
def append_stats(i, module, input, output):
means[i].append(output.data.mean())
stds[i].append(output.data.std())
for i,m in enumerate(model): m.register_forward_hook(partial(append_stats, i))
runner.fit(learner, epochs=1)
for layer_means in means: plt.plot(layer_means)
plt.legend(range(6)); # each color is the mean for a layer during each forward pass
plt.xlabel('step')
###Output
_____no_output_____
###Markdown
Refactor with a Hook Class
###Code
#export
class Hook():
def __init__(self, module, fn, activation_lower_bound=0, activation_upper_bound=10, num_bins=40):
self.hook = module.register_forward_hook(partial(fn, self))
self.activation_lower_bound, self.activation_upper_bound, self.num_bins = activation_lower_bound, activation_upper_bound, num_bins
def remove(self): self.hook.remove()
def __del__(self): self.remove()
def append_stats(hook, module, input, output):
if not hasattr(hook,'stats'): hook.stats = ([],[])
means, stds = hook.stats
means.append(output.data.mean())
stds.append(output.data.std())
model = get_cnn_model(c)
hooks = [Hook(layer, append_stats) for layer in model]
runner, learner = get_train(data, model, lr=0.5, cb_fns=callback_funcs)
runner.fit(learner, epochs=1)
for h in hooks:
plt.plot(h.stats[0])
h.remove()
plt.legend(range(8));
###Output
_____no_output_____
###Markdown
Refactor with Hooks class
###Code
#export
from typing import *
def listify(o):
if o is None: return []
if isinstance(o, list): return o
if isinstance(o, str): return [o]
if isinstance(o, Iterable): return list(o)
return [o]
# classes that inherit from ListContainer have properties of list
class ListContainer():
def __init__(self, items): self.items = listify(items)
def __getitem__(self, idx):
if isinstance(idx, (int,slice)): return self.items[idx]
if isinstance(idx[0],bool):
assert len(idx)==len(self) # bool mask
return [o for m,o in zip(idx,self.items) if m]
return [self.items[i] for i in idx]
def __len__(self): return len(self.items)
def __iter__(self): return iter(self.items)
def __setitem__(self, i, o): self.items[i] = o
def __delitem__(self, i): del(self.items[i])
def __repr__(self):
res = f'{self.__class__.__name__} ({len(self)} items)\n{self.items[:10]}'
if len(self)>10: res = res[:-1]+ '...]'
return res
#export
class Hooks(ListContainer):
def __init__(self, model, fn, activation_lower_bound=0, activation_upper_bound=10, num_bins=40):
# Hooks is simply a list of Hook
super().__init__([Hook(layer, fn, activation_lower_bound=activation_lower_bound, activation_upper_bound=activation_upper_bound, num_bins=num_bins) for layer in model])
# __enter__ and __exist__ allow us to use `with ... as ...`
def __enter__(self, *args): return self
def __exit__(self, *args): self.remove()
def __del__(self): self.remove()
def remove(self):
for hook in self: hook.remove()
model = get_cnn_model(c)
hooks = Hooks(model, append_stats)
runner, learner = get_train(data, model, lr=0.5, cb_fns=callback_funcs)
runner.fit(learner, epochs=1)
fig, ax = plt.subplots(1,2, figsize=(10,4))
for h in hooks:
means, stds = h.stats
ax[0].plot(means)
ax[1].plot(stds)
h.remove()
plt.legend(range(8));
###Output
_____no_output_____
###Markdown
Better the mean & std with Kaiming initialization
###Code
# get a batch of data to examine the mean and std
x,y = next(iter(data.train_dl))
x = mnist_resize(x).cuda()
print(x.mean(), x.std())
# check the mean and std after first layer
p = model[0](x)
print(p.mean(), p.std())
from torch.nn.init import kaiming_normal_, kaiming_uniform_
# kaiming initialization
def kaiming_init(model, uniform=False):
init_fn = kaiming_uniform_ if uniform else kaiming_normal_
for layer in model:
if isinstance(layer, nn.Sequential):
init_fn(layer[0].weight, a=0.1)
layer[0].bias.data.zero_()
kaiming_init(model)
# check the mean and std after first layer -- the std is much closer to 1 after kaiming init
p = model[0](x)
print(p.mean(), p.std())
def plot_stds_and_means(hooks, batches=None):
fig, ax = plt.subplots(1,2, figsize=(10,4))
for h in hooks:
means, stds = h.stats[0], h.stats[1]
if not batches:
ax[0].plot(means)
ax[1].plot(stds)
else:
ax[0].plot(means[:batches])
ax[1].plot(stds[:batches])
plt.legend(range(6))
model = get_cnn_model(c)
kaiming_init(model)
runner, learner = get_train(data, model, lr=0.9, cb_fns=callback_funcs)
with Hooks(model, append_stats) as hooks:
runner.fit(learner, epochs=2)
plot_stds_and_means(hooks, 10) # plot the first 10 batches
plot_stds_and_means(hooks) # plot all
###Output
before_fit
before_train_epoch!
before_validate_epoch!
train stats: tensor(0.5630, device='cuda:0')
valid stats: tensor(0.8918, device='cuda:0')
before_train_epoch!
before_validate_epoch!
train stats: tensor(0.9330, device='cuda:0')
valid stats: tensor(0.9247, device='cuda:0')
epochs fitted: 2
###Markdown
Looking Deeper into the Model with Histograms
###Code
activation_lower_bound = 0
activation_upper_bound = 10
num_bins = 40
bin_size = (activation_upper_bound - activation_lower_bound) / num_bins
def append_stats(hook, module, input, output):
if not hasattr(hook,'stats'): hook.stats = ([],[],[])
if not hasattr(hook,'steps'): hook.steps = 0
means, stds, hists = hook.stats
means.append(output.data.mean().cpu())
stds.append(output.data.std().cpu())
hook.steps += 1
# at each step/batch, at each layer, all the activations are counted and put into bins
# number of activations is equal to the output dimension for that layer
hists.append(output.data.cpu().histc(hook.num_bins,hook.activation_lower_bound,hook.activation_upper_bound)) # histc isn't implemented on the GPU
# reshape and log for visualization
def get_hist(h):
hists = h.stats[2] # each hists is shape [num_steps, num_bins], corresponding to 1 hook (1 layer)
return torch.stack(hists).t().float().log1p() # note we're logging the *counts* here to make the distribution more colorful
model = get_cnn_model(c)
kaiming_init(model)
runner, learner = get_train(data, model, lr=0.9, cb_fns=callback_funcs)
with Hooks(model, append_stats, activation_lower_bound=-7, activation_upper_bound=7, num_bins=40) as hooks:
runner.fit(learner, epochs=1)
_, axes = plt.subplots(2, 2, figsize=(15, 10))
for i, (ax, hook) in enumerate(zip(axes.flatten(), hooks[:4])):
hist = get_hist(hook)
ax.imshow(hist, origin="lower")
ax.set_xlabel('n-th batch')
ax.set_ylabel(f'n-th bin out of {num_bins} from activation value {activation_lower_bound} to {activation_upper_bound}')
ax.set_title(f'layer {i+1}')
print("total steps done: ", hook.steps)
plt.tight_layout()
###Output
before_fit
before_train_epoch!
before_validate_epoch!
train stats: tensor(0.3511, device='cuda:0')
valid stats: tensor(0.8361, device='cuda:0')
epochs fitted: 1
total steps done: 108
total steps done: 108
total steps done: 108
total steps done: 108
###Markdown
In the histogram above, the color represent the counts (the brighter the color, the higher the count).
###Code
from prettytable import PrettyTable
def count_parameters(model):
table = PrettyTable(["Modules", "Parameters"])
total_params = 0
for name, parameter in model.named_parameters():
if not parameter.requires_grad: continue
param = parameter.numel()
table.add_row([name, param])
total_params+=param
print(table)
print(f"Total Trainable Params: {total_params}")
return total_params
count_parameters(model)
###Output
+------------+------------+
| Modules | Parameters |
+------------+------------+
| 0.0.weight | 200 |
| 0.0.bias | 8 |
| 1.0.weight | 1152 |
| 1.0.bias | 16 |
| 2.0.weight | 4608 |
| 2.0.bias | 32 |
| 3.0.weight | 9216 |
| 3.0.bias | 32 |
| 6.weight | 320 |
| 6.bias | 10 |
+------------+------------+
Total Trainable Params: 15594
###Markdown
There are way more activations than parameters at each layer, because this is a convolutional network! The size of the weights (kernel) doesn't scale with the number of activations - they're shared far more than in a fully connected network (a quick numeric check follows in the next cell). Look at how most activations are in the bottom bins
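###Markdown
A rough check of the claim above (a sketch, assuming the batch size of 512 used earlier): a single hidden conv layer emits hundreds of thousands of activation values per batch from barely a thousand weights.
###Code
conv = nn.Conv2d(8, 16, 3, padding=1, stride=2)        # same shape as the second conv layer above
x = torch.randn(512, 8, 14, 14)                        # one batch of intermediate feature maps
activations = conv(x).numel()
parameters = sum(p.numel() for p in conv.parameters())
print(activations, parameters)                         # ~400k activations vs ~1.2k parameters
###Output
_____no_output_____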
###Code
def get_percentage_of_min_activations(hook, n_bottom_bins=2):
hist = hook.stats[2]
hist = torch.stack(hist).t().float() # shape [num_bins, num_steps]
return hist[:n_bottom_bins].sum(0) / hist.sum(0) # shape [num_steps]
def plot_percentage_of_bottom_bins(hooks, n_bottom_bins=1, num_bins=None):
_, axes = plt.subplots(2, 2, figsize=(15, 10))
for i, (ax, hook) in enumerate(zip(axes.flatten(), hooks[:4])):
ax.plot(get_percentage_of_min_activations(hook, n_bottom_bins))
ax.set_ylim(0,1)
ax.set_ylabel(f'percentage of activations in the bottom {n_bottom_bins} out of {num_bins} bins')
ax.set_xlabel('n-th batch')
ax.set_title(f'layer {i+1}')
plot_percentage_of_bottom_bins(hooks, n_bottom_bins=1, num_bins=num_bins)
###Output
_____no_output_____
###Markdown
This is not good - as you can see, most of the activations are in the bottom bin (activation values close to 0). Improve ConvNet Generalized ReLU
###Code
#export
class GeneralReLU(nn.Module):
def __init__(self, leak=None, subtract_value=None, max_value=None):
super().__init__()
self.fn = nn.LeakyReLU(negative_slope=leak) if leak else nn.ReLU()
self.subtract_value = subtract_value
self.max_value = max_value
def forward(self, x):
x = self.fn(x)
if self.subtract_value: x = x - self.subtract_value
if self.max_value: x = x.clamp_max_(self.max_value)
return x
general_relu = partial(GeneralReLU, leak=0.1, subtract_value=0.4, max_value=6.)
def conv2d(in_channels, out_channels, kernel_size, padding=1, stride=2):
return nn.Conv2d(in_channels, out_channels, kernel_size, padding=kernel_size//2, stride=stride)
def get_cnn_layers(out_channels, output_dim, activation_fn):
out_channels = [1] + out_channels
conv_layers = [ nn.Sequential(conv2d(out_channels[i], out_channels[i+1], 5 if i==0 else 3), activation_fn()) for i in range(len(out_channels)-1) ]
cnn_layers = conv_layers + [nn.AdaptiveAvgPool2d(1), Lambda(flatten), nn.Linear(out_channels[-1], output_dim)]
return cnn_layers
def get_cnn_model(out_channels, output_dim, activation_fn):
model = nn.Sequential(*get_cnn_layers(out_channels, output_dim, activation_fn))
kaiming_init(model, uniform=True)
return model
def show_histograms(hooks):
_, axes = plt.subplots(2, 2, figsize=(15, 10))
for i, (ax, hook) in enumerate(zip(axes.flatten(), hooks[:4])):
hist = get_hist(hook)
ax.imshow(hist, origin="lower")
ax.set_xlabel('n-th batch')
ax.set_ylabel(f'n-th bin')
ax.set_title(f'layer {i+1}')
print("total steps done: ", hook.steps)
plt.tight_layout()
# Adjust the learning rate
one_cycle_get_lr = combine_schedulers([0.5, 0.5], [cos_scheduler(0.2, 1), cos_scheduler(1, 0.1)])
hyperparam_scheduler_cb_fn = partial(HyperParamScheduler, 'lr', one_cycle_get_lr)
plt.plot(torch.arange(0, 100), [one_cycle_get_lr(pos) for pos in torch.linspace(0, 1, 100)])
callback_funcs = [Recorder, accuracy_cb_fn, hyperparam_scheduler_cb_fn, mnist_batch_transform, cuda_callback]
###Output
_____no_output_____
###Markdown
Note: `activation_lower_bound` and `activation_upper_bound` could use some refactoring, but keep in mind that everything that has to do with histograms traces back to `hook.stats[2]`. So `activation_lower_bound` and `activation_upper_bound` are set in `append_stats`.
###Code
out_channels = [8, 16, 32, 32]
output_dim = c
general_relu = partial(GeneralReLU, leak=0.1, subtract_value=0.4, max_value=6.)
activation_fn = general_relu
model = get_cnn_model(out_channels, output_dim, activation_fn)
runner, learner = get_train(data, model, lr=0.2, cb_fns=callback_funcs)
with Hooks(learner.model, append_stats, activation_lower_bound=-7, activation_upper_bound=7, num_bins=40) as hooks:
runner.fit(learner, epochs=1)
show_histograms(hooks)
plot_stds_and_means(hooks)
def get_percentage_of_min_activations(hook, near_zero_bins=(0,1)):
hist = hook.stats[2]
hist = torch.stack(hist).t().float() # shape [num_bins, num_steps]
return hist[near_zero_bins[0]:near_zero_bins[1]].sum(0) / hist.sum(0) # shape [num_steps]
def plot_percentage_near_zero(hooks, near_zero_bins=(0,1), num_bins=None):
_, axes = plt.subplots(2, 2, figsize=(15, 10))
for i, (ax, hook) in enumerate(zip(axes.flatten(), hooks[:4])):
ax.plot(get_percentage_of_min_activations(hook, near_zero_bins))
ax.set_ylim(0,1)
ax.set_ylabel(f'percentage of activations near zero')
ax.set_xlabel('n-th batch')
ax.set_title(f'layer {i+1}')
plot_percentage_near_zero(hooks, near_zero_bins=(19,21), num_bins=num_bins)
###Output
_____no_output_____
###Markdown
We are wasting a lot fewer activations now!
###Code
# !python drive/MyDrive/fastai_foundation/notebook2script.py drive/MyDrive/Colab\ Notebooks/fastai_foundation_notes_3.ipynb
###Output
_____no_output_____ |
dev/notebooks/FirenzeCard_Stories_MM-1.ipynb | ###Markdown
Path analysis
###Code
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from pylab import *
# import igraph as ig # Need to install this in your virtual environment
from re import sub
import editdistance # Needs to be installed. Usage: editdistance.eval('banana', 'bahama')
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import dendrogram, linkage
import seaborn as sns  # used for the jointplots near the end of this notebook
import sys
sys.path.append('../../src/')
from utils.database import dbutils
conn = dbutils.connect()
cursor = conn.cursor()
# Helper function for making summary tables/distributions
def frequency(dataframe,columnname):
out = dataframe[columnname].value_counts().to_frame()
out.columns = ['frequency']
out.index.name = columnname
out.reset_index(inplace=True)
out.sort_values('frequency',inplace=True,ascending=False)
out['cumulative'] = out['frequency'].cumsum()/out['frequency'].sum()
out['ccdf'] = 1 - out['cumulative']
return out
nodes = pd.read_sql('select * from optourism.firenze_card_locations', con=conn)
# df = pd.read_csv('../src/output/firenzedata_feature_extracted.csv')
# df['museum_id'].replace(to_replace=39,value=38,inplace=True)
# df['date'] = pd.to_datetime(df['entry_time'], format='%Y-%m-%d %H:%M:%S').dt.date
# df['hour'] = pd.to_datetime(df['date']) + pd.to_timedelta(pd.to_datetime(df['entry_time'], format='%Y-%m-%d %H:%M:%S').dt.hour, unit='h')
# df.columns
# df.iloc[:,[0,2,3,9,10,11,12,13,14,15,16,17]].head()
# frequency(df,'is_in_museum_37')
# frequency(df,'entrances_per_card_per_museum')
# frequency(df,'total_duration_card_use')
df = pd.read_sql('select * from optourism.firenze_card_logs', con=conn)
df['museum_id'].replace(to_replace=39,value=38,inplace=True)
df['short_name'] = df['museum_id'].replace(dict(zip(nodes['museum_id'],nodes['short_name'])))
df['string'] = df['museum_id'].replace(dict(zip(nodes['museum_id'],nodes['string'])))
df['date'] = pd.to_datetime(df['entry_time'], format='%Y-%m-%d %H:%M:%S').dt.date
df['hour'] = pd.to_datetime(df['date']) + pd.to_timedelta(pd.to_datetime(df['entry_time'], format='%Y-%m-%d %H:%M:%S').dt.hour, unit='h')
df['total_people'] = df['total_adults'] + df['minors']
df.head()
###Output
_____no_output_____
###Markdown
I propose distinguishing _paths_ from _flows_. A path is an itinerary, and the flow is the number of people who take the path. E.g., a family or a tour group produces one path, but adds multiple people to the overall flow. We now build a transition graph, a directed graph where an edge represents a person going from one museum to another *within the same day*. We also produce the *transition matrix,* a row-normalized n-by-n matrix of the frequency of transition from the row node to the column node. If you take a vector of the current volumes in each location, and multiply that by the transition matrix, you get a prediction for the number of people on each node at the next time (a small sketch of this matrix is appended to the next code cell). This prediction can be refined with corrections for daily/weekly patterns and such. Path analysis To make paths: We want a dataframe with user, the museum they went from and the museum they went to, the number of people on the card, and the time of entry to the next museum. We will drop much of this data in creating paths, which will be concatenations of single-character codes for each museum. To track the first visit per day, we add a dummy "source" node that everybody starts each day from. We give it the character code " ", and can then split(" ") along it.
###Code
df4 = df.groupby(['user_id','entry_time','date','hour','short_name','string']).sum()['total_people'].to_frame() # Need to group in this order to be correct further down
df4.reset_index(inplace=True)
df4['from'] = 'source' # Initialize 'from' column with 'source'
df4['to'] = df4['short_name'] # Copy 'to' column with row's museum_name
make_link = (df4['user_id'].shift(1)==df4['user_id'])&(df4['date'].shift(1)==df4['date']) # Row indexes at which to overwrite 'source'
df4['from'][make_link] = df4['short_name'].shift(1)[make_link]
df4['s'] = ' ' # Initialize 'from' column with 'source'
df4['t'] = df4['string'] # Copy 'to' column with row's museum_name
df4['s'][make_link] = df4['string'].shift(1)[make_link]
# Concatenating the source column is not enough, it leaves out the last place in the path.
# Need to add a second 'source' column that, for the last item in a day's path, contains two characters.
df4['path'] = df4['s']
df4['path'][df4['from'].shift(-1)=='source'] = (df4['path'] + df4['t'])[df4['from'].shift(-1)=='source']
# Note: the above trick doesn't work for the last row of data. So, do this as well:
df4.iloc[-1:]['path'] = df4.iloc[-1:]['s'] + df4.iloc[-1:]['t']
df4.head()
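# Sketch of the transition matrix described above, using the 'from'/'to' columns just built:
# row-normalized counts, so row i approximates the distribution of the next museum visited
# after museum i within the same day.
transition_matrix = pd.crosstab(df4['from'], df4['to'], normalize='index')
print(transition_matrix.round(2).head())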
df5 = df4.groupby('user_id')['path'].sum().to_frame() # sum() on strings concatenates
df5.head()
df6 = df5['path'].apply(lambda x: pd.Series(x.strip().split(' '))) # Now split along strings. Takes a few seconds.
df6.head() # Note: 4 columns is correct, Firenze card is *72 hours from first use*, not from midnight of the day of first use!
# df6.head(50) # Data stories just fall out! People traveling together, splitting off, etc. We assume this but strong coupling is hard to ignore.
# Ordered paths
fr1 = frequency(df5,'path')
fr1.head(20) # INSIGHT: the top 15 paths are permutations of Duomo, Uffizi, Accademia. But they are a very small fraction of the total.
fr1.iloc[0:50].plot.bar(x='path',y='frequency',figsize=(24,10))
plt.title('Most common total Firenze card paths (ordered set)')
plt.xlabel('x = Encoded path')
plt.ylabel('Number of cards with total path x')
# plt.yscale('log')
plt.show()
# nodes # To make a legend
df7 = df5['path'].apply(lambda x: ''.join(sorted(list(sub(' ','',x))))).to_frame()
df7.head()
fr2 = frequency(df7,'path')
fr2.head()
fr2.iloc[0:50].plot.bar(x='path',y='frequency',figsize=(24,10))
plt.title('Most common set of museums visited on Firenze card (unordered paths)')
plt.xlabel('x = Set of encoded museums')
plt.ylabel('Number of cards with total set x')
plt.show()
# How many cards have differing numbers of minors across their entries?
df8 = df.groupby(['user_id','entry_time','short_name'])['minors'].sum().reset_index()[['user_id','minors']].groupby('user_id').nunique()['minors'].to_frame()
df8.columns = ['unique_counts_of_minors']
df8.head()
frequency(df8, 'unique_counts_of_minors')
df.set_index('user_id').loc[df8[df8['unique_counts_of_minors']>2].index][['short_name','total_adults','minors']].head()
df5[(df5.index<2030243)&(df5.index>2030238)]
# What are the variable numbers of minors? Is it always just 0 vs 1? No, we see more variety.
df[df['user_id'].isin(df8[df8['unique_counts_of_minors']>2].index)][['user_id','entry_time','short_name','minors']].groupby(['user_id','entry_time','short_name'])['minors'].sum().reset_index()[['user_id','minors']].groupby('user_id')['minors'].value_counts().head(20)
cards = df.groupby('user_id').agg({'short_name':'nunique',
'total_adults':'sum',
'minors':'max',
})
# cards = df.groupby('user_id').agg({# 'entrances_per_card_per_museum':'sum',
# 'museum_id':'nunique', # Should be equal to sum of set of is_in columns
# # 'total_duration_card_use':'max',
# 'total_adults':'sum', # This sum should be equal to 'entrances_per_card_per_museum'
# 'is_in_museum_1':'max',
# 'is_in_museum_2':'max',
# 'is_in_museum_3':'max',
# 'is_in_museum_4':'max',
# 'is_in_museum_5':'max',
# 'is_in_museum_6':'max',
# 'is_in_museum_7':'max',
# 'is_in_museum_8':'max',
# 'is_in_museum_9':'max',
# 'is_in_museum_10':'max',
# 'is_in_museum_11':'max',
# 'is_in_museum_12':'max',
# 'is_in_museum_13':'max',
# 'is_in_museum_14':'max',
# 'is_in_museum_15':'max',
# 'is_in_museum_16':'max',
# 'is_in_museum_17':'max',
# 'is_in_museum_18':'max',
# 'is_in_museum_19':'max',
# 'is_in_museum_20':'max',
# 'is_in_museum_21':'max',
# 'is_in_museum_22':'max',
# 'is_in_museum_23':'max',
# 'is_in_museum_24':'max',
# 'is_in_museum_25':'max',
# 'is_in_museum_26':'max',
# 'is_in_museum_27':'max',
# 'is_in_museum_28':'max',
# 'is_in_museum_29':'max',
# 'is_in_museum_30':'max',
# 'is_in_museum_31':'max',
# 'is_in_museum_32':'max',
# 'is_in_museum_33':'max',
# 'is_in_museum_34':'max',
# 'is_in_museum_35':'max',
# 'is_in_museum_36':'max',
# 'is_in_museum_37':'max',
# 'is_in_museum_38':'max'
# })
# # Reorder correctly
# cards = cards[[# 'entrances_per_card_per_museum',
# 'museum_id',
# # 'total_duration_card_use',
# 'total_adults',
# 'is_in_museum_1',
# 'is_in_museum_2',
# 'is_in_museum_3',
# 'is_in_museum_4',
# 'is_in_museum_5',
# 'is_in_museum_6',
# 'is_in_museum_7',
# 'is_in_museum_8',
# 'is_in_museum_9',
# 'is_in_museum_10',
# 'is_in_museum_11',
# 'is_in_museum_12',
# 'is_in_museum_13',
# 'is_in_museum_14',
# 'is_in_museum_15',
# 'is_in_museum_16',
# 'is_in_museum_17',
# 'is_in_museum_18',
# 'is_in_museum_19',
# 'is_in_museum_20',
# 'is_in_museum_21',
# 'is_in_museum_22',
# 'is_in_museum_23',
# 'is_in_museum_24',
# 'is_in_museum_25',
# 'is_in_museum_26',
# 'is_in_museum_27',
# 'is_in_museum_28',
# 'is_in_museum_29',
# 'is_in_museum_30',
# 'is_in_museum_31',
# 'is_in_museum_32',
# 'is_in_museum_33',
# 'is_in_museum_34',
# 'is_in_museum_35',
# 'is_in_museum_36',
# 'is_in_museum_37',
# 'is_in_museum_38'
# ]]
# # Rename appropriately
# cards.columns = [# 'entrances',
# 'museums_visited',
# # 'use_duration',
# 'entrances_2',
# 'visited_museum_1',
# 'visited_museum_2',
# 'visited_museum_3',
# 'visited_museum_4',
# 'visited_museum_5',
# 'visited_museum_6',
# 'visited_museum_7',
# 'visited_museum_8',
# 'visited_museum_9',
# 'visited_museum_10',
# 'visited_museum_11',
# 'visited_museum_12',
# 'visited_museum_13',
# 'visited_museum_14',
# 'visited_museum_15',
# 'visited_museum_16',
# 'visited_museum_17',
# 'visited_museum_18',
# 'visited_museum_19',
# 'visited_museum_20',
# 'visited_museum_21',
# 'visited_museum_22',
# 'visited_museum_23',
# 'visited_museum_24',
# 'visited_museum_25',
# 'visited_museum_26',
# 'visited_museum_27',
# 'visited_museum_28',
# 'visited_museum_29',
# 'visited_museum_30',
# 'visited_museum_31',
# 'visited_museum_32',
# 'visited_museum_33',
# 'visited_museum_34',
# 'visited_museum_35',
# 'visited_museum_36',
# 'visited_museum_37',
# 'visited_museum_38']
cards.head(20)
pd.DataFrame([cards.iloc[:,1:39].sum(axis=1),cards['entrances_2']]) # consistency check between indicator sums and entrance counts (assumes the commented-out wide aggregation above)
# Two tasks. First, do clustering on these people,
X = cards.iloc[:,1:39].to_numpy()  # matrix of per-museum indicators (assumes the wide columns from the commented-out aggregation above)
Z = linkage(y=X, method='single', metric='jaccard')
df['museum_name'].nunique()
df['museum_id'].nunique()
df['short_name'].nunique()
df7 = df5['s2'].apply(lambda x: pd.Series(len(sub(' ','',x))))
df7.head()
df7.sort_values(0,ascending=False).head(10)
df6.loc[df7.sort_values(0,ascending=False).head(10).index]
fr2 = frequency(df7,0)
fr2.head()
f, ax = plt.subplots(figsize=(6,5), dpi=300)
ax.stem(fr2[0],fr2['frequency'], linestyle='steps--')
# yscale('log')
# xscale('log')
ax.set_title('Number of museum visits by Florence Card')
ax.set_ylabel('Frequency')
ax.set_xlabel('Number of museums')
plt.show()
# NOTE: This is the number of *visits*, not people on those cards!!
# (And, not number of museums visited, this counts multiple visits to the same museum as distinct)
df8 = df.groupby(['user_id','short_name','entry_time']).sum()['total_adults'].to_frame()
df8.head()
# Cards with more than one entrance to same museum
df9 = df.groupby(['user_id','short_name']).sum()['total_adults'].to_frame()
df9.columns = ['number_of_entries']
df9['number_of_entries'] = df9['number_of_entries']
df9[df9['number_of_entries']>1].head(50)
df8.shape[0] # Number of entries
df9.shape[0] # 12 repeat visits. Negligible.
df9[df9['number_of_entries']==1].shape[0]
df9[df9['number_of_entries']==2].shape[0]
df9[df9['number_of_entries']>2]
# # This is the number of people who entered on each card entry, not the number of repeat entries!
# frequency(df.groupby(['user_id','short_name',]).count()['entry_time'].to_frame(),'entry_time')
df9 = df7.reset_index()
df10 = df8.reset_index()
df11 = df9.merge(df10).groupby('user_id').sum()
df11.columns = ['visits','total_people']
df11['persons_per_visit'] = df11['total_people']/df11['visits']
df11.head()
# df11[df11['persons_per_visit']>1].plot.scatter(x='visits',y='persons_per_visit')
###Output
_____no_output_____
###Markdown
We now want the following: a measure of similarity between adjacent rows, for detecting people traveling together (making the assumption that they bought Firenze cards consecutively). This is simplest to do naively: not to use anything statistical, but just fuzzy matching through _edit distance_, which is the number of operations (insertions, deletions, swaps) needed to change one string into another (or, operations on list elements to change one list to another). Since there are 3 days, and since we want slight deviations in otherwise identical large itineraries to count less, we calculate the following: a column with the edit distance between each pair of days between rows, summed, followed by a column with the total number of visits per row.
###Code
# edit = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))
df6.fillna('',inplace=True)
df6.iloc[0:10]
def editdist(pair):
return editdistance.eval(pair[0],pair[1])
df7 = pd.concat([df6,df6.shift()],axis=1)
df7.columns = ['0','1','2','3','0+','1+','2+','3+']
df7.head()
# df8 = df7.iloc[:,[0,4,1,5,2,6,3,7]]
# df8.columns = ['0','0+','1','1+','2','2+','3','3+']
# df8.columns = ['0','0+','1','1+','2','2+','3','3+']
# df8.head()
df7['total_edit_distance'] = df7[['0','0+']].apply(editdist,axis=1) + df7[['1','1+']].apply(editdist,axis=1) + df7[['2','2+']].apply(editdist,axis=1) + df7[['3','3+']].apply(editdist,axis=1)
df7.head()
df7['len'] = df7['0'].str.len() + df7['1'].str.len() + df7['2'].str.len() + df7['3'].str.len()
df7['len+'] = df7['0+'].str.len() + df7['1+'].str.len() + df7['2+'].str.len() + df7['3+'].str.len()
df7['len_tot'] = df7['len'] + df7['len+']
df7.head()
fr3 = frequency(df7[df7['total_edit_distance']==0],'len_tot')
fr3
frequency(df7[df7['total_edit_distance']==0],'len_tot')
df8 = df7.reset_index(inplace=False)
df8.reset_index(inplace=True)
df8.head()
# df7[df7['total_edit_distance']==0].hist('len_tot',bins=100, grid=False, figsize=[16,8])
f, ax = plt.subplots(figsize=(12,5), dpi=300)
ax.stem(fr3['len_tot']/2,fr3['frequency'], linestyle='steps--')
# yscale('log')
# xscale('log')
ax.set_title('Number of museums in perfectly matched consecutive paths')
ax.set_ylabel('Number of cards')
ax.set_xlabel('Number of museums')
plt.show()
# NOTE: This is the number of *visits*, not people on those cards!!
# (And, not number of museums visited, this counts multiple visits to the same museum as distinct)
# df8.hist('user_id',bins=1000,figsize=[8,8])
# df8[df8['user_id']>1500000].hist('user_id',bins=1000,figsize=[8,8])
# df8.plot.scatter(x='index',y='total_edit_distance',figsize=[16,16], c=2+(df8['total_edit_distance']>0))
# sns.jointplot(x="index", y="total_edit_distance", data=df8)#, hue=(df9['total_edit_distance']==0))
# sns.jointplot(x="index", y="total_edit_distance", data=df8, kind='hex')
sns.jointplot(x="total_edit_distance", y="len_tot", data=df8)
sns.jointplot(x="total_edit_distance", y="len_tot", data=df8, kind='hex')
sns.jointplot(x="total_edit_distance", y="len_tot", data=df8, kind='kde')
###Output
_____no_output_____
###Markdown
Now, need to extract consecutive rows of zero edit distance.
###Code
df8['dist_gt_0'] = 1*(df8['total_edit_distance'] != 0)
# df8['offset'] = 1*(df8['zero_dist'] + df8['zero_dist'].shift()==0)
df8['group'] = cumsum(df8['dist_gt_0'])
df8.head(50)
df9 = df8[['group','user_id']].groupby('group').count()
df9.columns = ['people']
df9.head()
frequency(df9,'people')
# # The code below was my attempt to get a node for starting the day and ending the day from the paths.
# # The problem is that this gives the number of _cards_, not number of people! I had to go back to the
# # dynamic edgelist construction anyway.
# df6.head()
# df9 = df5['s2'].apply(lambda x: pd.Series(x.strip().split(' ')))
# df9.fillna(' ',inplace=True)
# df9['0_first'] = df9[0].apply(lambda x: pd.Series(x[0]))
# df9['0_last'] = df9[0].apply(lambda x: pd.Series(x[-1]))
# df9['0_len'] = df9[0].apply(lambda x: pd.Series(len(x)))
# df9['1_first'] = df9[1].apply(lambda x: pd.Series(x[0]))
# df9['1_last'] = df9[1].apply(lambda x: pd.Series(x[-1]))
# df9['1_len'] = df9[1].apply(lambda x: pd.Series(len(x)))
# df9['2_first'] = df9[2].apply(lambda x: pd.Series(x[0]))
# df9['2_last'] = df9[2].apply(lambda x: pd.Series(x[-1]))
# df9['2_len'] = df9[2].apply(lambda x: pd.Series(len(x)))
# df9['3_first'] = df9[3].apply(lambda x: pd.Series(x[0]))
# df9['3_last'] = df9[3].apply(lambda x: pd.Series(x[-1]))
# df9['3_len'] = df9[3].apply(lambda x: pd.Series(len(x)))
# df9.head()
# df9.replace(' ',np.nan,inplace=True)
# df9.head()
# from_home = frequency(df9[['0_first','1_first','2_first','3_first']].stack().to_frame(),0)[[0,'frequency']]
# from_home.columns = ['0','from_home']
# from_home.set_index('0',inplace=True)
# from_home.head()
# only = frequency(pd.concat(
# [df9[(df9['0_len']==1)&(df9['0_first'].notnull())]['0_first'],
# df9[(df9['1_len']==1)&(df9['1_first'].notnull())]['1_first'],
# df9[(df9['2_len']==1)&(df9['2_first'].notnull())]['2_first'],
# df9[(df9['3_len']==1)&(df9['3_first'].notnull())]['3_first']
# ],axis=0).to_frame()
# ,0)[[0,'frequency']]
# only.columns = ['0','only']
# only.set_index('0',inplace=True)
# only.head()
# to_home = frequency(df9[['0_last','1_last','2_last','3_last']].stack().to_frame(),0)[[0,'frequency']]
# to_home.columns = ['0','to_home']
# to_home.set_index('0',inplace=True)
# to_home.head()
# from_to_home = nodes.set_index('string')['short_name'].to_frame().join([from_home,to_home,only])
# from_to_home.set_index('short_name',inplace=True)
# from_to_home.columns = ['home_to_node','node_to_home','only_visit_of_day']
# # from_to_home['from_home'] = from_to_home['from_home_incl_only'] - from_to_home['only_visit_of_day']
# # from_to_home['to_home'] = from_to_home['to_home_incl_only'] - from_to_home['only_visit_of_day']
# from_to_home.head()
# from_to_home['home_to_node'].sort_values(ascending=False).to_frame().head(20)
# from_to_home['node_to_home'].sort_values(ascending=False).to_frame().head(20)
# from_to_home.reset_index(inplace=True)
# from_to_home
# supp_edges = pd.DataFrame({'from':['home']*from_to_home.shape[0] + from_to_home['short_name'].tolist(),
# 'to':from_to_home['short_name'].tolist() + ['home']*from_to_home.shape[0],
# 'weight':from_to_home['home_to_node'].tolist() + from_to_home['node_to_home'].tolist() })
# supp_edges.dropna(how='any',inplace=True)
# supp_edges
frequency(df6,0).head()
frequency(df6,1).head()
frequency(df6,2).head()
frequency(df6,3).head()
###Output
_____no_output_____
###Markdown
Now, I want a set of scatterplots between these frequencies.
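###Code
# Sketch: scatter of day-1 vs day-2 daily-path frequencies, using the frequency() helper
# defined at the top of this notebook (column labels 0 and 1 are the day columns of df6).
f0 = frequency(df6, 0)[[0, 'frequency']].rename(columns={0: 'path', 'frequency': 'day1'})
f1 = frequency(df6, 1)[[1, 'frequency']].rename(columns={1: 'path', 'frequency': 'day2'})
f0.merge(f1, on='path').plot.scatter(x='day1', y='day2')
###Output
_____no_output_____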
###Code
pt = pd.concat([frequency(df6,0),frequency(df6,1),frequency(df6,2),frequency(df6,3)])
pt['daily_path'] = pt[0].replace(np.nan, '', regex=True) + pt[1].replace(np.nan, '', regex=True) + pt[2].replace(np.nan, '', regex=True) + pt[3].replace(np.nan, '', regex=True)
pt.drop([0,1,2,3,'ccdf','cumulative'],axis=1,inplace=True)
pt.head()
pt2 = pt.groupby('daily_path').sum()
pt2.sort_values('frequency', inplace=True, ascending=False)
pt2.head()
pt2[pt2['frequency']>200].plot.bar(figsize=(16,8))
plt.title('Most common daily Firenze card paths across all days')
plt.xlabel('x = Encoded path')
plt.ylabel('Number of cards with daily path x')
# plt.yscale('log')
plt.show()
nodes.head()
# For reference, here are the displayed museums
# nodes[['string','short_name']].set_index('string').reindex(['D','P','U','A','V','T','N','C','G','B','S','c','m','M','b','Y','2'])
nodes[nodes['string'].isin(['D','P','U','A','V','T','N','C','G','B','S','c','m','M','b','Y','2'])][['string','short_name']]
df6[pd.isnull(df6[0].str[0])].head()
df6.to_csv('encoded_paths.csv')
nodes.to_csv('encoded_paths_legend.csv')
df6.values
###Output
_____no_output_____ |
src/数据清洗篇/工具介绍/pandas/pandas的序列对象.ipynb | ###Markdown
The pandas Series object. A pandas [Series](https://pandas.pydata.org/pandas-docs/stable/reference/series.html) is a wrapper around a 1-dimensional numpy array with an index attached. Creating a `Series`: there are two ways to create a Series + from a sequence, which can be an iterator, a list, a dict or a 1-dimensional numpy array; when created from a dict the keys become the index, for the other sequences the index defaults to the positions + from a single fixed value, in which case every position holds that value, but then the index (the second positional argument) must be supplied. We can also use the `index` parameter to set the index of a series by hand
###Code
import pandas as pd
import numpy as np
a = pd.Series([1,2,3])
a
b = pd.Series(np.array([1,2,3]))
b
c = pd.Series({"a":1,"b":2,"c":3})
c
d = pd.Series(4,range(5))
d
###Output
_____no_output_____
###Markdown
Naming a series. A Series object can be given a name with the `name` parameter
###Code
s = pd.Series(np.random.randn(5), name='取名字真难')
s
###Output
_____no_output_____
###Markdown
Element type. Just like a numpy array, a `Series` is a homogeneous, fixed-length iterable data structure; its element type can be inspected with `dtype`
###Code
a.dtype
###Output
_____no_output_____
###Markdown
To change the type, use the `astype(dtype, copy=True, errors='raise', **kwargs)` interface + the `dtype` parameter accepts the union of numpy array dtypes and Python object types, plus one extra `category` type that marks the data as categorical (a finite set of categories) + the `copy` parameter controls whether the original object is modified or a new object built from its contents is returned + the `errors` parameter controls how errors met during conversion are handled: `raise` raises the error, `ignore` skips the error and returns the original data
###Code
e = pd.Series(["a","1","3"])
e.astype("int64")
e.astype("int64",errors="ignore")
###Output
_____no_output_____
###Markdown
Inferring the element type. `pandas.api.types.infer_dtype()` infers the type of the data; its return value can be one of```string unicode bytes floating integer mixed-integer mixed-integer-float decimal complex categorical boolean datetime64 datetime date timedelta64 timedelta time period mixed```There are also dedicated predicate functions for individual types, including:+ `pandas.api.types.is_bool_dtype()`+ `pandas.api.types.is_categorical_dtype()`+ `pandas.api.types.is_complex_dtype()`+ `pandas.api.types.is_datetime64_any_dtype()`+ `pandas.api.types.is_datetime64_dtype()`+ `pandas.api.types.is_datetime64_ns_dtype()`+ `pandas.api.types.is_datetime64tz_dtype()`+ `pandas.api.types.is_extension_array_dtype()`+ `pandas.api.types.is_float_dtype()`+ `pandas.api.types.is_int64_dtype()`+ `pandas.api.types.is_integer_dtype()`+ `pandas.api.types.is_interval_dtype()`+ `pandas.api.types.is_numeric_dtype()`+ `pandas.api.types.is_object_dtype()`+ `pandas.api.types.is_period_dtype()`+ `pandas.api.types.is_signed_integer_dtype()`+ `pandas.api.types.is_string_dtype()`+ `pandas.api.types.is_timedelta64_dtype()`+ `pandas.api.types.is_timedelta64_ns_dtype()` (a short demo follows in the next cell, before the iteration examples) Iteration. Like a list, a `Series` is an iterable object; iterating it directly yields the values, while the `items()` interface yields index/value pairs, just like a dict
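###Code
# short demo (sketch) of the type-inference helpers described above
print(pd.api.types.infer_dtype(pd.Series(["a", "1", "3"])))   # 'string'
print(pd.api.types.infer_dtype(pd.Series([1.0, 2, 3])))       # 'floating'
print(pd.api.types.is_integer_dtype(pd.Series([1, 2, 3])))    # True
###Output
_____no_output_____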
###Code
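# A quick sketch of the dtype-inference helpers described above; `infer_dtype`
# and `is_integer_dtype` are part of `pandas.api.types`:
print(pd.api.types.infer_dtype(c)) # -> 'integer'
print(pd.api.types.is_integer_dtype(c)) # -> True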
for i in enumerate(s):
print(i)
for k,v in c.items():
print(f"{k}:{v}")
###Output
a:1
b:2
c:3
###Markdown
Accessing values A `Series` can be indexed by label, like a dict, or by position, like a list
###Code
c["a"]
c[1]
###Output
_____no_output_____
###Markdown
Vectorized operations and label alignment Loops are usually unnecessary when working with raw numpy arrays, and the same is true when working with Series in pandas. A Series works with most of numpy's Universal Functions.
###Code
b+2
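# Label-alignment sketch (the indexes here are chosen purely for illustration):
# arithmetic aligns on index labels, and labels present in only one Series give NaN.
s1 = pd.Series([1, 2, 3], index=["a", "b", "c"])
s2 = pd.Series([10, 20, 30], index=["b", "c", "d"])
print(s1 + s2)
# Most numpy universal functions also apply element-wise to a Series:
np.exp(b)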
###Output
_____no_output_____ |
01-Experimentation/sdk-custom-job-tb.ipynb | ###Markdown
Custom Training Job with Tensorboard MonitoringThis notebook demonstrates how to submit a custom Vertex training job and monitor it using Vertex TensorBoard. ScenarioThe training scenario is fine-tuning BERT on the [GLUE COLA](https://nyu-mll.github.io/CoLA/) dataset. Notes- The training regimen utilizes [TensorFlow NLP Modelling Toolkit](https://github.com/tensorflow/models/tree/master/official/nlp)- Due to the complexity of the ML task, the custom training job is configured to use a multi-GPU training node
###Code
import os
import numpy as np
import time
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
import tensorflow_text as text # A dependency of the preprocessing model
import tensorflow_addons as tfa
from official.nlp import optimization
from google.cloud import aiplatform as vertex_ai
from google.cloud.aiplatform.utils import TensorboardClientWithOverride
from google.cloud.aiplatform_v1beta1.types import Tensorboard
tf.get_logger().setLevel('ERROR')
###Output
_____no_output_____
###Markdown
Environment setup
###Code
PROJECT = 'jk-mlops-dev'
REGION = 'us-central1'
STAGING_BUCKET = 'gs://jk-vertex-workshop-bucket'
VERTEX_SA = '[email protected]'
###Output
_____no_output_____
###Markdown
Configure data preprocessing Make BERT data preprocessing model
###Code
TFHUB_HANDLE_PREPROCESS = 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3'
def make_bert_preprocess_model(sentence_features, seq_length=128):
"""Returns a model mapping string features to BERT inputs."""
input_segments = [
tf.keras.layers.Input(shape=(), dtype=tf.string, name=ft)
for ft in sentence_features]
# Tokenize the text to word pieces.
bert_preprocess = hub.load(TFHUB_HANDLE_PREPROCESS)
tokenizer = hub.KerasLayer(bert_preprocess.tokenize, name='tokenizer')
segments = [tokenizer(s) for s in input_segments]
# Pack inputs. The details (start/end token ids, dict of output tensors)
# are model-dependent, so this gets loaded from the SavedModel.
packer = hub.KerasLayer(bert_preprocess.bert_pack_inputs,
arguments=dict(seq_length=seq_length),
name='packer')
model_inputs = packer(segments)
return tf.keras.Model(input_segments, model_inputs)
###Output
_____no_output_____
###Markdown
Try BERT data preprocessing model
###Code
test_preprocess_model = make_bert_preprocess_model(['sentence1', 'sentence2'])
test_text = [np.array(['some random test sentence']),
np.array(['another random sentence'])]
text_preprocessed = test_preprocess_model(test_text)
print('Keys : ', list(text_preprocessed.keys()))
print('Shape Word Ids : ', text_preprocessed['input_word_ids'].shape)
print('Word Ids : ', text_preprocessed['input_word_ids'][0, :16])
print('Shape Mask : ', text_preprocessed['input_mask'].shape)
print('Input Mask : ', text_preprocessed['input_mask'][0, :16])
print('Shape Type Ids : ', text_preprocessed['input_type_ids'].shape)
print('Type Ids : ', text_preprocessed['input_type_ids'][0, :16])
###Output
INFO:absl:Using /tmp/tfhub_modules to cache modules.
Keys : ['input_word_ids', 'input_mask', 'input_type_ids']
Shape Word Ids : (1, 128)
Word Ids : tf.Tensor(
[ 101 2070 6721 3231 6251 102 2178 6721 6251 102 0 0 0 0
0 0], shape=(16,), dtype=int32)
Shape Mask : (1, 128)
Input Mask : tf.Tensor([1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0], shape=(16,), dtype=int32)
Shape Type Ids : (1, 128)
Type Ids : tf.Tensor([0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0], shape=(16,), dtype=int32)
###Markdown
Visualize BERT data preprocessing model
###Code
tf.keras.utils.plot_model(test_preprocess_model)
###Output
_____no_output_____
###Markdown
Configure the text classification model
###Code
TFHUB_HANDLE_ENCODER = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3'
def build_classifier_model(num_classes, dropout_ratio):
"""Creates a text classification model based on BERT encoder."""
inputs = dict(
input_word_ids=tf.keras.layers.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.layers.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.layers.Input(shape=(None,), dtype=tf.int32),
)
encoder = hub.KerasLayer(TFHUB_HANDLE_ENCODER, trainable=True, name='encoder')
net = encoder(inputs)['pooled_output']
net = tf.keras.layers.Dropout(rate=dropout_ratio)(net)
net = tf.keras.layers.Dense(num_classes, activation=None, name='classifier')(net)
return tf.keras.Model(inputs, net, name='prediction')
dropout_ratio = 0.1
classes = 2
classifier_model = build_classifier_model(classes, dropout_ratio)
###Output
_____no_output_____
###Markdown
Test the model
###Code
bert_raw_result = classifier_model(text_preprocessed)
print(tf.sigmoid(bert_raw_result))
###Output
tf.Tensor([[0.42943832 0.9724505 ]], shape=(1, 2), dtype=float32)
###Markdown
Visualize the model
###Code
tf.keras.utils.plot_model(classifier_model)
###Output
_____no_output_____
###Markdown
Configure `tf.data` pipelines Load the `glue/cola` datasetWe will use [TensorFlow Datasets](https://www.tensorflow.org/datasets). Since the `glue/cola` dataset is rather small we will load to memory.
###Code
tfds_name = 'glue/cola'
tfds_info = tfds.builder(tfds_name).info
num_classes = tfds_info.features['label'].num_classes
num_examples = tfds_info.splits.total_num_examples
sentence_features = list(tfds_info.features.keys())
available_splits = list(tfds_info.splits.keys())
labels_names = tfds_info.features['label'].names
print(f'Using {tfds_name} from TFDS')
print(f'This dataset has {num_examples} examples')
print(f'Number of classes: {num_classes}')
print(f'Features {sentence_features}')
print(f'Splits {available_splits}')
print(f'Labels names {labels_names}')
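# batch_size=-1 asks TFDS to return each split as a single batch of tensors,
# i.e. the whole (small) dataset is materialized in memory at once.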
in_memory_ds = tfds.load(tfds_name, batch_size=-1, shuffle_files=True)
###Output
INFO:absl:Load dataset info from /home/jupyter/tensorflow_datasets/glue/cola/1.0.0
Using glue/cola from TFDS
This dataset has 10657 examples
Number of classes: 2
Features ['sentence', 'label', 'idx']
Splits ['train', 'validation', 'test']
Labels names ['unacceptable', 'acceptable']
INFO:absl:Load dataset info from /home/jupyter/tensorflow_datasets/glue/cola/1.0.0
INFO:absl:Reusing dataset glue (/home/jupyter/tensorflow_datasets/glue/cola/1.0.0)
INFO:absl:Constructing tf.data.Dataset glue for split None, from /home/jupyter/tensorflow_datasets/glue/cola/1.0.0
###Markdown
Show some examples
###Code
sample_dataset = tf.data.Dataset.from_tensor_slices(in_memory_ds['train'])
for row in sample_dataset.take(2):
print(row)
###Output
{'idx': <tf.Tensor: shape=(), dtype=int32, numpy=1680>, 'label': <tf.Tensor: shape=(), dtype=int64, numpy=1>, 'sentence': <tf.Tensor: shape=(), dtype=string, numpy=b'It is this hat that it is certain that he was wearing.'>}
{'idx': <tf.Tensor: shape=(), dtype=int32, numpy=1456>, 'label': <tf.Tensor: shape=(), dtype=int64, numpy=1>, 'sentence': <tf.Tensor: shape=(), dtype=string, numpy=b'Her efficient looking up of the answer pleased the boss.'>}
###Markdown
Create data ingestion pipelines.We will be training on a multi-gpu node using the `MirroredStrategy` distribution strategy. When using the `MirroredStrategy` each batch of the input is divided equally among the replicas. Typically, you would want to increase your batch size as you add more accelerators, so as to make effective use of the extra computing power.
###Code
strategy = tf.distribute.MirroredStrategy()
batch_size_per_replica = 16
global_batch_size = batch_size_per_replica * strategy.num_replicas_in_sync
features = ['sentence']
def get_data_pipeline(in_memory_ds, info, split, batch_size,
bert_preprocess_model):
is_training = split.startswith('train')
dataset = tf.data.Dataset.from_tensor_slices(in_memory_ds[split])
num_examples = info.splits[split].num_examples
if is_training:
dataset = dataset.shuffle(num_examples)
dataset = dataset.repeat()
dataset = dataset.batch(batch_size)
dataset = dataset.map(lambda ex: (bert_preprocess_model(ex), ex['label']))
dataset = dataset.cache().prefetch(buffer_size=tf.data.AUTOTUNE)
return dataset, num_examples
bert_preprocess_model = make_bert_preprocess_model(features)
train_dataset, train_data_size = get_data_pipeline(
in_memory_ds, tfds_info, 'train', global_batch_size, bert_preprocess_model)
validation_dataset, validation_data_size = get_data_pipeline(
in_memory_ds, tfds_info, 'validation', global_batch_size, bert_preprocess_model)
###Output
_____no_output_____
###Markdown
Configure local training Compile the model Fine-tuning follows the optimizer set-up from BERT pre-training (as in [Classify text with BERT](https://www.tensorflow.org/text/tutorials/classify_text_with_bert)): It uses the AdamW optimizer with a linear decay of a notional initial learning rate, prefixed with a linear warm-up phase over the first 10% of training steps (`num_warmup_steps`). In line with the BERT paper, the initial learning rate is smaller for fine-tuning (best of 5e-5, 3e-5, 2e-5).
###Code
epochs = 3
init_lr = 2e-5
dropout_ratio = 0.1
num_classes = 2
steps_per_epoch = train_data_size // global_batch_size
num_train_steps = steps_per_epoch * epochs
num_warmup_steps = num_train_steps // 10
validation_steps = validation_data_size // global_batch_size
with strategy.scope():
classifier_model = build_classifier_model(num_classes, dropout_ratio)
optimizer = optimization.create_optimizer(
init_lr=init_lr,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
optimizer_type='adamw')
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metrics = tf.keras.metrics.SparseCategoricalAccuracy(
'accuracy', dtype=tf.float32)
classifier_model.compile(optimizer=optimizer, loss=loss, metrics=[metrics])
###Output
INFO:absl:using Adamw optimizer
INFO:absl:gradient_clip_norm=1.000000
###Markdown
Start a local training run
###Code
history = classifier_model.fit(
x=train_dataset,
validation_data=validation_dataset,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_steps=validation_steps)
###Output
Epoch 1/3
534/534 [==============================] - 266s 468ms/step - loss: 0.6197 - accuracy: 0.6631 - val_loss: 0.4313 - val_accuracy: 0.8298
Epoch 2/3
534/534 [==============================] - 248s 464ms/step - loss: 0.2934 - accuracy: 0.8826 - val_loss: 0.4768 - val_accuracy: 0.8308
Epoch 3/3
534/534 [==============================] - 248s 464ms/step - loss: 0.1798 - accuracy: 0.9399 - val_loss: 0.6637 - val_accuracy: 0.8260
###Markdown
Run Vertex custom training job Create a training script
###Code
folder = 'trainer'
if tf.io.gfile.exists(folder):
tf.io.gfile.rmtree(folder)
tf.io.gfile.mkdir(folder)
file_path = os.path.join(folder, 'train.py')
%%writefile {file_path}
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
import tensorflow_text as text # A dependency of the preprocessing model
import tensorflow_addons as tfa
from absl import app
from absl import flags
from absl import logging
from official.nlp import optimization
TFHUB_HANDLE_ENCODER = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3'
TFHUB_HANDLE_PREPROCESS = 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3'
TFDS_NAME = 'glue/cola'
NUM_CLASSES = 2
SENTENCE_FEATURE = 'sentence'
LOCAL_MODEL_DIR = '/tmp/saved_model'
LOCAL_TB_DIR = '/tmp/logs'
LOCAL_CHECKPOINT_DIR = '/tmp/checkpoints'
FLAGS = flags.FLAGS
flags.DEFINE_integer('epochs', 2, 'Nubmer of epochs')
flags.DEFINE_integer('per_replica_batch_size', 16, 'Per replica batch size')
flags.DEFINE_float('init_lr', 2e-5, 'Initial learning rate')
flags.DEFINE_float('dropout_ratio', 0.1, 'Dropout ratio')
def make_bert_preprocess_model(sentence_features, seq_length=128):
"""Returns a model mapping string features to BERT inputs."""
input_segments = [
tf.keras.layers.Input(shape=(), dtype=tf.string, name=ft)
for ft in sentence_features]
# Tokenize the text to word pieces.
bert_preprocess = hub.load(TFHUB_HANDLE_PREPROCESS)
tokenizer = hub.KerasLayer(bert_preprocess.tokenize, name='tokenizer')
segments = [tokenizer(s) for s in input_segments]
# Pack inputs. The details (start/end token ids, dict of output tensors)
# are model-dependent, so this gets loaded from the SavedModel.
packer = hub.KerasLayer(bert_preprocess.bert_pack_inputs,
arguments=dict(seq_length=seq_length),
name='packer')
model_inputs = packer(segments)
return tf.keras.Model(input_segments, model_inputs)
def build_classifier_model(num_classes, dropout_ratio):
"""Creates a text classification model based on BERT encoder."""
inputs = dict(
input_word_ids=tf.keras.layers.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.layers.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.layers.Input(shape=(None,), dtype=tf.int32),
)
encoder = hub.KerasLayer(TFHUB_HANDLE_ENCODER, trainable=True, name='encoder')
net = encoder(inputs)['pooled_output']
net = tf.keras.layers.Dropout(rate=dropout_ratio)(net)
net = tf.keras.layers.Dense(num_classes, activation=None, name='classifier')(net)
return tf.keras.Model(inputs, net, name='prediction')
def get_data_pipeline(in_memory_ds, info, split,
batch_size, bert_preprocess_model):
"""Creates a sentence preprocessing pipeline."""
is_training = split.startswith('train')
dataset = tf.data.Dataset.from_tensor_slices(in_memory_ds[split])
num_examples = info.splits[split].num_examples
if is_training:
dataset = dataset.shuffle(num_examples)
dataset = dataset.repeat()
dataset = dataset.batch(batch_size)
dataset = dataset.map(lambda ex: (bert_preprocess_model(ex), ex['label']))
dataset = dataset.cache().prefetch(buffer_size=tf.data.AUTOTUNE)
return dataset, num_examples
def set_job_dirs():
"""Sets job directories based on env variables set by Vertex AI."""
model_dir = os.getenv('AIP_MODEL_DIR', LOCAL_MODEL_DIR)
tb_dir = os.getenv('AIP_TENSORBOARD_LOG_DIR', LOCAL_TB_DIR)
checkpoint_dir = os.getenv('AIP_CHECKPOINT_DIR', LOCAL_CHECKPOINT_DIR)
return model_dir, tb_dir, checkpoint_dir
def main(argv):
"""Starts a training run."""
del argv
logging.info('Setting up training.')
logging.info(' epochs: {}'.format(FLAGS.epochs))
logging.info(' per_replica_batch_size: {}'.format(FLAGS.per_replica_batch_size))
logging.info(' init_lr: {}'.format(FLAGS.init_lr))
logging.info(' dropout_ratio: {}'.format(FLAGS.dropout_ratio))
# Set distribution strategy
strategy = tf.distribute.MirroredStrategy()
global_batch_size = (strategy.num_replicas_in_sync *
FLAGS.per_replica_batch_size)
# Configure input data pipelines
tfds_info = tfds.builder(TFDS_NAME).info
num_classes = tfds_info.features['label'].num_classes
num_examples = tfds_info.splits.total_num_examples
available_splits = list(tfds_info.splits.keys())
labels_names = tfds_info.features['label'].names
with tf.device('/job:localhost'):
in_memory_ds = tfds.load(TFDS_NAME, batch_size=-1, shuffle_files=True)
bert_preprocess_model = make_bert_preprocess_model([SENTENCE_FEATURE])
train_dataset, train_data_size = get_data_pipeline(
in_memory_ds, tfds_info, 'train', global_batch_size, bert_preprocess_model)
validation_dataset, validation_data_size = get_data_pipeline(
in_memory_ds, tfds_info, 'validation', global_batch_size, bert_preprocess_model)
# Configure the model
steps_per_epoch = train_data_size // global_batch_size
num_train_steps = steps_per_epoch * FLAGS.epochs
num_warmup_steps = num_train_steps // 10
validation_steps = validation_data_size // global_batch_size
with strategy.scope():
classifier_model = build_classifier_model(NUM_CLASSES, FLAGS.dropout_ratio)
optimizer = optimization.create_optimizer(
init_lr=FLAGS.init_lr,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
optimizer_type='adamw')
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metrics = tf.keras.metrics.SparseCategoricalAccuracy(
'accuracy', dtype=tf.float32)
classifier_model.compile(optimizer=optimizer, loss=loss, metrics=[metrics])
model_dir, tb_dir, checkpoint_dir = set_job_dirs()
# Configure Keras callbacks
callbacks = [tf.keras.callbacks.experimental.BackupAndRestore(backup_dir=checkpoint_dir)]
callbacks.append(tf.keras.callbacks.TensorBoard(
log_dir=tb_dir, update_freq='batch'))
logging.info('Starting training ...')
classifier_model.fit(
x=train_dataset,
validation_data=validation_dataset,
steps_per_epoch=steps_per_epoch,
epochs=FLAGS.epochs,
validation_steps=validation_steps,
callbacks=callbacks)
# Save trained model
logging.info('Training completed. Saving the trained model to: {}'.format(model_dir))
classifier_model.save(model_dir)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
app.run(main)
###Output
Writing trainer/train.py
###Markdown
Initialize Vertex SDK
###Code
vertex_ai.init(
project=PROJECT,
location=REGION,
staging_bucket=STAGING_BUCKET
)
###Output
_____no_output_____
###Markdown
Create or set Tensorboard
###Code
tb_client = api_client = vertex_ai.initializer.global_config.create_client(
client_class=TensorboardClientWithOverride, location_override=REGION
)
parent = f'projects/{PROJECT}/locations/{REGION}'
tensorboard_display_name = 'Demo Tensorboard'
tensorboard_ref = None
for tensorboard in tb_client.list_tensorboards(parent=parent):
if tensorboard.display_name == tensorboard_display_name:
tensorboard_ref = tensorboard
if not tensorboard_ref:
print('Creating new Tensorboard')
tb_specs = Tensorboard(
display_name=tensorboard_display_name,
description=tensorboard_display_name
)
operation = tb_client.create_tensorboard(parent=parent, tensorboard=tb_specs)
tensorboard_ref = operation.result()
else:
print('Using existing Tensorboard:', tensorboard_ref.name)
###Output
Using existing Tensorboard: projects/895222332033/locations/us-central1/tensorboards/5843350147769040896
###Markdown
Configure and submit Vertex job
###Code
job_name = "JOB_{}".format(time.strftime("%Y%m%d_%H%M%S"))
base_output_dir = f'{STAGING_BUCKET}/jobs/{job_name}'
container_uri = 'us-docker.pkg.dev/vertex-ai/training/tf-gpu.2-4:latest'
requirements = ['tf-models-official==2.4.0', 'tensorflow-text==2.4.3', 'tensorflow-datasets==4.3.0']
args = ['--epochs=3', '--per_replica_batch_size=16']
machine_type = 'n1-standard-4'
accelerator_type = 'NVIDIA_TESLA_T4'
accelerator_count = 2
job = vertex_ai.CustomJob.from_local_script(
display_name=job_name,
machine_type=machine_type,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
script_path='trainer/train.py',
container_uri=container_uri,
requirements=requirements,
args=args
)
job.run(sync=False,
service_account=VERTEX_SA,
tensorboard=tensorboard_ref.name)
###Output
INFO:google.cloud.aiplatform.utils.source_utils:Training script copied to:
gs://jk-vertex-workshop-bucket/aiplatform-2021-06-02-03:57:56.583-aiplatform_custom_trainer_script-0.1.tar.gz.
INFO:google.cloud.aiplatform.jobs:Creating CustomJob
INFO:google.cloud.aiplatform.jobs:CustomJob created. Resource name: projects/895222332033/locations/us-central1/customJobs/1166999651488890880
INFO:google.cloud.aiplatform.jobs:To use this CustomJob in another session:
INFO:google.cloud.aiplatform.jobs:custom_job = aiplatform.CustomJob.get('projects/895222332033/locations/us-central1/customJobs/1166999651488890880')
INFO:google.cloud.aiplatform.jobs:View Custom Job:
https://console.cloud.google.com/ai/platform/locations/us-central1/training/1166999651488890880?project=895222332033
INFO:google.cloud.aiplatform.jobs:CustomJob projects/895222332033/locations/us-central1/customJobs/1166999651488890880 current state:
JobState.JOB_STATE_PENDING
INFO:google.cloud.aiplatform.jobs:CustomJob projects/895222332033/locations/us-central1/customJobs/1166999651488890880 current state:
JobState.JOB_STATE_PENDING
INFO:google.cloud.aiplatform.jobs:CustomJob projects/895222332033/locations/us-central1/customJobs/1166999651488890880 current state:
JobState.JOB_STATE_PENDING
INFO:google.cloud.aiplatform.jobs:CustomJob projects/895222332033/locations/us-central1/customJobs/1166999651488890880 current state:
JobState.JOB_STATE_PENDING
|
.ipynb_checkpoints/bank-loan-approval-using-AI-checkpoint.ipynb | ###Markdown
Bank Loan Approval Prediction using Artificial Neural Network In this project, we will build and train a deep neural network model to predict the likelihood of a liability customer buying personal loans based on customer features.
###Code
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import Accuracy
import matplotlib.pyplot as plt
bank_df = pd.read_csv("UniversalBank.csv")
bank_df.head()
bank_df.shape
###Output
_____no_output_____
###Markdown
- ID: Customer ID- Age: Customer Age- Experience: Amount of work experience in years- Income: Amount of annual income (in thousands)- Zipcode: Zipcode of where customer lives- Family: Number of family members- CCAvg: Average monthly credit card spendings- Education: Education level (1: Bachelor, 2: Master, 3: Advanced Degree)- Mortgage: Mortgage of house (in thousands)- Securities Account: Boolean of whether customer has a securities account- CD Account: Boolean of whether customer has Certificate of Deposit account- Online: Boolean of whether customer uses online banking- CreditCard: Does the customer use credit card issued by the bank?- Personal Loan: This is the target variable (Binary Classification Problem) Exploratory Data Analysis
###Code
bank_df.info()
bank_df.describe().transpose()
bank_df.isnull().sum()
###Output
_____no_output_____
###Markdown
Great, we have no missing values!
###Code
avg_age = bank_df["Age"].mean()
print ("The average age of this dataset is {:.1f}.".format(avg_age))
percent_cc = sum(bank_df["CreditCard"] == 1)/len(bank_df)
print ("The percentage of customers that own the bank's credit card is {:.2%}.".format(percent_cc))
percent_loan = sum(bank_df["Personal Loan"] == 1)/len(bank_df)
print ("The percentage of customers that took out a personal loan is {:.2%}.".format(percent_loan))
###Output
The percentage of customers that took out a personal loan is 9.60%.
###Markdown
Data Visualization
###Code
sns.countplot(x=bank_df["Personal Loan"])
plt.show()
sns.countplot(x=bank_df["Education"])
plt.show()
sns.countplot(x=bank_df["CreditCard"])
plt.show()
plt.figure(figsize=(20,10))
sns.countplot(x=bank_df["Age"])
plt.show()
# let's look at the distribution of income
plt.figure(figsize=(15,8))
sns.distplot(bank_df["Income"])
plt.show()
# let's create 2 dataframes: one with personal loans and one without personal loans
personal_loans = bank_df[bank_df['Personal Loan'] == 1].copy()
no_personal_loans = bank_df[bank_df['Personal Loan'] == 0].copy()
personal_loans.describe().T
no_personal_loans.describe().T
plt.figure(figsize=(15,8))
sns.distplot(personal_loans["Income"])
sns.distplot(no_personal_loans["Income"])
plt.show()
cm = bank_df.corr()
plt.figure(figsize=(20,20))
sns.heatmap(cm, annot=True)
plt.show()
# let's look at the distribution of average credit card spending
plt.figure(figsize=(15,8))
sns.distplot(bank_df["CCAvg"])
plt.show()
plt.figure(figsize=(15,8))
sns.distplot(personal_loans["CCAvg"])
sns.distplot(no_personal_loans["CCAvg"])
plt.show()
###Output
C:\Users\shaya\anaconda3\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
C:\Users\shaya\anaconda3\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
###Markdown
Data Preparation
###Code
from tensorflow.keras.utils import to_categorical
X = bank_df.drop(columns=["Personal Loan"])
y = bank_df["Personal Loan"]
y = to_categorical(y)
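# to_categorical one-hot encodes the 0/1 labels into two columns, matching the
# 2-unit softmax output layer defined below.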
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
###Output
_____no_output_____
###Markdown
Building a multi-layer neural network model
###Code
# sequential model
ann_model = keras.Sequential()
# adding dense layer
ann_model.add(Dense(250, input_dim=13, kernel_initializer='normal', activation='relu'))
ann_model.add(Dropout(0.3))
ann_model.add(Dense(500, activation='relu'))
ann_model.add(Dropout(0.3))
ann_model.add(Dense(500, activation='relu'))
ann_model.add(Dropout(0.3))
ann_model.add(Dense(500, activation='relu'))
ann_model.add(Dropout(0.4))
ann_model.add(Dense(250, activation='linear'))
ann_model.add(Dropout(0.4))
# adding dense layer with softmax activation/output layer
ann_model.add(Dense(2, activation='softmax'))
ann_model.summary()
###Output
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_6 (Dense) (None, 250) 3500
_________________________________________________________________
dropout_5 (Dropout) (None, 250) 0
_________________________________________________________________
dense_7 (Dense) (None, 500) 125500
_________________________________________________________________
dropout_6 (Dropout) (None, 500) 0
_________________________________________________________________
dense_8 (Dense) (None, 500) 250500
_________________________________________________________________
dropout_7 (Dropout) (None, 500) 0
_________________________________________________________________
dense_9 (Dense) (None, 500) 250500
_________________________________________________________________
dropout_8 (Dropout) (None, 500) 0
_________________________________________________________________
dense_10 (Dense) (None, 250) 125250
_________________________________________________________________
dropout_9 (Dropout) (None, 250) 0
_________________________________________________________________
dense_11 (Dense) (None, 2) 502
=================================================================
Total params: 755,752
Trainable params: 755,752
Non-trainable params: 0
_________________________________________________________________
###Markdown
Compilation and training of deep learning model
###Code
# custom functions for f1, precision and recall
from keras import backend as K
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
ann_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[f1_m]) # metrics=['accuracy']
history = ann_model.fit(X_train, y_train, epochs=20, validation_split=0.2, verbose=1)
# Plot the model performance across epochs
plt.figure(figsize=(15,8))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train_loss','val_loss'], loc = 'upper right')
plt.show()
###Output
_____no_output_____
###Markdown
Evaluating model performance
###Code
predictions = ann_model.predict(X_test)
predict = []
for i in predictions:
predict.append(np.argmax(i))
from sklearn import metrics
y_test = np.argmax(y_test, axis=1)
f1_test = metrics.f1_score(y_test, predict)
prec = metrics.precision_score(y_test, predict)
rec = metrics.recall_score(y_test, predict)
acc = metrics.accuracy_score(y_test, predict)
print ("F1 Score: {:.4f}.".format(f1_test))
print ("Precision: {:.4f}.".format(prec))
print ("Recall: {:.4f}.".format(rec))
print ("Accuracy: {:.4f}.".format(acc)) # note this is not a good measure of performance for this project as dataset is unbalanced.
conf_mat = metrics.confusion_matrix(y_test, predict)
plt.figure(figsize=(10,8))
sns.heatmap(conf_mat, annot=True, cbar=False)
plt.show()
print(metrics.classification_report(y_test, predict))
###Output
precision recall f1-score support
0 1.00 1.00 1.00 450
1 0.98 0.96 0.97 50
accuracy 0.99 500
macro avg 0.99 0.98 0.98 500
weighted avg 0.99 0.99 0.99 500
|
cbig_rnn.ipynb | ###Markdown
CBIG_RNNThis Python notebook trains, tests, and evaluates the CBIG_RNN model. In the first piece of code, testing is done on a longitudinal data set (D2); in the second piece of code, testing is done on a cross-sectional data set (D3). The ADNI D1 and D4 data sets are used for training and evaluation, respectively. Train model on data set D1 and test model on longitudinal data set D2
###Code
import pandas as pd
import datetime
from pathlib import Path
from tadpole_algorithms.models import CBIG_RNN
from tadpole_algorithms.preprocessing.split import split_test_train_tadpole
"""
Train model on ADNI data set D1
Test model on ADNI data set D2
"""
# Load D1_D2 train and possible test data set
data_path_train_test = Path("data/TADPOLE_D1_D2.csv")
data_df_train_test = pd.read_csv(data_path_train_test)
# Load D4 evaluation data set
data_path_eval = Path("data/TADPOLE_D4_corr.csv")
data_df_eval = pd.read_csv(data_path_eval)
# Split data in test, train and evaluation data
train_df, test_df, eval_df = split_test_train_tadpole(data_df_train_test, data_df_eval)
test_df = test_df.fillna(0)
# Define and train model
model = CBIG_RNN(cleanup=True)
model.train(train_df)
# # Predict forecast on the test set
forecast_df_d2 = model.predict(test_df)
###Output
_____no_output_____
###Markdown
Train model on data set D1 and test model on cross sectional data set D3
###Code
import pandas as pd
import datetime
from pathlib import Path
from tadpole_algorithms.models import CBIG_RNN
from tadpole_algorithms.preprocessing.split import split_test_train_d3
from tadpole_algorithms.preprocessing.rewrite_df import rewrite_d3
"""
Train model on ADNI data set D1
Test model on ADNI data set D3
"""
# Load D1_D2 train and possible test data set
data_path_train = Path("data/TADPOLE_D1_D2.csv")
data_df_train = pd.read_csv(data_path_train)
# Load D3 possible test set
data_path_test = Path("data/TADPOLE_D3.csv")
data_df_test = pd.read_csv(data_path_test)
# Load D4 evaluation data set
data_path_eval = Path("data/TADPOLE_D4_corr.csv")
data_df_eval = pd.read_csv(data_path_eval)
# Split data into test, train and evaluation data
train_df, test_df, eval_df = split_test_train_d3(data_df_train, data_df_test, data_df_eval)
test_df = test_df.fillna(0)
test_df = rewrite_d3(test_df)
# Define and train model
model = CBIG_RNN(clean_up=True, isD3=True, features='cross_sectional_features', epochs=100)
model.train(train_df)
# # Predict forecast on the test set
forecast_df_d3 = model.predict(test_df)
###Output
/home/ljan/storage/miniconda3/envs/tadpole/lib/python3.6/site-packages/IPython/core/interactiveshell.py:3072: DtypeWarning: Columns (471,473,474,487,488,489,490,491,492,493,494,495,496,497,498,499,500,501,502,503,504,505,506,507,508,509,510,511,512,513,514,515,516,517,518,519,520,521,522,523,524,525,526,527,528,529,530,531,532,533,534,535,536,537,538,539,540,541,542,543,544,545,546,547,548,549,550,551,552,553,554,555,556,557,558,559,560,561,562,563,569,570,572,573,574,575,576,577,578,579,580,581,582,583,584,585,586,587,588,589,590,591,592,593,594,595,596,597,599,601,606,607,608,609,610,611,612,613,614,615,616,617,618,619,620,621,624,625,626,627,628,629,630,631,632,633,634,636,637,638,639,640,641,642,643,644,645,646,647,648,649,650,651,652,653,654,655,656,657,658,659,660,661,663,664,665,666,667,668,669,670,671,672,673,674,675,676,677,678,679,680,681,682,683,684,685,686,687,688,689,690,691,692,693,694,695,696,697,698,699,700,701,702,703,704,705,706,707,708,709,710,711,712,713,714,715,716,717,718,719,720,721,722,723,724,725,726,727,728,729,730,731,732,733,734,735,736,737,738,739,745,746,748,749,750,751,752,753,754,755,756,757,758,759,760,761,762,763,764,765,766,767,770,771,776,777,778,779,780,781,782,783,784,785,786,787,788,789,790,791,794,795,797,798,799,800,801,802,803,804,806,807,808,809,810,811,812,813,814,815,816,817,818,819,820,821,822,823,824,825,826,827,828,829,830,831) have mixed types.Specify dtype option on import or set low_memory=False.
interactivity=interactivity, compiler=compiler, result=result)
###Markdown
Evaluate model tested on D2 on ADNI data set D4
###Code
from tadpole_algorithms.evaluation import evaluate_forecast
from tadpole_algorithms.evaluation import print_metrics
# Evaluate the model
dictionary = evaluate_forecast(eval_df, forecast_df_d2)
# Print metrics
print_metrics(dictionary)
###Output
[[69 16 1]
[13 53 26]
[ 1 1 30]]
mAUC (multiclass Area Under Curve): 0.893
bca (balanced classification accuracy): 0.818
adasMAE (ADAS13 Mean Aboslute Error): 5.055
ventsMAE (Ventricles Mean Aboslute Error): 0.013
adasWES (ADAS13 Weighted Error Score): 5.055
ventsWES (Ventricles Weighted Error Score ): 0.013
adasCPA (ADAS13 Coverage Probability Accuracy for 50% Confidence Interval: 0.404
ventsCPA (Ventricles Coverage Probability Accuracy for 50% Confidence Interval: 0.453
###Markdown
Evaluate model tested on D3 on ADNI data set D4
###Code
from tadpole_algorithms.evaluation import evaluate_forecast
from tadpole_algorithms.evaluation import print_metrics
# Evaluate the model
dictionary = evaluate_forecast(eval_df, forecast_df_d3)
# Print metrics
print_metrics(dictionary)
###Output
[[75 2 9]
[51 4 37]
[ 4 0 28]]
mAUC (multiclass Area Under Curve): 0.780
bca (balanced classification accuracy): 0.679
adasMAE (ADAS13 Mean Aboslute Error): 7.176
ventsMAE (Ventricles Mean Aboslute Error): 0.014
adasWES (ADAS13 Weighted Error Score): 7.176
ventsWES (Ventricles Weighted Error Score ): 0.014
adasCPA (ADAS13 Coverage Probability Accuracy for 50% Confidence Interval: 0.390
ventsCPA (Ventricles Coverage Probability Accuracy for 50% Confidence Interval: 0.473
|
PythonScripts/Paper1Figures/fig_N_evolution.ipynb | ###Markdown
Stratification and density within the canyon
###Code
#import gsw as sw # Gibbs seawater package
import cmocean as cmo
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.gridspec as gspec
%matplotlib inline
from netCDF4 import Dataset
import numpy as np
import pandas as pd
import seaborn as sns
import sys
import xarray as xr
import canyon_tools.readout_tools as rout
import canyon_tools.metrics_tools as mpt
import warnings
warnings.filterwarnings("ignore")
sns.set_context('paper')
sns.set_style('white')
def calc_rho(RhoRef,T,S,alpha=2.0E-4, beta=7.4E-4):
"""-----------------------------------------------------------------------------
calc_rho calculates the density using a linear equation of state.
INPUT:
RhoRef : reference density at the same z as T and S slices. Can be a scalar or a
vector, depending on the size of T and S.
T, S : should be at least 2D arrays in coordinate order (..., Y , X )
alpha = 2.0E-4 # 1/degC, thermal expansion coefficient
beta = 7.4E-4, haline expansion coefficient
OUTPUT:
rho - Density [...,ny,nx]
-----------------------------------------------------------------------------"""
#Linear eq. of state
rho = RhoRef*(np.ones(np.shape(T)) - alpha*(T[...,:,:]) + beta*(S[...,:,:]))
return rho
def call_rho(t,yslice,xslice):
T = state.Temp.isel(T=t,Y=yslice,X=xslice)
S = state.S.isel(T=t,Y=yslice,X=xslice)
rho = calc_rho(RhoRef,T,S,alpha=2.0E-4, beta=7.4E-4)
return(rho)
CanyonGrid='/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run38/gridGlob.nc'
CanyonGridOut = Dataset(CanyonGrid)
CanyonGridNoC='/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run42/gridGlob.nc'
CanyonGridOutNoC = Dataset(CanyonGridNoC)
CanyonState='/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run38/stateGlob.nc'
CanyonStateOut = Dataset(CanyonState)
# Grid variables
nx = 360
ny = 360
nz = 90
nt = 19 # t dimension size
time = CanyonStateOut.variables['T']
# Grid, state and tracers datasets of base case
grid_file = '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run38/gridGlob.nc'
grid = xr.open_dataset(grid_file)
state_file = '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run38/stateGlob.nc'
state = xr.open_dataset(state_file)
ptracers_file = '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run38/ptracersGlob.nc'
ptracers = xr.open_dataset(ptracers_file)
#RhoRef = np.squeeze(rdmds('/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run38/RhoRef'))
RhoRef = 999.79998779 # It is constant in all my runs, can't run rdmds
import canyon_records
import nocanyon_records
records = canyon_records.main()
recordsNoC = nocanyon_records.main()
ii=0
for rec in records:
print(ii,rec.name)
ii=ii+1
select_rec = [0,5,6,10,12,14,2,15]
# Save mean maximum N of days 3-6 and std for each run.
keys = ['N_tt06','N_tt08','N_tt10','N_tt12']
key0 = 'N_tt00'
stname = 'DnC' # Station at downstream side of canyon
for record in records:
filename1 = ('/ocean/kramosmu/OutputAnalysis/outputanalysisnotebooks/results/metricsDataFrames/N_%s_%s.csv' %
(record.name,stname))
df = pd.read_csv(filename1)
df_anom=(df.sub(df[key0].squeeze(),axis=0)).add(df[key0][0])
df_anom2 = (df.sub(df[key0].squeeze(),axis=0))
maxd3 = max(df_anom[keys[0]][26:])
maxd4 = max(df_anom[keys[1]][26:])
maxd5 = max(df_anom[keys[2]][26:])
maxd6 = max(df_anom[keys[3]][26:])
record.maxN = np.mean(np.array([maxd3,maxd4,maxd5,maxd6]))
record.stdN = np.std(np.array([maxd3,maxd4,maxd5,maxd6]))
record.Nprof = df_anom2[keys[2]][:]
yind = 230 # y index for alongshore cross-section
#yind2 = 250 # y index for alongshore cross-section
xslice=slice(0,360)
yslice=slice(100,300)
xslice_spd = slice(60,300)
yslice_spd = slice(150,280)
x_qslice = slice(60,300,15)
y_qslice = slice(150,280,15)
tslice = slice(6,10)
xind = 180
yslice_u = slice(150,300)
zind = 27
# plot 5
xind_umean = 120
yslice_umean = slice(150,267)
zslice_umean = slice(25,50)
tslice_umean = slice(0,20)
# plot 6
yslice_bac = slice(225,300)
xslice_bac = slice(100,360)
hFacmasked = np.ma.masked_values(grid.HFacC.data, 0)
MaskC = np.ma.getmask(hFacmasked)
MaskExpand = np.expand_dims(MaskC,0)
MaskExpand = MaskExpand + np.zeros((ptracers.Tr1).shape)
plt.rcParams['font.size'] = 8.0
f = plt.figure(figsize = (7.48,4.5)) # 190mm = 7.48 in, 115mm = 4.5in
gs = gspec.GridSpec(1, 2, width_ratios=[0.4,1],wspace=0.2)
gs1 = gspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=gs[0,1],hspace=0.2,height_ratios=[1,1])
gs10 = gspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs1[0,0],wspace=0.05)
ax0 = plt.subplot(gs[0,0])
ax1 = plt.subplot(gs1[0,0])
ax2 = plt.subplot(gs10[0,0])
ax3 = plt.subplot(gs10[0,1],yticks=[])
ax4 = plt.subplot(gs1[1,0])
t=4 # days
#%%%%%%%%%%%%% Contours density alongshore %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
tslice = slice(6,8)
yslice = 245
xslice = slice(120,240)
rho_min = 1020.4-1000.0
rho_max = 1022.0-1000.0 # 1022.4 if y=230,1021.4 if y=260
density = call_rho(tslice,yslice,xslice)
csU2 = np.linspace(rho_min,rho_max,num=30)
csU = np.linspace(rho_min,rho_max,num=30)
mesh=ax2.contourf(grid.X[xslice]/1000,grid.Z[:48],
np.ma.array(np.nanmean(density[:,:48,:].data-1000,axis=0),mask=MaskC[:48,yslice,xslice]),
csU,cmap=cmo.cm.matter)
ax2.axvline(grid.X[200]/1000,linestyle='--',color='0.8')
ax2.plot(grid.X[xslice]/1000,-grid.Depth[267,xslice],':',color='0.8',linewidth=3)
ax2.axhline(-grid.Depth[226,100],linestyle=':',color='0.8',linewidth=3)
CS = ax2.contour(grid.X[xslice]/1000,grid.Z[:48],
np.ma.array(np.nanmean(density[:,:48,:].data-1000,axis=0),mask=MaskC[:48,yslice,xslice]),
csU2,colors='k',linewidths=[0.75] )
ax2.set_axis_bgcolor((205/255.0, 201/255.0, 201/255.0))
ax2.text(0.6,0.1,'$\sigma$ (kg/m$^{3}$)',transform=ax2.transAxes)
ax2.set_ylabel('Depth (m)',labelpad=0.0)
ax2.set_xlabel('Alongshore distance (km)',labelpad=0.0)
ax2.tick_params(axis='x', pad=1)
ax2.tick_params(axis='y', pad=1)
#%%%%%%%%%%%%% Contours density cross-shore %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
yslice = slice(220,270)
xslice = 200
rho_min = 1020.4-1000.0
rho_max = 1022.0-1000.0 # 1022.4 if y=230,1021.4 if y=260
density = call_rho(tslice,yslice,xslice)
csU2 = np.linspace(rho_min,rho_max,num=30)
csU = np.linspace(rho_min,rho_max,num=30)
mesh=ax3.contourf(grid.Y[yslice]/1000,grid.Z[:48],
np.ma.array(np.nanmean(density[:,:48,:].data-1000,axis=0),mask=MaskC[:48,yslice,xslice]),
csU,cmap=cmo.cm.matter)
cbar_ax = f.add_axes([0.85, 0.565, 0.017, 0.18])
cb=f.colorbar(mesh, cax=cbar_ax,ticks=[20.4,20.8,21.2,21.6,22],format='%.1f')
cb.ax.yaxis.set_tick_params(pad=1)
ax3.axvline(grid.Y[245]/1000,linestyle='--',color='0.8')
ax3.plot(grid.Y[yslice]/1000,-grid.Depth[yslice,100],':',color='0.8',linewidth=3)
CS = ax3.contour(grid.Y[yslice]/1000,grid.Z[:48],
np.ma.array(np.nanmean(density[:,:48,:].data-1000,axis=0),mask=MaskC[:48,yslice,xslice]),
csU2,colors='k',linewidths=[0.75] )
ax3.set_axis_bgcolor((205/255.0, 201/255.0, 201/255.0))
ax3.set_xlabel('CS distance (km)',labelpad=0.0)
ax3.text(0.45,0.1,'$\sigma$ (kg/m$^{3}$)',transform=ax3.transAxes)
ax3.tick_params(axis='x', pad=1)
ax3.tick_params(axis='y', pad=1)
#%%%%%%%%%%%%%%%%%%%%% N profiles day 4 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
ax0.axvline(0,color='0.8',linestyle='-')
ax0.axhline(-grid.Depth[267,100],linestyle=':',color='0.8',linewidth=3)
ax0.axhline(-grid.Depth[226,100],linestyle=':',color='0.8',linewidth=3)
ax0.axhline(grid.Z[26],linestyle=':',color='0.8',linewidth=3)
for ind in select_rec:
rec=records[ind]
ax0.plot(rec.Nprof[:48]*1000,grid.Z[:48],color=sns.xkcd_rgb[rec.color],label=rec.label)
ax0.set_xlabel('$N-N_0$ ($10^{-3}$ s$^{-1}$)',labelpad=0.0)
ax0.set_ylabel('Depth (m)',labelpad=0.0)
#%%%%%%%%%%%%%%%%%%%%% max N evolution %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
keys = ['N_tt00','N_tt02','N_tt04','N_tt06','N_tt08','N_tt10','N_tt12','N_tt14','N_tt16','N_tt18']
labels = []
sel_labels=[records[ind].label for ind in select_rec]
for ind in select_rec:
rec=records[ind]
tt=0
for key in keys:
filename1 = ('/ocean/kramosmu/OutputAnalysis/outputanalysisnotebooks/results/metricsDataFrames/N_%s_%s.csv' %
(rec.name,'DnC'))
df = pd.read_csv(filename1)
df_anom=(df.sub(df[key0].squeeze(),axis=0)).add(df[key0][0])
plt1, = ax4.plot(tt,max(df_anom[key][27:])*1000,marker=rec.mstyle,markersize=9,color=sns.xkcd_rgb[rec.color])
tt=tt+1
labels.append(plt1)
ax4.legend(labels,sel_labels,bbox_to_anchor=(1.05,-0.15),ncol=4,labelspacing=0.1,columnspacing=0.1,frameon=True)
ax4.set_ylabel('$N_{max}$ ($10^{-3}$ s$^{-1}$)',labelpad=0.0)
ax4.set_xlabel('Days',labelpad=0.0)
#plt.savefig('fig_N_evolution.eps',format='eps',bbox_inches='tight')
###Output
_____no_output_____ |
Big-Data-Clusters/CU14/public/content/cert-management/cer044-install-controller-cert.ipynb | ###Markdown
CER044 - Install signed Controller certificate==============================================This notebook installs into the Big Data Cluster the certificate signedusing:- [CER034 - Sign Controller certificate with cluster Root CA](../cert-management/cer034-sign-controller-generated-cert.ipynb)NOTE: At the end of this notebook the Controller pod and all pods thatuse PolyBase (Master Pool and Compute Pool pods) will be restarted toload the new certificates.Steps----- Parameters
###Code
app_name = "controller"
scaledset_name = "control"
container_name = "controller"
prefix_keyfile_name = "controller"
common_name = "controller-svc"
test_cert_store_root = "/var/opt/secrets/test-certificates"
###Output
_____no_output_____
###Markdown
Common functionsDefine helper functions used in this notebook.
###Code
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
def run(cmd, return_output=False, no_output=False, retry_count=0, base64_decode=False, return_as_json=False, regex_mask=None):
"""Run shell command, stream stdout, print stderr and optionally return output
NOTES:
1. Commands that need this kind of ' quoting on Windows e.g.:
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
Need to actually pass in as '"':
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
`iter(p.stdout.readline, b'')`
The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
"""
MAX_RETRIES = 5
output = ""
retry = False
# When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
#
# ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
#
if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
cmd = cmd.replace("\n", " ")
# shlex.split is required on bash and for Windows paths with spaces
#
cmd_actual = shlex.split(cmd)
# Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
#
user_provided_exe_name = cmd_actual[0].lower()
# When running python, use the python in the ADS sandbox ({sys.executable})
#
if cmd.startswith("python "):
cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
# On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
# with:
#
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
#
# Setting it to a default value of "en_US.UTF-8" enables pip install to complete
#
if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
os.environ["LC_ALL"] = "en_US.UTF-8"
# When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
#
if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")
# To aid supportability, determine which binary file will actually be executed on the machine
#
which_binary = None
# Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
# get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
# of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
# always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
# look for the 2nd installation of CURL in the path)
if platform.system() == "Windows" and cmd.startswith("curl "):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, "curl.exe")
if os.path.exists(p) and os.access(p, os.X_OK):
if p.lower().find("system32") == -1:
cmd_actual[0] = p
which_binary = p
break
# Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
# seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
#
# NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
#
if which_binary == None:
which_binary = shutil.which(cmd_actual[0])
# Display an install HINT, so the user can click on a SOP to install the missing binary
#
if which_binary == None:
print(f"The path used to search for '{cmd_actual[0]}' was:")
print(sys.path)
if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
else:
cmd_actual[0] = which_binary
start_time = datetime.datetime.now().replace(microsecond=0)
cmd_display = cmd
if regex_mask is not None:
regex = re.compile(regex_mask)
cmd_display = re.sub(regex, '******', cmd)
print(f"START: {cmd_display} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
print(f" cwd: {os.getcwd()}")
# Command-line tools such as CURL and AZDATA HDFS commands output
# scrolling progress bars, which causes Jupyter to hang forever, to
# workaround this, use no_output=True
#
# Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
#
wait = True
try:
if no_output:
p = Popen(cmd_actual)
else:
p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
with p.stdout:
for line in iter(p.stdout.readline, b''):
line = line.decode()
if return_output:
output = output + line
else:
if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
regex = re.compile(' "(.*)"\: "(.*)"')
match = regex.match(line)
if match:
if match.group(1).find("HTML") != -1:
display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
else:
display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
wait = False
break # otherwise infinite hang, have not worked out why yet.
else:
print(line, end='')
if wait:
p.wait()
except FileNotFoundError as e:
if install_hint is not None:
display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
if not no_output:
for line in iter(p.stderr.readline, b''):
try:
line_decoded = line.decode()
except UnicodeDecodeError:
# NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
#
# \xa0
#
# For example see this in the response from `az group create`:
#
# ERROR: Get Token request returned http error: 400 and server
# response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
# The refresh token has expired due to inactivity.\xa0The token was
# issued on 2018-10-25T23:35:11.9832872Z
#
# which generates the exception:
#
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
#
print("WARNING: Unable to decode stderr line, printing raw bytes:")
print(line)
line_decoded = ""
pass
else:
# azdata emits a single empty line to stderr when doing an hdfs cp, don't
# print this empty "ERR:" as it confuses.
#
if line_decoded == "":
continue
print(f"STDERR: {line_decoded}", end='')
if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
exit_code_workaround = 1
# inject HINTs to next TSG/SOP based on output in stderr
#
if user_provided_exe_name in error_hints:
for error_hint in error_hints[user_provided_exe_name]:
if line_decoded.find(error_hint[0]) != -1:
display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
# Verify if a transient error, if so automatically retry (recursive)
#
if user_provided_exe_name in retry_hints:
for retry_hint in retry_hints[user_provided_exe_name]:
if line_decoded.find(retry_hint) != -1:
if retry_count < MAX_RETRIES:
print(f"RETRY: {retry_count} (due to: {retry_hint})")
retry_count = retry_count + 1
output = run(cmd, return_output=return_output, retry_count=retry_count)
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
# WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
# don't wait here, if success known above
#
if wait:
if p.returncode != 0:
raise SystemExit(f'Shell command:\n\n\t{cmd_display} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
else:
if exit_code_workaround !=0 :
raise SystemExit(f'Shell command:\n\n\t{cmd_display} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
print(f'\nSUCCESS: {elapsed}s elapsed.\n')
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
# Hints for tool retry (on transient fault), known errors and install guide
#
retry_hints = {'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use', 'Login timeout expired (0) (SQLDriverConnect)', 'SSPI Provider: No Kerberos credentials available', ], 'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond', ], 'python': [ ], }
error_hints = {'azdata': [['Please run \'azdata login\' to first authenticate', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', '../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Can\'t open lib \'ODBC Driver 17 for SQL Server', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb'], ['NameError: name \'azdata_login_secret_name\' is not defined', 'SOP013 - Create secret for azdata login (inside cluster)', '../common/sop013-create-secret-for-azdata-login.ipynb'], ['ERROR: No credentials were supplied, or the credentials were unavailable or inaccessible.', 'TSG124 - \'No credentials were supplied\' error from azdata login', '../repair/tsg124-no-credentials-were-supplied.ipynb'], ['Please accept the license terms to use this product through', 'TSG126 - azdata fails with \'accept the license terms to use this product\'', '../repair/tsg126-accept-license-terms.ipynb'], ], 'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb'], ], 'python': [['Library not loaded: /usr/local/opt/unixodbc', 'SOP012 - Install unixodbc for Mac', '../install/sop012-brew-install-odbc-for-sql-server.ipynb'], ['WARNING: You are using pip version', 'SOP040 - Upgrade pip in ADS Python sandbox', '../install/sop040-upgrade-pip.ipynb'], ], }
install_hint = {'azdata': [ 'SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb' ], 'kubectl': [ 'SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb' ], }
print('Common functions defined successfully.')
###Output
_____no_output_____
###Markdown
Get the Kubernetes namespace for the big data clusterGet the namespace of the Big Data Cluster using the kubectl command line interface.**NOTE:** If there is more than one Big Data Cluster in the target Kubernetes cluster, then either:- set \[0\] to the correct value for the big data cluster.- set the environment variable AZDATA\_NAMESPACE before starting Azure Data Studio.
###Code
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
namespace = os.environ["AZDATA_NAMESPACE"]
else:
try:
namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True)
except:
from IPython.display import Markdown
print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.")
display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
raise
print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}')
###Output
_____no_output_____
###Markdown
Create a temporary directory to stage files
###Code
# Create a temporary directory to hold configuration files
import tempfile
temp_dir = tempfile.mkdtemp()
print(f"Temporary directory created: {temp_dir}")
###Output
_____no_output_____
###Markdown
Helper function to save configuration files to disk
###Code
# Define helper function 'save_file' to save configuration files to the temporary directory created above
import os
import io
def save_file(filename, contents):
with io.open(os.path.join(temp_dir, filename), "w", encoding='utf8', newline='\n') as text_file:
text_file.write(contents)
print("File saved: " + os.path.join(temp_dir, filename))
print("Function `save_file` defined successfully.")
###Output
_____no_output_____
###Markdown
Get name of the ‘Running’ `controller` `pod`
###Code
# Place the name of the 'Running' controller pod in variable `controller`
controller = run(f'kubectl get pod --selector=app=controller -n {namespace} -o jsonpath={{.items[0].metadata.name}} --field-selector=status.phase=Running', return_output=True)
print(f"Controller pod name: {controller}")
###Output
_____no_output_____
###Markdown
Validate certificate common name and alt names
###Code
import json
from urllib.parse import urlparse
kubernetes_default_record_name = 'kubernetes.default'
kubernetes_default_svc_prefix = 'kubernetes.default.svc'
default_dns_suffix = 'svc.cluster.local'
dns_suffix = ''
nslookup_output=run(f'kubectl exec {controller} -c controller -n {namespace} -- bash -c "nslookup {kubernetes_default_record_name} > /tmp/nslookup.out; cat /tmp/nslookup.out; rm /tmp/nslookup.out" ', return_output=True)
name = re.findall('Name:\s+(.[^,|^\s|^\n]+)', nslookup_output)
if not name or kubernetes_default_svc_prefix not in name[0]:
dns_suffix = default_dns_suffix
else:
dns_suffix = 'svc' + name[0].replace(kubernetes_default_svc_prefix, '')
alt_names = ""
bdc_fqdn = ""
alt_names += f"DNS.1 = {common_name}\n"
alt_names += f"DNS.2 = {common_name}.{namespace}.{dns_suffix} \n"
hdfs_vault_svc = "hdfsvault-svc"
mssql_vault_svc = "mssqlvault-svc"
bdc_config = run("azdata bdc config show", return_output=True)
bdc_config = json.loads(bdc_config)
dns_counter = 3 # DNS.1 and DNS.2 are already in the certificate template
# Stateful set related DNS names
#
if app_name == "gateway" or app_name == "master":
alt_names += f'DNS.{str(dns_counter)} = {pod}.{common_name}\n'
dns_counter = dns_counter + 1
alt_names += f'DNS.{str(dns_counter)} = {pod}.{common_name}.{namespace}.{dns_suffix}\n'
dns_counter = dns_counter + 1
# AD related DNS names
#
if "security" in bdc_config["spec"] and "activeDirectory" in bdc_config["spec"]["security"]:
domain_dns_name = bdc_config["spec"]["security"]["activeDirectory"]["domainDnsName"]
subdomain_name = bdc_config["spec"]["security"]["activeDirectory"]["subdomain"]
if subdomain_name:
bdc_fqdn = f"{subdomain_name}.{domain_dns_name}"
else:
bdc_fqdn = f"{namespace}.{domain_dns_name}"
alt_names += f"DNS.{str(dns_counter)} = {common_name}.{bdc_fqdn}\n"
dns_counter = dns_counter + 1
if app_name == "gateway" or app_name == "master":
alt_names += f'DNS.{str(dns_counter)} = {pod}.{bdc_fqdn}\n'
dns_counter = dns_counter + 1
# Endpoint DNS names for bdc certificates
#
if app_name in bdc_config["spec"]["resources"]:
app_name_endpoints = bdc_config["spec"]["resources"][app_name]["spec"]["endpoints"]
for endpoint in app_name_endpoints:
if "dnsName" in endpoint:
alt_names += f'DNS.{str(dns_counter)} = {endpoint["dnsName"]}\n'
dns_counter = dns_counter + 1
# Endpoint DNS names for control plane certificates
#
if app_name == "controller" or app_name == "mgmtproxy":
bdc_endpoint_list = run("azdata bdc endpoint list", return_output=True)
bdc_endpoint_list = json.loads(bdc_endpoint_list)
# Parse the DNS host name from:
#
# "endpoint": "https://monitor.aris.local:30777"
#
for endpoint in bdc_endpoint_list:
if endpoint["name"] == app_name:
url = urlparse(endpoint["endpoint"])
alt_names += f"DNS.{str(dns_counter)} = {url.hostname}\n"
dns_counter = dns_counter + 1
# Special case for the controller certificate
#
if app_name == "controller":
alt_names += f"DNS.{str(dns_counter)} = localhost\n"
dns_counter = dns_counter + 1
# Add hdfsvault-svc host for key management calls.
#
alt_names += f"DNS.{str(dns_counter)} = {hdfs_vault_svc}\n"
dns_counter = dns_counter + 1
# Add mssqlvault-svc host for key management calls.
#
alt_names += f"DNS.{str(dns_counter)} = {mssql_vault_svc}\n"
dns_counter = dns_counter + 1
# Add hdfsvault-svc FQDN for key management calls.
#
if bdc_fqdn:
alt_names += f"DNS.{str(dns_counter)} = {hdfs_vault_svc}.{bdc_fqdn}\n"
dns_counter = dns_counter + 1
# Add mssqlvault-svc FQDN for key management calls.
#
if bdc_fqdn:
alt_names += f"DNS.{str(dns_counter)} = {mssql_vault_svc}.{bdc_fqdn}\n"
dns_counter = dns_counter + 1
required_dns_names = re.findall('DNS\.[0-9] = ([^,|^\s|^\n]+)', alt_names)
# Get certificate common name and DNS names
# use nameopt compat, to generate CN= format on all versions of openssl
#
cert = run(f'kubectl exec {controller} -c controller -n {namespace} -- openssl x509 -nameopt compat -in {test_cert_store_root}/{app_name}/{prefix_keyfile_name}-certificate.pem -text -noout', return_output=True)
subject = re.findall('Subject:(.+)', cert)[0]
certificate_common_name = re.findall('CN=(.[^,|^\s|^\n]+)', subject)[0]
certificate_dns_names = re.findall('DNS:(.[^,|^\s|^\n]+)', cert)
# Validate the common name
#
if (common_name != certificate_common_name):
run(f'kubectl exec {controller} -c controller -n {namespace} -- bash -c "rm -rf {test_cert_store_root}/{app_name}"')
raise SystemExit(f'Certificate common name does not match the expected one: {common_name}')
# Validate the DNS names
#
if not all(dns_name in certificate_dns_names for dns_name in required_dns_names):
run(f'kubectl exec {controller} -c controller -n {namespace} -- bash -c "rm -rf {test_cert_store_root}/{app_name}"')
raise SystemExit(f'Certificate does not have all required DNS names: {required_dns_names}')
###Output
_____no_output_____
###Markdown
Copy certificate files from `controller` to local machine
###Code
import os
cwd = os.getcwd()
os.chdir(temp_dir) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line
run(f'kubectl cp {controller}:{test_cert_store_root}/{app_name}/{prefix_keyfile_name}-certificate.p12 {prefix_keyfile_name}-certificate.p12 -c controller -n {namespace}')
run(f'kubectl cp {controller}:{test_cert_store_root}/{app_name}/{prefix_keyfile_name}-certificate.pem {prefix_keyfile_name}-certificate.pem -c controller -n {namespace}')
run(f'kubectl cp {controller}:{test_cert_store_root}/{app_name}/{prefix_keyfile_name}-privatekey.pem {prefix_keyfile_name}-privatekey.pem -c controller -n {namespace}')
f = open(f"{prefix_keyfile_name}-privatekey.pem", "r")
private_key = f.read()
if not private_key.startswith("-----BEGIN RSA PRIVATE KEY-----"):
raise SystemExit(f'Incorrect private key format')
os.chdir(cwd)
###Output
_____no_output_____
###Markdown
Copy certificate files from local machine to `controldb`
###Code
import os
cwd = os.getcwd()
os.chdir(temp_dir) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line
run(f'kubectl cp {prefix_keyfile_name}-certificate.p12 controldb-0:/var/opt/mssql/{prefix_keyfile_name}-certificate.p12 -c mssql-server -n {namespace}')
run(f'kubectl cp {prefix_keyfile_name}-certificate.pem controldb-0:/var/opt/mssql/{prefix_keyfile_name}-certificate.pem -c mssql-server -n {namespace}')
run(f'kubectl cp {prefix_keyfile_name}-privatekey.pem controldb-0:/var/opt/mssql/{prefix_keyfile_name}-privatekey.pem -c mssql-server -n {namespace}')
os.chdir(cwd)
###Output
_____no_output_____
###Markdown
Get the `controller-db-rw-secret` secretGet the controller SQL symmetric key password for decryption.
###Code
import base64
controller_db_rw_secret = run(f'kubectl get secret/controller-db-rw-secret -n {namespace} -o jsonpath={{.data.encryptionPassword}}', return_output=True)
controller_db_rw_secret = base64.b64decode(controller_db_rw_secret).decode('utf-8')
print("controller_db_rw_secret retrieved")
###Output
_____no_output_____
###Markdown
Update the files table with the certificates through opened SQL connection
###Code
import os
now = datetime.datetime.now()
nowstr = now.strftime("%Y_%m_%d_%H_%M_%S")
sql = f"""
OPEN SYMMETRIC KEY ControllerDbSymmetricKey DECRYPTION BY PASSWORD = '{controller_db_rw_secret}'
DECLARE @FileData VARBINARY(MAX), @Key uniqueidentifier;
SELECT @Key = KEY_GUID('ControllerDbSymmetricKey');
SELECT TOP 1 @FileData = [dbo].[fn_DecryptLob]([data]) FROM [dbo].[Files] WHERE [file_path] = '/config/scaledsets/control/containers/{container_name}/files/{prefix_keyfile_name}-certificate.p12' ORDER BY [created_time] DESC;
EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/control/containers/{container_name}/backupfiles/{prefix_keyfile_name}-certificate-{nowstr}.p12',
@Data = @FileData,
@KeyGuid = @Key,
@Version = '0',
@User = '',
@Group = '',
@Mode = '';
SELECT TOP 1 @FileData = [dbo].[fn_DecryptLob]([data]) FROM [dbo].[Files] WHERE [file_path] = '/config/scaledsets/control/containers/{container_name}/files/{prefix_keyfile_name}-certificate.pem' ORDER BY [created_time] DESC;
EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/control/containers/{container_name}/backupfiles/{prefix_keyfile_name}-certificate-{nowstr}.pem',
@Data = @FileData,
@KeyGuid = @Key,
@Version = '0',
@User = '',
@Group = '',
@Mode = '';
SELECT TOP 1 @FileData = [dbo].[fn_DecryptLob]([data]) FROM [dbo].[Files] WHERE [file_path] = '/config/scaledsets/control/containers/{container_name}/files/{prefix_keyfile_name}-privatekey.pem' ORDER BY [created_time] DESC;
EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/control/containers/{container_name}/backupfiles/{prefix_keyfile_name}-privatekey-{nowstr}.pem',
@Data = @FileData,
@KeyGuid = @Key,
@Version = '0',
@User = '',
@Group = '',
@Mode = '';
SELECT TOP 1 @FileData = doc.BulkColumn FROM OPENROWSET(BULK N'/var/opt/mssql/{prefix_keyfile_name}-certificate.p12', SINGLE_BLOB) AS doc;
EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/control/containers/{container_name}/files/{prefix_keyfile_name}-certificate.p12',
@Data = @FileData,
@KeyGuid = @Key,
@Version = '0',
@User = '',
@Group = '',
@Mode = '';
SELECT TOP 1 @FileData = doc.BulkColumn FROM OPENROWSET(BULK N'/var/opt/mssql/{prefix_keyfile_name}-certificate.pem', SINGLE_BLOB) AS doc;
EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/control/containers/{container_name}/files/{prefix_keyfile_name}-certificate.pem',
@Data = @FileData,
@KeyGuid = @Key,
@Version = '0',
@User = '',
@Group = '',
@Mode = '';
SELECT TOP 1 @FileData = doc.BulkColumn FROM OPENROWSET(BULK N'/var/opt/mssql/{prefix_keyfile_name}-privatekey.pem', SINGLE_BLOB) AS doc;
EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/control/containers/{container_name}/files/{prefix_keyfile_name}-privatekey.pem',
@Data = @FileData,
@KeyGuid = @Key,
@Version = '0',
@User = '',
@Group = '',
@Mode = '';
"""
save_file("insert_certificates.sql", sql)
cwd = os.getcwd()
os.chdir(temp_dir) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line
run(f'kubectl cp insert_certificates.sql controldb-0:/var/opt/mssql/insert_certificates.sql -c mssql-server -n {namespace}')
run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "SQLCMDPASSWORD=`cat /var/run/secrets/credentials/mssql-sa-password/password` /opt/mssql-tools/bin/sqlcmd -b -U sa -d controller -i /var/opt/mssql/insert_certificates.sql" """)
# Cleanup
run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/insert_certificates.sql" """)
run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/{prefix_keyfile_name}-certificate.p12" """)
run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/{prefix_keyfile_name}-certificate.pem" """)
run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/{prefix_keyfile_name}-privatekey.pem" """)
os.chdir(cwd)
###Output
_____no_output_____
###Markdown
Clean up certificate staging areaRemove the certificate files generated on disk (they have now been placed in the controller database).
###Code
cmd = f"rm -r {test_cert_store_root}/{app_name}"
run(f'kubectl exec {controller} -c controller -n {namespace} -- bash -c "{cmd}"')
###Output
_____no_output_____
###Markdown
Clear out the controller\_db\_rw\_secret variable
###Code
controller_db_rw_secret= ""
###Output
_____no_output_____
###Markdown
Restart `controller` to pick up new certificates.Delete the controller pod so that it restarts and picks up the new certificates.
###Code
run(f'kubectl delete pod {controller} -n {namespace}')
###Output
_____no_output_____
###Markdown
Wait for controller to be healthy
###Code
import json
import threading
import time
timeout_s = 600
check_interval_s = 10
def get_controller_health(health_map):
controller_health = run(f"azdata bdc status show --r control", return_output=True)
controller_health_json = json.loads(controller_health)
controller_health_status = controller_health_json['healthStatus']
health_map['controller'] = controller_health_status
return True
def controller_healthy():
while True:
controller_health_status = 'unhealthy'
health_map = {}
time.sleep(check_interval_s)
getControllerHealthThread = threading.Thread(target=get_controller_health, args=(health_map,) )
getControllerHealthThread.start()
getControllerHealthThread.join(timeout=timeout_s)
if getControllerHealthThread.is_alive():
raise SystemExit("Timeout getting controller health.")
controller_health_status = health_map['controller'] if 'controller' in health_map else 'unhealthy'
if (controller_health_status == 'healthy'):
return True
def wait_for_controller_to_be_healthy():
waitForControllerToBehealthyThread = threading.Thread(target=controller_healthy)
waitForControllerToBehealthyThread.start()
waitForControllerToBehealthyThread.join(timeout=timeout_s)
if waitForControllerToBehealthyThread.is_alive():
raise SystemExit("Timeout waiting for controller to be healthy.")
wait_for_controller_to_be_healthy()
###Output
_____no_output_____
###Markdown
Clean up temporary directory for staging configuration files
###Code
# Delete the temporary directory used to hold configuration files
import shutil
shutil.rmtree(temp_dir)
print(f'Temporary directory deleted: {temp_dir}')
print("Notebook execution is complete.")
###Output
_____no_output_____ |
app-analysis/Mobile-app-analysis.ipynb | ###Markdown
Mobile-app-analysisApple estore and google play app analysis to identify common features among the most downloaded apps
###Code
from csv import reader
apple_file = list(reader(open('app-store-apple-data-set-10k-apps/AppleStore.csv', 'r')))
android_file = list(reader(open('google-play-store-apps/googleplaystore.csv', 'r')))
apple_header = apple_file[0]
apple_file = apple_file[1:]
android_header = android_file[0]
android_file = android_file[1:]
def explore_data(dataset, start, end, rows_and_columns=False):
dataset_slice = dataset[start:end]
for row in dataset_slice:
print(row)
print('\n') # adds a new (empty) line after each row
if rows_and_columns:
print('Number of rows:', len(dataset))
print('Number of columns:', len(dataset[0]))
###Output
_____no_output_____
###Markdown
Exploring the data sets Apple store header and first row
###Code
apple_header
explore_data(apple_file, 0, 1, True)
###Output
['1', '281656475', 'PAC-MAN Premium', '100788224', 'USD', '3.99', '21292', '26', '4', '4.5', '6.3.5', '4+', 'Games', '38', '5', '10', '1']
Number of rows: 7197
Number of columns: 17
###Markdown
Google play header and first row
###Code
android_header
explore_data(android_file, 0, 1, True)
###Output
['Photo Editor & Candy Camera & Grid & ScrapBook', 'ART_AND_DESIGN', '4.1', '159', '19M', '10,000+', 'Free', '0', 'Everyone', 'Art & Design', 'January 7, 2018', '1.0.0', '4.0.3 and up']
Number of rows: 10841
Number of columns: 13
###Markdown
Cleaning the dataAs discussed [here](https://www.kaggle.com/lava18/google-play-store-apps/discussion/66015latest-600082) the column "Category" is missing at row index 10472. So I removed this line from the dataset.
###Code
explore_data(android_file, 10472, 10473, False)
#del android_file[10472]
###Output
_____no_output_____
###Markdown
Duplicate EntriesThe Google Play dataset has some duplicate entries, probably due to collecting the data at different times. First, I'll remove the duplicate lines, keeping only one entry for any duplicate. As the lines are identical, no information will be lost. After that, I will look at apps with duplicate names but with differences in any other columns.
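As a rough sketch of that first step (not taken from the original notebook's cells, and introducing the hypothetical names `seen` and `android_unique`), exact duplicate rows in the `android_file` list loaded above could be dropped like this:
```python
# Sketch: keep only the first occurrence of each exact duplicate row.
# Assumes android_file is the list of data rows (header already removed) loaded earlier.
seen = set()
android_unique = []
for row in android_file:
    key = tuple(row)
    if key not in seen:
        seen.add(key)
        android_unique.append(row)
print(len(android_file), len(android_unique))
```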
###Code
def find_duplicate(dataset, multi_index=True):
unique_apps=[]
duplicate_apps=[]
if multi_index:
for row in dataset:
if row[0] in unique_apps:
if row[0] not in duplicate_apps:
duplicate_apps.append(row[0])
else:
unique_apps.append(row[0])
else:
for row in dataset:
if row in unique_apps:
if row not in duplicate_apps:
duplicate_apps.append(row)
else:
unique_apps.append(row)
return duplicate_apps
duplicate_apps = find_duplicate(android_file)
len(duplicate_apps)
duplicate = find_duplicate(duplicate_apps,False)
duplicate
###Output
_____no_output_____ |
EDA/IncomePredictionEDA.ipynb | ###Markdown
If we plot the distribution of the target column, we'd find that the people with less than 50K annual income outnumber the people with an annual income greater than 50K
###Code
plt.hist(y)
###Output
_____no_output_____
###Markdown
Hence, the dataset is imbalanced. We need to introduce some random sampling to make it balanced.
###Code
rdsmple = RandomOverSampler()
x_sampled,y_sampled = rdsmple.fit_sample(x,y)
# again plotting the target column
plt.hist(y_sampled)
###Output
_____no_output_____
###Markdown
As shown above, the data now looks balanced.
###Code
# splitting the data into training and test set
from sklearn.model_selection import train_test_split
train_x,test_x,train_y,test_y=train_test_split(x_sampled,y_sampled, random_state=355 )
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import CategoricalNB
gnb = GaussianNB(priors=None, var_smoothing=0.05)
y_pred = gnb.fit(train_x, train_y).predict(test_x)
from sklearn.metrics import accuracy_score
sc=accuracy_score(test_y,y_pred)
sc
from sklearn.model_selection import GridSearchCV
param_grid = {"var_smoothing": [1e-9,0.1, 0.001, 0.5,0.05,0.01,1e-8,1e-7,1e-6,1e-10,1e-11]}
grid = GridSearchCV(estimator=gnb, param_grid=param_grid, cv=5, verbose=3)
grid.fit(train_x, train_y)
grid.best_estimator_
from xgboost import XGBClassifier
xgb=XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=1, criterion='gini', gamma=0,
learning_rate=0.1, max_delta_step=0, max_depth=9,
min_child_weight=1, missing=None, n_estimators=130, n_jobs=1,
nthread=None, objective='binary:logistic', random_state=0,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
silent=None, subsample=1, verbosity=1)
y_pred = xgb.fit(train_x, train_y).predict(test_x)
ac2=accuracy_score(test_y,y_pred)
ac2
param_grid = {"n_estimators": [10, 50, 100, 130], "criterion": ['gini', 'entropy'],
"max_depth": range(2, 10, 1)}
#Creating an object of the Grid Search class
grid = GridSearchCV(estimator=xgb, param_grid=param_grid, cv=5, verbose=3,n_jobs=-1)
#finding the best parameters
grid.fit(train_x, train_y)
grid.best_estimator_
###Output
_____no_output_____ |
NetworkData.ipynb | ###Markdown
Visualizing Network Data Combining NetworkX with Altair
###Code
import networkx as nx
import nx_altair as nxa
import numpy as np
# Generate a random graph
G = nx.fast_gnp_random_graph(n=20, p=0.25)
# Compute positions for viz.
pos = nx.spring_layout(G)
# Draw the graph using Altair
viz = nxa.draw_networkx(G, pos=pos)
# Show it as an interactive plot!
viz.interactive()
# Add weights to nodes and edges
for n in G.nodes():
G.nodes[n]['weight'] = np.random.randn()
for e in G.edges():
G.edges[e]['weight'] = np.random.uniform(1, 10)
# Draw the graph using Altair
viz = nxa.draw_networkx(
G, pos=pos,
node_color='weight',
cmap='viridis',
width='weight',
edge_color='black',
)
# Show it as an interactive plot!
viz.interactive()
pos = nx.circular_layout(G)
viz = nxa.draw_networkx(G,
pos=pos,
node_color='weight',
cmap='viridis',
width='weight',
edge_color='black',
)
viz.interactive()
G = nx.path_graph(12)
pos = nx.planar_layout(G)
viz = nxa.draw_networkx(G,
pos=pos,
)
viz.interactive()
###Output
_____no_output_____
###Markdown
Vega (experimental)
###Code
display({
"application/vnd.vega.v5+json": {
"$schema": "https://vega.github.io/schema/vega/v5.json",
"description": "A node-link diagram with force-directed layout, depicting character co-occurrence in the novel Les Misérables.",
"width": 700,
"height": 500,
"padding": 0,
"autosize": "none",
"signals": [
{ "name": "cx", "update": "width / 2" },
{ "name": "cy", "update": "height / 2" },
{ "name": "nodeRadius", "value": 8,
"bind": {"input": "range", "min": 1, "max": 50, "step": 1} },
{ "name": "nodeCharge", "value": -30,
"bind": {"input": "range", "min":-100, "max": 10, "step": 1} },
{ "name": "linkDistance", "value": 30,
"bind": {"input": "range", "min": 5, "max": 100, "step": 1} },
{ "name": "static", "value": True,
"bind": {"input": "checkbox"} },
{
"description": "State variable for active node fix status.",
"name": "fix", "value": False,
"on": [
{
"events": "symbol:mouseout[!event.buttons], window:mouseup",
"update": "false"
},
{
"events": "symbol:mouseover",
"update": "fix || true"
},
{
"events": "[symbol:mousedown, window:mouseup] > window:mousemove!",
"update": "xy()",
"force": True
}
]
},
{
"description": "Graph node most recently interacted with.",
"name": "node", "value": "",
"on": [
{
"events": "symbol:mouseover",
"update": "fix === true ? item() : node"
}
]
},
{
"description": "Flag to restart Force simulation upon data changes.",
"name": "restart", "value": False,
"on": [
{"events": {"signal": "fix"}, "update": "fix && fix.length"}
]
}
],
"data": [
{
"name": "node-data",
"url": "data/miserables.json",
"format": {"type": "json", "property": "nodes"}
},
{
"name": "link-data",
"url": "data/miserables.json",
"format": {"type": "json", "property": "links"}
}
],
"scales": [
{
"name": "color",
"type": "ordinal",
"domain": {"data": "node-data", "field": "group"},
"range": {"scheme": "category20c"}
}
],
"marks": [
{
"name": "nodes",
"type": "symbol",
"zindex": 1,
"from": {"data": "node-data"},
"on": [
{
"trigger": "fix",
"modify": "node",
"values": "fix === true ? {fx: node.x, fy: node.y} : {fx: fix[0], fy: fix[1]}"
},
{
"trigger": "!fix",
"modify": "node", "values": "{fx: null, fy: null}"
}
],
"encode": {
"enter": {
"fill": {"scale": "color", "field": "group"},
"stroke": {"value": "white"}
},
"update": {
"size": {"signal": "2 * nodeRadius * nodeRadius"},
"cursor": {"value": "pointer"}
}
},
"transform": [
{
"type": "force",
"iterations": 300,
"restart": {"signal": "restart"},
"static": {"signal": "static"},
"signal": "force",
"forces": [
{"force": "center", "x": {"signal": "cx"}, "y": {"signal": "cy"}},
{"force": "collide", "radius": {"signal": "nodeRadius"}},
{"force": "nbody", "strength": {"signal": "nodeCharge"}},
{"force": "link", "links": "link-data", "distance": {"signal": "linkDistance"}}
]
}
]
},
{
"type": "path",
"from": {"data": "link-data"},
"interactive": False,
"encode": {
"update": {
"stroke": {"value": "#ccc"},
"strokeWidth": {"value": 0.5}
}
},
"transform": [
{
"type": "linkpath",
"require": {"signal": "force"},
"shape": "line",
"sourceX": "datum.source.x", "sourceY": "datum.source.y",
"targetX": "datum.target.x", "targetY": "datum.target.y"
}
]
}
]
}
}, raw=True)
###Output
_____no_output_____ |
DeepLearning - Udacity/code/first-neural-network/.ipynb_checkpoints/Your_first_neural_network-checkpoint.ipynb | ###Markdown
Your first neural network. In this project, you will build your first neural network and use it to predict daily bike rental ridership. We provide some of the code, but the neural network implementation (most of it) is left to you. After submitting this project, feel free to explore the data and the model further.
###Code
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Load and prepare the data. A key step in building a neural network is preparing the data correctly. Variables on different scales make it hard for the network to efficiently learn the correct weights. We have provided the code to load and prepare the data below. You will learn more about this code soon!
###Code
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
###Output
_____no_output_____
###Markdown
Data overview. This dataset contains the hourly count of bike riders for each day from January 1, 2011 to December 31, 2012. Riders are split into casual and registered users, and the cnt column is the total count of riders. You can see the first few rows of the data above. The figure below shows the number of riders over roughly the first 10 days of the dataset (some days do not have exactly 24 entries, so it is not exactly 10 days). You can see the hourly rentals here. This data is complex! Ridership is lower on weekends and peaks during commuting hours on workdays. We can also see temperature, humidity and wind speed information in the data above, all of which affect the number of riders. Your model needs to capture all of this.
###Code
rides[:24*10].plot(x='dteday', y='cnt')
###Output
_____no_output_____
###Markdown
Dummy variables. Below are some categorical variables, such as season, weather and month. To include these in our model, we need to create binary dummy variables. This is easy to do with `get_dummies()` from the Pandas library.
###Code
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
###Output
_____no_output_____
###Markdown
Scaling the target variables. To make training the network easier, we standardize each continuous variable, i.e. shift and scale it so that it has zero mean and a standard deviation of 1. We save the scaling factors so that we can convert the data back when we use the network to make predictions.
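As a side note (a sketch, not part of the original notebook; `predictions` is a placeholder array and `scaled_features` is the dictionary defined in the next cell), undoing the scaling for the `cnt` column would look like this:
```python
# Sketch: convert standardized predictions back to the original ride counts.
mean, std = scaled_features['cnt']
unscaled_predictions = predictions * std + mean
```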
###Code
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
data.head()
###Output
C:\ProgramData\Anaconda3\envs\tf.gpu\lib\site-packages\pandas\core\indexing.py:537: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
self.obj[item] = s
###Markdown
Splitting the data into training, test and validation sets. We keep approximately the last 21 days of data as a test set, which is used after the network has been trained. We will use this set to make predictions and compare them to the actual number of riders.
###Code
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
###Output
_____no_output_____
###Markdown
We split the remaining data into two sets: one for training and one for validating the network after it has been trained. Because the data has a time-series nature, we train on historical data and then try to predict future data (the validation set).
###Code
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
###Output
_____no_output_____
###Markdown
Build the network. Below you will build your own network. We have already set up the structure and the backward pass; you will implement the forward pass of the network. You also need to set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes. The network has two layers, a hidden layer and an output layer. The hidden layer uses the sigmoid function as its activation function. The output layer has a single node and is used for regression, so the node's output is the same as its input, i.e. the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, taking a threshold into account, is called an activation function. We work through each layer of the network, computing the output of every neuron. All the outputs of one layer become the inputs of the neurons in the next layer. This process is called forward propagation. We use weights to propagate the signal from the input layer to the output layer, and we also use weights to propagate the error from the output layer back through the network in order to update the weights. This is called backpropagation.> **Hint**: for backpropagation you need the derivative of the output activation function ($f(x) = x$). If you are not familiar with calculus, this function is simply the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$. You need to complete the following tasks: 1. Implement the sigmoid activation function. Set `self.activation_function` in `__init__` to your sigmoid function. 2. Implement the forward pass in the `train` method. 3. Implement the backpropagation algorithm in the `train` method, including computing the output error. 4. Implement the forward pass in the `run` method.
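For reference, here is a minimal sketch (not the graded implementation, which goes in the class below) of the sigmoid and the derivatives involved:
```python
import numpy as np

def sigmoid(x):
    # S-shaped activation used in the hidden layer
    return 1 / (1 + np.exp(-x))

def sigmoid_prime(x):
    # derivative of the sigmoid, expressed through its own output
    s = sigmoid(x)
    return s * (1 - s)

# The output activation is f(x) = x, so its derivative is simply 1.
```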
###Code
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### TODO: Set self.activation_function to your implemented sigmoid function ####
#
# Note: in Python, you can define a function with a lambda expression,
# as shown below.
self.activation_function = lambda x : 1 / (1 + np.exp(-x)) # Replace 0 with your sigmoid calculation.
### If the lambda code above is not something you're familiar with,
# You can uncomment out the following three lines and put your
# implementation there instead.
#
#def sigmoid(x):
# return 0 # Replace 0 with your sigmoid calculation here
#self.activation_function = sigmoid
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for x, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer - Replace these values with your calculations.
hidden_inputs = np.dot(x, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with your calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
# Tip: the output layer's activation function is y = x
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error - Replace this value with your calculations.
output_error = y - final_outputs # Output layer error is the difference between desired target and actual output.
output_error_term = output_error # the output activation is f(x) = x, so its derivative is 1
# TODO: Calculate the hidden layer's contribution to the error
hidden_error = np.dot(self.weights_hidden_to_output, output_error_term)
hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)
# Weight step (hidden to output)
delta_weights_h_o += hidden_outputs[:,None] * output_error_term
# Weight step (input to hidden)
delta_weights_i_h += hidden_error_term * x[:,None]
# TODO: Update the weights - Replace these values with your calculations.
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# TODO: Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = np.dot(features , self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with the appropriate calculations.
final_inputs = np.dot(hidden_outputs , self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
###Output
_____no_output_____
###Markdown
Unit tests. Run these unit tests to check that your network implementation is correct. This helps you make sure the network is implemented correctly before you start training it. These tests must pass in order to pass this project.
###Code
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
for i in range(100):
network.train(inputs, targets)
print(network.weights_hidden_to_output)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328],
[-0.03172939]])))
print(network.weights_input_to_hidden)
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
###Output
....F
###Markdown
Train the network. Now you will set the hyperparameters for the network. The strategy is to choose hyperparameters that give a small error on the training set without overfitting the data. If you train the network for too long, or use too many hidden nodes, it may become too specialized to the training set and fail to generalize to the validation set, i.e. the validation loss starts to increase while the training loss keeps decreasing. You will also train the network with stochastic gradient descent (SGD): for each training pass you take a random sample of the data instead of the whole dataset. Compared with ordinary gradient descent this requires more passes, but each pass is faster, so training is more efficient overall. You will learn more about SGD later. Choosing the number of iterations: this is the number of batches sampled from the training data while training the network. More iterations fit the data better, but too many iterations mean the model will not generalize well to other data, which is called overfitting. Choose a number that keeps the training loss low while the validation loss stays moderate; when you start to overfit, the training loss keeps dropping but the validation loss starts to rise. Choosing the learning rate: the learning rate scales the size of the weight updates. If it is too large, the weights blow up and the network fails to fit the data; 0.1 is a suggested starting point, and if the network has trouble fitting the data, try lowering it. Note that the lower the learning rate, the smaller the weight-update steps and the longer the network takes to converge. Choosing the number of hidden nodes: more hidden nodes generally give more accurate predictions. Try different numbers of hidden nodes and see how that affects performance; you can look at the losses dictionary for a metric of network performance. If there are too few hidden units the model does not have enough capacity to learn, and if there are too many there are too many directions for learning to take. The trick is to find the right balance.
###Code
import sys
### Set the hyperparameters here ###
iterations = 100
learning_rate = 0.1
hidden_nodes = 2
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
###Output
_____no_output_____
###Markdown
Check the predictions. Use the test data to see how well the network models the data. If it is completely wrong, make sure every step in the network is implemented correctly.
###Code
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
###Output
_____no_output_____ |
techniques with code/Handling Imbalanced Data- Over Sampling.ipynb | ###Markdown
Credit Card Kaggle- Fixing Imbalanced Dataset ContextIt is important that credit card companies are able to recognize fraudulent credit card transactions so that customers are not charged for items that they did not purchase. ContentThe dataset contains transactions made by credit cards in September 2013 by European cardholders. This dataset presents transactions that occurred in two days, where we have 492 frauds out of 284,807 transactions. The dataset is highly unbalanced: the positive class (frauds) accounts for 0.172% of all transactions. It contains only numerical input variables which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about the data. Features V1, V2, ... V28 are the principal components obtained with PCA; the only features which have not been transformed with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset. The feature 'Amount' is the transaction Amount; this feature can be used for example-dependent cost-sensitive learning. Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise. InspirationIdentify fraudulent credit card transactions.Given the class imbalance ratio, we recommend measuring the accuracy using the Area Under the Precision-Recall Curve (AUPRC). Confusion matrix accuracy is not meaningful for unbalanced classification. AcknowledgementsThe dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project
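As a hedged illustration of the AUPRC metric mentioned above (not part of this notebook's code), scikit-learn can compute it roughly as follows, where `y_true` and `y_scores` are placeholders for the true labels and the model's fraud scores:
```python
from sklearn.metrics import average_precision_score, precision_recall_curve

# y_true: ground-truth labels (0 = normal, 1 = fraud); y_scores: predicted fraud probabilities
auprc = average_precision_score(y_true, y_scores)
precision, recall, thresholds = precision_recall_curve(y_true, y_scores)
print(f"Area under the precision-recall curve: {auprc:.4f}")
```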
###Code
import numpy as np
import pandas as pd
import sklearn
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report,accuracy_score
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from sklearn.svm import OneClassSVM
from pylab import rcParams
rcParams['figure.figsize'] = 14, 8
RANDOM_SEED = 42
LABELS = ["Normal", "Fraud"]
data = pd.read_csv('creditcard.csv',sep=',')
data.head()
data.info()
#Create independent and Dependent Features
columns = data.columns.tolist()
# Filter the columns to remove data we do not want
columns = [c for c in columns if c not in ["Class"]]
# Store the variable we are predicting
target = "Class"
# Define a random state
state = np.random.RandomState(42)
X = data[columns]
Y = data[target]
# Print the shapes of X & Y
print(X.shape)
print(Y.shape)
###Output
(284807, 30)
(284807,)
###Markdown
Exploratory Data Analysis
###Code
data.isnull().values.any()
count_classes = pd.value_counts(data['Class'], sort = True)
count_classes.plot(kind = 'bar', rot=0)
plt.title("Transaction Class Distribution")
plt.xticks(range(2), LABELS)
plt.xlabel("Class")
plt.ylabel("Frequency")
## Get the Fraud and the normal dataset
fraud = data[data['Class']==1]
normal = data[data['Class']==0]
print(fraud.shape,normal.shape)
from imblearn.combine import SMOTETomek
from imblearn.under_sampling import NearMiss
# Implementing Oversampling for Handling Imbalanced
smk = SMOTETomek(random_state=42)
X_res,y_res=smk.fit_sample(X,Y)
X_res.shape,y_res.shape
from collections import Counter
print('Original dataset shape {}'.format(Counter(Y)))
print('Resampled dataset shape {}'.format(Counter(y_res)))
## RandomOverSampler to handle imbalanced data
from imblearn.over_sampling import RandomOverSampler
os = RandomOverSampler(ratio=0.5)
X_train_res, y_train_res = os.fit_sample(X, Y)
X_train_res.shape,y_train_res.shape
print('Original dataset shape {}'.format(Counter(Y)))
print('Resampled dataset shape {}'.format(Counter(y_train_res)))
# In this example I use SMOTETomek, which is a method from imblearn. SMOTETomek is a hybrid method
# which combines an under-sampling method (Tomek) with an over-sampling method (SMOTE).
os_us = SMOTETomek(ratio=0.5)
X_train_res1, y_train_res1 = os_us.fit_sample(X, Y)
X_train_res1.shape,y_train_res1.shape
print('Original dataset shape {}'.format(Counter(Y)))
print('Resampled dataset shape {}'.format(Counter(y_train_res1)))
###Output
Original dataset shape Counter({0: 284315, 1: 492})
Resampled dataset shape Counter({0: 283473, 1: 141315})
|
nbplain/06_copy_access.ipynb | ###Markdown
Copy and Access Numpy Arrays Overview:- **Teaching:** 5 min- **Exercises:** 15 min**Questions**- How do I copy numpy arrays?- How do I access elements or subsets of arrays?**Objectives**- Understand the difference between assigining and copying `numpy` arrays.- Know how to access elements, slices and subsets of `numpy` arrays. To Copy or not to Copy (Numpy Arrays)First as always we must import `numpy`:
###Code
import numpy as np
a = np.array( [-1, 0, 1] )
print(a)
###Output
[-1 0 1]
###Markdown
Now let's assign the array we've created to another variable:
###Code
b = a
print("a", a)
print("b", b)
###Output
a [-1 0 1]
b [-1 0 1]
###Markdown
If we now modify `a` what will happen to `b`
###Code
a[0]=2
print(a)
print(b)
###Output
[2 0 1]
[2 0 1]
###Markdown
When we assign `a` to `b`, both variables point to the same object (the same happens with lists!). Copy()If we want to copy a numpy array we must explicitly call `.copy()`:
###Code
c = np.ones( (3,3) )
d = c
e = c.copy()
print("c", c)
print("d", d)
print("e", e)
###Output
c [[1. 1. 1.]
[1. 1. 1.]
[1. 1. 1.]]
d [[1. 1. 1.]
[1. 1. 1.]
[1. 1. 1.]]
e [[1. 1. 1.]
[1. 1. 1.]
[1. 1. 1.]]
###Markdown
Now let's modify `c`:
###Code
c[0][0]=10
print("c", c)
print("d", d)
print("e", e)
###Output
c [[10. 1. 1.]
[ 1. 1. 1.]
[ 1. 1. 1.]]
d [[10. 1. 1.]
[ 1. 1. 1.]
[ 1. 1. 1.]]
e [[1. 1. 1.]
[1. 1. 1.]
[1. 1. 1.]]
###Markdown
Exercise: Re-initialiseWhat happens if we re-initialise `c` e.g.```pythonc = np.zeros( (3,3) )```[Solution]() Solution+: Re-initialise
###Code
c = np.zeros( (3,3) )
print("c", c)
print("d", d)
print("e", e)
###Output
c [[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]]
d [[10. 1. 1.]
[ 1. 1. 1.]
[ 1. 1. 1.]]
e [[1. 1. 1.]
[1. 1. 1.]
[1. 1. 1.]]
###Markdown
If we re-initialise `c`, `d` does not get changed and is still the original array. You can also use the function `id` to check whether two variables point to the same object:
###Code
print(id(c))
print(id(d))
print(id(e))
###Output
139972242570496
139972242569536
139972242569936
###Markdown
:Solution+ Accessing arraysBasic indexing and slicing can be used to access array elements, as we know from lists:
###Code
# a[start:stop:stride] (not inclusive of stop)
x = np.arange(8)
print("x", x)
print("x[0:7:2]", x[0:7:2])
print("x[0::2]", x[0::2])
###Output
x [0 1 2 3 4 5 6 7]
x[0:7:2] [0 2 4 6]
x[0::2] [0 2 4 6]
###Markdown
And as with lists negative indices are valid! Useful if you want to access from the end of the array
###Code
print(x[-1], x[-2], x[-3])
###Output
7 6 5
###Markdown
Exercise: AccessWhat will the following do:```pythonprint(x[2:-3:2])print(x[::-2])```[Solution]() Solution+:
###Code
print(x[2:-3:2])
print(x[::-2])
###Output
[2 4]
[7 5 3 1]
###Markdown
Negative indices in the *stop*, *start* positions work as they do in referencing individual elements of the array. A negative *stride* works backwards through the array, starting by default from the last element. :Solution+ Accessing Multidimensional dataTo access multidimensional arrays we can use multiple index subscripts, or we can use a `tuple`.
###Code
# Basic indexing of a 3d array
m = np.array([[[1,2],[3,4]],[[5,6],[7,8]]])
print(c.shape)
# using subscripts
print("m[1][0][1]:", m[1][0][1])
# using a tuple
print("m[(1,0,1)]:", m[(1,0,1)])
# the whole thing
print(m)
###Output
(3, 3)
m[1][0][1]: 6
m[(1,0,1)]: 6
[[[1 2]
[3 4]]
[[5 6]
[7 8]]]
###Markdown
We can access complete *slices* by leaving out indices or using ellipses:
###Code
print(m[1])
print(m[1,0,...]) # can also use ellipsis (3 dots)
# for missing indices
print(m[...,1,1]) # anywhere in indices
print(m[...,...,1]) # but only once
###Output
[[5 6]
[7 8]]
[5 6]
[4 8]
###Markdown
Exercise: Slice and CopyCreate a new array and assign a slice of the array to another variable.What happens when you change an element of the original array that is in the slice you have created, or vice-versa?How can you create a unique slice of the original array?[Solution]() Solution+:
###Code
n = np.ones( (3,3) )
o = n[0]
print("n", n)
print("o", o)
n[(0,0)] = 2
print("n", n)
print("o", o)
o[:] = -1
print("n", n)
print("o", o)
###Output
n [[-1. -1. -1.]
[ 1. 2. 1.]
[ 1. 1. 1.]]
o [-1. -1. -1.]
|
6.1_Extraer_Caracteristicas.ipynb | ###Markdown
6.1_Extraer_Caracteristicas
###Code
#!pip install scikit-learn
#!pip install imutils
#!pip install progressbar
import tensorflow as tf
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.applications import imagenet_utils
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from sklearn.preprocessing import LabelEncoder
from pyimagesearch.io import HDF5DatasetWriter
from imutils import paths
import numpy as np
import progressbar
import argparse
import random
import os
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
img_size = 224
batch_size = 32
buffer_size = 1000 #controls the number of extracted features stored in memory
path_train = 'data_' + str(img_size) + '/train'
path_validation = 'data_' + str(img_size) + '/validation'
path_output_train_hdf5 = 'HDF5/MobileNetV2_train.hdf5'
path_output_validation_hdf5 = 'HDF5/MobileNetV2_validation.hdf5'
#loading the image paths
imagePaths_train = list(paths.list_images(path_train))
imagePaths_validation = list(paths.list_images(path_validation))
#randomly shuffling the order of the images
random.shuffle(imagePaths_train)
random.shuffle(imagePaths_validation)
#getting the label of each image from its path
labels_train = [p.split(os.path.sep)[-2] for p in imagePaths_train]
#encoding the labels as numbers
le = LabelEncoder()
labels_train = le.fit_transform(labels_train)
#getting the label of each image from its path
labels_validation = [p.split(os.path.sep)[-2] for p in imagePaths_validation]
#encoding the labels as numbers
le = LabelEncoder()
labels_validation = le.fit_transform(labels_validation)
###Output
_____no_output_____
###Markdown
Creating the base model
###Code
model = MobileNetV2(
include_top=False,
weights='imagenet',
input_shape=(img_size,img_size,3)
)
###Output
_____no_output_____
###Markdown
Saving the features to disk
###Code
dataset_train = HDF5DatasetWriter((len(imagePaths_train), 1280 * 7 * 7), #equal to the size of the last layer
path_output_train_hdf5,
dataKey = "features",
bufSize = buffer_size)
dataset_train.storeClassLabels(le.classes_)
dataset_validation = HDF5DatasetWriter((len(imagePaths_validation), 1280 * 7 * 7), #equal to the size of the last layer
path_output_validation_hdf5,
dataKey = "features",
bufSize = buffer_size)
dataset_validation.storeClassLabels(le.classes_)
###Output
_____no_output_____
###Markdown
Extracting the features
###Code
widgets = ["Extracción de características: ",
progressbar.Percentage(),
" ",
progressbar.Bar(),
" ",
progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=len(imagePaths_train),
widgets=widgets).start()
# step through the image paths in increments of batch_size
for i in np.arange(0, len(imagePaths_train), batch_size):
# get the paths and labels of the images in this batch
batchPaths = imagePaths_train[i:i + batch_size]
batchLabels = labels_train[i:i + batch_size]
batchImages = []
# loop over the images in the batch
for (j, imagePath) in enumerate(batchPaths):
#load the image with keras
image = load_img(imagePath, target_size=(img_size, img_size))
image = img_to_array(image)
#pre-process the image:
#add an extra batch dimension
image = np.expand_dims(image, axis=0)
#apply ImageNet-style preprocessing of the RGB pixel intensities
image = imagenet_utils.preprocess_input(image)
#add the image to the batch
batchImages.append(image)
#stack the batch into a single array
batchImages = np.vstack(batchImages)
features = model.predict(batchImages, batch_size=batch_size)
#reshape the predictions, flattening them to 1D per image
features = features.reshape((features.shape[0], 1280 * 7 * 7))
#add the features to the HDF5 dataset
dataset_train.add(features, batchLabels)
pbar.update(i)
#close the HDF5 writer
dataset_train.close()
pbar.finish()
widgets = ["Extracción de características: ",
progressbar.Percentage(),
" ",
progressbar.Bar(),
" ",
progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=len(imagePaths_validation),
widgets=widgets).start()
# step through the image paths in increments of batch_size
for i in np.arange(0, len(imagePaths_validation), batch_size):
# get the paths and labels of the images in this batch
batchPaths = imagePaths_validation[i:i + batch_size]
batchLabels = labels_validation[i:i + batch_size]
batchImages = []
# loop over the images in the batch
for (j, imagePath) in enumerate(batchPaths):
#load the image with keras
image = load_img(imagePath, target_size=(img_size, img_size))
image = img_to_array(image)
#pre-process the image:
#add an extra batch dimension
image = np.expand_dims(image, axis=0)
#apply ImageNet-style preprocessing of the RGB pixel intensities
image = imagenet_utils.preprocess_input(image)
#add the image to the batch
batchImages.append(image)
#stack the batch into a single array
batchImages = np.vstack(batchImages)
features = model.predict(batchImages, batch_size=batch_size)
#reshape the predictions, flattening them to 1D per image
features = features.reshape((features.shape[0], 1280 * 7 * 7))
#add the features to the HDF5 dataset
dataset_validation.add(features, batchLabels)
pbar.update(i)
#close the HDF5 writer
dataset_validation.close()
pbar.finish()
###Output
Extracción de características: 100% |###########################| Time: 0:00:08
###Markdown
Training the classifier
###Code
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report
from sklearn.svm import SVC
import argparse
import pickle
import h5py
#read the HDF5 feature files
db_train = h5py.File(path_output_train_hdf5, 'r')
db_validation = h5py.File(path_output_validation_hdf5, 'r')
print(db_train["features"].shape)
print(db_train["labels"].shape)
# train our classifier on the extracted CNN features
# (the MobileNetV2 features stored in the HDF5 files above)
print("[INFO] entrenando el modelo...")
recognizer = SVC(C=1.0, kernel="linear", probability=True)
recognizer.fit(db_train["features"], db_train["labels"])
###Output
[INFO] entrenando el modelo...
###Markdown
Evaluating the classifier
###Code
preds = recognizer.predict(db_validation["features"])
print(classification_report(db_validation["labels"], preds, target_names=db_validation["label_names"]))
model_output = 'machine_learning/svc_mobilenet.pickle'
labels_ouput = 'machine_learning/svc_labels.pickle'
# save our model to disk
f = open(model_output, "wb")
f.write(pickle.dumps(recognizer))
f.close()
# write the label encoder to disk
f = open(labels_ouput, "wb")
f.write(pickle.dumps(le))
f.close()
###Output
_____no_output_____ |
experiments/tl_3v2/filter/oracle.run1.framed-cores/trials/2/trial.ipynb | ###Markdown
Transfer Learning Template
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os, json, sys, time, random
import numpy as np
import torch
from torch.optim import Adam
from easydict import EasyDict
import matplotlib.pyplot as plt
from steves_models.steves_ptn import Steves_Prototypical_Network
from steves_utils.lazy_iterable_wrapper import Lazy_Iterable_Wrapper
from steves_utils.iterable_aggregator import Iterable_Aggregator
from steves_utils.ptn_train_eval_test_jig import PTN_Train_Eval_Test_Jig
from steves_utils.torch_sequential_builder import build_sequential
from steves_utils.torch_utils import get_dataset_metrics, ptn_confusion_by_domain_over_dataloader
from steves_utils.utils_v2 import (per_domain_accuracy_from_confusion, get_datasets_base_path)
from steves_utils.PTN.utils import independent_accuracy_assesment
from torch.utils.data import DataLoader
from steves_utils.stratified_dataset.episodic_accessor import Episodic_Accessor_Factory
from steves_utils.ptn_do_report import (
get_loss_curve,
get_results_table,
get_parameters_table,
get_domain_accuracies,
)
from steves_utils.transforms import get_chained_transform
###Output
_____no_output_____
###Markdown
Allowed ParametersThese are allowed parameters, not defaultsEach of these values need to be present in the injected parameters (the notebook will raise an exception if they are not present)Papermill uses the cell tag "parameters" to inject the real parameters below this cell.Enable tags to see what I mean
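A minimal sketch of the kind of check this implies (the exact enforcement cell is not shown in this excerpt, so this is only an assumption about its shape):
```python
# Sketch: fail fast if papermill did not inject every required parameter.
missing = required_parameters - set(parameters.keys())
if missing:
    raise RuntimeError(f"Missing required parameters: {missing}")
```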
###Code
required_parameters = {
"experiment_name",
"lr",
"device",
"seed",
"dataset_seed",
"n_shot",
"n_query",
"n_way",
"train_k_factor",
"val_k_factor",
"test_k_factor",
"n_epoch",
"patience",
"criteria_for_best",
"x_net",
"datasets",
"torch_default_dtype",
"NUM_LOGS_PER_EPOCH",
"BEST_MODEL_PATH",
"x_shape",
}
from steves_utils.CORES.utils import (
ALL_NODES,
ALL_NODES_MINIMUM_1000_EXAMPLES,
ALL_DAYS
)
from steves_utils.ORACLE.utils_v2 import (
ALL_DISTANCES_FEET_NARROWED,
ALL_RUNS,
ALL_SERIAL_NUMBERS,
)
standalone_parameters = {}
standalone_parameters["experiment_name"] = "STANDALONE PTN"
standalone_parameters["lr"] = 0.001
standalone_parameters["device"] = "cuda"
standalone_parameters["seed"] = 1337
standalone_parameters["dataset_seed"] = 1337
standalone_parameters["n_way"] = 8
standalone_parameters["n_shot"] = 3
standalone_parameters["n_query"] = 2
standalone_parameters["train_k_factor"] = 1
standalone_parameters["val_k_factor"] = 2
standalone_parameters["test_k_factor"] = 2
standalone_parameters["n_epoch"] = 50
standalone_parameters["patience"] = 10
standalone_parameters["criteria_for_best"] = "source_loss"
standalone_parameters["datasets"] = [
{
"labels": ALL_SERIAL_NUMBERS,
"domains": ALL_DISTANCES_FEET_NARROWED,
"num_examples_per_domain_per_label": 100,
"pickle_path": os.path.join(get_datasets_base_path(), "oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl"),
"source_or_target_dataset": "source",
"x_transforms": ["unit_mag", "minus_two"],
"episode_transforms": [],
"domain_prefix": "ORACLE_"
},
{
"labels": ALL_NODES,
"domains": ALL_DAYS,
"num_examples_per_domain_per_label": 100,
"pickle_path": os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"),
"source_or_target_dataset": "target",
"x_transforms": ["unit_power", "times_zero"],
"episode_transforms": [],
"domain_prefix": "CORES_"
}
]
standalone_parameters["torch_default_dtype"] = "torch.float32"
standalone_parameters["x_net"] = [
{"class": "nnReshape", "kargs": {"shape":[-1, 1, 2, 256]}},
{"class": "Conv2d", "kargs": { "in_channels":1, "out_channels":256, "kernel_size":(1,7), "bias":False, "padding":(0,3), },},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features":256}},
{"class": "Conv2d", "kargs": { "in_channels":256, "out_channels":80, "kernel_size":(2,7), "bias":True, "padding":(0,3), },},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features":80}},
{"class": "Flatten", "kargs": {}},
{"class": "Linear", "kargs": {"in_features": 80*256, "out_features": 256}}, # 80 units per IQ pair
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm1d", "kargs": {"num_features":256}},
{"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
]
# Parameters relevant to results
# These parameters will basically never need to change
standalone_parameters["NUM_LOGS_PER_EPOCH"] = 10
standalone_parameters["BEST_MODEL_PATH"] = "./best_model.pth"
# Parameters
parameters = {
"experiment_name": "tl_3-filterv2:oracle.run1.framed -> cores",
"device": "cuda",
"lr": 0.0001,
"x_shape": [2, 200],
"n_shot": 3,
"n_query": 2,
"train_k_factor": 3,
"val_k_factor": 2,
"test_k_factor": 2,
"torch_default_dtype": "torch.float32",
"n_epoch": 50,
"patience": 3,
"criteria_for_best": "target_accuracy",
"x_net": [
{"class": "nnReshape", "kargs": {"shape": [-1, 1, 2, 200]}},
{
"class": "Conv2d",
"kargs": {
"in_channels": 1,
"out_channels": 256,
"kernel_size": [1, 7],
"bias": False,
"padding": [0, 3],
},
},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features": 256}},
{
"class": "Conv2d",
"kargs": {
"in_channels": 256,
"out_channels": 80,
"kernel_size": [2, 7],
"bias": True,
"padding": [0, 3],
},
},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features": 80}},
{"class": "Flatten", "kargs": {}},
{"class": "Linear", "kargs": {"in_features": 16000, "out_features": 256}},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm1d", "kargs": {"num_features": 256}},
{"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
],
"NUM_LOGS_PER_EPOCH": 10,
"BEST_MODEL_PATH": "./best_model.pth",
"n_way": 16,
"datasets": [
{
"labels": [
"1-10.",
"1-11.",
"1-15.",
"1-16.",
"1-17.",
"1-18.",
"1-19.",
"10-4.",
"10-7.",
"11-1.",
"11-14.",
"11-17.",
"11-20.",
"11-7.",
"13-20.",
"13-8.",
"14-10.",
"14-11.",
"14-14.",
"14-7.",
"15-1.",
"15-20.",
"16-1.",
"16-16.",
"17-10.",
"17-11.",
"17-2.",
"19-1.",
"19-16.",
"19-19.",
"19-20.",
"19-3.",
"2-10.",
"2-11.",
"2-17.",
"2-18.",
"2-20.",
"2-3.",
"2-4.",
"2-5.",
"2-6.",
"2-7.",
"2-8.",
"3-13.",
"3-18.",
"3-3.",
"4-1.",
"4-10.",
"4-11.",
"4-19.",
"5-5.",
"6-15.",
"7-10.",
"7-14.",
"8-18.",
"8-20.",
"8-3.",
"8-8.",
],
"domains": [1, 2, 3, 4, 5],
"num_examples_per_domain_per_label": -1,
"pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/cores.stratified_ds.2022A.pkl",
"source_or_target_dataset": "target",
"x_transforms": ["lowpass_+/-10MHz", "take_200"],
"episode_transforms": [],
"domain_prefix": "C_",
},
{
"labels": [
"3123D52",
"3123D65",
"3123D79",
"3123D80",
"3123D54",
"3123D70",
"3123D7B",
"3123D89",
"3123D58",
"3123D76",
"3123D7D",
"3123EFE",
"3123D64",
"3123D78",
"3123D7E",
"3124E4A",
],
"domains": [32, 38, 8, 44, 14, 50, 20, 26],
"num_examples_per_domain_per_label": 2000,
"pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl",
"source_or_target_dataset": "source",
"x_transforms": ["take_200", "resample_20Msps_to_25Msps"],
"episode_transforms": [],
"domain_prefix": "O_",
},
],
"seed": 1337,
"dataset_seed": 1337,
}
# Set this to True if you want to run this template directly
STANDALONE = False
if STANDALONE:
print("parameters not injected, running with standalone_parameters")
parameters = standalone_parameters
if not 'parameters' in locals() and not 'parameters' in globals():
raise Exception("Parameter injection failed")
#Use an easy dict for all the parameters
p = EasyDict(parameters)
if "x_shape" not in p:
    p.x_shape = [2,256] # default to this if we don't supply x_shape
supplied_keys = set(p.keys())
if supplied_keys != required_parameters:
print("Parameters are incorrect")
if len(supplied_keys - required_parameters)>0: print("Shouldn't have:", str(supplied_keys - required_parameters))
if len(required_parameters - supplied_keys)>0: print("Need to have:", str(required_parameters - supplied_keys))
raise RuntimeError("Parameters are incorrect")
###################################
# Set the RNGs and make it all deterministic
###################################
np.random.seed(p.seed)
random.seed(p.seed)
torch.manual_seed(p.seed)
torch.use_deterministic_algorithms(True)
###########################################
# The stratified datasets honor this
###########################################
torch.set_default_dtype(eval(p.torch_default_dtype))
###################################
# Build the network(s)
# Note: It's critical to do this AFTER setting the RNG
###################################
x_net = build_sequential(p.x_net)
start_time_secs = time.time()
p.domains_source = []
p.domains_target = []
train_original_source = []
val_original_source = []
test_original_source = []
train_original_target = []
val_original_target = []
test_original_target = []
# global_x_transform_func = lambda x: normalize(x.to(torch.get_default_dtype()), "unit_power") # unit_power, unit_mag
# global_x_transform_func = lambda x: normalize(x, "unit_power") # unit_power, unit_mag
def add_dataset(
labels,
domains,
pickle_path,
x_transforms,
episode_transforms,
domain_prefix,
num_examples_per_domain_per_label,
source_or_target_dataset:str,
iterator_seed=p.seed,
dataset_seed=p.dataset_seed,
n_shot=p.n_shot,
n_way=p.n_way,
n_query=p.n_query,
train_val_test_k_factors=(p.train_k_factor,p.val_k_factor,p.test_k_factor),
):
if x_transforms == []: x_transform = None
else: x_transform = get_chained_transform(x_transforms)
if episode_transforms == []: episode_transform = None
else: raise Exception("episode_transforms not implemented")
episode_transform = lambda tup, _prefix=domain_prefix: (_prefix + str(tup[0]), tup[1])
eaf = Episodic_Accessor_Factory(
labels=labels,
domains=domains,
num_examples_per_domain_per_label=num_examples_per_domain_per_label,
iterator_seed=iterator_seed,
dataset_seed=dataset_seed,
n_shot=n_shot,
n_way=n_way,
n_query=n_query,
train_val_test_k_factors=train_val_test_k_factors,
pickle_path=pickle_path,
x_transform_func=x_transform,
)
train, val, test = eaf.get_train(), eaf.get_val(), eaf.get_test()
train = Lazy_Iterable_Wrapper(train, episode_transform)
val = Lazy_Iterable_Wrapper(val, episode_transform)
test = Lazy_Iterable_Wrapper(test, episode_transform)
if source_or_target_dataset=="source":
train_original_source.append(train)
val_original_source.append(val)
test_original_source.append(test)
p.domains_source.extend(
[domain_prefix + str(u) for u in domains]
)
elif source_or_target_dataset=="target":
train_original_target.append(train)
val_original_target.append(val)
test_original_target.append(test)
p.domains_target.extend(
[domain_prefix + str(u) for u in domains]
)
else:
raise Exception(f"invalid source_or_target_dataset: {source_or_target_dataset}")
for ds in p.datasets:
add_dataset(**ds)
# from steves_utils.CORES.utils import (
# ALL_NODES,
# ALL_NODES_MINIMUM_1000_EXAMPLES,
# ALL_DAYS
# )
# add_dataset(
# labels=ALL_NODES,
# domains = ALL_DAYS,
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"),
# source_or_target_dataset="target",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"cores_{u}"
# )
# from steves_utils.ORACLE.utils_v2 import (
# ALL_DISTANCES_FEET,
# ALL_RUNS,
# ALL_SERIAL_NUMBERS,
# )
# add_dataset(
# labels=ALL_SERIAL_NUMBERS,
# domains = list(set(ALL_DISTANCES_FEET) - {2,62}),
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"),
# source_or_target_dataset="source",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"oracle1_{u}"
# )
# from steves_utils.ORACLE.utils_v2 import (
# ALL_DISTANCES_FEET,
# ALL_RUNS,
# ALL_SERIAL_NUMBERS,
# )
# add_dataset(
# labels=ALL_SERIAL_NUMBERS,
# domains = list(set(ALL_DISTANCES_FEET) - {2,62,56}),
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"),
# source_or_target_dataset="source",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"oracle2_{u}"
# )
# add_dataset(
# labels=list(range(19)),
# domains = [0,1,2],
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "metehan.stratified_ds.2022A.pkl"),
# source_or_target_dataset="target",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"met_{u}"
# )
# # from steves_utils.wisig.utils import (
# # ALL_NODES_MINIMUM_100_EXAMPLES,
# # ALL_NODES_MINIMUM_500_EXAMPLES,
# # ALL_NODES_MINIMUM_1000_EXAMPLES,
# # ALL_DAYS
# # )
# import steves_utils.wisig.utils as wisig
# add_dataset(
# labels=wisig.ALL_NODES_MINIMUM_100_EXAMPLES,
# domains = wisig.ALL_DAYS,
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "wisig.node3-19.stratified_ds.2022A.pkl"),
# source_or_target_dataset="target",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"wisig_{u}"
# )
###################################
# Build the dataset
###################################
train_original_source = Iterable_Aggregator(train_original_source, p.seed)
val_original_source = Iterable_Aggregator(val_original_source, p.seed)
test_original_source = Iterable_Aggregator(test_original_source, p.seed)
train_original_target = Iterable_Aggregator(train_original_target, p.seed)
val_original_target = Iterable_Aggregator(val_original_target, p.seed)
test_original_target = Iterable_Aggregator(test_original_target, p.seed)
# For CNN We only use X and Y. And we only train on the source.
# Properly form the data using a transform lambda and Lazy_Iterable_Wrapper. Finally wrap them in a dataloader
transform_lambda = lambda ex: ex[1] # Original is (<domain>, <episode>) so we strip down to episode only
train_processed_source = Lazy_Iterable_Wrapper(train_original_source, transform_lambda)
val_processed_source = Lazy_Iterable_Wrapper(val_original_source, transform_lambda)
test_processed_source = Lazy_Iterable_Wrapper(test_original_source, transform_lambda)
train_processed_target = Lazy_Iterable_Wrapper(train_original_target, transform_lambda)
val_processed_target = Lazy_Iterable_Wrapper(val_original_target, transform_lambda)
test_processed_target = Lazy_Iterable_Wrapper(test_original_target, transform_lambda)
datasets = EasyDict({
"source": {
"original": {"train":train_original_source, "val":val_original_source, "test":test_original_source},
"processed": {"train":train_processed_source, "val":val_processed_source, "test":test_processed_source}
},
"target": {
"original": {"train":train_original_target, "val":val_original_target, "test":test_original_target},
"processed": {"train":train_processed_target, "val":val_processed_target, "test":test_processed_target}
},
})
from steves_utils.transforms import get_average_magnitude, get_average_power
print(set([u for u,_ in val_original_source]))
print(set([u for u,_ in val_original_target]))
s_x, s_y, q_x, q_y, _ = next(iter(train_processed_source))
print(s_x)
# for ds in [
# train_processed_source,
# val_processed_source,
# test_processed_source,
# train_processed_target,
# val_processed_target,
# test_processed_target
# ]:
# for s_x, s_y, q_x, q_y, _ in ds:
# for X in (s_x, q_x):
# for x in X:
# assert np.isclose(get_average_magnitude(x.numpy()), 1.0)
# assert np.isclose(get_average_power(x.numpy()), 1.0)
###################################
# Build the model
###################################
# easyfsl only wants a tuple for the shape
model = Steves_Prototypical_Network(x_net, device=p.device, x_shape=tuple(p.x_shape))
optimizer = Adam(params=model.parameters(), lr=p.lr)
###################################
# train
###################################
jig = PTN_Train_Eval_Test_Jig(model, p.BEST_MODEL_PATH, p.device)
jig.train(
train_iterable=datasets.source.processed.train,
source_val_iterable=datasets.source.processed.val,
target_val_iterable=datasets.target.processed.val,
num_epochs=p.n_epoch,
num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH,
patience=p.patience,
optimizer=optimizer,
criteria_for_best=p.criteria_for_best,
)
total_experiment_time_secs = time.time() - start_time_secs
###################################
# Evaluate the model
###################################
source_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test)
target_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test)
source_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val)
target_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val)
history = jig.get_history()
total_epochs_trained = len(history["epoch_indices"])
val_dl = Iterable_Aggregator((datasets.source.original.val,datasets.target.original.val))
confusion = ptn_confusion_by_domain_over_dataloader(model, p.device, val_dl)
per_domain_accuracy = per_domain_accuracy_from_confusion(confusion)
# Add a key to per_domain_accuracy indicating whether it was a source domain
for domain, accuracy in per_domain_accuracy.items():
per_domain_accuracy[domain] = {
"accuracy": accuracy,
"source?": domain in p.domains_source
}
# Do an independent accuracy assessment JUST TO BE SURE!
# _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device)
# _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device)
# _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device)
# _target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device)
# assert(_source_test_label_accuracy == source_test_label_accuracy)
# assert(_target_test_label_accuracy == target_test_label_accuracy)
# assert(_source_val_label_accuracy == source_val_label_accuracy)
# assert(_target_val_label_accuracy == target_val_label_accuracy)
experiment = {
"experiment_name": p.experiment_name,
"parameters": dict(p),
"results": {
"source_test_label_accuracy": source_test_label_accuracy,
"source_test_label_loss": source_test_label_loss,
"target_test_label_accuracy": target_test_label_accuracy,
"target_test_label_loss": target_test_label_loss,
"source_val_label_accuracy": source_val_label_accuracy,
"source_val_label_loss": source_val_label_loss,
"target_val_label_accuracy": target_val_label_accuracy,
"target_val_label_loss": target_val_label_loss,
"total_epochs_trained": total_epochs_trained,
"total_experiment_time_secs": total_experiment_time_secs,
"confusion": confusion,
"per_domain_accuracy": per_domain_accuracy,
},
"history": history,
"dataset_metrics": get_dataset_metrics(datasets, "ptn"),
}
ax = get_loss_curve(experiment)
plt.show()
get_results_table(experiment)
get_domain_accuracies(experiment)
print("Source Test Label Accuracy:", experiment["results"]["source_test_label_accuracy"], "Target Test Label Accuracy:", experiment["results"]["target_test_label_accuracy"])
print("Source Val Label Accuracy:", experiment["results"]["source_val_label_accuracy"], "Target Val Label Accuracy:", experiment["results"]["target_val_label_accuracy"])
json.dumps(experiment)
###Output
_____no_output_____ |
HCV/HCV.ipynb | ###Markdown
###Code
!wget 'https://archive.ics.uci.edu/ml/machine-learning-databases/00571/hcvdat0.csv'
# !pip install kmeans-smote
import pandas as pd
import numpy as np
dataset = pd.read_csv('/content/hcvdat0.csv')
dataset.head()
#check for missing values
dataset.isnull().sum()
# get missing values indexes for ALB, ALP, CHOL and PROT features
ALB_missing_indexes = dataset['ALB'].isnull().to_numpy().nonzero()
ALP_missing_indexes = dataset['ALP'].isnull().to_numpy().nonzero()
ALT_missing_indexes = dataset['ALT'].isnull().to_numpy().nonzero()
CHOL_missing_indexes = dataset['CHOL'].isnull().to_numpy().nonzero()
PROT_missing_indexes = dataset['PROT'].isnull().to_numpy().nonzero()
print('ALB missing values: \n', ALB_missing_indexes)
print('\nALP missing values: \n', ALP_missing_indexes)
print('\nALT missing values: \n', ALT_missing_indexes)
print('\nCHOL missing values: \n', CHOL_missing_indexes)
print('\nPROT missing values: \n', PROT_missing_indexes)
def missing_handle_mean(dataframe, column, indexes):
for index in indexes:
dataframe[column][index] = dataframe[column].mean()
return dataframe
def missing_handle_classmean(dataframe, column, indexes):
for index in indexes:
missing_value_class_values = []
index_label = dataframe['Category'][index]
for row_index, row in dataset.iterrows():
if str(index_label) == str(row['Category']):
missing_value_class_values.append( dataframe[column][row_index] )
cleanedList = [x for x in missing_value_class_values if str(x) != 'nan']
dataframe[column][index] = np.mean(cleanedList)
return dataframe
def preprocessing(df, missing_handler=None):
for row_index, row in df.iterrows():
# Sex Handler
if row['Sex'] == 'm':
df['Sex'][row_index] = 1
elif row['Sex'] == 'f':
df['Sex'][row_index] = 2
else:
pass
# Category to numerical
if row['Category'] == '0=Blood Donor':
df['Category'][row_index] = 0
elif row['Category'] == '0s=suspect Blood Donor':
df['Category'][row_index] = 1
elif row['Category'] == '1=Hepatitis':
df['Category'][row_index] = 2
elif row['Category'] == '2=Fibrosis':
df['Category'][row_index] = 3
elif row['Category'] == '3=Cirrhosis':
df['Category'][row_index] = 4
else:
pass
if missing_handler == 'Column_Mean':
df = missing_handle_mean(df, 'ALB', ALB_missing_indexes[0])
df = missing_handle_mean(df, 'ALP', ALP_missing_indexes[0])
df = missing_handle_mean(df, 'ALT', ALT_missing_indexes[0])
df = missing_handle_mean(df, 'CHOL', CHOL_missing_indexes[0])
df = missing_handle_mean(df, 'PROT', PROT_missing_indexes[0])
elif missing_handler == 'Class_Mean':
df = missing_handle_classmean(df, 'ALB', ALB_missing_indexes[0])
df = missing_handle_classmean(df, 'ALP', ALP_missing_indexes[0])
df = missing_handle_classmean(df, 'ALT', ALT_missing_indexes[0])
df = missing_handle_classmean(df, 'CHOL', CHOL_missing_indexes[0])
df = missing_handle_classmean(df, 'PROT', PROT_missing_indexes[0])
else:
pass
return df
dataset = preprocessing(dataset, missing_handler='Class_Mean')
# checking fo missing values after preprocessing
dataset.isnull().sum()
def split_dataset(df, train_percentage=0.8, test_percentage=0.2):
    # the train fraction is taken as (1 - test_percentage); random_state is a seed value for reproducibility
    train = df.sample(frac=(1-test_percentage),random_state=200)
    test = df.drop(train.index)
    return train, test
trainset, testset = split_dataset(dataset, 0.8, 0.2)
from sklearn.preprocessing import LabelBinarizer
from scipy import sparse
# dataframe to numpy array
x_train = trainset.drop(['Category', 'Unnamed: 0'], axis = 1).values
y_train = trainset.loc[:, 'Category'].values.astype('int')
x_test = testset.drop(['Category', 'Unnamed: 0'], axis = 1).values
y_test = testset.loc[:, 'Category'].values.astype('int')
print('x_train shapes: ', x_train.shape)
print('y_train shapes: ', y_train.shape)
print('x_test shapes: ', x_test.shape)
print('x_test shapes: ', x_test.shape)
print('number of <Blood Donor> samples: ', np.count_nonzero(y_train == 0))
print('number of <suspect Blood Donor> samples: ', np.count_nonzero(y_train == 1))
print('number of <Hepatitis> samples: ', np.count_nonzero(y_train == 2))
print('number of <Fibrosis> samples: ', np.count_nonzero(y_train == 3))
print('number of <Cirrhosis> samples: ', np.count_nonzero(y_train == 4))
# handle imbalanced data
from kmeans_smote import KMeansSMOTE
kmeans_smote = KMeansSMOTE(
kmeans_args={
'n_clusters': 100
},
smote_args={
'k_neighbors': 5
}
)
X_train_resampled, y_train_resampled = kmeans_smote.fit_sample(x_train, y_train)
print('new samples added, x_train shape : ', X_train_resampled.shape)
print('new samples added, y_train shape : ', y_train_resampled.shape)
print('number of <Blood Donor> samples: ', np.count_nonzero(y_train_resampled == 0))
print('number of <suspect Blood Donor> samples: ', np.count_nonzero(y_train_resampled == 1))
print('number of <Hepatitis> samples: ', np.count_nonzero(y_train_resampled == 2))
print('number of <Fibrosis> samples: ', np.count_nonzero(y_train_resampled == 3))
print('number of <Cirrhosis> samples: ', np.count_nonzero(y_train_resampled == 4))
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
clf = SVC(kernel='linear', decision_function_shape='ovr')
clf.fit(X_train_resampled, y_train_resampled)
from sklearn.metrics import confusion_matrix
y_pred = clf.predict(x_test)
print(y_pred)
print(y_test)
confusion_matrix(y_test, y_pred)
clf.score(x_test, y_test)
###Output
_____no_output_____ |
Vgg19_BINARY.ipynb | ###Markdown
PREPARING DATASET
###Code
import glob
import numpy as np
import os
import shutil
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img
%matplotlib inline
IMG_DIM = (150,150)
train_files = glob.glob('/content/drive/MyDrive/DATASET_BUS_BINARY/training_data/*')
train_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in train_files]
train_imgs = np.array(train_imgs)
train_labels = [fn.split('/')[-1].split('.')[0].strip() for fn in train_files]
validation_files = glob.glob('/content/drive/MyDrive/DATASET_BUS_BINARY/validation_data/*')
validation_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in validation_files]
validation_imgs = np.array(validation_imgs)
validation_labels = [fn.split('/')[-1].split('.')[0].strip() for fn in validation_files]
test_files = glob.glob('/content/drive/MyDrive/DATASET_BUS_BINARY/test_data/*')
test_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in test_files]
test_imgs = np.array(test_imgs)
test_labels = [fn.split('/')[-1].split('.')[0].strip() for fn in test_files]
print('Train dataset shape:', train_imgs.shape,
'\tValidation dataset shape:', validation_imgs.shape,'\tTest dataset shape:', test_imgs.shape)
train_imgs_scaled = train_imgs.astype('float32')
validation_imgs_scaled = validation_imgs.astype('float32')
test_imgs_scaled =test_imgs.astype('float32')
train_imgs_scaled /= 255
validation_imgs_scaled /= 255
test_imgs_scaled /= 255
print(train_imgs[0].shape)
array_to_img(train_imgs[0])
print(train_imgs[6].shape)
array_to_img(train_imgs[6])
print(test_imgs[25].shape)
array_to_img(test_imgs[25])
batch_size = 30
num_classes = 2
epochs = 30
input_shape = (150, 150, 3)
# encode text category labels
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le.fit(train_labels)
train_labels_enc = le.transform(train_labels)
validation_labels_enc = le.transform(validation_labels)
test_labels_enc = le.transform(test_labels)
print(train_labels[25:30], train_labels_enc[25:30], test_labels_enc[25:30])
train_datagen = ImageDataGenerator(rescale=1./255, zoom_range=0.3, rotation_range=20,
width_shift_range=0, height_shift_range=0, shear_range=0,
horizontal_flip=True, fill_mode='constant')
val_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
img_id = 6
bening_generator = train_datagen.flow(train_imgs[img_id:img_id+1], train_labels[img_id:img_id+1],
batch_size=1)
bening = [next(bening_generator) for i in range(0,5)]
fig, ax = plt.subplots(1,5, figsize=(16, 6))
print('Labels:', [item[1][0] for item in bening])
l = [ax[i].imshow(bening[i][0][0]) for i in range(0,5)]
img_id = 1500
malignant_generator = train_datagen.flow(train_imgs[img_id:img_id+1], train_labels[img_id:img_id+1],
batch_size=1)
malignant = [next(malignant_generator) for i in range(0,5)]
fig, ax = plt.subplots(1,5, figsize=(16, 6))
print('Labels:', [item[1][0] for item in malignant])
l = [ax[i].imshow(malignant[i][0][0]) for i in range(0,5)]
train_generator = train_datagen.flow(train_imgs, train_labels_enc, batch_size=30)
val_generator = val_datagen.flow(validation_imgs, validation_labels_enc, batch_size=20)
test_generator = test_datagen.flow(test_imgs, test_labels_enc, batch_size=20)
input_shape = (150, 150, 3)
TRAIN_STEPS_PER_EPOCH = np.ceil((len(train_imgs)/30)-1)
# to ensure that there are enough images for each training batch
VAL_STEPS_PER_EPOCH = np.ceil((len(validation_imgs)/20)-1)
print(TRAIN_STEPS_PER_EPOCH,VAL_STEPS_PER_EPOCH)
###Output
64.0 24.0
###Markdown
---------------------------------------------
###Code
import keras
from keras.models import Sequential
from keras import optimizers
from keras.preprocessing import image
##
from keras.applications import vgg19
from keras.models import Model
import pandas as pd
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, InputLayer, GlobalAveragePooling2D, Input
vgg = vgg19.VGG19(include_top=False, weights='imagenet',
input_shape=input_shape)
output = vgg.layers[-1].output
output = keras.layers.Flatten()(output)
vgg_model = Model(vgg.input, output)
vgg_model.trainable = True
set_trainable = False
for layer in vgg_model.layers:
if layer.name in ['block5_conv1']:
set_trainable = True
if set_trainable:
layer.trainable = True
else:
layer.trainable = False
layers = [(layer, layer.name, layer.trainable) for layer in vgg_model.layers]
pd.DataFrame(layers, columns=['Layer Type', 'Layer Name', 'Layer Trainable'])
###
for layer in vgg.layers:
layer.trainable = False
x = keras.layers.Flatten()(vgg.output)
x = keras.layers.Dense(512, activation='relu')(x)
x = keras.layers.Dense(256, activation='relu')(x)
x = keras.layers.Dense(128, activation='relu')(x)
predictions = keras.layers.Dense(1, activation='sigmoid')(x)
full_model = keras.models.Model(inputs=vgg.input, outputs=predictions)
full_model.summary()
import tensorflow_datasets as tfds
import pathlib
from matplotlib import pyplot
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
import time
from keras.models import load_model
from keras.optimizers import SGD
opt = SGD(lr=0.01)
full_model.compile(loss='binary_crossentropy',
optimizer= opt,
metrics=['accuracy'])
tic=time.time()
# training
filepath='/content/drive/MyDrive/MODELOS/vgg19_roi/weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5'
mc = ModelCheckpoint(filepath, monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
es = [mc]
history = full_model.fit(
train_generator, steps_per_epoch=TRAIN_STEPS_PER_EPOCH, epochs=50,
validation_data = val_generator, validation_steps=VAL_STEPS_PER_EPOCH, verbose=1,callbacks=[es]
)
print('Tiempo de procesamiento (secs): ', time.time()-tic)
plt.figure(1)
# summarize history for accuracy
plt.subplot(211)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Training', 'Validation'], loc='lower right')
# summarize history for loss
plt.subplot(212)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Training', 'Validation'], loc='upper right')
plt.tight_layout()
plt.show()
full_model.load_weights("/content/drive/MyDrive/MODELOS/vgg19_binary/weights-improvement-30-0.87.hdf5")
scores = full_model.evaluate(
test_imgs_scaled,
test_labels_enc,
batch_size=64,
verbose=0,
workers=1,
use_multiprocessing=False,
return_dict=False,
)
print("%s: %.2f%%" % (full_model.metrics_names[1], scores[1]*100))
###Output
accuracy: 86.30%
|
notebooks/tratamento_casos_por_regiao.ipynb | ###Markdown
Processing of the data on Hepatitis cases and deaths in Brazil by region

Package imports
###Code
import pandas as pd
from funcoes import limpa_casos_regiao, limpa_obitos
###Output
_____no_output_____
###Markdown
Datasets used

[casos_hepA_por_regiao.csv](https://github.com/PedroHCAlmeida/projeto_um_mal_silencioso/blob/main/dados_brutos/casos_hepA_por_regiao.csv): CSV file with Hepatitis A case and death data by region between 2001 and 2006
[casos_hepB_por_regiao.csv](https://github.com/PedroHCAlmeida/projeto_um_mal_silencioso/blob/main/dados_brutos/casos_hepB_por_regiao.csv): CSV file with Hepatitis B case and death data by region between 2001 and 2006
[casos_hepC_por_regiao.csv](https://github.com/PedroHCAlmeida/projeto_um_mal_silencioso/blob/main/dados_brutos/casos_hepC_por_regiao.csv): CSV file with Hepatitis C case and death data by region between 2001 and 2006
[MonitoramentoHepatites-Norte.xls](https://github.com/PedroHCAlmeida/projeto_um_mal_silencioso/blob/main/dados_brutos/MonitoramentoHepatites-Norte.xls): XLS file with several tables on Hepatitis cases and deaths in the North region between 2007 and 2019
[MonitoramentoHepatites-Nordeste.xls](https://github.com/PedroHCAlmeida/projeto_um_mal_silencioso/blob/main/dados_brutos/MonitoramentoHepatites-Nordeste.xls): XLS file with several tables on Hepatitis cases and deaths in the Northeast region between 2007 and 2019
[MonitoramentoHepatites-Sul.xls](https://github.com/PedroHCAlmeida/projeto_um_mal_silencioso/blob/main/dados_brutos/MonitoramentoHepatites-Sul.xls): XLS file with several tables on Hepatitis cases and deaths in the South region between 2007 and 2019
[MonitoramentoHepatites-Sudeste.xls](https://github.com/PedroHCAlmeida/projeto_um_mal_silencioso/blob/main/dados_brutos/MonitoramentoHepatites-Sudeste.xls): XLS file with several tables on Hepatitis cases and deaths in the Southeast region between 2007 and 2019
[MonitoramentoHepatites-Centro-Oeste.xls](https://github.com/PedroHCAlmeida/projeto_um_mal_silencioso/blob/main/dados_brutos/MonitoramentoHepatites-Centro-Oeste.xls): XLS file with several tables on Hepatitis cases and deaths in the Central-West region between 2007 and 2019
[serie_2001_2020_TCU_populacao.xls](https://github.com/PedroHCAlmeida/projeto_um_mal_silencioso/blob/main/dados_brutos/serie_2001_2020_TCU_populacao.xls): XLS file with IBGE estimates of the Brazilian population between 2001 and 2020

Reading the data
###Code
# Read the data on cases by region for each Hepatitis type between 2001 and 2006
casos_A = pd.read_csv('../dados_brutos/casos_hepA_por_regiao.csv', encoding='ISO-8859-1', skiprows=4,
skipfooter=13, sep=';', usecols=[0,1,2,3,4,5], engine='python', na_values='-')
casos_B = pd.read_csv('../dados_brutos/casos_hepB_por_regiao.csv', encoding='ISO-8859-1', skiprows=4,
skipfooter=13, sep=';', usecols=[0,1,2,3,4,5], engine='python', na_values='-')
casos_C = pd.read_csv('../dados_brutos/casos_hepC_por_regiao.csv', encoding='ISO-8859-1', skiprows=4,
skipfooter=13, sep=';', usecols=[0,1,2,3,4,5], engine='python', na_values='-')
# Read the data on Hepatitis cases for each region between 2007 and 2019
casos_norte_recentes = pd.read_html('../dados_brutos/MonitoramentoHepatites-Norte.xls', thousands='.', na_values='-')
casos_nordeste_recentes = pd.read_html('../dados_brutos/MonitoramentoHepatites-Nordeste.xls', thousands='.', na_values='-')
casos_sudeste_recentes = pd.read_html('../dados_brutos/MonitoramentoHepatites-Sul.xls', thousands='.', na_values='-')
casos_sul_recentes = pd.read_html('../dados_brutos/MonitoramentoHepatites-Sudeste.xls', thousands='.', na_values='-')
casos_centro_recentes = pd.read_html('../dados_brutos/MonitoramentoHepatites-Centro-Oeste.xls', thousands='.', na_values='-')
# Read the population estimate data by state and year, limited to 2001-2019
pop_2001_2019 = pd.read_excel('../dados_brutos/serie_2001_2020_TCU_populacao.xls', skiprows=4,
skipfooter=10, usecols=range(0,20))
###Output
_____no_output_____
###Markdown
Functions
###Code
help(limpa_casos_regiao)
help(limpa_obitos)
###Output
Help on function limpa_obitos in module funcoes:
limpa_obitos(dados: pandas.core.frame.DataFrame, nome_valores: str)
Função que recebe o DataFrame relacionado aos óbitos de cada virus e realiza algumas transformações:
Retira as colunas que agrupam diversos anos, coluna 'Total' e coluna '2000-2006'
Retira a palavra 'Hepatite' antes do vírus(A,B,C)
Renomeia a coluna Óbitos por virus
Retira os dados relacionados à Hepatite D
Transforma as colunas relacionadas aos Anos em apenas uma coluna('Ano') e atribui o 'nome_valores' como nome da coluna com os valores numéricos
Trasnforma o tipo de dados da coluna 'Ano' em inteiro e a coluna com os dados numéricos para float(podem haver valores nulos) a fim de evitar problemas e facilitar a manipulação
Parâmetros:
dados : DataFrame onde estão os dados mais recentes dos óbitos de hepatite, tipo : pd.DataFrame
hep : o nome que a coluna com os valores numéricos vai se referir, podendo ser a região do DataFrame ou a palavra 'Obitos' por exemplo, tipo : str
Retorno :
dados : retorna um Dataframe com os dados sobre os óbitos de hepatite A, B e C, tipo : pd.DataFrame
###Markdown
Processing the data
###Code
# Split the tables into hepatitis A, B and C cases and hepatitis deaths for the North region between 2007 and 2019
casos_norte_recentes_A = casos_norte_recentes[1]
casos_norte_recentes_B = casos_norte_recentes[3]
casos_norte_recentes_C = casos_norte_recentes[5]
obitos_norte = casos_norte_recentes[10]
# Split the tables into hepatitis A, B and C cases and hepatitis deaths for the Northeast region between 2007 and 2019
casos_nordeste_recentes_A = casos_nordeste_recentes[1]
casos_nordeste_recentes_B = casos_nordeste_recentes[3]
casos_nordeste_recentes_C = casos_nordeste_recentes[5]
obitos_nordeste = casos_nordeste_recentes[10]
# Split the tables into hepatitis A, B and C cases and hepatitis deaths for the Southeast region between 2007 and 2019
casos_sudeste_recentes_A = casos_sudeste_recentes[1]
casos_sudeste_recentes_B = casos_sudeste_recentes[3]
casos_sudeste_recentes_C = casos_sudeste_recentes[5]
obitos_sudeste = casos_sudeste_recentes[10]
# Split the tables into hepatitis A, B and C cases and hepatitis deaths for the South region between 2007 and 2019
casos_sul_recentes_A = casos_sul_recentes[1]
casos_sul_recentes_B = casos_sul_recentes[3]
casos_sul_recentes_C = casos_sul_recentes[5]
obitos_sul = casos_sul_recentes[10]
# Split the tables into hepatitis A, B and C cases and hepatitis deaths for the Central-West region between 2007 and 2019
casos_centro_recentes_A = casos_centro_recentes[1]
casos_centro_recentes_B = casos_centro_recentes[3]
casos_centro_recentes_C = casos_centro_recentes[5]
obitos_centro = casos_centro_recentes[10]
###Output
_____no_output_____
###Markdown
Now that the tables are properly split, I will use the 'limpa_casos_regiao' function, which performs all the necessary transformations and merges the Hepatitis data by region.
###Code
casos_norte_recentes = limpa_casos_regiao(casos_norte_recentes_A, casos_norte_recentes_B, casos_norte_recentes_C, 'Norte')
casos_nordeste_recentes = limpa_casos_regiao(casos_nordeste_recentes_A, casos_nordeste_recentes_B, casos_nordeste_recentes_C, 'Nordeste')
casos_sudeste_recentes = limpa_casos_regiao(casos_sudeste_recentes_A, casos_sudeste_recentes_B, casos_sudeste_recentes_C, 'Sudeste')
casos_sul_recentes = limpa_casos_regiao(casos_sul_recentes_A, casos_sul_recentes_B, casos_sul_recentes_C, 'Sul')
casos_centro_recentes = limpa_casos_regiao(casos_centro_recentes_A, casos_centro_recentes_B, casos_centro_recentes_C, 'Centro-Oeste')
###Output
_____no_output_____
###Markdown
Now that all the tables with the most recent data are in the same format, I will combine them into a single table.
###Code
casos_reg_recentes = pd.merge(casos_norte_recentes,
pd.merge(casos_nordeste_recentes,
pd.merge(casos_sudeste_recentes,
pd.merge(casos_sul_recentes,casos_centro_recentes,
on=['Ano', 'virus']),
on=['Ano', 'virus']),
on=['Ano', 'virus']),
on=['Ano', 'virus'])
casos_reg_recentes.head()
###Output
_____no_output_____
###Markdown
Now that the recent data is fully processed, I will put the older data, from 2001 to 2006, into the same format.
###Code
# Rename the columns
casos_A.columns = ['Ano', 'Norte', 'Nordeste', 'Sudeste', 'Sul', 'Centro-Oeste']
casos_B.columns = ['Ano', 'Norte', 'Nordeste', 'Sudeste', 'Sul', 'Centro-Oeste']
casos_C.columns = ['Ano', 'Norte', 'Nordeste', 'Sudeste', 'Sul', 'Centro-Oeste']
# Create the 'virus' column to indicate which Hepatitis virus each table refers to
casos_A['virus'] = 'A'
casos_B['virus'] = 'B'
casos_C['virus'] = 'C'
# Join all the tables
casos_reg = casos_A.append(casos_B.append(casos_C))
# Reorder the columns to match the order used for the most recent data
casos_reg = casos_reg[['Ano', 'virus', 'Norte', 'Nordeste', 'Sudeste', 'Sul', 'Centro-Oeste']]
casos_reg.head()
# Append the older data to the most recent data
casos_reg = casos_reg.append(casos_reg_recentes)
# Melt the region columns into a single 'Regiao' column
casos_reg = pd.melt(casos_reg , id_vars=['Ano', 'virus'], var_name='Regiao', value_name='Casos')
casos_reg
###Output
_____no_output_____
###Markdown
Now I will use the 'limpa_obitos' function to apply the transformations to the datasets on Hepatitis deaths for each region.
###Code
obitos_norte = limpa_obitos(obitos_norte, 'Norte')
obitos_nordeste = limpa_obitos(obitos_nordeste, 'Nordeste')
obitos_sudeste = limpa_obitos(obitos_sudeste, 'Sudeste')
obitos_sul = limpa_obitos(obitos_sul, 'Sul')
obitos_centro = limpa_obitos(obitos_centro, 'Centro-Oeste')
###Output
_____no_output_____
###Markdown
Now I will combine the death data into a single table.
###Code
obitos_reg = pd.merge(obitos_norte,
pd.merge(obitos_nordeste,
pd.merge(obitos_sudeste,
pd.merge(obitos_sul,obitos_centro,
on=['Ano', 'virus']),
on=['Ano', 'virus']),
on=['Ano', 'virus']),
on=['Ano', 'virus'])
# Melt the region columns into a single 'Regiao' column
obitos_reg = pd.melt(obitos_reg , id_vars=['Ano', 'virus'], var_name='Regiao', value_name='Obitos')
obitos_reg
# Join the case and death data
dados_reg = pd.merge(casos_reg, obitos_reg, on=['Ano', 'virus', 'Regiao'], how='left')
dados_reg
###Output
_____no_output_____
###Markdown
Now that the case and death data are in a single table, I will process the population data to create rates relative to the population.
###Code
# Drop the null rows from the population data
pop_2001_2019 = pop_2001_2019.dropna()
# Keep only the rows where the 'Unidades da Federação' column refers to a region, using regular expressions
pop_2001_2019 = pop_2001_2019[pop_2001_2019['Unidades da Federação'].str.match(r'Região\s.*')]
# Rename the 'Unidades da Federação' column to 'Regiao'
pop_2001_2019 = pop_2001_2019.rename(columns={'Unidades da Federação':'Regiao'})
# Remove the word 'Região' before the region name
pop_2001_2019['Regiao'] = pop_2001_2019['Regiao'].str.replace(r'Região\s', '')
# Melt the year columns into a single 'Ano' column
pop_2001_2019 = pd.melt(pop_2001_2019, 'Regiao', var_name='Ano', value_name='Pop')
# Convert the 'Ano' column to integer type
pop_2001_2019['Ano'] = pop_2001_2019['Ano'].astype('int64')
pop_2001_2019
# Join the case/death data with the population estimates
dados_reg = pd.merge(dados_reg, pop_2001_2019, on=['Regiao','Ano'])
# Create the 'taxa_incid_por100k' column: Hepatitis incidence rate per 100,000 inhabitants
dados_reg['taxa_incid_por100k'] = dados_reg['Casos'] * 100000 / dados_reg['Pop']
# Create the 'taxa_obitos_por100k' column: Hepatitis death rate per 100,000 inhabitants
dados_reg['taxa_obitos_por100k'] = dados_reg['Obitos'] * 100000 / dados_reg['Pop']
dados_reg
# Drop the 'Pop' column since it is no longer needed
dados_reg = dados_reg.drop(columns='Pop')
# Save all the data to the 'dados_tratados' folder
dados_reg.set_index('Ano').to_csv('../dados_tratados/casos_obitos_por_regiao')
###Output
_____no_output_____ |
LinAl_006.ipynb | ###Markdown
Lecture 13 is a review for the 1st exam. Sort of a wrap-up.

U is 5x3 with rank r = 3, so N(U) = {[0/0/0]} (only the zero vector).

B = [U/2U] (U stacked on 2U). Its reduced echelon form is [U/0], so rank(B) = 3.

C = [U,U/U,0] -> [U,U/0,-U] -> [U,0/0,-U] -> [U,0/0,U], so rank(C) = r(U) x 2 = 6.

dim N(C.T)? C is 10x6, so dim N(C.T) = 10 - r = 10 - 6 = 4.

Ax = [2/4/2] has complete solution x = [2/0/0] + c[1/1/0] + d[0/0/1].
A is 3x3 with r = 1 and dim N(A) = 2.
A = [1,?,?/2,?,?/1,?,?] -> A = [1,?,0/2,?,0/1,?,0] -> A = [1,-1,0/2,-2,0/1,-1,0]
###Code
A = np.array([[1,-1,0],
[2,-2,0],
[1,-1,0]])
b = np.array([2,4,2])
np.dot(A,[2,0,0])
np.dot(A,[1,1,0])
np.dot(A,[0,0,1])
###Output
_____no_output_____
###Markdown
Ax = b can be solved exactly when b is in the column space => here b must be a multiple of [1/2/1].

If N(A) is only the zero vector and A is a square matrix (m = n), then N(A.T) is only the zero vector as well.

True or False: B^2 = 0 => B = 0. False, e.g. B = [[0,1],[0,0]].

True or False: for an nxn matrix with independent columns, Ax = b is always solvable. True, the columns form a basis; also A is invertible.
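A quick numpy check of that counterexample — a sketch; the matrix is called M here to avoid clashing with the B computed in the next cell:

```python
import numpy as np

M = np.array([[0, 1],
              [0, 0]])
print(np.dot(M, M))  # the zero matrix, even though M itself is not zero
```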
###Code
B = np.dot(np.array([[1,1,0],
[0,1,0],
[1,0,1]]),np.array([[1,0,-1,2],
[0,1,1,-1],
[0,0,0,0]]))
B.shape
###Output
_____no_output_____
###Markdown
N(CD) = N(D) if C is invertible. Here dim N(B) = 2. (min 33)
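A sketch of how to confirm this numerically for the B computed above (np.linalg.matrix_rank gives the rank, and the nullity is the number of columns minus the rank):

```python
import numpy as np

rank_B = np.linalg.matrix_rank(B)   # B is the 3x4 product from the previous cell
print(rank_B)                       # 2: C is invertible, so rank(B) = rank(D)
print(B.shape[1] - rank_B)          # dim N(B) = 4 - 2 = 2
```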
###Code
B
###Output
_____no_output_____ |
iot-temperature-to-aws.ipynb | ###Markdown
Sending Raspberry Pi data to an AWS DynamoDB table

We use a Raspberry Pi to capture temperature data and send it to a DynamoDB table through the AWS IoT gateway:
- data is transmitted as JSON to the IoT gateway over the MQTT protocol
- the IoT gateway listens to a specific MQTT topic and uses a rule to push part of the message to a DynamoDB table
- a separate Jupyter notebook displays the data

Prerequisite: AWS access and the AWS CLI configured with sufficient credentials. See https://viastudio.com/creating-an-aws-dynamodb-table-from-the-command-line/

Create the DynamoDB table. The structure of our table will be:
- deviceid (primary partition key)
- timestamp (primary sort key)
- data (MQTT message sent by the IoT thing)

We keep the Read and Write capacity units at their defaults (RCU: 5, WCU: 5).

Get info about an existing table: our table was previously created with the web console. We can get its details with the ```describe-table``` command.
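The rest of this notebook sets up the AWS side (table, role, thing, rule); the next cells start by inspecting the existing table. For reference, the device-side publisher that would feed it is sketched below. This is an assumption-laden sketch: the endpoint, certificate file paths, topic name and payload fields are placeholders, not values used elsewhere in this notebook.

```python
# Device-side sketch (runs on the Raspberry Pi); endpoint, paths and payload fields are placeholders
import json
import time
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient

client = AWSIoTMQTTClient("lab-temp-sensor-1")
client.configureEndpoint("xxxxxxxxxxxxxx-ats.iot.eu-west-1.amazonaws.com", 8883)
client.configureCredentials("AmazonRootCA1.pem", "private.pem.key", "certificate.pem.crt")
client.connect()

reading = {"deviceid": "lab-temp-sensor-1",
           "timestamp": str(int(time.time())),
           "temperature_c": 21.5}              # would come from the temperature sensor
client.publish("lab/temperature", json.dumps(reading), 1)   # QoS 1
client.disconnect()
```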
###Code
%%bash
source ~/awscli/bin/activate
aws dynamodb describe-table --table-name=iot_data2
###Output
TABLE 1528126779.189 262 arn:aws:dynamodb:eu-west-1:814098365754:table/iot_data2 c81fdaa8-f486-4ede-978f-e08e9a04c3ff iot_data2 34498 ACTIVE
ATTRIBUTEDEFINITIONS deviceid S
ATTRIBUTEDEFINITIONS timestamp S
KEYSCHEMA deviceid HASH
KEYSCHEMA timestamp RANGE
PROVISIONEDTHROUGHPUT 0 5 5
###Markdown
Table definition in JSON format: we store a minimal definition in iot_data_table_def.json.
###Code
%cat iot_data_table_def.json
###Output
{
"AttributeDefinitions": [
{
"AttributeName": "deviceid",
"AttributeType": "S"
},
{
"AttributeName": "timestamp",
"AttributeType": "S"
}
],
"TableName": "iot_data",
"KeySchema": [
{
"AttributeName": "deviceid",
"KeyType": "HASH"
},
{
"AttributeName": "timestamp",
"KeyType": "RANGE"
}
],
"ProvisionedThroughput": {
"WriteCapacityUnits": 5,
"ReadCapacityUnits": 5
}
}
###Markdown
Create the table in CLI
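The next cell creates the table with the CLI. For reference, the same definition file could also be consumed from Python with boto3 — a sketch, assuming boto3 is installed and configured with the same credentials (the region is an assumption):

```python
# Sketch: create the table from the same JSON definition using boto3
import json
import boto3

dynamodb = boto3.client("dynamodb", region_name="eu-west-1")
with open("iot_data_table_def.json") as f:
    table_def = json.load(f)
dynamodb.create_table(**table_def)
```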
###Code
%%bash
source ~/awscli/bin/activate
aws dynamodb create-table --cli-input-json file://iot_data_table_def.json
###Output
TABLEDESCRIPTION 1529937491.346 0 arn:aws:dynamodb:eu-west-1:814098365754:table/iot_data ffd35ba0-35ab-46d3-8ec6-aebf73f35b38 iot_data 0 CREATING
ATTRIBUTEDEFINITIONS deviceid S
ATTRIBUTEDEFINITIONS timestamp S
KEYSCHEMA deviceid HASH
KEYSCHEMA timestamp RANGE
PROVISIONEDTHROUGHPUT 0 5 5
###Markdown
Grant the IoT role access to the table

We create an IoT role (the IoT gateway will later assume this role to get write access to DynamoDB). A policy that allows writes on the DynamoDB table is then attached to this role.

Create an IoT role (to be assumed by the IoT service). Role name: `iot_sensor_role`; its assume-role (trust) policy is in `iot_sensor_role.json`.
###Code
%cat iot_sensor_role.json
%%bash
source ~/awscli/bin/activate
aws iam create-role --role-name iot_sensor_role --assume-role-policy-document file://iot_sensor_role.json
###Output
ROLE arn:aws:iam::814098365754:role/iot_sensor_role 2018-06-25T15:47:59.316Z / AROAI7REHXSWLIMDGNMT4 iot_sensor_role
ASSUMEROLEPOLICYDOCUMENT 2012-10-17
STATEMENT sts:AssumeRole Allow
PRINCIPAL iot.amazonaws.com
###Markdown
Add a policy to allow table write access

This policy grants write access to the DynamoDB table. We use the table ARN (account followed by table name): `arn:aws:dynamodb:eu-west-1:814098365754:table/iot_data`. This ARN can be retrieved from the table creation output.
###Code
%cat iot_table_write_policy.json
###Output
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "dynamodb:PutItem",
"Resource": "arn:aws:dynamodb:eu-west-1:814098365754:table/iot_data"
}
}
###Markdown
create the policy
###Code
%%bash
source ~/awscli/bin/activate
aws iam create-policy --policy-name iot_table_write_policy --policy-document file://iot_table_write_policy.json
###Output
POLICY arn:aws:iam::814098365754:policy/iot_table_write_policy 0 2018-06-25T15:55:53.578Z v1 True / ANPAJMEGK3TQUBEPC7QW4 iot_table_write_policy 2018-06-25T15:55:53.578Z
###Markdown
attach table write policy to iot_sensor_role
###Code
%%bash
source ~/awscli/bin/activate
aws iam attach-role-policy --role-name iot_sensor_role --policy-arn arn:aws:iam::814098365754:policy/iot_table_write_policy
###Output
_____no_output_____
###Markdown
Add the passrole permission: the identity that creates the IoT topic rule must be allowed to pass `iot_sensor_role` to the IoT service (`iam:PassRole`).
###Code
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Stmt1",
"Effect": "Allow",
"Action": [
"iam:PassRole"
],
"Resource": [
"arn:aws:iam::814098365754:role/iot_sensor_role"
]
}
]
}
###Output
_____no_output_____
###Markdown
Register an IoT thing

We register a thing on AWS IoT and name it `lab-temp-sensor-1`.
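The next cell registers the thing with the CLI. The thing will also need an active certificate attached to it so the Raspberry Pi can authenticate over MQTT; that step is not shown in this notebook, but a boto3 sketch could look like this (the region is an assumption, and writing the returned PEM material to files on the device is omitted):

```python
# Sketch only: create an active certificate and attach it to the thing
import boto3

iot = boto3.client("iot", region_name="eu-west-1")
cert = iot.create_keys_and_certificate(setAsActive=True)
iot.attach_thing_principal(thingName="lab-temp-sensor-1",
                           principal=cert["certificateArn"])
# cert["certificatePem"] and cert["keyPair"] would be saved to files on the Pi
```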
###Code
%%bash
source ~/awscli/bin/activate
aws iot create-thing --thing-name=lab-temp-sensor-1
###Output
arn:aws:iot:eu-west-1:814098365754:thing/lab-temp-sensor-1 1ad6c1b2-c315-46f5-b5a4-ab82b4c93112 lab-temp-sensor-1
###Markdown
Create the rule

The rule pushes messages received on the /lab/temperature topic to the DynamoDB table. It needs the correct role ARN as well as the table details and key mapping.
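The next cell keeps the generic values from the AWS docs example. Adapted to this project, the payload saved as `my-rule.json` might look like the sketch below; the mapping to the `deviceid`/`timestamp`/`data` columns and the use of `clientid()` are assumptions based on the table definition above, not something this notebook shows:

```python
import json

# Hypothetical payload adapted to this project's topic, table and role
rule_payload = {
    "sql": "SELECT * FROM 'lab/temperature'",
    "ruleDisabled": False,
    "awsIotSqlVersion": "2016-03-23",
    "actions": [{
        "dynamoDB": {
            "tableName": "iot_data",
            "roleArn": "arn:aws:iam::814098365754:role/iot_sensor_role",
            "hashKeyField": "deviceid",
            "hashKeyValue": "${clientid()}",
            "rangeKeyField": "timestamp",
            "rangeKeyValue": "${timestamp()}",
            "payloadField": "data"
        }
    }]
}
with open("my-rule.json", "w") as f:
    json.dump(rule_payload, f, indent=2)
```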
###Code
# Generic example of a topic-rule payload (placeholder topic and table name from the AWS docs),
# saved as my-rule.json before calling the CLI
example_rule_payload = {
    "sql": "SELECT * FROM 'iot/test'",
    "ruleDisabled": False,
    "awsIotSqlVersion": "2016-03-23",
    "actions": [{
        "dynamoDB": {
            "tableName": "my-dynamodb-table",
            "roleArn": "arn:aws:iam::814098365754:role/iot_sensor_role",
            "hashKeyField": "topic",
            "hashKeyValue": "${topic(2)}",
            "rangeKeyField": "timestamp",
            "rangeKeyValue": "${timestamp()}"
        }
    }]
}
# Then create the rule from the saved payload file:
# aws iot create-topic-rule --rule-name my-rule --topic-rule-payload file://my-rule.json
###Output
_____no_output_____ |
notebooks/03_MIMICII_Chest_XRay_Reports.ipynb | ###Markdown
Set up our MySQL connection (via pymysql); this lets us read query results directly into Pandas DataFrames.
###Code
import getpass
import pandas as pd
import pymysql
from IPython.display import display

conn = pymysql.connect(host="35.233.174.193", port=3306,
                       user=getpass.getpass("Enter username for MIMIC2 database"),
                       passwd=getpass.getpass("Enter password for MIMIC2 database"),
                       db='mimic2')
iconn = get_mimic_connection()  # assumed to come from a project helper module loaded elsewhere
###Output
_____no_output_____
###Markdown
Before we move ahead, we will do some counts of patients, admissions and notes to ensure connectivity and also get a sense of the dataset
###Code
display(pd.read_sql_query('SELECT count(*) as PatientCount from d_patients', conn))
display(pd.read_sql_query('SELECT count(*) as AdmissionCount from admissions', conn))
display(pd.read_sql_query('SELECT count(*) as NoteCount from noteevents', conn))
iconn.table("d_patients").count().execute()
###Output
_____no_output_____
###Markdown
MIMIC-II (and MIMIC-III) has tables for Admissions, ICD-9 codes, notes and many other pieces of data
###Code
display(pd.read_sql_query('SELECT * from admissions LIMIT 5', conn))
display(pd.read_sql_query('SELECT * from icd9 LIMIT 5', conn))
display(pd.read_sql_query('SELECT * from noteevents LIMIT 5', conn))
# now let's get a frame of Patient/Admit/Pneumonia
pneumonia_query = """
SELECT
a.subject_id
,a.hadm_id
,a.admit_dt
,(CASE WHEN pneu.HADM_ID IS NOT NULL THEN 1 ELSE 0 END) as Encounter_Pneumonia_Diagnosis
FROM admissions a
LEFT JOIN
(
SELECT
d.HADM_ID
FROM icd9 d
WHERE
(code like '486%%')
GROUP BY d.HADM_ID
) pneu
ON a.HADM_ID = pneu.HADM_ID
"""
pat_admit_pneumonia_df = pd.read_sql_query(pneumonia_query, conn)
display(pat_admit_pneumonia_df)
# let's get a count of how many PNEUMONIA vs NO-PNEUMONIA admits we have
pneumonia_admit_count_df = pat_admit_pneumonia_df.groupby('Encounter_Pneumonia_Diagnosis').size()
display(pneumonia_admit_count_df)
# before pulling note text, let's get a distribution of how many RADIOLOGY reports
# typically exist per admission
visit_rad_report_count_query = """
SELECT
n.hadm_id
,count(*) as rad_note_count
FROM d_patients p
INNER JOIN noteevents n
ON n.subject_id = p.subject_id
WHERE
Category = 'RADIOLOGY_REPORT'
AND (text like '%%CHEST (PORTABLE AP)%%' OR text like '%%CHEST (PA & LAT)%%')
AND n.hadm_id IS NOT NULL
GROUP BY n.hadm_id
ORDER BY count(*) DESC
"""
visit_rad_report_count_df = pd.read_sql_query(visit_rad_report_count_query, conn)
display(visit_rad_report_count_df.head(10))
###Output
_____no_output_____
###Markdown
Scipy has useful methods for describing distributions, like our count of chest x-rays per encounter.
###Code
rad_note_counts = visit_rad_report_count_df['rad_note_count'].values
scipy.stats.describe(rad_note_counts)
###Output
_____no_output_____
###Markdown
Notes in MIMIC have a category (e.g. "RADIOLOGY_REPORT") and within the text there are often "sub categories" on the second line of the file. Pulling the appropriate sub categories as a few "like" statements does the job, but it is worth looking at some of these on your own
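One quick way to peek at those sub categories is to pull the second line of a sample of reports and count the values — a sketch only; the LIMIT and the exact line index are arbitrary choices for illustration:

```python
# Sketch: look at the exam-type line (second line) of a sample of radiology reports
sample_notes = pd.read_sql_query(
    "SELECT text FROM noteevents WHERE category = 'RADIOLOGY_REPORT' LIMIT 200", conn)
exam_type = sample_notes['text'].str.strip().str.split('\n').str[1].str.strip()
print(exam_type.value_counts().head(10))
```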
###Code
# before pulling note text, let's get a distribution of how many RADIOLOGY reports
# typically exist per admission
visit_rad_report_count_query = """
SELECT
n.hadm_id
,count(*) as rad_note_count
FROM d_patients p
INNER JOIN noteevents n
ON n.subject_id = p.subject_id
WHERE
Category = 'RADIOLOGY_REPORT'
AND (text like '%%CHEST (PORTABLE AP)%%' OR text like '%%CHEST (PA & LAT)%%')
AND n.hadm_id IS NOT NULL
GROUP BY n.hadm_id
ORDER BY count(*) DESC
"""
visit_rad_report_count_df = pd.read_sql_query(visit_rad_report_count_query, conn)
display(visit_rad_report_count_df)
###Output
_____no_output_____
###Markdown
Some admissions have only one chest x-ray report, but many have several. This graph looks at that distribution.
###Code
rad_note_count_grouping = visit_rad_report_count_df.groupby('rad_note_count').size()
#display(rad_note_count_grouping)
note_count_bins = rad_note_count_grouping.index.values
#print(note_count_bins)
note_frequencies = rad_note_count_grouping.values
#print(note_frequencies)
fig = plt.figure(figsize=(16, 8))
plt.xlabel('Total Radiology Chest X-Ray Notes per visit')
plt.ylabel('Total Visits')
plt.bar(note_count_bins, note_frequencies)
###Output
_____no_output_____
###Markdown
We can then pull these notes into a frame.
###Code
# now let's pull a frame of all the FIRST (sorted by text which begins with date) CHEST X-RAY notes
chest_xray_note_query = """
SELECT
subject_id
,hadm_id
,LTRIM(RTRIM(text)) as text
FROM noteevents
WHERE category = 'RADIOLOGY_REPORT'
AND (text like '%%CHEST (PORTABLE AP)%%' OR text like '%%CHEST (PA & LAT)%%')
AND subject_id is not NULL
AND hadm_id is not NULL
GROUP BY subject_id, hadm_id, text
"""
chest_xray_note_df = pd.read_sql_query(chest_xray_note_query, conn)
display(chest_xray_note_df.head(10))
###Output
_____no_output_____
###Markdown
Much like a SQL "join" we can combine our frame which has ICD-9 codes with the frame that has notes so that we can sample from these intelligently
###Code
pneumonia_note_df = pd.merge(pat_admit_pneumonia_df, chest_xray_note_df, on = ['subject_id', 'hadm_id'])
display(pneumonia_note_df.head(20))
###Output
_____no_output_____
###Markdown
We sampled notes where the encounter was coded for Pneumonia (ICD-9 code 486.*) and where it was not coded. We performed stratified sampling, taking one percentage of notes from one group and the remainder from the other. We won't show that here, but this is how we set up the group project.
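As a rough illustration only (not the exact sampling used for the project), a stratified sample over the diagnosis flag could be drawn like this, with arbitrary fractions:

```python
# Sketch with arbitrary fractions: 20% of pneumonia-coded notes, 5% of the rest
sampled_df = (pneumonia_note_df
              .groupby('Encounter_Pneumonia_Diagnosis', group_keys=False)
              .apply(lambda g: g.sample(frac=0.20 if g.name == 1 else 0.05, random_state=42)))
print(sampled_df.groupby('Encounter_Pneumonia_Diagnosis').size())
```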
###Code
pneumonia_note_count_df = pneumonia_note_df.groupby('Encounter_Pneumonia_Diagnosis').size()
display(pneumonia_note_count_df)
# now let's list out some of the notes where Pneumonia WAS diagnosed
pneumonia_positive_notes = pneumonia_note_df[pneumonia_note_df['Encounter_Pneumonia_Diagnosis'] == 1]['text'].head(1).values
for note in pneumonia_positive_notes:
print(note)
#sys.stdout.write(note)
# now let's list out some of the notes where Pneumonia WAS diagnosed
pneumonia_negative_notes = pneumonia_note_df[pneumonia_note_df['Encounter_Pneumonia_Diagnosis'] == 0]['text'].head(1).values
with open("notes.txt", "w") as f0:
for note in pneumonia_negative_notes:
#print(note)
f0.write(note)
###Output
_____no_output_____
###Markdown
We can use a slider widget to drag back and forth through the set and display the reports easily.
###Code
# This function let's us iterate through all documents and view the markup
def view_documents(reports):
@interact(i=ipywidgets.IntSlider(min=0, max=len(reports)-1))
def _view_document(i):
report_html = reports[i].replace('\n', '<br>')
display(HTML(report_html))
chest_xray_list = list(chest_xray_note_df['text'].values)
view_documents(chest_xray_list)
###Output
_____no_output_____
###Markdown
It can be useful to use these chest x-ray radiology reports to get an idea of the language they contain. For example, let's look at what kinds of words and counts we see in this dataset.
###Code
%%time
MAX_REPORTS_FOR_WORD_COUNT = 50
# let's start by collecting words from all texts
chest_xray_words = []
STOPWORDS= frozenset([w.upper() for w in STOPWORDS])
sampled_xray_list = chest_xray_list[:MAX_REPORTS_FOR_WORD_COUNT]
for text in sampled_xray_list:
words = TextBlob(text.lower()).words
# extend() adds all elements from another list
chest_xray_words.extend(words)
chest_xray_word_set = set(chest_xray_words)
print('Total unique words in Chest X-ray reports : {0}'.format(len(chest_xray_word_set)))
# and then we can see the most common words in this set of documents
Counter(chest_xray_words).most_common(30)
###Output
_____no_output_____ |
examples/active_light_cw_photonics.ipynb | ###Markdown
Table of Contents
1 Flux on object
1.1 Side by side comparison
2 Signal
2.1 IR Pulse
2.2 Solar
3 Noise
3.1 Noise sources
3.2 Noise comparison
4 SNR
5 All component
###Code
import numpy as np
import pandas as pd
import os
import sys
from IPython.display import display, HTML
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
init_notebook_mode(connected=True) # for Jupyter Lab notebook
CURRENT_DIR = os.path.dirname(os.getcwd())
sys.path.append(os.path.dirname(CURRENT_DIR+'/func'))
from func.photonic_func import Photonic
def overide_phototonic(photonic):
photonic.sensor_.loc['ToF_850','DkSig_e_s'] = 50000 # [e-/sec]
photonic.sensor_.loc['ToF_850', 'epi_thick_um'] = 9.5 # [um]
photonic.update_photonic()
return photonic
photonic = Photonic()
display(HTML(photonic.config_.to_html()))
display(HTML(photonic.sensor_.to_html()))
display(HTML(photonic.light_.to_html()))
display(HTML(photonic.op_.to_html()))
###Output
_____no_output_____
###Markdown
Flux on object

Side by side comparison
###Code
# test_cfg = ['fake_tof_night_850','fake_tof_day_850',
# 'fake_tof_night_940','fake_tof_day_940',
# 'fake_tof_night_1375','fake_tof_day_1375',
# 'fake_tof_night_1550','fake_tof_day_1550']
# data = [0,1,2,3,4,5,6,7, 8, 9, 10, 11, 12, 13, 14, 15]
test_cfg = ['Lidar_2_axes', 'Lidar_2_axes_2']
data = len(test_cfg) * [None] * 2
for i, cfg in enumerate(test_cfg):
photonic = Photonic(config=cfg)
photonic = overide_phototonic(photonic)
dist_vec = np.array([0.5,1,2,5,10, 20, 50, 100, 200])
trace0 = go.Scatter(x=dist_vec,
y=1000 * photonic.wallFlux(dist_vec=dist_vec), # Select 'lines', 'markers' or 'lines+markers'
mode='lines+markers', name=cfg[9:]+' IR')
trace1 = go.Scatter(x=dist_vec,
y=1000 * photonic.wallFlux(dist_vec=dist_vec, light_type='solar'), mode='lines+markers', # Select 'lines', 'markers' or 'lines+markers'
name=(cfg[9:]+' Solar'))
data[2*i] = trace0
data[2*i+1] = trace1
layout = dict(title='Photonic simulation - Flux on wall - Fake ToF - Constrained to Same Illumination Power',
xaxis=dict(title='Wall Distance [m]', type='log'), # Select 'log' or 'linear'
yaxis=dict(title='Flux [mW/m**2]',type='log'), # Select 'log' or 'linear'
template='plotly_dark',
barmode='group',
hovermode='x')
iplot(dict(data=data, layout=layout))
###Output
_____no_output_____
###Markdown
Signal

IR Pulse
###Code
test_cfg = ['fake_tof_night_850','fake_tof_day_850',
'fake_tof_night_940','fake_tof_day_940',
'fake_tof_night_1375','fake_tof_day_1375',
'fake_tof_night_1550','fake_tof_day_1550']
data = [0,1,2,3,4,5,6,7]
data2 = [0,1,2,3,4,5,6,7]
# test_cfg = ['fake_tof_day_940','fake_tof_day_1375']
# data = [0,1]
for i, cfg in enumerate(test_cfg):
photonic = Photonic(config=cfg)
photonic = overide_phototonic(photonic)
dist_vec = np.array([0.5,0.7,1,1.5,2,3,4,5,7,10])
signal = photonic.photoelectron2(dist_vec=dist_vec)
trace0 = go.Scatter(x=dist_vec, y=signal, # Select 'lines', 'markers' or 'lines+markers'
mode='lines+markers', name=cfg[9:])
data[i] = trace0
layout = dict(title='Photonic simulation - Signal',
xaxis=dict(title='Wall Distance [m]', type='log'), # Select 'log' or 'linear'
yaxis=dict(title='Noise [e-]',type='log'), # Select 'log' or 'linear'
template='plotly_dark',
barmode='group',
hovermode='x')
iplot(dict(data=data, layout=layout))
###Output
_____no_output_____
###Markdown
Solar
###Code
test_cfg = ['fake_tof_day_850','fake_tof_day_940','fake_tof_day_1375','fake_tof_day_1550']
data = [0,1,2,3]
# test_cfg = ['fake_tof_day_940','fake_tof_day_1375']
# data = [0,1]
for i, cfg in enumerate(test_cfg):
photonic = Photonic(config=cfg)
photonic = overide_phototonic(photonic)
dist_vec = np.array([0.5,0.7,1,1.5,2,3,4,5,7,10])
solar=photonic.photoelectron(
siliconFlux=photonic.siliconFlux(
wall_flux=photonic.wallFlux(dist_vec=dist_vec,light_type='solar')))
trace0 = go.Scatter(x=dist_vec, y=solar, # Select 'lines', 'markers' or 'lines+markers'
mode='lines+markers', name=cfg[9:])
data[i] = trace0
layout = dict(title='Photonic simulation - Solar Flux',
xaxis=dict(title='Wall Distance [m]', type='log'), # Select 'log' or 'linear'
yaxis=dict(title='Noise [e-]',type='log'), # Select 'log' or 'linear'
template='plotly_dark',
barmode='group',
hovermode='x')
iplot(dict(data=data, layout=layout))
photonic = Photonic()
solar_W_m2_um, wavelength_um = photonic.solarSpectrum_W_m2_um()
data = go.Scatter(x=wavelength_um, y=solar_W_m2_um, # Select 'lines', 'markers' or 'lines+markers'
mode='lines+markers')
layout = dict(title='Solar Spectrum',
xaxis=dict(title='Wavelength [µm]', type='linear'), # Select 'log' or 'linear'
yaxis=dict(title='Noise [e-]',type='linear'), # Select 'log' or 'linear'
template='plotly_dark',
barmode='group',
hovermode='x')
iplot(dict(data=data, layout=layout))
###Output
_____no_output_____
###Markdown
Noise Noise sources
###Code
# test_cfg = ['fake_tof_night_850','fake_tof_day_850',
# 'fake_tof_night_940','fake_tof_day_940',
# 'fake_tof_night_1375','fake_tof_day_1375',
# 'fake_tof_night_1550','fake_tof_day_1550']
# data = [0,1,2,3,4,5,6,7]
cfg = 'fake_tof_day_1375'
# data = [0]
# for i, cfg in enumerate(test_cfg):
photonic = Photonic(config=cfg)
photonic = overide_phototonic(photonic)
dist_vec = np.array([0.5,0.7,1,1.5,2,3,4,5,7,10])
signal, noise, SNR = photonic.signal_to_noise_ratio(dist_vec=dist_vec)
sz = len(noise['noise_photon_shot'])
trace0 = go.Scatter(x=dist_vec, y=noise['noise_photon_shot'], # Select 'lines', 'markers' or 'lines+markers'
mode='lines+markers', name='photon shot '+cfg[9:])
trace1 = go.Scatter(x=dist_vec, y=sz*[noise['dark_noise']], # Select 'lines', 'markers' or 'lines+markers'
mode='lines+markers', name='dark '+cfg[9:])
trace2 = go.Scatter(x=dist_vec, y=sz*[noise['kTC_noise']], # Select 'lines', 'markers' or 'lines+markers'
mode='lines+markers', name='kTC '+cfg[9:])
data = [trace0, trace1, trace2]
layout = dict(title='Photonic simulation - Major Noise Sources',
xaxis=dict(title='Wall Distance [m]', type='log'), # Select 'log' or 'linear'
yaxis=dict(title='Noise [e-]',type='log'), # Select 'log' or 'linear'
template='plotly_dark',
barmode='group',
hovermode='x')
iplot(dict(data=data, layout=layout))
###Output
_____no_output_____
###Markdown
Noise comparison
###Code
test_cfg = ['fake_tof_night_850','fake_tof_day_850',
'fake_tof_night_940','fake_tof_day_940',
'fake_tof_night_1375','fake_tof_day_1375',
'fake_tof_night_1550','fake_tof_day_1550']
data = [0,1,2,3,4,5,6,7]
data2 = [0,1,2,3,4,5,6,7]
# test_cfg = ['fake_tof_day_940','fake_tof_day_1375']
# data = [0,1]
for i, cfg in enumerate(test_cfg):
photonic = Photonic(config=cfg)
photonic = overide_phototonic(photonic)
dist_vec = np.array([0.5,0.7,1,1.5,2,3,4,5,7,10])
signal, noise, SNR = photonic.signal_to_noise_ratio(dist_vec=dist_vec)
trace0 = go.Scatter(x=dist_vec, y=sum(noise.values()), # Select 'lines', 'markers' or 'lines+markers'
mode='lines+markers', name=cfg[9:])
data[i] = trace0
trace1 = go.Scatter(x=dist_vec, y=SNR, # Select 'lines', 'markers' or 'lines+markers'
mode='lines+markers', name=cfg[9:])
data2[i] = trace1
trace20 = go.Scatter(x=dist_vec, y=10*[5], # Select 'lines', 'markers' or 'lines+markers'
mode='lines', name='Low SNR limit for ToF')
data2.append(trace20)
layout = dict(title='Photonic simulation - Summed Noise',
xaxis=dict(title='Wall Distance [m]', type='log'), # Select 'log' or 'linear'
yaxis=dict(title='Noise [e-]',type='log'), # Select 'log' or 'linear'
template='plotly_dark',
barmode='group',
hovermode='x')
iplot(dict(data=data, layout=layout))
###Output
_____no_output_____
###Markdown
SNR
###Code
layout = dict(title='Photonic simulation - SNR',
xaxis=dict(title='Wall Distance [m]', type='log'), # Select 'log' or 'linear'
yaxis=dict(title='SNR',type='log'), # Select 'log' or 'linear'
template='plotly_dark',
barmode='group',
hovermode='x')
iplot(dict(data=data2, layout=layout))
###Output
_____no_output_____
###Markdown
 All components
###Code
photonic = Photonic(config='fake_tof_day_1375')
photonic = overide_phototonic(photonic)
dist_vec = np.array([0.5,0.7,1,1.5,2,3,4,5,7,10])
trace0 = go.Scatter(x=dist_vec,
y=1000 * photonic.wallFlux(dist_vec=dist_vec), mode='lines+markers', # Select 'lines', 'markers' or 'lines+markers'
name='wallFlux [W/m^2]')
trace01 = go.Scatter(x=dist_vec,
y=1000 * photonic.wallFlux(dist_vec=dist_vec, light_type='solar'), mode='lines+markers', # Select 'lines', 'markers' or 'lines+markers'
name='Solar wallFlux [W/m^2]')
trace1 = go.Scatter(x=dist_vec,
y=1000 * photonic.siliconFlux2(dist_vec=dist_vec), mode='lines+markers',
name='siliconFlux [W/m^2]')
trace2 = go.Scatter(x=dist_vec,
y=photonic.photoelectron2(dist_vec=dist_vec), mode='lines+markers',
name='photoelectrons / frame')
signal, noise, SNR = photonic.signal_to_noise_ratio(dist_vec=dist_vec)
trace3 = go.Scatter(x=dist_vec,
y=SNR, mode='lines+markers',
name='SNR')
trace4 = go.Scatter(x=[0.5],
y=[0.2], mode='text', textposition='top right',
name='text', text=['Photonic simulation of light created by a light source attached to a camera<br>'
+'1. Flux is calculated on a wall at a certain distance assuming CW lighting mode<br>'
+'2. Flux on the focal plane of the silicon sensor as imaged from the wall thru the lens<br>'
+'3. Photoelectrons per a single burst collect in the photodiode of the pixel'])
data = [trace0, trace01, trace1, trace2, trace3, trace4]
layout = dict(title='Photonic simulation - Flux on wall/sensor, PE count, SNR (fake_tof_day_1375)' ,
xaxis=dict(title='Wall Distance [m]', type='log'), # Select 'log' or 'linear'
yaxis=dict(title='',type='log'), # Select 'log' or 'linear'
template='plotly_dark',
barmode='group',
hovermode='x')
iplot(dict(data=data, layout=layout))
###Output
_____no_output_____ |
Homework/10 AutoEncoders/628_HW_10_part_1_.ipynb | ###Markdown
 Question 1. Use the Python code "EE628A_autoEncoder_demo.py" to test the MNIST image data. Calculate the difference between the decoded images and the original images. Sort the difference values and find the top-10 images with the largest autoencoder errors. Show the top-10 images and comment on your findings (check whether these images really look different from most of the rest).
###Code
import tensorflow as tf
print(tf.__version__)
from keras.layers import Input, Dense
from keras.models import Model
# this is the size of our encoded representations
encoding_dim = 32 # 32 floats -> compression of factor 24.5, assuming the input is 784 floats
# this is our input placeholder
input_img = Input(shape=(784,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input_img)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(784, activation='sigmoid')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
from keras.datasets import mnist
import numpy as np
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print (x_train.shape)
print (x_test.shape)
autoencoder.fit(x_train, x_train,
epochs=50,
batch_size=256,
shuffle=True,
validation_data=(x_test, x_test))
# encode and decode some digits
# note that we take them from the *test* set
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
# use Matplotlib (don't ask)
import matplotlib.pyplot as plt
n = 10 # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(2, n, i + 1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
from keras.layers import Input, Dense
from keras.models import Model
# this is the size of our encoded representations
encoding_dim = 32 # 32 floats -> compression of factor 24.5, assuming the input is 784 floats
# this is our input placeholder
input_img = Input(shape=(784,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input_img)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(784, activation='sigmoid')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
from keras.datasets import mnist
import numpy as np
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_train.shape)
print(x_test.shape)
autoencoder.fit(x_train, x_train,
epochs=50,
batch_size=256,
shuffle=True,
validation_data=(x_test, x_test))
# encode and decode some digits
# note that we take them from the *test* set
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
# calculating difference(absolute) in test images
diff = np.absolute(x_test-decoded_imgs)
# summing the difference
diff = np.sum(diff, axis = 1)
print(diff.shape)
# finding the top 10 max difference
indices = np.argsort(diff)
# use Matplotlib (don't ask)
import matplotlib.pyplot as plt
n = 10 # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
# for reversed
j = indices[len(indices)-1-i]
# display original
ax = plt.subplot(2, n, i + 1)
plt.imshow(x_test[j].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[j].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(2, n, i + 1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
###Output
(60000, 784)
(10000, 784)
Epoch 1/50
235/235 [==============================] - 7s 24ms/step - loss: 0.6961 - val_loss: 0.6960
Epoch 2/50
235/235 [==============================] - 5s 20ms/step - loss: 0.6959 - val_loss: 0.6958
Epoch 3/50
235/235 [==============================] - 3s 12ms/step - loss: 0.6956 - val_loss: 0.6956
Epoch 4/50
235/235 [==============================] - 3s 13ms/step - loss: 0.6954 - val_loss: 0.6954
Epoch 5/50
235/235 [==============================] - 4s 15ms/step - loss: 0.6952 - val_loss: 0.6952
Epoch 6/50
235/235 [==============================] - 4s 15ms/step - loss: 0.6951 - val_loss: 0.6950
Epoch 7/50
235/235 [==============================] - 4s 15ms/step - loss: 0.6949 - val_loss: 0.6948
Epoch 8/50
235/235 [==============================] - 3s 15ms/step - loss: 0.6947 - val_loss: 0.6946
Epoch 9/50
235/235 [==============================] - 4s 18ms/step - loss: 0.6945 - val_loss: 0.6945
Epoch 10/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6943 - val_loss: 0.6943
Epoch 11/50
235/235 [==============================] - 4s 17ms/step - loss: 0.6941 - val_loss: 0.6941
Epoch 12/50
235/235 [==============================] - 4s 17ms/step - loss: 0.6939 - val_loss: 0.6939
Epoch 13/50
235/235 [==============================] - 4s 17ms/step - loss: 0.6938 - val_loss: 0.6937
Epoch 14/50
235/235 [==============================] - 4s 17ms/step - loss: 0.6936 - val_loss: 0.6936
Epoch 15/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6934 - val_loss: 0.6934
Epoch 16/50
235/235 [==============================] - 4s 17ms/step - loss: 0.6932 - val_loss: 0.6932
Epoch 17/50
235/235 [==============================] - 4s 17ms/step - loss: 0.6931 - val_loss: 0.6930
Epoch 18/50
235/235 [==============================] - 4s 17ms/step - loss: 0.6929 - val_loss: 0.6929
Epoch 19/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6927 - val_loss: 0.6927
Epoch 20/50
235/235 [==============================] - 4s 17ms/step - loss: 0.6926 - val_loss: 0.6925
Epoch 21/50
235/235 [==============================] - 4s 17ms/step - loss: 0.6924 - val_loss: 0.6924
Epoch 22/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6922 - val_loss: 0.6922
Epoch 23/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6921 - val_loss: 0.6920
Epoch 24/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6919 - val_loss: 0.6919
Epoch 25/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6918 - val_loss: 0.6917
Epoch 26/50
235/235 [==============================] - 4s 17ms/step - loss: 0.6916 - val_loss: 0.6915
Epoch 27/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6914 - val_loss: 0.6914
Epoch 28/50
235/235 [==============================] - 4s 18ms/step - loss: 0.6913 - val_loss: 0.6912
Epoch 29/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6911 - val_loss: 0.6910
Epoch 30/50
235/235 [==============================] - 4s 17ms/step - loss: 0.6909 - val_loss: 0.6908
Epoch 31/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6908 - val_loss: 0.6907
Epoch 32/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6906 - val_loss: 0.6905
Epoch 33/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6904 - val_loss: 0.6903
Epoch 34/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6902 - val_loss: 0.6902
Epoch 35/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6901 - val_loss: 0.6900
Epoch 36/50
235/235 [==============================] - 4s 17ms/step - loss: 0.6899 - val_loss: 0.6898
Epoch 37/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6897 - val_loss: 0.6896
Epoch 38/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6895 - val_loss: 0.6894
Epoch 39/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6894 - val_loss: 0.6893
Epoch 40/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6892 - val_loss: 0.6891
Epoch 41/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6890 - val_loss: 0.6889
Epoch 42/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6888 - val_loss: 0.6887
Epoch 43/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6886 - val_loss: 0.6885
Epoch 44/50
235/235 [==============================] - 4s 17ms/step - loss: 0.6884 - val_loss: 0.6883
Epoch 45/50
235/235 [==============================] - 4s 15ms/step - loss: 0.6882 - val_loss: 0.6881
Epoch 46/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6880 - val_loss: 0.6879
Epoch 47/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6878 - val_loss: 0.6877
Epoch 48/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6876 - val_loss: 0.6875
Epoch 49/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6874 - val_loss: 0.6872
Epoch 50/50
235/235 [==============================] - 4s 16ms/step - loss: 0.6872 - val_loss: 0.6870
(10000,)
|
_build/html/_sources/intro-to-jupyter.ipynb | ###Markdown
Getting Started with Jupyter NotebooksCreated by [Nathan Kelber](http://nkelber.com) and Ted Lawless for [JSTOR Labs](https://labs.jstor.org/) under [Creative Commons CC BY License](https://creativecommons.org/licenses/by/4.0/)For questions/comments/improvements, email [email protected].___ **Getting Started with Jupyter Notebooks****Description:** This lesson introduces [Jupyter notebooks](https://docs.tdm-pilot.org/key-terms/jupyter-notebook) and [Python](https://docs.tdm-pilot.org/key-terms/python) for absolute beginners. If you are completely new to text analysis, this is the place to start. **Use Case:** For Learners (Additional explanation, not ideal for researchers)**Difficulty:** Beginner**Completion time:** 15 minutes**Knowledge Required:** None**Knowledge Recommended:** None**Data Format:** None**Libraries Used:** `time` to demonstrate code cell execution**Research Pipeline:** None___ [](https://youtu.be/3jZYC9rGrNg) IntroductionWelcome to your first [Jupyter notebook](https://docs.tdm-pilot.org/key-terms/jupyter-notebook). [Jupyter notebooks](https://docs.tdm-pilot.org/key-terms/jupyter-notebook) are documents that contain both computer code (like [Python](https://docs.tdm-pilot.org/key-terms/python)) alongside explanatory images, figures, videos, and links. Most importantly, the code in a [Jupyter notebook](https://docs.tdm-pilot.org/key-terms/jupyter-notebook) can be executed, modified, and deleted. As you explore this notebook, please feel free to modify the text, the code, and to generally play around with the environment. You can always [launch another instance of this notebook](https://docs.tdm-pilot.org/intro-to-jupyter-notebooks/) that will restore its original configuration. Later, you may learn how to create, modify, and save your own notebooks to share with others. CellsSimilar to the way an essay is composed of paragraphs, Jupyter notebooks are composed of [cells](https://docs.tdm-pilot.org/key-terms/cell). A [cell](https://docs.tdm-pilot.org/key-terms/cell) is like a container for a particular kind of content. There are essentially two kinds of content in Jupyter notebooks:1. [Markdown Cells](https://docs.tdm-pilot.org/key-terms/markdown-cell)- These can contain text, images, video, and other kinds of explanatory content you might find on a regular website. This cell is a markdown cell.2. [Code Cells](https://docs.tdm-pilot.org/key-terms/code-cell)- These can contain code written in a variety of languages.A [code cell](https://docs.tdm-pilot.org/key-terms/code-cell) can be distinguished from a [markdown cell](https://docs.tdm-pilot.org/key-terms/markdown-cell) by the fact that it contains a pair of brackets with a colon to its left, like so ``[ ]:``
###Code
# This is a code cell
###Output
_____no_output_____
###Markdown
 A [markdown cell](https://docs.tdm-pilot.org/key-terms/markdown-cell) provides information, but a [code cell](https://docs.tdm-pilot.org/key-terms/code-cell) can be executed to perform an action. The [code cell](https://docs.tdm-pilot.org/key-terms/code-cell) above does not contain any executable content, only a text comment. We can tell the text in the [code cell](https://docs.tdm-pilot.org/key-terms/code-cell) is a comment because it is prefixed by a ``#``. In Python, any time a line is prefaced by a ``#`` that line is a comment and will not be executed if the code is run. In a [code cell](https://docs.tdm-pilot.org/key-terms/code-cell), comments are also blueish-green in color. Hello World: Your First Code. It is traditional in programming education to begin with a program that prints ``Hello World``. In Python, this is a simple task. We will use the ``print()`` function. This function simply prints out whatever is inside the parentheses (). We will pass the quotation "Hello World" to the print function like so: ```print("Hello World")``` Write this code into the following [code cell](https://docs.tdm-pilot.org/key-terms/code-cell) below. To execute our code, we have a couple of options: Option One: Click the code cell you wish to run and then push the "Run" button above. Option Two: Click the [code cell](https://docs.tdm-pilot.org/key-terms/code-cell) you wish to run and press Ctrl + Enter (Windows) or shift + return (OS X) on your keyboard. Type ```print("Hello World")``` into the box below and then run the cell. Try this! * Does it matter if you use single or double quotes? * Can you also insert a comment into the code cell? * Can you write code and a comment on a single line? Which must come first? After your code runs, you'll receive any output and a number will appear in the pair of brackets `[ ]:` to the left of the code cell to show the order the cell was run. If your code is complicated or takes some time to execute, an asterisk * will be displayed in the pair of brackets `[*]:` while the code executes. Execute the code cell below which: 1. Prints "Waiting 5 seconds..." 2. Waits 5 seconds 3. Prints "Done" As the program is running, watch the pair of brackets and you will see the code is running `[*]:`.
###Code
print('Waiting 5 seconds...')
import time
time.sleep(5)
print('Done')
###Output
_____no_output_____ |
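###Markdown
As a reference for the "Hello World" exercise above, one possible version of that cell is sketched below (this is only an illustration; any equivalent line you typed yourself behaves the same way). It also answers the "Try this!" prompts: single and double quotes are interchangeable for `print()`, and a comment can share a line with code as long as the code comes first.
###Code
# A sketch of the "Hello World" exercise (either quote style works)
print("Hello World")
print('Hello World')  # a comment can follow code on the same line
###Output
_____no_output_____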
L17/5_VAE_celeba_latent-arithmetic.ipynb | ###Markdown
STAT 453: Deep Learning (Spring 2021) Instructor: Sebastian Raschka ([email protected]) Course website: http://pages.stat.wisc.edu/~sraschka/teaching/stat453-ss2021/ GitHub repository: https://github.com/rasbt/stat453-deep-learning-ss21--- VAE Latent Space Arithmetic
###Code
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
###Output
Author: Sebastian Raschka
Python implementation: CPython
Python version : 3.8.8
IPython version : 7.21.0
torch: 1.8.1+cu111
###Markdown
Imports
###Code
import torch
import torchvision
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Import utility functions
###Code
from helper_data import get_dataloaders_celeba
from helper_data import compute_average_faces
from helper_plotting import plot_modified_faces
##########################
### SETTINGS
##########################
# Device
CUDA_DEVICE_NUM = 3
DEVICE = torch.device(f'cuda:{CUDA_DEVICE_NUM}' if torch.cuda.is_available() else 'cpu')
print('Device:', DEVICE)
# Hyperparameters
RANDOM_SEED = 123
BATCH_SIZE = 5000
###Output
Device: cuda:3
###Markdown
Dataset
###Code
##########################
### Dataset
##########################
custom_transforms = torchvision.transforms.Compose([
torchvision.transforms.CenterCrop((128, 128)),
torchvision.transforms.ToTensor(),
#torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
train_loader, valid_loader, test_loader = get_dataloaders_celeba(
batch_size=BATCH_SIZE,
train_transforms=custom_transforms,
test_transforms=custom_transforms,
num_workers=2)
torch.manual_seed(RANDOM_SEED)
for images, labels in train_loader:
print('Image batch dimensions:', images.size())
print('Image label dimensions:', labels.size())
#print(labels[:10])
break
EXAMPLE_IMAGE = images[1]
###Output
Image batch dimensions: torch.Size([5000, 3, 128, 128])
Image label dimensions: torch.Size([5000, 40])
###Markdown
1) Image Manipulation in Original Space Compute Average Faces
###Code
avg_img_with_feat, avg_img_without_feat = compute_average_faces(
feature_idx=31, # smiling
image_dim=(3, 128, 128),
data_loader=train_loader,
device=None,
encoding_fn=None)
###Output
_____no_output_____
###Markdown
Average Smiling Face
###Code
fig, ax = plt.subplots(figsize=(2, 2))
ax.imshow((avg_img_with_feat).permute(1, 2, 0))
plt.show()
###Output
_____no_output_____
###Markdown
Average Non-Smiling Face
###Code
fig, ax = plt.subplots(figsize=(2, 2))
ax.imshow((avg_img_without_feat).permute(1, 2, 0))
plt.show()
###Output
_____no_output_____
###Markdown
Manipulate Example Face Image
###Code
fig, ax = plt.subplots(figsize=(2, 2))
ax.imshow(EXAMPLE_IMAGE.permute(1, 2, 0))
plt.show()
diff = (avg_img_with_feat - avg_img_without_feat)
plot_modified_faces(original=images[1],
diff=diff)
plt.tight_layout()
plt.show()
###Output
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
###Markdown
2) Image Manipulation in Latent Space
###Code
##########################
### MODEL
##########################
class Reshape(nn.Module):
def __init__(self, *args):
super().__init__()
self.shape = args
def forward(self, x):
return x.view(self.shape)
class Trim(nn.Module):
def __init__(self, *args):
super().__init__()
def forward(self, x):
return x[:, :, :128, :128]
class VAE(nn.Module):
def __init__(self):
super().__init__()
self.encoder = nn.Sequential(
nn.Conv2d(3, 32, stride=2, kernel_size=3, bias=False, padding=1),
nn.BatchNorm2d(32),
nn.LeakyReLU(0.1, inplace=True),
nn.Dropout2d(0.25),
#
nn.Conv2d(32, 64, stride=2, kernel_size=3, bias=False, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.1, inplace=True),
nn.Dropout2d(0.25),
#
nn.Conv2d(64, 64, stride=2, kernel_size=3, bias=False, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.1, inplace=True),
nn.Dropout2d(0.25),
#
nn.Conv2d(64, 64, stride=2, kernel_size=3, bias=False, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.1, inplace=True),
nn.Dropout2d(0.25),
#
nn.Flatten(),
)
self.z_mean = torch.nn.Linear(4096, 200)
self.z_log_var = torch.nn.Linear(4096, 200)
self.decoder = nn.Sequential(
torch.nn.Linear(200, 4096),
Reshape(-1, 64, 8, 8),
#
nn.ConvTranspose2d(64, 64, stride=2, kernel_size=3),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.1, inplace=True),
nn.Dropout2d(0.25),
#
nn.ConvTranspose2d(64, 64, stride=2, kernel_size=3, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.1, inplace=True),
nn.Dropout2d(0.25),
#
nn.ConvTranspose2d(64, 32, stride=2, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.LeakyReLU(0.1, inplace=True),
nn.Dropout2d(0.25),
#
nn.ConvTranspose2d(32, 3, stride=2, kernel_size=3, padding=1),
#
Trim(), # 3x129x129 -> 3x128x128
nn.Sigmoid()
)
def reparameterize(self, z_mu, z_log_var):
eps = torch.randn(z_mu.size(0), z_mu.size(1)).to(z_mu.get_device())
z = z_mu + eps * torch.exp(z_log_var/2.)
return z
def encoding_fn(self, x):
x = self.encoder(x)
z_mean, z_log_var = self.z_mean(x), self.z_log_var(x)
encoded = self.reparameterize(z_mean, z_log_var)
return encoded
def forward(self, x):
x = self.encoder(x)
z_mean, z_log_var = self.z_mean(x), self.z_log_var(x)
encoded = self.reparameterize(z_mean, z_log_var)
decoded = self.decoder(encoded)
return encoded, z_mean, z_log_var, decoded
###Output
_____no_output_____
###Markdown
Load model:
###Code
model = VAE()
model.load_state_dict(torch.load('vae_celeba_02.pt', map_location=torch.device('cpu')))
model.to(DEVICE);
###Output
_____no_output_____
###Markdown
Compute Average Faces in Latent Space -- More or Less Smiling
###Code
avg_img_with_feat, avg_img_without_feat = compute_average_faces(
feature_idx=31, # smiling
image_dim=200,
data_loader=train_loader,
device=DEVICE,
encoding_fn=model.encoding_fn)
diff = (avg_img_with_feat - avg_img_without_feat)
example_img = EXAMPLE_IMAGE.unsqueeze(0).to(DEVICE)
with torch.no_grad():
encoded = model.encoding_fn(example_img).squeeze(0).to('cpu')
plot_modified_faces(original=encoded,
decoding_fn=model.decoder,
device=DEVICE,
diff=diff)
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Compute Average Faces in Latent Space -- With or Without Glasses
###Code
avg_img_with_feat, avg_img_without_feat = compute_average_faces(
feature_idx=15, # eyeglasses
image_dim=200,
data_loader=train_loader,
device=DEVICE,
encoding_fn=model.encoding_fn)
diff = (avg_img_with_feat - avg_img_without_feat)
example_img = EXAMPLE_IMAGE.unsqueeze(0).to(DEVICE)
with torch.no_grad():
encoded = model.encoding_fn(example_img).squeeze(0).to('cpu')
plot_modified_faces(original=encoded,
decoding_fn=model.decoder,
device=DEVICE,
diff=diff)
plt.tight_layout()
plt.show()
###Output
_____no_output_____ |
tutorials/old_generation_notebooks/jupyter/2- Hardcore DL.ipynb | ###Markdown
 Hardcore DL by Spark NLP Explain Documents with Deep Learning
###Code
import sys
import time
#Spark ML and SQL
from pyspark.ml import Pipeline, PipelineModel
from pyspark.sql.functions import array_contains
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
#Spark NLP
import sparknlp
from sparknlp.pretrained import PretrainedPipeline
from sparknlp.annotator import *
from sparknlp.common import RegexRule
from sparknlp.base import *
###Output
_____no_output_____
###Markdown
Let's create a Spark Session for our app Let's take a look at what's behind `sparknlp.start()` function:
###Code
spark = sparknlp.start()
print("Spark NLP version: ", sparknlp.version())
print("Apache Spark version: ", spark.version)
pipeline = PretrainedPipeline('explain_document_dl', lang='en')
###Output
_____no_output_____
###Markdown
We simply send the text we want to transform and the pipeline does the work.
###Code
text = 'He would love to visit many beautful cities wth you. He lives in an amazing country like Germany or Pakistan.'
result = pipeline.annotate(text)
###Output
_____no_output_____
###Markdown
We can see the output of each annotator below. This one is doing so many things at once!
###Code
list(result.keys())
result['sentence']
result['lemma']
list(zip(result['checked'], result['pos']))
result
###Output
_____no_output_____
###Markdown
 Hardcore DL by Spark NLP Explain Documents with Deep Learning
###Code
import sys
import time
#Spark ML and SQL
from pyspark.ml import Pipeline, PipelineModel
from pyspark.sql.functions import array_contains
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
#Spark NLP
import sparknlp
from sparknlp.pretrained import PretrainedPipeline
from sparknlp.annotator import *
from sparknlp.common import RegexRule
from sparknlp.base import *
###Output
_____no_output_____
###Markdown
 Let's create a Spark Session for our app, and take a look at what's behind the `sparknlp.start()` function:
###Code
spark = sparknlp.start()
print("Spark NLP version: ", sparknlp.version())
print("Apache Spark version: ", spark.version)
pipeline = PretrainedPipeline('explain_document_dl', lang='en')
###Output
explain_document_dl download started this may take some time.
Approx size to download 168.4 MB
[OK!]
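###Markdown
Under the hood, `sparknlp.start()` essentially builds (or reuses) a `SparkSession` with the Spark NLP package on the classpath. The cell below is a rough, hedged sketch of the equivalent manual setup — the variable name, the package coordinates and the memory setting are assumptions and depend on your Spark NLP and Scala versions, so prefer `sparknlp.start()` in practice.
###Code
# Rough sketch of what sparknlp.start() sets up (coordinates and memory are assumptions)
spark_manual = SparkSession.builder \
    .appName("Spark NLP") \
    .master("local[*]") \
    .config("spark.driver.memory", "16G") \
    .config("spark.jars.packages", "com.johnsnowlabs.nlp:spark-nlp_2.12:3.0.3") \
    .getOrCreate()
###Output
_____no_output_____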
###Markdown
We simply send the text we want to transform and the pipeline does the work.
###Code
text = 'He would love to visit many beautful cities wth you. He lives in an amazing country like Germany or Pakistan.'
result = pipeline.annotate(text)
###Output
_____no_output_____
###Markdown
We can see the output of each annotator below. This one is doing so many things at once!
###Code
list(result.keys())
result['sentence']
result['lemma']
list(zip(result['checked'], result['pos']))
result
###Output
_____no_output_____ |
PerturbationTheoryPk.ipynb | ###Markdown
 Nonlinear bias using generalized tracers and power spectra. This example showcases how to do nonlinear biasing with the generalized tracers and 2D power spectra implemented in CCL. For more on generalized tracers and power spectra, see GeneralizedTracers.ipynb.
###Code
import numpy as np
import pylab as plt
import pyccl as ccl
import pyccl.nl_pt as pt
import pyccl.ccllib as lib
%matplotlib inline
###Output
_____no_output_____
###Markdown
 Note that the perturbation theory functionality lives within `pyccl.nl_pt`. Preliminaries. Let's just begin by setting up a cosmology and some biases.
###Code
# Cosmology
cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.045, h=0.67, A_s=2.1e-9, n_s=0.96)
# Biases for number counts
b_1 = 2.0 # constant values for now
b_2 = 1.0
b_s = 1.0
# Biases for IAs. Will be converted to the input c_IA values below.
a_1 = 1.
a_2 = 0.5
a_d = 0.5
###Output
_____no_output_____
###Markdown
 PT tracers. Power spectra are Fourier-space correlations between two quantities. In CCL the quantities you want to correlate are defined in terms of so-called `PTTracers`. IA normalization: but before that, a few notes about the normalization of the IA biases.
###Code
# Define a redshift range and associated growth factor:
z = np.linspace(0,1,128)
gz = ccl.growth_factor(cosmo, 1./(1+z))
# Let's convert the a_IA values into the correctly normalized c_IA values:
Om_m = cosmo['Omega_m']
rho_crit = lib.cvar.constants.RHO_CRITICAL
rho_m = lib.cvar.constants.RHO_CRITICAL * cosmo['Omega_m']
Om_m_fid = 0.3 # or could use DES convention and just remove Om_m/Om_m_fid
c_1_t = -1*a_1*5e-14*rho_crit*cosmo['Omega_m']/gz
c_d_t = -1*a_d*5e-14*rho_crit*cosmo['Omega_m']/gz
c_2_t = a_2*5*5e-14*rho_crit*cosmo['Omega_m']**2/(Om_m_fid*gz**2) # Blazek2019 convention
c_2_t = a_2*5*5e-14*rho_crit*cosmo['Omega_m']/(gz**2) # DES convention
# Or we just use the built-in function for IA normalization
c_1,c_d,c_2 = pt.translate_IA_norm(cosmo, z, a1=a_1, a1delta=a_d, a2=a_2,
Om_m2_for_c2 = False)
###Output
_____no_output_____
###Markdown
 Tracers. OK, now that we have the biases, let's create three `PTTracer`s: one for number counts (galaxy clustering), one for intrinsic alignments, and one for matter.
###Code
# Number counts
ptt_g = pt.PTNumberCountsTracer(b1=b_1, b2=b_2, bs=b_s)
# Intrinsic alignments
ptt_i = pt.PTIntrinsicAlignmentTracer(c1=(z,c_1), c2=(z,c_2), cdelta=(z,c_d))
ptt_i_nla = pt.PTIntrinsicAlignmentTracer(c1=(z,c_1)) # to compare using the standard WLTracer
# Matter
ptt_m = pt.PTMatterTracer()
# Note that we've assumed constant biases for simplicity, but you can also make them z-dependent:
bz = b_1 / gz
ptt_g_b = pt.PTNumberCountsTracer(b1=(z, bz))
###Output
_____no_output_____
###Markdown
 PT calculator. Another object, `PTCalculator`, takes care of initializing FastPT (essentially precomputing some of the stuff it needs to get you PT power spectra). You'll need one of these before you can compute P(k)s.
###Code
# The `with_NC` and `with_IA` flags will tell FastPT to initialize the right things.
# `log10k_min/max and nk_per_decade will define the sampling in k you should use.
ptc = pt.PTCalculator(with_NC=True, with_IA=True,
log10k_min=-4, log10k_max=2, nk_per_decade=20)
###Output
_____no_output_____
###Markdown
 PT power spectra. Let's compute some power spectra! We do so by calling `get_pt_pk2d` with whatever tracers you want to cross-correlate. This will return a `Pk2D` object that you can then evaluate at whatever scale and redshift you want.
###Code
# Galaxies x galaxies.
# If `tracer2` is missing, an auto-correlation for the first tracer is assumed.
pk_gg = pt.get_pt_pk2d(cosmo, ptt_g, ptc=ptc)
# Galaxies x matter
pk_gm = pt.get_pt_pk2d(cosmo, ptt_g, tracer2=ptt_m, ptc=ptc)
# Galaxies x IAs
pk_gi = pt.get_pt_pk2d(cosmo, ptt_g, tracer2=ptt_i, ptc=ptc)
# IAs x IAs
pk_ii, pk_ii_bb = pt.get_pt_pk2d(cosmo, ptt_i, tracer2=ptt_i, ptc=ptc, return_ia_ee_and_bb=True)
pk_ii_nla = pt.get_pt_pk2d(cosmo, ptt_i_nla, tracer2=ptt_i_nla, ptc=ptc,)
# IAs x matter
pk_im = pt.get_pt_pk2d(cosmo, ptt_i, tracer2=ptt_m, ptc=ptc)
# Matter x matter
pk_mm = pt.get_pt_pk2d(cosmo, ptt_m, tracer2=ptt_m, ptc=ptc)
###Output
_____no_output_____
###Markdown
**Note:** FastPT is not yet able to compute IAs x galaxies in a consistent way. What CCL does is to use the full non-linear model for IAs, but use a linear bias for galaxies. OK, let's now plot a few of these!
###Code
# Let's plot everything at z=0
ks = np.logspace(-3,2,512)
ps = {}
ps['gg'] = pk_gg.eval(ks, 1., cosmo)
ps['gi'] = pk_gi.eval(ks, 1., cosmo)
ps['gm'] = pk_gm.eval(ks, 1., cosmo)
ps['ii'] = pk_ii.eval(ks, 1., cosmo)
ps['im'] = pk_im.eval(ks, 1., cosmo)
ps['mm'] = pk_mm.eval(ks, 1., cosmo)
plt.figure()
for pn, p in ps.items():
plt.plot(ks, abs(p), label=pn)
plt.loglog()
plt.legend(loc='upper right', ncol=2,
fontsize=13, labelspacing=0.1)
plt.ylim([1E-2, 5E5])
plt.xlabel(r'$k\,\,[{\rm Mpc}^{-1}]$', fontsize=15)
plt.ylabel(r'$P(k)\,\,[{\rm Mpc}^{3}]$', fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
We can also compute the B-mode power spectrum for intrinsic alignments:
###Code
pk_ii_bb = pt.get_pt_pk2d(cosmo, ptt_i, ptc=ptc, return_ia_bb=True)
plt.figure()
plt.plot(ks, pk_ii.eval(ks, 1., cosmo), label='IA, $E$-modes')
plt.plot(ks, pk_ii_bb.eval(ks, 1., cosmo), label='IA, $B$-modes')
plt.loglog()
plt.legend(loc='lower left', fontsize=13)
plt.ylim([1E-5, 5E0])
plt.xlabel(r'$k\,\,[{\rm Mpc}^{-1}]$', fontsize=15)
plt.ylabel(r'$P(k)\,\,[{\rm Mpc}^{3}]$', fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
 Angular power spectra. We can now use these P(k)s to compute angular power spectra, passing them to `ccl.angular_cl`. Let's illustrate this specifically for the usual 3x2pt. We will define three standard tracers (not `PTTracer`s, but the ones used to compute angular power spectra): one for number counts, one for weak lensing shear, and one for intrinsic alignments, which is also a WeakLensingTracer. The first one will be associated with `PTNumberCountsTracer`, the second with `PTMatterTracer`, and the third with `PTIntrinsicAlignmentTracer`.
###Code
z = np.linspace(0, 1.5, 1024)
nz = np.exp(-((z-0.7)/0.1)**2)
# Number counts
# We give this one a bias of 1, since we've taken care of galaxy bias at the P(k) level.
t_g = ccl.NumberCountsTracer(cosmo, False, dndz=(z, nz), bias=(z, np.ones_like(z)))
# Lensing
t_l = ccl.WeakLensingTracer(cosmo, dndz=(z, nz))
# Intrinsic alignments
# Note the required settings to isolate the IA term and set the IA bias to 1,
# since we have added the IA bias terms at the P(k) level.
t_i = ccl.WeakLensingTracer(cosmo, dndz=(z, nz), has_shear=False, ia_bias=(z, np.ones_like(z)), use_A_ia=False)
t_i_nla = ccl.WeakLensingTracer(cosmo, dndz=(z, nz), has_shear=False, ia_bias=(z, np.ones_like(z)), use_A_ia=True)
###Output
_____no_output_____
###Markdown
Now compute power spectra. Note how we pass the P(k)s we just calculated as `p_of_k_a`.
###Code
ell = np.unique(np.geomspace(2,1000,100).astype(int)).astype(float)
cls={}
cls['gg'] = ccl.angular_cl(cosmo, t_g, t_g, ell, p_of_k_a=pk_gg)
cls['gG'] = ccl.angular_cl(cosmo, t_g, t_l, ell, p_of_k_a=pk_gm)
cls['GG'] = ccl.angular_cl(cosmo, t_l, t_l, ell, p_of_k_a=pk_mm)
cls['GI'] = ccl.angular_cl(cosmo, t_l, t_i, ell, p_of_k_a=pk_im)
cls['GI,NLA'] = ccl.angular_cl(cosmo, t_l, t_i_nla, ell)
cls['II'] = ccl.angular_cl(cosmo, t_i, t_i, ell, p_of_k_a=pk_ii)
cls['II,NLA'] = ccl.angular_cl(cosmo, t_i_nla, t_i_nla, ell)
###Output
_____no_output_____
###Markdown
Plot away!
###Code
plt.figure()
for cn, c in cls.items():
if c[0]>0:
plt.plot(ell, c, label=cn)
else:
plt.plot(ell, abs(c), '--', label=cn)
plt.loglog()
plt.legend(loc='lower left', ncol=1, fontsize=13)
plt.xlabel(r'$\ell$', fontsize=15)
plt.ylabel(r'$C_\ell$', fontsize=15)
plt.show()
###Output
_____no_output_____ |
demo/neighbors/distance_to_other_labels.ipynb | ###Markdown
 Our starting point is a label image and a second label image that contains a subset of the labels selected from the first.
###Code
import pyclesperanto_prototype as cle
import numpy as np

label_image = cle.artificial_tissue_2d()
cle.imshow(label_image, labels=True)
random_vector = np.random.random((1, int(label_image.max() + 1)))
sparse_labels = cle.exclude_labels_with_values_out_of_range(random_vector, label_image, minimum_value_range=0, maximum_value_range=0.3)
cle.imshow(sparse_labels, labels=True)
###Output
_____no_output_____
###Markdown
 We now measure, for every label in `label_image`, the distance to the nearest label in the `sparse_labels` image (`n=1` nearest neighbour). For measuring the distance, we use the distance between label centroids.
###Code
distance_map = cle.average_distance_to_n_nearest_other_labels_map(label_image, sparse_labels, n=1)
cle.imshow(distance_map)
###Output
_____no_output_____ |
algo_sac_a_dos.ipynb | ###Markdown
 The knapsack problem. To understand the problem, situation 1: a burglar can only carry 40 kg on his back in his bag. He can choose to take some of the following items:| | Weight (mass in kg) | Value (resale price) ||---------|:-----:|:------:|| objet A | 15 | 500 || objet B | 24 | 400 || objet C | 9 | 350 || objet D | 25 | 750 || objet E | 5 | 400 || objet F | 12 | 800 || objet G | 2 | 1400 || objet H | 18 | 550 |He wonders which items to pick so as to obtain the largest total value without exceeding 40 kg. To understand the problem, situation 2: we are going on holiday with an 8 GB USB stick. We want to copy video files onto it; their size is not proportional to their duration because the files have different formats and resolutions.| | Weight (file size) | Value (duration in minutes) ||---------|:-------:|:-----:|| video A | 4.5 GB | 114 || video B | 630 MB | 85 || video C | 3.35 GB | 40 || video D | 85 MB | 4 || video E | 2.15 GB | 18 || video F | 2.71 GB | 80 || video G | 320 MB | 5 || video H | 3.7 GB | 86 || video I | 2.4 GB | 64 || video J | 6.4 GB | 12 |We wonder which videos to copy onto the stick so as to obtain the largest total duration without exceeding 8 GB. **Question:** For situation 1: - What selection are we trying to make? - What is the constraint? - What are we trying to optimize? **Question:** For situation 2: - What selection are we trying to make? - What is the constraint? - What are we trying to optimize? For those who might think it is pointless to write an algorithm for such small data sets, please have a look at the bottom of the notebook: this algorithm may have to be run on very large data sets. Greedy algorithm **Question:** For situation 1: - Why is the burglar probably well advised to take objet G? - Why is the burglar probably not well advised to take objet B? **Question:** For situation 2: - Why are we probably well advised to take video B? - Why are we probably not well advised to take video J? Putting the greedy algorithm into practice. The two previous questions suggest a sensible selection rule for a greedy algorithm: pick first the items with the largest value per unit of weight. Indeed: - For situation 1, objet G is worth 700 euros per kg (1400/2 = 700) whereas objet B is worth about 16.7 euros per kg (400/24 = 16.666...) - For situation 2, video B provides about 134.9 minutes per GB (85/0.630 = 134.9...) whereas video J provides 1.875 minutes per GB (12/6.4 = 1.875) Selection rule: at each step, take the item with the largest value/weight ratio among the items whose weight does not make the total exceed the allowed weight. Representing the data. The data will be represented as tables (lists of dictionaries):
###Code
table_1 = [
{'nom' : 'objet A', 'poids' : 15, 'valeur' : 500},
{'nom' : 'objet B', 'poids' : 24, 'valeur' : 400},
{'nom' : 'objet C', 'poids' : 9, 'valeur' : 350},
{'nom' : 'objet D', 'poids' : 25, 'valeur' : 750},
{'nom' : 'objet E', 'poids' : 5, 'valeur' : 400},
{'nom' : 'objet F', 'poids' : 12, 'valeur' : 800},
{'nom' : 'objet G', 'poids' : 2, 'valeur' : 1400},
{'nom' : 'objet H', 'poids' : 18, 'valeur' : 550}
]
table_2 = [
{'nom' : 'video A', 'poids' : 4.5, 'valeur' : 114},
{'nom' : 'video B', 'poids' : 0.63, 'valeur' : 85},
{'nom' : 'video C', 'poids' : 3.35, 'valeur' : 40},
{'nom' : 'video D', 'poids' : 0.085, 'valeur' : 4},
{'nom' : 'video E', 'poids' : 2.15, 'valeur' : 18},
{'nom' : 'video F', 'poids' : 2.71, 'valeur' : 80},
{'nom' : 'video G', 'poids' : 0.32, 'valeur' : 5},
{'nom' : 'video H', 'poids' : 3.7, 'valeur' : 86},
{'nom' : 'video I', 'poids' : 2.4, 'valeur' : 64},
{'nom' : 'video J', 'poids' : 6.4, 'valeur' : 12}
]
###Output
_____no_output_____
###Markdown
We can easily access a field of either table:
###Code
table_1[0]['nom']
table_1[0]['valeur']
table_2[3]['poids']
###Output
_____no_output_____
###Markdown
 Implementing the greedy algorithm. We will proceed as follows: 1. Sort the table in decreasing order of the value/weight ratio 2. Go through the sorted table from top to bottom: - If the item's weight does not make the total exceed the allowed weight: take it - Otherwise: do not take it Sorting the table. **Question:** Write a function `rapport_valeur_poids` that takes as parameter a dictionary `dico_objet` (similar to the dictionaries in the two tables `table_1` and `table_2` above) and returns the ratio of the item's value divided by its weight. A few assertions that your function must satisfy are given below.
###Code
def rapport_valeur_poids(dico_objet):
    pass  # to be completed (write your code here)
assert( rapport_valeur_poids({'nom' : 'objet D', 'poids' : 25, 'valeur' : 750}) == 30 )
assert( rapport_valeur_poids({'nom' : 'objet G', 'poids' : 2, 'valeur' : 1400}) == 700 )
assert( rapport_valeur_poids({'nom' : 'video J', 'poids' : 6.4, 'valeur' : 12}) == 1.875 )
###Output
_____no_output_____
###Markdown
 **Question:** Write a function `donner_poids` that takes as parameter a dictionary `dico_objet` (similar to the dictionaries in the two tables `table_1` and `table_2` above) and returns the item's weight. A few assertions that your function must satisfy are given below.
###Code
def donner_poids(dico_objet):
    pass  # to be completed (write your code here)
assert( donner_poids({'nom' : 'objet D', 'poids' : 25, 'valeur' : 750}) == 25 )
assert( donner_poids({'nom' : 'objet G', 'poids' : 2, 'valeur' : 1400}) == 2 )
assert( donner_poids({'nom' : 'video J', 'poids' : 6.4, 'valeur' : 12}) == 6.4 )
###Output
_____no_output_____
###Markdown
 **Question:** Write a function `creer_table_triee` that takes as parameter a table of items `table_objets` (similar to the two tables `table_1` and `table_2` above) and returns a copy of this table sorted in two steps: - sorted by decreasing item weight - then sorted by decreasing value/weight ratio For this we will use the two sort-key functions `donner_poids` and `rapport_valeur_poids`. **If needed, go back to your lessons and exercises on processing data in tables.**
###Code
def creer_table_triee(table_objets):
    pass  # to be completed (write your code here)
assert( creer_table_triee(table_1) ==
[{'nom': 'objet G', 'poids': 2, 'valeur': 1400},
{'nom': 'objet E', 'poids': 5, 'valeur': 400},
{'nom': 'objet F', 'poids': 12, 'valeur': 800},
{'nom': 'objet C', 'poids': 9, 'valeur': 350},
{'nom': 'objet A', 'poids': 15, 'valeur': 500},
{'nom': 'objet H', 'poids': 18, 'valeur': 550},
{'nom': 'objet D', 'poids': 25, 'valeur': 750},
{'nom': 'objet B', 'poids': 24, 'valeur': 400}]
)
assert( creer_table_triee(table_2) ==
[{'nom': 'video B', 'poids': 0.63, 'valeur': 85},
{'nom': 'video D', 'poids': 0.085, 'valeur': 4},
{'nom': 'video F', 'poids': 2.71, 'valeur': 80},
{'nom': 'video I', 'poids': 2.4, 'valeur': 64},
{'nom': 'video A', 'poids': 4.5, 'valeur': 114},
{'nom': 'video H', 'poids': 3.7, 'valeur': 86},
{'nom': 'video G', 'poids': 0.32, 'valeur': 5},
{'nom': 'video C', 'poids': 3.35, 'valeur': 40},
{'nom': 'video E', 'poids': 2.15, 'valeur': 18},
{'nom': 'video J', 'poids': 6.4, 'valeur': 12}]
)
###Output
_____no_output_____
###Markdown
 Going through the sorted table and selecting the items **Question:** Complete the function `selectionner`, which takes as parameters a table `table_objets` (similar to the two tables `table_1` and `table_2` above) and a maximum weight `poids_max`, and returns in a table `table_selection` the selection of items obtained with the greedy algorithm. Remember that to append an element `elt` to a list `L` you can use the instruction `L.append(elt)`.
###Code
def selectionner( table_objets, poids_max):
table_triee = creer_table_triee(table_objets)
poids_total = 0
table_selection = []
    # to be completed (write your code here)
return table_selection
assert( selectionner( table_1, 40) ==
[{'nom': 'objet G', 'poids': 2, 'valeur': 1400},
{'nom': 'objet E', 'poids': 5, 'valeur': 400},
{'nom': 'objet F', 'poids': 12, 'valeur': 800},
{'nom': 'objet C', 'poids': 9, 'valeur': 350}]
)
assert( selectionner( table_2, 8) ==
[{'nom': 'video B', 'poids': 0.63, 'valeur': 85},
{'nom': 'video D', 'poids': 0.085, 'valeur': 4},
{'nom': 'video F', 'poids': 2.71, 'valeur': 80},
{'nom': 'video I', 'poids': 2.4, 'valeur': 64},
{'nom': 'video G', 'poids': 0.32, 'valeur': 5}]
)
###Output
_____no_output_____
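###Markdown
For reference, one possible solution sketch for the four functions is given below. Try the exercises on your own first; the `_ref` suffix is only there so that this cell does not overwrite your own answers, and the sketch relies on the tables `table_1` and `table_2` defined above.
###Code
# Possible solution sketch (one approach among others)
def rapport_valeur_poids_ref(dico_objet):
    # value per unit of weight
    return dico_objet['valeur'] / dico_objet['poids']

def donner_poids_ref(dico_objet):
    return dico_objet['poids']

def creer_table_triee_ref(table_objets):
    # sort by decreasing weight, then (stable sort) by decreasing value/weight ratio
    table = sorted(table_objets, key=donner_poids_ref, reverse=True)
    return sorted(table, key=rapport_valeur_poids_ref, reverse=True)

def selectionner_ref(table_objets, poids_max):
    poids_total = 0
    table_selection = []
    for objet in creer_table_triee_ref(table_objets):
        if poids_total + objet['poids'] <= poids_max:
            table_selection.append(objet)
            poids_total += objet['poids']
    return table_selection

selectionner_ref(table_1, 40)
###Output
_____no_output_____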
###Markdown
 **Question:** The two solutions produced above are not the best possible ones. Try to find better solutions than the ones returned by the algorithm. **Question:** In the table below, the value (`valeur`) represents the average score of certain players (in a game or a sport) and the weight (`poids`) the fee they demand to join a team for a tournament. You have a budget of 500: using the function you have just written, put together the best team you can.
###Code
table_3 = [
{"nom":"atuffell0","poids":78,"valeur":186},
{"nom":"alacroux1","poids":35,"valeur":71},
{"nom":"lesposita2","poids":31,"valeur":90},
{"nom":"ascandred3","poids":53,"valeur":182},
{"nom":"cheathcoat4","poids":78,"valeur":173},
{"nom":"mpechan5","poids":69,"valeur":94},
{"nom":"kmurison6","poids":33,"valeur":97},
{"nom":"cschwandermann7","poids":95,"valeur":179},
{"nom":"khanrott8","poids":53,"valeur":56},
{"nom":"wkiln9","poids":93,"valeur":162},
{"nom":"tpaolilloa","poids":63,"valeur":67},
{"nom":"aboudab","poids":71,"valeur":131},
{"nom":"dgribbinsc","poids":44,"valeur":179},
{"nom":"vdavittd","poids":30,"valeur":131},
{"nom":"ssalmonde","poids":66,"valeur":51},
{"nom":"svawtonf","poids":32,"valeur":45},
{"nom":"coculleng","poids":58,"valeur":47},
{"nom":"lstandenh","poids":46,"valeur":103},
{"nom":"cshoardi","poids":30,"valeur":68},
{"nom":"mowlnerj","poids":75,"valeur":51},
{"nom":"mondrichk","poids":75,"valeur":128},
{"nom":"mpatterfieldl","poids":97,"valeur":143},
{"nom":"sduttm","poids":42,"valeur":90},
{"nom":"ryuryshevn","poids":78,"valeur":50},
{"nom":"cwillettso","poids":36,"valeur":176},
{"nom":"cmuldowniep","poids":92,"valeur":100},
{"nom":"hgabbitasq","poids":82,"valeur":188},
{"nom":"vclaughtonr","poids":72,"valeur":60},
{"nom":"bnoldas","poids":36,"valeur":173},
{"nom":"hurquhartt","poids":61,"valeur":160},
{"nom":"ghalkyardu","poids":55,"valeur":199},
{"nom":"gallredv","poids":56,"valeur":91},
{"nom":"bfritschelw","poids":93,"valeur":178},
{"nom":"nrobothamx","poids":44,"valeur":112},
{"nom":"tmcginny","poids":52,"valeur":152},
{"nom":"avallintinez","poids":62,"valeur":175},
{"nom":"santcliffe10","poids":42,"valeur":174},
{"nom":"radrien11","poids":67,"valeur":119},
{"nom":"lmordie12","poids":46,"valeur":194},
{"nom":"cprosch13","poids":73,"valeur":74},
{"nom":"wscain14","poids":94,"valeur":94},
{"nom":"gripping15","poids":91,"valeur":103},
{"nom":"ybatterton16","poids":93,"valeur":161},
{"nom":"ckernan17","poids":75,"valeur":106},
{"nom":"mhousecroft18","poids":67,"valeur":84},
{"nom":"gprudence19","poids":68,"valeur":89},
{"nom":"flamberto1a","poids":100,"valeur":65},
{"nom":"dgammon1b","poids":40,"valeur":166},
{"nom":"jkidde1c","poids":69,"valeur":200},
{"nom":"amewrcik1d","poids":90,"valeur":54},
{"nom":"fpyke1e","poids":97,"valeur":114},
{"nom":"mfellows1f","poids":80,"valeur":188},
{"nom":"cknoton1g","poids":36,"valeur":113},
{"nom":"nharrema1h","poids":42,"valeur":192},
{"nom":"vtomasik1i","poids":40,"valeur":64},
{"nom":"scoping1j","poids":46,"valeur":185},
{"nom":"mdyball1k","poids":34,"valeur":50},
{"nom":"dvelde1l","poids":80,"valeur":112},
{"nom":"kconkay1m","poids":45,"valeur":193},
{"nom":"dglanister1n","poids":86,"valeur":195},
{"nom":"rhobell1o","poids":88,"valeur":167},
{"nom":"lseakes1p","poids":93,"valeur":130},
{"nom":"twootton1q","poids":62,"valeur":132},
{"nom":"agooderridge1r","poids":49,"valeur":121},
{"nom":"tkilcullen1s","poids":80,"valeur":180},
{"nom":"ssteinor1t","poids":38,"valeur":81},
{"nom":"theller1u","poids":47,"valeur":102},
{"nom":"jpetrozzi1v","poids":87,"valeur":141},
{"nom":"iivanitsa1w","poids":41,"valeur":78},
{"nom":"lkohn1x","poids":43,"valeur":114},
{"nom":"afinlater1y","poids":81,"valeur":159},
{"nom":"mbrogioni1z","poids":81,"valeur":52},
{"nom":"fcrinson20","poids":45,"valeur":73},
{"nom":"mgreedyer21","poids":49,"valeur":74},
{"nom":"ccheyenne22","poids":33,"valeur":200},
{"nom":"hwinterbourne23","poids":56,"valeur":90},
{"nom":"oblampied24","poids":34,"valeur":90},
{"nom":"cbydaway25","poids":34,"valeur":158},
{"nom":"kslocumb26","poids":69,"valeur":107},
{"nom":"jherion27","poids":98,"valeur":49},
{"nom":"vhallagan28","poids":36,"valeur":198},
{"nom":"jcanada29","poids":31,"valeur":187},
{"nom":"zleavey2a","poids":94,"valeur":146},
{"nom":"klownes2b","poids":36,"valeur":144},
{"nom":"lmuzzillo2c","poids":46,"valeur":140},
{"nom":"uarnal2d","poids":60,"valeur":190},
{"nom":"rclem2e","poids":93,"valeur":126},
{"nom":"fstuehmeyer2f","poids":30,"valeur":63},
{"nom":"dchinery2g","poids":78,"valeur":164},
{"nom":"zeilers2h","poids":46,"valeur":51},
{"nom":"jcordingly2i","poids":38,"valeur":192},
{"nom":"fstollard2j","poids":93,"valeur":134},
{"nom":"adannell2k","poids":62,"valeur":47},
{"nom":"cbryenton2l","poids":38,"valeur":81},
{"nom":"mcardinal2m","poids":72,"valeur":79},
{"nom":"escattergood2n","poids":38,"valeur":67},
{"nom":"arecord2o","poids":57,"valeur":170},
{"nom":"cbertl2p","poids":47,"valeur":183},
{"nom":"ssprott2q","poids":40,"valeur":67},
{"nom":"fegell2r","poids":57,"valeur":126},
{"nom":"eferrie2s","poids":33,"valeur":153},
{"nom":"mjizhaki2t","poids":31,"valeur":149},
{"nom":"lolsson2u","poids":64,"valeur":76},
{"nom":"alorentzen2v","poids":33,"valeur":157},
{"nom":"mdominik2w","poids":44,"valeur":110},
{"nom":"rmckenny2x","poids":74,"valeur":132},
{"nom":"bdavydenko2y","poids":92,"valeur":115},
{"nom":"mkienl2z","poids":38,"valeur":102},
{"nom":"mgroger30","poids":68,"valeur":186},
{"nom":"haggett31","poids":40,"valeur":186},
{"nom":"phaggata32","poids":44,"valeur":180},
{"nom":"ptrobridge33","poids":77,"valeur":194},
{"nom":"dbold34","poids":30,"valeur":144},
{"nom":"mgagg35","poids":84,"valeur":131},
{"nom":"hellerbeck36","poids":34,"valeur":54},
{"nom":"cthredder37","poids":65,"valeur":70},
{"nom":"kfilisov38","poids":99,"valeur":174},
{"nom":"ktamburo39","poids":70,"valeur":99},
{"nom":"ssawer3a","poids":96,"valeur":140},
{"nom":"dtribell3b","poids":71,"valeur":153},
{"nom":"ahartill3c","poids":95,"valeur":169},
{"nom":"aboanas3d","poids":30,"valeur":148},
{"nom":"ttreagust3e","poids":86,"valeur":191},
{"nom":"abasey3f","poids":90,"valeur":96},
{"nom":"ngerraty3g","poids":42,"valeur":174},
{"nom":"amunford3h","poids":31,"valeur":93},
{"nom":"fmacalaster3i","poids":34,"valeur":139},
{"nom":"ahabbin3j","poids":46,"valeur":64},
{"nom":"hcurme3k","poids":64,"valeur":154},
{"nom":"echeshire3l","poids":31,"valeur":79},
{"nom":"aloxton3m","poids":81,"valeur":69},
{"nom":"pnewe3n","poids":33,"valeur":143},
{"nom":"cbonniface3o","poids":68,"valeur":94},
{"nom":"ebaynard3p","poids":86,"valeur":126},
{"nom":"jketts3q","poids":59,"valeur":155},
{"nom":"tpattillo3r","poids":85,"valeur":46},
{"nom":"llindro3s","poids":56,"valeur":129},
{"nom":"bholton3t","poids":96,"valeur":158},
{"nom":"rcahen3u","poids":81,"valeur":88},
{"nom":"kchave3v","poids":59,"valeur":104},
{"nom":"cwymer3w","poids":59,"valeur":141},
{"nom":"jemloch3x","poids":65,"valeur":156},
{"nom":"mferrero3y","poids":52,"valeur":184},
{"nom":"tcallan3z","poids":45,"valeur":93},
{"nom":"ccodlin40","poids":32,"valeur":45},
{"nom":"gpaxeford41","poids":75,"valeur":182},
{"nom":"apawlicki42","poids":32,"valeur":96},
{"nom":"vhardisty43","poids":51,"valeur":96},
{"nom":"jlobb44","poids":91,"valeur":140},
{"nom":"spaolacci45","poids":98,"valeur":121},
{"nom":"obullivent46","poids":100,"valeur":138},
{"nom":"tpatek47","poids":47,"valeur":162},
{"nom":"vhully48","poids":56,"valeur":108},
{"nom":"nweekland49","poids":84,"valeur":191},
{"nom":"smcclelland4a","poids":66,"valeur":185},
{"nom":"lheadey4b","poids":38,"valeur":153},
{"nom":"ebrumby4c","poids":71,"valeur":118},
{"nom":"ebelmont4d","poids":85,"valeur":117},
{"nom":"nmcdyer4e","poids":80,"valeur":189},
{"nom":"tdelcastel4f","poids":46,"valeur":194},
{"nom":"ganlay4g","poids":90,"valeur":191},
{"nom":"jspraberry4h","poids":63,"valeur":197},
{"nom":"cemps4i","poids":100,"valeur":52},
{"nom":"jsalvin4j","poids":67,"valeur":139},
{"nom":"mallden4k","poids":100,"valeur":132},
{"nom":"wwillcocks4l","poids":93,"valeur":159},
{"nom":"caspey4m","poids":86,"valeur":47},
{"nom":"sluto4n","poids":42,"valeur":150},
{"nom":"mwicher4o","poids":67,"valeur":94},
{"nom":"hbrosenius4p","poids":98,"valeur":82},
{"nom":"twhoston4q","poids":100,"valeur":150},
{"nom":"ptaks4r","poids":69,"valeur":192},
{"nom":"mjanew4s","poids":54,"valeur":67},
{"nom":"vbeggan4t","poids":94,"valeur":146},
{"nom":"bnewns4u","poids":72,"valeur":161},
{"nom":"aandresen4v","poids":79,"valeur":57},
{"nom":"epearn4w","poids":84,"valeur":121},
{"nom":"gpointing4x","poids":33,"valeur":118},
{"nom":"kgradon4y","poids":98,"valeur":65},
{"nom":"dstrelitz4z","poids":93,"valeur":164},
{"nom":"vtreacher50","poids":69,"valeur":193},
{"nom":"vbartkowiak51","poids":92,"valeur":139},
{"nom":"clagden52","poids":59,"valeur":138},
{"nom":"htrace53","poids":44,"valeur":53},
{"nom":"ocopsey54","poids":49,"valeur":57},
{"nom":"lspary55","poids":61,"valeur":142},
{"nom":"efantonetti56","poids":82,"valeur":103},
{"nom":"crouchy57","poids":55,"valeur":121},
{"nom":"ibentje58","poids":32,"valeur":175},
{"nom":"ccharity59","poids":86,"valeur":102},
{"nom":"ckhomich5a","poids":92,"valeur":160},
{"nom":"lbangs5b","poids":93,"valeur":98},
{"nom":"tscotsbrook5c","poids":74,"valeur":91},
{"nom":"mknutton5d","poids":62,"valeur":153},
{"nom":"etimperley5e","poids":39,"valeur":49},
{"nom":"cfoord5f","poids":52,"valeur":181},
{"nom":"hkorda5g","poids":96,"valeur":175},
{"nom":"jgoor5h","poids":74,"valeur":124},
{"nom":"cmaffey5i","poids":90,"valeur":157},
{"nom":"sfuzzard5j","poids":69,"valeur":49},
{"nom":"mbrickdale5k","poids":72,"valeur":85},
{"nom":"bphipp5l","poids":44,"valeur":82},
{"nom":"kblaxeland5m","poids":64,"valeur":50},
{"nom":"cginnane5n","poids":78,"valeur":136},
{"nom":"jteesdale5o","poids":96,"valeur":99},
{"nom":"tdyshart5p","poids":86,"valeur":198},
{"nom":"wlauritzen5q","poids":66,"valeur":115},
{"nom":"dnorthall5r","poids":67,"valeur":108},
{"nom":"kturfes5s","poids":59,"valeur":114},
{"nom":"kdingate5t","poids":70,"valeur":116},
{"nom":"coliff5u","poids":48,"valeur":169},
{"nom":"lgarment5v","poids":75,"valeur":177},
{"nom":"mshevlin5w","poids":45,"valeur":175},
{"nom":"pwatkins5x","poids":74,"valeur":113},
{"nom":"dbraithwait5y","poids":57,"valeur":100},
{"nom":"gduckit5z","poids":67,"valeur":87},
{"nom":"hwillcot60","poids":72,"valeur":139},
{"nom":"aofergus61","poids":76,"valeur":145},
{"nom":"tkeasey62","poids":61,"valeur":172},
{"nom":"ebrookesbie63","poids":39,"valeur":191},
{"nom":"atilby64","poids":36,"valeur":82},
{"nom":"barne65","poids":84,"valeur":126},
{"nom":"akenchington66","poids":34,"valeur":148},
{"nom":"jkilcullen67","poids":84,"valeur":72},
{"nom":"dgauntlett68","poids":53,"valeur":161},
{"nom":"tdaubeny69","poids":46,"valeur":69},
{"nom":"ejaniszewski6a","poids":48,"valeur":171},
{"nom":"sdunthorn6b","poids":48,"valeur":161},
{"nom":"czmitrichenko6c","poids":62,"valeur":110},
{"nom":"anutbrown6d","poids":78,"valeur":82},
{"nom":"sspinige6e","poids":89,"valeur":157},
{"nom":"soutibridge6f","poids":69,"valeur":198},
{"nom":"lswindlehurst6g","poids":90,"valeur":49},
{"nom":"rblague6h","poids":82,"valeur":71},
{"nom":"mlefevre6i","poids":32,"valeur":75},
{"nom":"cbeamand6j","poids":41,"valeur":176},
{"nom":"vcole6k","poids":38,"valeur":76},
{"nom":"sduckworth6l","poids":57,"valeur":149},
{"nom":"wmuehler6m","poids":40,"valeur":91},
{"nom":"rkeeping6n","poids":74,"valeur":88},
{"nom":"dtapping6o","poids":44,"valeur":110},
{"nom":"mtinniswood6p","poids":59,"valeur":64},
{"nom":"tmacgow6q","poids":91,"valeur":168},
{"nom":"dbodd6r","poids":81,"valeur":70},
{"nom":"kloveguard6s","poids":31,"valeur":183},
{"nom":"rhuffey6t","poids":63,"valeur":103},
{"nom":"hmacallan6u","poids":95,"valeur":88},
{"nom":"ktenbroek6v","poids":69,"valeur":130},
{"nom":"jcharette6w","poids":72,"valeur":171},
{"nom":"zmcimmie6x","poids":55,"valeur":98},
{"nom":"wbarents6y","poids":46,"valeur":114},
{"nom":"mwilder6z","poids":90,"valeur":156},
{"nom":"afilip70","poids":93,"valeur":172},
{"nom":"bsouthcott71","poids":55,"valeur":127},
{"nom":"pstedmond72","poids":88,"valeur":181},
{"nom":"gleedal73","poids":45,"valeur":162},
{"nom":"jmuehle74","poids":60,"valeur":57},
{"nom":"fpenhaligon75","poids":68,"valeur":130},
{"nom":"kconaghy76","poids":74,"valeur":118},
{"nom":"bproschke77","poids":83,"valeur":85},
{"nom":"blope78","poids":97,"valeur":52},
{"nom":"dbrunstan79","poids":55,"valeur":77},
{"nom":"htolley7a","poids":45,"valeur":73},
{"nom":"speto7b","poids":43,"valeur":111},
{"nom":"oinnocenti7c","poids":37,"valeur":200},
{"nom":"blaffranconi7d","poids":66,"valeur":127},
{"nom":"ahaslen7e","poids":58,"valeur":176},
{"nom":"hmazey7f","poids":50,"valeur":189},
{"nom":"rbewlie7g","poids":93,"valeur":114},
{"nom":"bpiccop7h","poids":41,"valeur":146},
{"nom":"egisborne7i","poids":48,"valeur":76},
{"nom":"dwye7j","poids":34,"valeur":159},
{"nom":"kfarnworth7k","poids":31,"valeur":166},
{"nom":"bbale7l","poids":50,"valeur":146},
{"nom":"ubecom7m","poids":59,"valeur":53},
{"nom":"lreedy7n","poids":97,"valeur":137},
{"nom":"tvalenta7o","poids":79,"valeur":141},
{"nom":"gfulford7p","poids":48,"valeur":104},
{"nom":"jcheves7q","poids":37,"valeur":145},
{"nom":"ajakeman7r","poids":41,"valeur":58},
{"nom":"olaffling7s","poids":60,"valeur":62},
{"nom":"sedwicker7t","poids":100,"valeur":155},
{"nom":"wmccaffrey7u","poids":56,"valeur":98},
{"nom":"mvogel7v","poids":31,"valeur":90},
{"nom":"ystolz7w","poids":48,"valeur":85},
{"nom":"usmallacombe7x","poids":75,"valeur":162},
{"nom":"gmattiuzzi7y","poids":78,"valeur":95},
{"nom":"pempleton7z","poids":83,"valeur":51},
{"nom":"psamter80","poids":89,"valeur":189},
{"nom":"bcotesford81","poids":78,"valeur":144},
{"nom":"gjura82","poids":61,"valeur":148},
{"nom":"aspinks83","poids":53,"valeur":152},
{"nom":"mofeeny84","poids":98,"valeur":107},
{"nom":"lfautly85","poids":61,"valeur":170},
{"nom":"cfrostdick86","poids":34,"valeur":147},
{"nom":"dmcwaters87","poids":93,"valeur":47},
{"nom":"kbruton88","poids":96,"valeur":168},
{"nom":"alimbert89","poids":52,"valeur":105},
{"nom":"acapelle8a","poids":55,"valeur":165},
{"nom":"mtrenholm8b","poids":35,"valeur":94},
{"nom":"wreck8c","poids":88,"valeur":102},
{"nom":"ldelacour8d","poids":41,"valeur":48},
{"nom":"kstubs8e","poids":55,"valeur":170},
{"nom":"bbilby8f","poids":99,"valeur":145},
{"nom":"lsimmgen8g","poids":59,"valeur":63},
{"nom":"dsarfatti8h","poids":56,"valeur":81},
{"nom":"jtees8i","poids":59,"valeur":171},
{"nom":"pyurasov8j","poids":36,"valeur":152},
{"nom":"dayce8k","poids":68,"valeur":132},
{"nom":"bokenden8l","poids":71,"valeur":149},
{"nom":"clocal8m","poids":39,"valeur":188},
{"nom":"rdeards8n","poids":42,"valeur":110},
{"nom":"dsawley8o","poids":63,"valeur":121},
{"nom":"rscutts8p","poids":34,"valeur":70},
{"nom":"rdumbell8q","poids":71,"valeur":161},
{"nom":"hwinterscale8r","poids":91,"valeur":103},
{"nom":"gduggan8s","poids":97,"valeur":151},
{"nom":"kshooter8t","poids":65,"valeur":191},
{"nom":"agilardone8u","poids":70,"valeur":70},
{"nom":"fhedlestone8v","poids":85,"valeur":168},
{"nom":"wrunnalls8w","poids":53,"valeur":149},
{"nom":"esommerton8x","poids":92,"valeur":122},
{"nom":"mkarpman8y","poids":46,"valeur":84},
{"nom":"sslafford8z","poids":51,"valeur":158},
{"nom":"aghio90","poids":75,"valeur":171},
{"nom":"bgerriessen91","poids":74,"valeur":163},
{"nom":"gswarbrigg92","poids":94,"valeur":197},
{"nom":"lskentelbury93","poids":51,"valeur":84},
{"nom":"akarlolak94","poids":53,"valeur":99},
{"nom":"bcastells95","poids":85,"valeur":156},
{"nom":"beasbie96","poids":66,"valeur":123},
{"nom":"kvalentinetti97","poids":41,"valeur":142},
{"nom":"rwickrath98","poids":81,"valeur":81},
{"nom":"stoyne99","poids":100,"valeur":153},
{"nom":"bbodega9a","poids":67,"valeur":136},
{"nom":"dlarmuth9b","poids":72,"valeur":75},
{"nom":"mfyers9c","poids":77,"valeur":93},
{"nom":"mbellhouse9d","poids":83,"valeur":115},
{"nom":"cmaclardie9e","poids":40,"valeur":65},
{"nom":"tmorales9f","poids":92,"valeur":198},
{"nom":"ihucquart9g","poids":49,"valeur":137},
{"nom":"lsearchfield9h","poids":93,"valeur":122},
{"nom":"rduetsche9i","poids":68,"valeur":117},
{"nom":"wforrester9j","poids":38,"valeur":140},
{"nom":"emartusewicz9k","poids":73,"valeur":64},
{"nom":"mmacanulty9l","poids":96,"valeur":69},
{"nom":"lgenese9m","poids":41,"valeur":119},
{"nom":"pwatt9n","poids":82,"valeur":192},
{"nom":"kjosum9o","poids":90,"valeur":188},
{"nom":"bcastagneri9p","poids":92,"valeur":57},
{"nom":"hrafter9q","poids":70,"valeur":196},
{"nom":"bfrary9r","poids":57,"valeur":45},
{"nom":"rbridgstock9s","poids":96,"valeur":100},
{"nom":"caxon9t","poids":49,"valeur":195},
{"nom":"dtillett9u","poids":83,"valeur":52},
{"nom":"rwaghorn9v","poids":63,"valeur":86},
{"nom":"gpolendine9w","poids":47,"valeur":88},
{"nom":"jtredwell9x","poids":94,"valeur":82},
{"nom":"adebellis9y","poids":61,"valeur":98},
{"nom":"mkaes9z","poids":84,"valeur":56},
{"nom":"hdeningtona0","poids":80,"valeur":82},
{"nom":"msturgesa1","poids":82,"valeur":195},
{"nom":"bsteelea2","poids":36,"valeur":166},
{"nom":"ctwinbornea3","poids":64,"valeur":180},
{"nom":"gtissingtona4","poids":53,"valeur":166},
{"nom":"dlangelaana5","poids":58,"valeur":134},
{"nom":"selgooda6","poids":32,"valeur":175},
{"nom":"cgallagera7","poids":41,"valeur":116},
{"nom":"ssamesa8","poids":84,"valeur":165},
{"nom":"dedgleya9","poids":44,"valeur":114},
{"nom":"mlauaa","poids":44,"valeur":91},
{"nom":"jlarwayab","poids":50,"valeur":131},
{"nom":"esagarac","poids":53,"valeur":100},
{"nom":"mpresseyad","poids":52,"valeur":59},
{"nom":"mdoolanae","poids":35,"valeur":161},
{"nom":"jkleslaf","poids":88,"valeur":135},
{"nom":"kkeerag","poids":72,"valeur":184},
{"nom":"hkoppsah","poids":86,"valeur":132},
{"nom":"pstuerai","poids":57,"valeur":118},
{"nom":"wyeomansaj","poids":59,"valeur":69},
{"nom":"shunnak","poids":39,"valeur":150},
{"nom":"bwynrahameal","poids":66,"valeur":124},
{"nom":"mdetoileam","poids":82,"valeur":137},
{"nom":"cdarlingtonan","poids":91,"valeur":143},
{"nom":"charcourtao","poids":76,"valeur":110},
{"nom":"acondyap","poids":47,"valeur":153},
{"nom":"nblakemoreaq","poids":54,"valeur":124},
{"nom":"gmcnabar","poids":67,"valeur":123},
{"nom":"hbatrickas","poids":80,"valeur":193},
{"nom":"chubatschat","poids":79,"valeur":154},
{"nom":"ebarkeau","poids":49,"valeur":129},
{"nom":"elouchav","poids":94,"valeur":190},
{"nom":"rlaurentinaw","poids":39,"valeur":131},
{"nom":"ostansallax","poids":71,"valeur":77},
{"nom":"mchettleay","poids":78,"valeur":65},
{"nom":"rmccromleyaz","poids":65,"valeur":92},
{"nom":"sledwardb0","poids":80,"valeur":122},
{"nom":"egarwillb1","poids":99,"valeur":169},
{"nom":"mshepeardb2","poids":79,"valeur":180},
{"nom":"jdaveranb3","poids":87,"valeur":83}]
# to be completed
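# --- Added sketch (assumption): the "poids"/"valeur" records above suggest a knapsack-style
# --- selection exercise. The helper below is only an illustration of one possible approach
# --- (a greedy value/weight heuristic); `items` stands for the list of dicts defined above,
# --- and `capacity` is an assumed weight budget that is not given in the statement.
def greedy_selection(items, capacity):
    chosen, total_weight = [], 0
    # consider the most "valuable per unit of weight" objects first
    for item in sorted(items, key=lambda o: o["valeur"] / o["poids"], reverse=True):
        if total_weight + item["poids"] <= capacity:
            chosen.append(item)
            total_weight += item["poids"]
    return chosen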
###Output
_____no_output_____ |
Analysis of Credit Card Defaulters.ipynb | ###Markdown
Table of Contents: 1. Data Loading and Preprocessing, 2. EDA, 2.1 Univariate Analysis, 2.2 Bivariate Analysis, 2.3 Correlation, 2.4 Building a Profile of a High-Risk Customer, 2.5 Sources
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['figure.figsize'] = [15, 8]
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Data Loading and Preprocessing
###Code
df = pd.read_excel("default_credit.xls")
df.head()
df.shape
df.info()
df.describe().T
df.isna().sum().sum()
df.rename(columns={"default payment next month":"default"}, inplace=True)
df.columns = df.columns.str.strip().str.replace(' ', '_').str.lower()
preprocessed_df = df.copy()
## Unique values in each categorical columns
print("Sex", preprocessed_df.sex.unique())
print('Education', preprocessed_df.education.unique())
print('Pay_0', preprocessed_df.pay_0.unique())
print('Marriage', preprocessed_df.marriage.unique())
print('Default', preprocessed_df.default.unique())
###Output
Sex [2 1]
Education [2 1 3 5 4 6 0]
Pay_0 [ 2 -1 0 -2 1 3 4 8 7 5 6]
Marriage [1 2 3 0]
Default [1 0]
###Markdown
- The EDUCATION column has 7 unique values, but as per our data description, we have only 4 unique values, so we are going to club categories 0, 5, and 6 with category 4
###Code
fill = (df.education == 0) | (df.education == 5) | (df.education == 6)
preprocessed_df.loc[fill, 'education'] = 4
df.loc[fill, 'education'] = 4
## marriage column also has 4 unique values but according to our data we only have three categories
fill = (df.marriage == 0)
preprocessed_df.loc[fill, 'marriage'] = 2
df.loc[fill, 'marriage'] = 2
preprocessed_df = preprocessed_df.rename(columns={'pay_0': 'pay_1'})
preprocessed_df.head()
###Output
_____no_output_____
###Markdown
EDA Univariate Analysis
###Code
## Map categorical data
preprocessed_df.sex = preprocessed_df.sex.map({1:'Male', 2:'Female'})
preprocessed_df.default = preprocessed_df.default.map({0:'No', 1:'Yes'})
preprocessed_df.education = preprocessed_df.education.map({1:'Graduate School',
2:'University',
3:'High School',
4:'Others'})
preprocessed_df.marriage = preprocessed_df.marriage.map({1:'Married',
2:'Single',
3:'Divorced'})
def plot_categorical_count(df, col, hue=None, hue_order=None, is_pct=True, figsize=(12,6)):
plt.figure(figsize=figsize)
g = sns.countplot(data=df, x=col, hue=hue, hue_order=None)
for rect in g.patches:
h = rect.get_height()
w = rect.get_width()
x = rect.get_x()
y = rect.get_y()
g.annotate(f"{h}", (x+w/2, h), va='bottom', ha='center', size=14)
g.spines['top'].set_visible(False)
g.spines['left'].set_visible(False)
g.spines['right'].set_visible(False)
plt.show()
if is_pct:
print()
print("Percentage share of each category:")
print(df[col].value_counts(normalize=True)*100)
plot_categorical_count(preprocessed_df, 'default')
plot_categorical_count(preprocessed_df, 'sex')
plot_categorical_count(preprocessed_df, 'education')
plot_categorical_count(preprocessed_df, 'marriage')
###Output
_____no_output_____
###Markdown
Bivariate Analysis
###Code
def default_df(df, col):
df1 = pd.crosstab(index=df[col], columns=df.default, margins=True)
df1.rename(columns={'No':'total_no_default', 'Yes':'total_yes_default', 'All':'total_default'}, inplace=True)
df2 = pd.crosstab(index=df[col], columns=df.default, normalize='index', margins=True)
df2.rename(columns={'No':'pct_no_default', 'Yes':'pct_yes_default'}, inplace=True)
final_df = pd.concat([df1, df2], axis=1)
return final_df
plot_categorical_count(preprocessed_df, col='sex', hue='default', is_pct=False)
default_df(preprocessed_df, 'sex')
###Output
_____no_output_____
###Markdown
- around 24% of male customers have defaulted and around 20% of female customers have defaulted.
###Code
plot_categorical_count(preprocessed_df, col='education', hue='default', is_pct=False)
default_df(preprocessed_df, 'education')
plot_categorical_count(preprocessed_df, col='marriage', hue='default', is_pct=False)
default_df(preprocessed_df, 'marriage')
pd.crosstab(preprocessed_df.pay_1, preprocessed_df.default, margins=True)
###Output
_____no_output_____
###Markdown
- we can see that the maximum count of defaults falls under subcategory 2—that is, a payment delay for the last 2 months. This implies that a customer who has missed payments for 2 continuous months has a high probability of default.
###Code
## Balance Limit
sns.histplot(data=preprocessed_df, x='limit_bal', hue='default',
kde=True, line_kws={'ls':'--', 'lw':2})
plt.show()
sns.boxplot(data=preprocessed_df, x='default', y='limit_bal')
plt.show()
df.groupby('default')['limit_bal'].agg(['mean', 'median', 'std'])
## hypothesis test to check whether the average balance for defaulters and non-defaulters is the same
res = stats.ttest_ind(preprocessed_df.limit_bal.loc[preprocessed_df.default=='Yes'],
preprocessed_df.limit_bal.loc[preprocessed_df.default=='No'])
print(f"P-Value: {res[1]:.3f}")
###Output
P-Value: 0.000
###Markdown
- we can infer that customers with higher balances have a lower likelihood of default than customers with lower balance amounts.
###Code
## Age
sns.histplot(data=preprocessed_df, x='age', hue='default',
kde=True, line_kws={'ls':'--', 'lw':2})
plt.show()
sns.boxplot(data=preprocessed_df, x='default', y='age')
plt.show()
## hypothesis test to check whether the average age for defaulters and non-defaulters is the same
res = stats.ttest_ind(preprocessed_df.age.loc[preprocessed_df.default=='Yes'],
preprocessed_df.age.loc[preprocessed_df.default=='No'])
print(f"P-Value: {res[1]:.3f}")
age_df = pd.crosstab(preprocessed_df.age, preprocessed_df.default, normalize='index')
plt.bar(x=age_df.index, height=age_df.No, label='Non-Defaulter')
plt.bar(x=age_df.index, height=age_df.Yes, bottom=age_df.No, label='Defaulter')
plt.xticks(ticks=range(20,81))
plt.xlabel("Age")
plt.ylabel("Percentage")
plt.title("Percentage of Defaulters and Non-Defaulters")
plt.legend()
plt.show()
## Pay delays
def display_pay_delays(df, col):
x = pd.crosstab(index=df[col], columns=df.default, normalize='index')
x = x.style.highlight_max(color='orange', axis=1)
return x
# repayment status in September
display_pay_delays(preprocessed_df, 'pay_1')
# repayment status in August
display_pay_delays(preprocessed_df, 'pay_2')
# repayment status in July
display_pay_delays(preprocessed_df, 'pay_3')
## pay amount in September
preprocessed_df.groupby('default')['pay_amt1'].agg(['min', 'max', 'mean', 'median', 'std']).T
###Output
_____no_output_____
###Markdown
- Customers with higher payment amounts in September are less likely to default than customers with lower payment amounts. Correlation
###Code
corr_matrix = df.corr(method='spearman')
plt.figure(figsize=(20,15))
sns.heatmap(corr_matrix, cmap='Pastel1', annot=True, fmt='.2g', mask=np.triu(corr_matrix))
plt.show()
corr_matrix.iloc[:-1, -1].plot.bar(color='orange')
plt.xlabel("Features")
plt.ylabel("Correlation")
plt.title("Correlation of default feature with all other features", fontdict={'size':16})
plt.show()
###Output
_____no_output_____ |
#3pascal_od_api.ipynb | ###Markdown
Imports
###Code
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('dark_background')
import tensorflow_probability as tfp
###Output
_____no_output_____
###Markdown
PascalVOC In this kernel we will use the Pascal VOC dataset to demo object detection inference.
###Code
train_ds = tfds.load('voc', split='train')
test_ds = tfds.load('voc', split='validation')
def preprocess(element):
image = element["image"]
return image
train_ds = train_ds.map(preprocess).shuffle(64)
test_ds = test_ds.map(preprocess)
for image in train_ds.take(1):
print(image.shape)
###Output
(375, 500, 3)
###Markdown
Object Detection API Here we will be using the Mask-RCNN model to infer the objects from the images.
###Code
# Apply image detector on a single image.
detector = hub.load("https://tfhub.dev/tensorflow/mask_rcnn/inception_resnet_v2_1024x1024/1")
image = next(iter(train_ds))
plt.imshow(image)
plt.show()
detector_output = detector(image[None, ...])
boxes = detector_output["detection_boxes"]
scores = detector_output["detection_scores"]
# Define categorical distribution
dist = tfp.distributions.Categorical(probs=scores)
# Generate a sample from categorical distribution - this serves as an index
index = dist.sample(4)
boxes = tf.gather(boxes, tf.squeeze(index), axis=1)
boxes.shape
colors = np.array([[1.0, 0.0, 0.0]])
image_bbox = tf.image.draw_bounding_boxes(
tf.cast(image[None, ...], tf.float32)/255., boxes, colors
)
plt.imshow(tf.squeeze(image_bbox))
plt.show()
###Output
_____no_output_____
###Markdown
> We not only infer the bounding boxes, but also crop the detected objects out of the images.
###Code
for box in boxes[0]:
im_height, im_width, _ = image.shape
ymin = box[0]
xmin = box[1]
ymax = box[2]
xmax = box[3]
(xminn, xmaxx, yminn, ymaxx) = int(xmin * im_width), int(xmax * im_width), int(ymin * im_height), int(ymax * im_height)
cropped_image = tf.image.crop_to_bounding_box(
image,
yminn,
xminn,
ymaxx - yminn,
xmaxx - xminn)
crop = tf.image.resize(cropped_image, (256,256))
plt.imshow(crop/255.)
plt.show()
###Output
_____no_output_____ |
skl_regression.ipynb | ###Markdown
This notebook contains two ridge (L2) regression models, whose dependent variables are zDEF and zEMOF, respectively. The main difficulty is reporting p-values and MSE, because machine learning work rarely cares about p-values, so the statsmodels package is called in to solve that problem. In addition, the seaborn package is used to draw the prediction plots. First, the cross-validation folds are prepared with the kfold function. Then, a regression model is built with linear_model from scikit-learn, and the p-value, r value and MSE are returned; all of this is wrapped in the get_rs function. The main function also converts the results into an easier-to-read DataFrame format. Finally, the plots are drawn and saved. The purpose of this notebook is to provide a reference template for readers who are not yet familiar with the sklearn module, so the parameters and functions are easy to customize. Note that this notebook does not address how to run a permutation test.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from sklearn import linear_model, metrics

df = pd.read_csv('df.csv')
df
def kfold(group='fold',X=['zOFG'],y='zDEF'):
for i in pd.unique(df[group]):
X_train = df.loc[df[group]!=i,X]
y_train = df.loc[df[group]!=i,[y]]
X_test = df.loc[df[group]==i,X]
y_test = df.loc[df[group]==i,[y]]
yield (X_train,y_train,X_test,y_test,i)
def get_rs(X_train,y_train,X_test,y_test):
reg = linear_model.Ridge(alpha=.5)
reg.fit(X_train, y_train)
y_pred = reg.predict(X_test)
# rMSE r p
rMSE = metrics.mean_squared_error(y_test, y_pred)
# formula = 'y ~ x1 + x2 + x3'
from scipy.stats import zscore as zsc
rOLS = sm.OLS(zsc(y_test),zsc(y_pred))
rs = rOLS.fit()
r = rs.params
p = rs.pvalues
return rMSE,r[0],p[0],y_pred,y_test
###Output
_____no_output_____
###Markdown
Prediction of zDEF from zOFG
###Code
def main(dfs,group='fold',X=['zOFG'],y='zDEF'):
rs = {}
pyy = pd.DataFrame(columns=['y_pred','y_test','fold'])
for i,j,k,l,m in kfold(group,X,y):
rMSE,r,p,y_pred,y_test= get_rs(i,j,k,l)
rs.setdefault("MSE", []).append(rMSE)
rs.setdefault("r", []).append(r)
rs.setdefault("p", []).append(p)
temp = pd.DataFrame({'y_pred':y_pred[:,0],'y_test':y_test.values[:,0]})
temp['fold'] = m
pyy = pd.concat([pyy, temp], ignore_index=True)  # DataFrame.append is deprecated in newer pandas
return pd.DataFrame(rs),pyy
rs, pyy = main(df)
rs
np.mean(rs,axis=0)
plt.figure(figsize=(5,12))
#sns.regplot(data=df2,x='y', y='predicted', ci=95)
sns.lmplot(x="y_pred",y="y_test",hue="fold",data=pyy)
plt.xlabel("predicted zOFG")
plt.ylabel("actual zOFG")
plt.savefig(u'Prediction performance.pdf')
###Output
_____no_output_____
###Markdown
Prediction of zEMOF from zmPFC, zIPL, and zBA10
###Code
rs1, pyy1 = main(df, 'fold', ['zmPFC','zIPL','zBA10'], 'zEMOF')
rs1
np.mean(rs1,axis=0)
plt.figure(figsize=(5,12))
sns.lmplot(x="y_pred",y="y_test",hue="fold",data=pyy1)
plt.xlabel("predicted zEMOF")
plt.ylabel("actual zEMOF")
plt.savefig(u'Prediction performance2.pdf')
###Output
_____no_output_____ |
Python/PythonBasics.ipynb | ###Markdown
Arithmetic Operation
###Code
3 + 5
3 - 5
3 * 6
4 / 3
4 // 3
"Akshay" + 45
"Akshay" + "45"
"Akshay" * 3
###Output
_____no_output_____
###Markdown
Comparison Operator
###Code
45 > 5
5 <= 6
###Output
_____no_output_____
###Markdown
Logical Operators
###Code
0 and 3
3 and 0
1 and 3
1 or 3
3 or 1
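# Note (added comment): with truthy/falsy operands, `and` / `or` return one of the operands
# (short-circuit evaluation) rather than a strict True/False:
# 0 and 3 -> 0, 3 and 0 -> 0, 1 and 3 -> 3, 1 or 3 -> 1, 3 or 1 -> 3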
###Output
_____no_output_____
###Markdown
Variables and data types
###Code
a = 5
print(a)
b = a
a = 3
print(a)
print(b)
type(a)
type(True)
###Output
_____no_output_____
###Markdown
Conditional Statement
###Code
###Output
_____no_output_____
###Markdown
Question : Check if a number is even
###Code
x = 4
if x % 2 == 0:
print("Even")
else:
print("odd")
###Output
Even
###Markdown
Loop
###Code
for i in range(5):
print("hi")
for i in range(5,10):
print(i)
for i in range(5,10):
if(i % 2 != 0):
print(i)
for i in range(5, 10, 2):
print(i)
###Output
5
7
9
###Markdown
Functions
###Code
def max_number(n1, n2):
if n1 > n2:
return n1
else:
return n2
max_number(5, 76)
###Output
_____no_output_____
###Markdown
List in Python
###Code
list = [1, "Akshay", 2, "Bahadur"]
list
list[1]
list[1:4]
list[-1]
list.append(3)
list
list.extend(["Hello", 7])
list
list.append(["Hello", 7])
list
list.remove("Hello")
list
del list[0]
list
for i in list:
print (i)
###Output
Akshay
2
Bahadur
3
3
7
['Hello', 7]
###Markdown
Dictionary
###Code
dict = {"Ramesh" : 150, "Akshay" : 200}
dict
dict = {"Ramesh" : [150, 120], "Akshay": [0, 100,21]}
dict
dict["Akshay"]
dict["Akshay"].append(1)
dict
dict["Raghav"]=100
dict
dict.update({"Sarthak" : [100, 0], "Umesh" : 12})
dict
del dict["Ramesh"]
dict
###Output
_____no_output_____
###Markdown
Standard Library and modules
###Code
## Standard library -> modules that ship with Python and can be used directly
## Modules -> code files developed by others (or yourself) that are imported for use
## Package is a collection of modules
## from Operator.Arithmetic import addition
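# A short added example (illustrative): importing a standard-library module and
# importing a single name from a module.
import math
print(math.sqrt(16))   # 4.0
from math import pi
print(pi)              # 3.141592653589793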
###Output
_____no_output_____ |
LectureNotebooks/7_SupervisedMachineLearning.ipynb | ###Markdown
Supervised Machine Learning - RegressionSumudu Tennakoon, PhDTo learn more about Python, refeer to the following websites* Python : www.python.org* W3Schools : www.w3schools.com/pythonTo learn more about the Python packages we explore in this notebook, refeer to the following websites* NumPy : www.numpy.org* Matplotlib : www.matplotlib.org* Pandas : https://pandas.pydata.org* Scikit-Learn : https://scikit-learn.org/* Seaborn: https://seaborn.pydata.org/* StatsModel : https://www.statsmodels.org
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
Polynomial Regression Generate Dataset
###Code
x = np.random.normal(0, 1, 25)
print(F"x = {x}")
y = 3.14 + 0.5*x + 2*(x**2) - 1.5*(x**3) + np.random.normal(-1, 1, 25)
print(F"y = {y}")
data = pd.DataFrame(data={"x":x, "y":y})
data
data.plot(x="x", y="y", kind="scatter")
from sklearn.linear_model import LinearRegression
X = data[['x']]
y = data['y']
model = LinearRegression()
model.fit(X, y)
y_pred = model.predict(X)
data['y_pred'] = y_pred
data.head()
f,ax0 = plt.subplots()
#ax1 = ax0.twinx()
data.plot(x='x', y='y', kind='scatter', ax=ax0)
data.plot(x='x', y='y_pred', kind='line', c='red', ax=ax0)
plt.show()
from sklearn.preprocessing import PolynomialFeatures
polynomial_features= PolynomialFeatures(degree=3)
data[['x0', 'x1', 'x2', 'x3']] = polynomial_features.fit_transform(data[["x"]])
data[['x', 'x0', 'x1', 'x2', 'x3', 'y']].head()
X = data[['x0', 'x1', 'x2', 'x3']]
y = data['y']
model = LinearRegression()
model.fit(X, y)
y_pred = model.predict(X)
data['y_pred'] = y_pred
data[['x0', 'x1', 'x2', 'x3', 'y', 'y_pred']] .head()
f,ax0 = plt.subplots()
#ax1 = ax0.twinx()
data.plot(x='x', y='y', kind='scatter', ax=ax0)
data.plot(x='x', y='y_pred', kind='line', c='red', ax=ax0)
plt.show()
f,ax0 = plt.subplots()
#ax1 = ax0.twinx()
data.plot(x='x', y='y', kind='scatter', ax=ax0)
data.sort_values(by='x').plot(x='x', y='y_pred', kind='line', c='red', ax=ax0) # sort datapoints so the line follows increasing x
plt.show()
from sklearn.metrics import mean_squared_error, r2_score
# Evaluate
mse = mean_squared_error(data['y'], data['y_pred'])
print("MSE:", mse)
rmse = np.sqrt(mse)
print("RMSE:", rmse)
r2 = r2_score(data['y'], data['y_pred'])
print("R2:", r2)
# Degree = 2
polynomial_features= PolynomialFeatures(degree=2)
data[['x0', 'x1', 'x2']] = polynomial_features.fit_transform(data[["x"]])
X = data[['x0', 'x1', 'x2']]
y = data['y']
model = LinearRegression()
model.fit(X, y)
y_pred = model.predict(X)
data['y_pred_d2'] = y_pred
f,ax0 = plt.subplots()
#ax1 = ax0.twinx()
data.plot(x='x', y='y', kind='scatter', ax=ax0)
data.sort_values(by='x').plot(x='x', y='y_pred_d2', kind='line', c='red', ax=ax0) # sort datapoints so the line follows increasing x
plt.show()
# Evaluate
mse = mean_squared_error(data['y'], data['y_pred_d2'])
print("MSE:", mse)
rmse = np.sqrt(mse)
print("RMSE:", rmse)
r2 = r2_score(data['y'], data['y_pred_d2'])
print("R2:", r2)
# Degree = 10
polynomial_features= PolynomialFeatures(degree=10)
data[['x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8', 'x9', 'x10']] = polynomial_features.fit_transform(data[["x"]])
X = data[['x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8', 'x9', 'x10']]
y = data['y']
model = LinearRegression()
model.fit(X, y)
y_pred = model.predict(X)
data['y_pred_d10'] = y_pred
f,ax0 = plt.subplots()
#ax1 = ax0.twinx()
data.plot(x='x', y='y', kind='scatter', ax=ax0)
data.sort_values(by='x').plot(x='x', y='y_pred_d10', kind='line', c='red', ax=ax0) # sort dstapoints
plt.show()
# Evaluate
mse = mean_squared_error(data['y'], data['y_pred_d10'])
print("MSE:", mse)
rmse = np.sqrt(mse)
print("RMSE:", rmse)
r2 = r2_score(data['y'], data['y_pred'])
print("R2:", r2)
data[['x', 'y', 'y_pred', 'y_pred_d2', 'y_pred_d10']].head()
###Output
_____no_output_____
###Markdown
Overfitting and Underfitting
###Code
f,ax0 = plt.subplots()
#ax1 = ax0.twinx()
data.plot(x='x', y='y', kind='scatter', ax=ax0)
data.sort_values(by='x').plot(x='x', y='y_pred_d2', kind='line', c='red', ax=ax0)
data.sort_values(by='x').plot(x='x', y='y_pred_d10', kind='line', c='magenta', ax=ax0)
data.sort_values(by='x').plot(x='x', y='y_pred', kind='line', c='green', linewidth=2, ax=ax0)
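# Added illustrative sketch: one way to see under/overfitting numerically is to refit each
# degree on a train split and score it on held-out points (this split is an assumption,
# not part of the original notebook's flow).
from sklearn.model_selection import train_test_split
X_tr, X_te, y_tr, y_te = train_test_split(data[['x']], data['y'], test_size=0.3, random_state=0)
for degree in (2, 3, 10):
    poly = PolynomialFeatures(degree=degree)
    fit_d = LinearRegression().fit(poly.fit_transform(X_tr), y_tr)
    test_mse = mean_squared_error(y_te, fit_d.predict(poly.transform(X_te)))
    print(f"degree {degree}: test MSE = {test_mse:.3f}")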
###Output
_____no_output_____
###Markdown
Linear Regression
###Code
file_name = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv'
# Load CSV File
data = pd.read_csv(file_name, sep=';')
data.sample(20)
data.info()
data.describe(include='all').transpose()
data['id'] = data.index+1
data.head()
data.columns
data = data[[ 'id', 'fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar',
'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density',
'pH', 'sulphates', 'alcohol', 'quality']]
data.head()
sns.pairplot(data[[ 'fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar',
'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density',
'pH', 'sulphates', 'alcohol', 'quality']])
plt.show()
correlation_matrix = data[[ 'fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar',
'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density',
'pH', 'sulphates', 'alcohol', 'quality']].corr()
correlation_matrix
sns.heatmap(correlation_matrix.abs())
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
X = [ 'fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar',
'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density',
'pH', 'sulphates', 'alcohol'
]
y = ['quality']
X_train, X_test, y_train, y_test = train_test_split(data[X], data[y], test_size=0.3, random_state=42)
X_train.head()
y_train.head()
model = linear_model.LinearRegression()
print(model)
y_actual = 'quality'
y_predict = 'predicted_quality'
correlation_matrix[y_actual].sort_values()
# Select variables
X = ['alcohol']
#
# Fit
model.fit(X_train[X], y_train[y_actual])
#Find model parameters
coefficients = model.coef_
intercept = model.intercept_
print(pd.DataFrame(data={'features':X, 'coefficients':coefficients}))
print('\n') # Add new line to print
print(F"Intercept = {intercept}")
result = y_test
result[y_predict] = model.predict(X_test[X])
result['abs_difference'] = (result[y_actual] - result[y_predict]).abs()
result[[y_actual, y_predict, 'abs_difference']]
result['abs_difference'].describe()
mse = mean_squared_error(result[y_actual], result[y_predict])
print("MSE:", mse)
rmse = np.sqrt(mse)
print("RMSE:", rmse)
r2 = r2_score(result[y_actual], result[y_predict])
print("R2:", r2)
###Output
_____no_output_____
###Markdown
2nd Iteration
###Code
y_actual = 'quality'
y_predict = 'predicted_quality'
correlation_matrix[y_actual].sort_values()
# Select variables
X = ['alcohol', 'volatile acidity']
# Fit
model.fit(X_train[X], y_train[y_actual])
#Find model parameters
coefficients = model.coef_
intercept = model.intercept_
print(pd.DataFrame(data={'features':X, 'coefficients':coefficients}))
print('\n') # Add new line to print
print(F"Intercept = {intercept}")
result = y_test
result[y_predict] = model.predict(X_test[X])
mse = mean_squared_error(result[y_actual], result[y_predict])
print("MSE:", mse)
rmse = np.sqrt(mse)
print("RMSE:", rmse)
r2 = r2_score(result[y_actual], result[y_predict])
print("R2:", r2)
###Output
_____no_output_____
###Markdown
3rd Iteration
###Code
y_actual = 'quality'
y_predict = 'predicted_quality'
correlation_matrix[y_actual].abs().sort_values()
correlation_matrix[y_actual].abs().sort_values().index
# Select variables
X = ['alcohol', 'volatile acidity', 'sulphates']
# Fit
model.fit(X_train[X], y_train[y_actual])
#Find model parameters
coefficients = model.coef_
intercept = model.intercept_
print(pd.DataFrame(data={'features':X, 'coefficients':coefficients}))
print('\n') # Add new line to print
print(F"Intercept = {intercept}")
result = y_test
result[y_predict] = model.predict(X_test[X])
mse = mean_squared_error(result[y_actual], result[y_predict])
print("MSE:", mse)
rmse = np.sqrt(mse)
print("RMSE:", rmse)
r2 = r2_score(result[y_actual], result[y_predict])
print("R2:", r2)
# Select variables
X = ['fixed acidity',
'chlorides', 'density', 'total sulfur dioxide', 'citric acid',
'sulphates', 'volatile acidity', 'alcohol']
# Fit
model.fit(X_train[X], y_train[y_actual])
#Find model parameters
coefficients = model.coef_
intercept = model.intercept_
print(pd.DataFrame(data={'features':X, 'coefficients':coefficients}))
print('\n') # Add new line to print
print(F"Intercept = {intercept}")
result = y_test
result[y_predict] = model.predict(X_test[X])
mse = mean_squared_error(result[y_actual], result[y_predict])
print("MSE:", mse)
rmse = np.sqrt(mse)
print("RMSE:", rmse)
r2 = r2_score(result[y_actual], result[y_predict])
print("R2:", r2)
###Output
_____no_output_____
###Markdown
Normalization
###Code
for column in X_train.columns:
print(F"min({column}): {X_train[column].min()}")
print(F"max({column}): {X_train[column].max()}")
X_train[column] = ( X_train[column] - X_train[column].min() ) / ( X_train[column].max() - X_train[column].min() )
for column in X_test.columns:
print(F"min({column}): {X_test[column].min()}")
print(F"max({column}): {X_test[column].max()}")
X_test[column] = ( X_test[column] - X_test[column].min() ) / ( X_test[column].max() - X_test[column].min() )
# Select variables
X = X_train.columns
# Fit
model.fit(X_train[X], y_train[y_actual])
#Find model parameters
coefficients = model.coef_
intercept = model.intercept_
print(pd.DataFrame(data={'features':X, 'coefficients':coefficients}))
print('\n') # Add new line to print
print(F"Intercept = {intercept}")
result = y_test
result[y_predict] = model.predict(X_test[X])
mse = mean_squared_error(result[y_actual], result[y_predict])
print("MSE:", mse)
rmse = np.sqrt(mse)
print("RMSE:", rmse)
r2 = r2_score(result[y_actual], result[y_predict])
print("R2:", r2)
###Output
_____no_output_____
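###Markdown
Note (added illustration): the min-max scaling above is computed separately on the train and test sets. A common alternative, sketched below under that assumption, is scikit-learn's `MinMaxScaler` fitted on the training data only and then applied to both splits, so the test set is scaled with the training minimum and maximum.
###Code
from sklearn.preprocessing import MinMaxScaler

# Fit the scaler on the training features only, then apply the same transform to the test set
scaler = MinMaxScaler()
X_train_scaled = pd.DataFrame(scaler.fit_transform(X_train), columns=X_train.columns, index=X_train.index)
X_test_scaled = pd.DataFrame(scaler.transform(X_test), columns=X_test.columns, index=X_test.index)
X_test_scaled.describe().T
###Output
_____no_output_____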
###Markdown
Last update 2021-10-16 by Sumudu Tennakoon
###Code
###Output
_____no_output_____ |
week1/notebooks/Day-1-Introduction-to-numpy.ipynb | ###Markdown
Week 0 `Numpy`, Scientific and numeric library `Numerical Python` popularly known as `Numpy` is core library for scientific computing. It provides high performance multi-dimensional array object and tools for working with these objects. In the following sections, we will be working on the `Numpy` library and its uses. We will compare the performance of some `Numpy` operations with their classic `Python` equivalent and discuss the uses of the library. Let us begin! 1. Comparing `Python` List vs `Numpy` array (This section is optional and can be skipped) ***You DON'T need to memorize this code - it is just to give you a comparison to know why NumPy is better. Reason: It could be overwhelming sometimes as a beginner so don't hesitate to simply skip section 1 and jump to section 2. However, please ensure you read the conclusion*** Consider the following snippet adapted from https://webcourses.ucf.edu/courses/. We start importing the `numpy` and `timeit` libraries. `Numpy`, as we have stated, enables scientific computing and high performance tasks executions with multi-dimensional or n-dimensional arrays. The `Timer` module from `timeit` library is used for the purpose of calculating the time that certain scripts take.
###Code
import numpy as np
from timeit import Timer
###Output
_____no_output_____
###Markdown
With imported libraries, we proceed to create two example arrays with numbers in the range [0, 9999]:
###Code
size_of_vec = 10000
X_list = range(size_of_vec)
Y_list = range(size_of_vec)
X = np.arange(size_of_vec)
Y = np.arange(size_of_vec)
###Output
_____no_output_____
###Markdown
To compare the performance of two scripts, one explicitly developed with `Python` and another with `Numpy`, we defined two user functions:
###Code
def pure_python_version(): #Explicitly Python based with lists
Z = []
for i in range(len(X_list)):
Z.append(X_list[i] + Y_list[i])
def numpy_version(): #Explicitly Numpy based with vectorization
Z = X + Y
###Output
_____no_output_____
###Markdown
We call the developed functions and measure the time it takes to execute them once:
###Code
timer_obj1 = Timer("pure_python_version()",
"from __main__ import pure_python_version")
timer_obj2 = Timer("numpy_version()",
"from __main__ import numpy_version")
print("Pure python version:",timer_obj1.timeit(10))
print("Numpy version:",timer_obj2.timeit(10))
###Output
Pure python version: 0.11837320000086038
Numpy version: 0.0002834999995684484
###Markdown
As we can see, the vectorized sum approach with `Numpy` is much faster than the purely `Python` based approach. 2.Creating `Numpy` array from `Python` list Let's start by creating a `numpy` array from a predefined list with te values 165, 170, 171, 180, 189, and 178:
###Code
heights = [165, 170, 171, 180, 189, 178]
print(type(heights))
heights_np = np.array(heights)
print(type(heights_np))
###Output
<class 'list'>
<class 'numpy.ndarray'>
###Markdown
The type of object defined at first is a `list`. After conversion with the `.array()` function, the object is converted into a `ndarray` object. This means that we have created an array of multiple dimensions (`n` dimensions) from a `list`. In this case, the `numpy` array is a **one-dimensional array**.Now let's see how to create a **two-dimensional array**:
###Code
weights = np.array([[50, 45, 56, 78],[78, 89, 59, 90],[89, 78, 69, 70],[67, 69, 89, 70],[90,89, 80, 84],[89, 59, 90, 78]])
print(weights)
###Output
[[50 45 56 78]
[78 89 59 90]
[89 78 69 70]
[67 69 89 70]
[90 89 80 84]
[89 59 90 78]]
###Markdown
3. Exploring some of the key attributes of `ndarray` objects Multidimensional arrays have the following important attributes:- `ndim`: number of dimensions of the array- `shape`: shape of the array in the format `(number_rows, number_columns)`- `size`: total number of elements- `dtypes`: type of data stored in the array- `strides`: number of bytes that must be moved to store each row and column in memory, in the format `(number_bytes_files, number_bytes_columns)`Let's see an example:
###Code
print("dimension:", weights.ndim)
print("shape:", weights.shape)
print("size:", weights.size)
print("dtype:", weights.dtype)
print("strides:", weights.strides)
###Output
dimension: 2
shape: (6, 4)
size: 24
dtype: int32
strides: (16, 4)
###Markdown
The exemplified arrangement has:- `ndim` of 2 because it is a two-dimensional array.- `shape` of (6, 4) as it is made up of 6 rows and 4 columns.- `size` of 24 since it has 24 elements in total, 6 elements per column or what is the same, 4 elements per row.- int32 `dtypes` because each element of the array is a 32-bit (4-byte) integer- `strides` of (16, 4) since 16 bytes (4 integers of 4 bytes in the rows) are needed to store each row in memory and 4 bytes (1 integer per column) to store each column in memory. Exercise 1Convert the two-dimensional `ndarray` `weights` into a three-dimensional object without changing its shape.
###Code
weights.reshape(1, 6, 4)
###Output
_____no_output_____
###Markdown
4. Exploring some of the key functions defined for `numpy` arrays In this section we are going to explore some of the most important functions of `numpy` arrays:1. `zeros(shape=(n,m))`: Allows to create a zero-array with the shape (`n` rows, `m` columns)2. `arange(start=i, stop=j, step=u)`: creates a one-dimensional array whose first value is `i` inclusive, last value is `j` exclusive, and each value varies `s` steps from the previous.3. `linspace(start=i, stop=j, num=n)`: creates a one-dimensional array whose first value is `i` inclusive, last value is `j` inclusive and contains `n` values in total. Each value differs from the previous one with the same magnitude that differs from the next.4. `full(shape=(n,m), fill_value=f)`: Allows to create an array with the shape (`n` rows,` m` columns), where all positions have the value `f`.Let's delve into each of them: 4.1. np.zeros() The `zeros(shape=(n,m), dtypes)` function creates a zero-array with the shape (`n` rows, `m` columns) and data types of `dtypes`:
###Code
x = np.zeros(shape=(3,5), dtype ="int32")
print(x)
###Output
[[0 0 0 0 0]
[0 0 0 0 0]
[0 0 0 0 0]]
###Markdown
As you can notice, we have created a two-dimensional array of zeros of three rows and five columns where each element is a 32-bit integer. 4.2. np.arange() The `arange(start=i, stop=j, step=u)` function creates a one-dimensional array whose first value is `i` inclusive, last value is `j` exclusive, and each value varies `s` steps from the previous:
###Code
x = np.arange(start=100, stop=1000, step=100, dtype="int32")
print(x)
###Output
[100 200 300 400 500 600 700 800 900]
###Markdown
This function has allowed us to create a one-dimensional array that starts at 100, ends at 1000 (exclusive) and progresses from 100 to 100, with 32-bit integer values. 4.3. np.linspace() The `linspace(start=i, stop=j, num=n)` function creates a one-dimensional array whose first value is `i` inclusive, last value is `j` inclusive and contains `n` values in total. Each value differs from the previous one with the same magnitude that differs from the next. Consider the following example:
###Code
x_lin = np.linspace(start=10, stop=50, num=30)
print(x_lin)
###Output
[10. 11.37931034 12.75862069 14.13793103 15.51724138 16.89655172
18.27586207 19.65517241 21.03448276 22.4137931 23.79310345 25.17241379
26.55172414 27.93103448 29.31034483 30.68965517 32.06896552 33.44827586
34.82758621 36.20689655 37.5862069 38.96551724 40.34482759 41.72413793
43.10344828 44.48275862 45.86206897 47.24137931 48.62068966 50. ]
###Markdown
We have created a one-dimensional array that varies linearly from 10 to 50 inclusive, for a total of 30 floating numbers. 4.4. np.full() The `full(shape=(n,m), fill_value=f)` function allows to create an array with the shape (`n` rows,` m` columns), where all positions have the value `f`.
###Code
x_ful = np.full(shape=(5,6), fill_value=3)
print(x_ful)
###Output
[[3 3 3 3 3 3]
[3 3 3 3 3 3]
[3 3 3 3 3 3]
[3 3 3 3 3 3]
[3 3 3 3 3 3]]
###Markdown
We see that a two-dimensional array of 5 rows and 6 columns has been created, all with a value of 3 at their positions. 5. Exploring additional attributes and functions Let's review three additional functions: `.reshape()`, `.flatten()` and `.ravel()`. 5.1. Reshaping the array Let's reshape the `weights` numpy array. First take a look to the contant and the shape of `weights`:
###Code
weights
weights.shape
###Output
_____no_output_____
###Markdown
The reshaping procedure is done using the `.reshape((n1,m1))` function, which receives as input parameters a tuple of two values `n1` and `m1` that is, the new shape of the array to be created from the original array:
###Code
weights = weights.reshape((4,6))
weights
weights.shape
###Output
_____no_output_____
###Markdown
Can you see the difference? We have changed the shape of the `weights` array, from 6 rows and 4 columns, to 4 rows and 6 columns. The way the values are distributed in the new array is from left-to-right then top-to-bottom. Let's add a new dimension through reshaping the current `weights` array:
###Code
weights = weights.reshape((2,6,2))
weights
weights.shape
###Output
_____no_output_____
###Markdown
Now, the array is three-dimensionally estructured. Two bi-dimensional (2D) arrays conform the new array, and each of the 2D arrays have two rows and six columns. 5.2. Flattening the array The `.flatten()` function returns a copy of an array collapsed into one dimension, no matter how many dimensions the array has. Consider the `weights` two-dimensional array:
###Code
weights
###Output
_____no_output_____
###Markdown
If we flatten the array, we are re-organizing their elements in a one-dimensional array, as follows:
###Code
weights_flattened = weights.flatten()
weights_flattened
###Output
_____no_output_____
###Markdown
5.3. Raveling the array The `.ravel()` function returns a flattened view of an array collapsed into one dimension. It works identically to the `.flatten()` function, although a copy in memory is not achieve, just a flatten view of the final result. Consider the `weights` two-dimensional array:
###Code
weights_raveld = weights.ravel()
weights_raveld
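# Added demonstration: here ravel() gives a *view*, so modifying it also modifies `weights`
weights_raveld[0] = 999
print(weights[0, 0, 0])   # 999 -> the change shows up in the original array
weights_raveld[0] = 50    # restore the original value (the first element was 50)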
###Output
_____no_output_____
###Markdown
Some key differences between flattening and raveling the array are:- `ravel()` function simply returns a flattened view of Numpy array. If you try to modify this view, you will end up with that same changes in the original array. `flatten()` function returns a flattened copy in memory of the array, so that new changes can be made without affecting the original array.- `ravel()` does not occupy memory, being faster than `flatten()`, which occupies memory when copying the flattened objects. Exercise 2Create an array of 51 elements starting at 100 and ending at 500, using the two functions `np.linspace()` and `np.arange()`. Arrays must have the same content, with the names `array_lin` and `array_ara`, respectively. Verify that the arrays have the same content with the `np.array_equal()` function.
###Code
array_lin = np.arange(100, 501, 8)
array_ara = np.linspace(100, 500, 51)
print(array_lin)
print(array_ara)
np.array_equal(array_lin, array_ara)
###Output
_____no_output_____
###Markdown
6.Array indexing To access the content of an array we can use indexing through brackets `[ ]`. When using the brackets, we can access the elements on the list by: 1. Using a **positive single index** starting from 02. Using a **negative single index** starting from -13. Using **positive index intervals** using the `start:end:step` notation to specify starting and ending values as well as the step.4. Using **negative index intervals** using the `start:end:step` notation to specify negative index starting and ending values as well as the step.Occasionally, we can:- Get ride of the the `step` value as `sart:end`, so that by default we slice the data with a `step` of 1. - Get ride of the `start` value as `:fin:step`, and hence our `start` index will be 0, by default.- Omit the `end` value as `start::step`, specifying the final position as the `end` index by default. - Specify the range as `::step`, and hence the `start` position will be 0 and the end position will be the last one of the array.Let's see how indexing works using the `weights` and `heights_np` arrays:
###Code
weights
weights_or = weights.reshape((6,4))
weights_or
heights_np
###Output
_____no_output_____
###Markdown
6.1 Using a positive single index When using positive indexing, it is important to consider the first position of the array to be 0:
###Code
print("Accessing single element in 1D array:", heights_np[2])
print("Accessing single element in 2D array:", weights_or[1][3])
heights_np[7]
###Output
_____no_output_____
###Markdown
**Why are we getting this error message?**Well guessed! It is because position 7 does not exist in the `heights_np` array, it is totally out of the bounds defined by the array's shape and size. The array has 6 elements, the last element being in position 5. 6.2 Using a negative single index starting When using negative indexing, it is important to consider the last position of the array to be -1:
###Code
print("Accessing single element in 1D array:", heights_np[-4])
print("Accessing single element in 2D array:", weights_or[-5][-1])
heights_np[-8]
###Output
_____no_output_____
###Markdown
**Why are we getting this error message again?**Well guessed! It is because position -8 does not exist in the `heights_np` array, it is totally out of the bounds defined by the array's shape and size. The array has 6 elements, the last element being in position -1 and the first element being in position -6. 6.3 Using positive index intervals When using positive interval indexing `start:end:step`, the starting value is inclusive and the ending value is exlusive. Here are somoe examples:
###Code
heights_np[:2] # The default start value is 0
heights_np[2:] # The default end value is the last value of the array
heights_np[2:3] # The ending value is exclusive
weights[:2, ::2]
weights[:3, 3::]
weights[:3, :3, :1]
###Output
_____no_output_____
###Markdown
6.4 Using negative index intervals When using positive interval indexing `start:end:step`, the negative starting value is inclusive and the negative ending value is exlusive. Here are somoe examples:
###Code
heights_np[:-4] # Equivalent to heights_np[:2]
heights_np[-4:] # Equivalent to heights_np[2:]
heights_np[-4:-3] # Equivalent to heights_np[2:3]
weights[:2, -3::] # Equivalent to weights[:3, 3::]
weights[:3, :-3, :-1] # Equivalent to weights[:3, :3, :1]
###Output
_____no_output_____
###Markdown
Exercise 3Consider the `weights_or` array:1. Select all the values that are in the even positions in the rows and in the odd positions in the columns. Create a new array named `weights_custom1` with these values.2. Express the `weights_custom1` array flattened with an in-memory copy. Call the new array `weights_custom2`.3. Select items in positions 2 to 4 inclusive with negative indexing. Name the output array as `weights_custom3`.
###Code
weights_or
# Answer #1
weights_custom1 = weights_or[1::2, 0::2]
# Answer #2
weights_custom2 = weights_custom1.flatten()
print(weights_custom2)
# Answer #3
weights_custom3 = weights_custom2[-5:-2]
print(weights_custom3)
###Output
[59 67 89]
###Markdown
7.Manipulating `Numpy` arrays Arrays can be manipulated using arithmetic, logical, or relational operations in an element-wise way. Let's see how these operations are applied, using our arrays `weights_or`, `heights_np`, and and some other arrays that we will create. 7.1. Arithmetic operations We are going to operate the content of the arrays with the four traditional arithmetic operations, addition, subtraction, product, and division. Let's first define our arrays again:
###Code
weights_or
heights_2 = np.array([165, 175, 180, 189, 187, 186])
print('heights_np:', heights_np)
print('heights_2: ', heights_2)
###Output
heights_np: [165 170 171 180 189 178]
heights_2: [165 175 180 189 187 186]
###Markdown
Let's add the content of the two arrays element-wise:
###Code
heights_add = heights_np + heights_2
heights_add
###Output
_____no_output_____
###Markdown
The `np.add()` function allows adding the content of arrays element-wise:
###Code
added = np.add(heights_2, heights_np)
added
###Output
_____no_output_____
###Markdown
Exercise 4Since we have seen how to add element-wise elements of one-dimensional arrays:1. Calculate the subtraction, multiplication and division between the `heights_np` and `heights_2` arrays. Compare the results by using the functions: `np.subtract()`, `np.multiply()`, and `np.divide()`.2. Calculate the product element-wise of the multiplicative inverses ($1/x$) between the arrays `heights_np` and `heights_2`, using numpy functions. For instante, if an element in `heights_np` $x1=5$ and an element in `heights_2` $y1=4$, then the result should be $z=(1/x1)*(1/y1)=1/20=0.05$.
###Code
# Answer subtraction
np.subtract(heights_np, heights_2)
heights_np - heights_2
# Answer multiplication
np.multiply(heights_np, heights_2)
# Answer division
np.divide(heights_np, heights_2)
# Answer multiplicative inverse
np.divide(1, heights_np) * np.divide(1, heights_2)
###Output
_____no_output_____
###Markdown
7.2. Logical operations Logical operations are mathematical expressions whose result is a Boolean value of 0 (False) or 1 (True). Among the most common logical operations are the disjunction `or`, conjunction `and`, and negation `not`operations, among others. Let's see an example:
###Code
x = np.array([True, True, False, False])
y = np.array([True, False, True, False])
np.logical_or(x,y)
np.logical_and(x,y)
np.logical_not(x)
###Output
_____no_output_____
###Markdown
7.3.Comparison - Relational operators The comparison operators allow us to compare the values of the content of `numpy` arrays element-wise. Some operators of interest (a) equality operator `np.equal()`, (b) less than operator `np.less()`/`np.less_equal()`, (c) greater than operator `np.greater()`/`np.greater_equal()`, and (d) difference operator `np.not_equal()`. It is important to note that the output will always be Boolean 0 (False) or 1 (True).Let's see some examples:
###Code
x = np.array([1, 8, 3, 7, 3, 21])
y = np.array([4, 8, 1, 7, 6, 9])
np.equal(x,y)
np.not_equal(x,y)
np.less_equal(x,y)
np.greater_equal(x,y)
np.array_equal(x,y) # Comparing the entire content of both arrays
x = np.array([1, 8, 3, 7, 3, 21])
y = np.array(list((1, 8, 3, 7, 3, 21)))
np.array_equal(x,y) # Comparing the entire content of both arrays
###Output
_____no_output_____
###Markdown
8. Broadcasting `numpy` has the ability of operating arrays of different shapes during arithmetic operations using **broadcasting**, so that arithmetic operations on arrays are done on corresponding elements. The boradcasting operation replicates one of the arrays along the dimensions the other, if there is a mismatch of shapes. Consider the following arrays:
###Code
heights_np = heights_np.reshape((6,1))
heights_np
weights
###Output
_____no_output_____
###Markdown
We are going to add the elements of both arrays:
###Code
broad_np = heights_np + weights
broad_np
###Output
_____no_output_____
###Markdown
Although the arrays have different dimensions, `numpy` makes a sum for the corresponding elements in common rows and columns, in such a way that the elements of the column vector `heights_np` are added with each column of the two-dimensional vector `weights`.Let's look at one more example:
###Code
x = np.ones((3,4))
y = np.random.random((5,1,4))
x
y
z = x + y
z
###Output
_____no_output_____
###Markdown
Here each row in array `y` has been paired with rows in array `x`, since they have the same amount of column. The result is an array with triple the rows of array `y` and the same number of columns. Exercise 5Propose an array `y` such that the operation $x + y$ results in the array `z`.```x = [[14, 15, 18], [62, 90, 98], [71, 73, 90], [40, 24, 17], [11, 81, 14], [26, 81, 31]]z = [[24, 40, 58], [72, 115, 138], [81, 98, 130], [50, 49, 57], [21, 106, 54], [36, 106, 71]]```
###Code
y = np.array([10, 25, 40])
x = np.array([[14, 15, 18],
[62, 90, 98],
[71, 73, 90],
[40, 24, 17],
[11, 81, 14],
[26, 81, 31]])
y + x
###Output
_____no_output_____
###Markdown
9.Matrix multiplication Let's delve into the element-wise and dot product multiplication between matrices (two-dimensional arrays). First, we define two matrices:
###Code
A = np.array([[1,1,8],[0,1,9],[9,0,8]])
print("Matrix A:\n", A, '\n')
B = np.array([[2,0,0],[3,4,9],[7,8,9]])
print('MATRIX B:\n', B, '\n')
###Output
Matrix A:
[[1 1 8]
[0 1 9]
[9 0 8]]
MATRIX B:
[[2 0 0]
[3 4 9]
[7 8 9]]
###Markdown
The element-wise product of the two matrices can be computed with the classic arithmetic operator `*`:
###Code
print("Element wise multiplication:\n", A*B, '\n')
###Output
Element wise multiplication:
[[ 2 0 0]
[ 0 4 81]
[63 0 72]]
###Markdown
The dot product of matrices can be executed with the `@` operator or with the numpy `np.dot()` function:
###Code
print("Matrix product:\n", A@B, '\n') # matrix A = (2 ,3) , matrix B= (3,4), output matrix =( 2,4)
print("Dot product:\n", A.dot(B), '\n')
###Output
Matrix product:
[[61 68 81]
[66 76 90]
[74 64 72]]
Dot product:
[[61 68 81]
[66 76 90]
[74 64 72]]
###Markdown
10. Arrays with `random` numbers A random number is a value drawn from a random variable with a specified distribution function. When no distribution is specified, the continuous uniform distribution on the interval [0,1) is assumed. Some numpy functions for generating random numbers are:- `np.random.random()`: returns random floats in the half-open interval [0.0, 1.0)- `np.random.randint(low, high)`: returns random integers from low (inclusive) to high (exclusive).- `np.random.normal()`: returns random samples from a normal (Gaussian) distribution.Let's see some examples:
###Code
np.random.random((4,3))
np.random.randint(10, 20, size=(2, 4))
np.random.normal(size=10)
###Output
_____no_output_____
###Markdown
We can also specify a `seed`, so that the sequence of random numbers is repeatable (if you execute the following code several times, you will get the same random results):
###Code
from numpy.random import seed
from numpy.random import rand
# Seed random number generator
seed(42)
# Generate random numbers between 0-1
values = rand(10)
print(values)
###Output
[0.37454012 0.95071431 0.73199394 0.59865848 0.15601864 0.15599452
0.05808361 0.86617615 0.60111501 0.70807258]
###Markdown
11. Concatenate and stack `Numpy` arrays **Concatenation** with `np.concatenate()` joins several arrays into one along an existing axis. **Stacking** with `np.stack()` joins arrays along a new axis. Let's dive a little deeper into these concepts with practical examples:
###Code
my_array = np.array([1,2,34,5])
x = np.array([1,4,5,6])
print('x: \t ', x)
print('my_array: ', my_array)
print('Append:\n',np.append(my_array,x))
y = np.append(my_array, x)
# Concatentate `my_array` and `x`
print('\nConcatenate:\n',np.concatenate((my_array,x)))
# Stack arrays vertically (row-wise)
print("Stack row wise:")
print(np.vstack((my_array, x)))
# Stack arrays horizontally
print("Stack horizantally:")
print(np.hstack((my_array,x)))
print("\nAnother way:")
print(np.r_[my_array,x])
# Stack arrays column-wise
print("Stack column wise:")
print(np.column_stack(( my_array,x)))
print("\nColumn wise repeat:")
print(np.c_[ my_array,x])
###Output
Stack column wise:
[[ 1 1]
[ 2 4]
[34 5]
[ 5 6]]
Column wise repeat:
[[ 1 1]
[ 2 4]
[34 5]
[ 5 6]]
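###Markdown
The cells above use `vstack`/`hstack`; as a small added sketch (reusing the `my_array` and `x` defined above), `np.stack` itself joins the same one-dimensional arrays along a brand-new axis, while `np.concatenate` keeps a single axis:
###Code
# added sketch: stacking creates a new axis, concatenating does not
print(np.stack((my_array, x)).shape)          # (2, 4): new leading axis
print(np.stack((my_array, x), axis=1).shape)  # (4, 2): new trailing axis
print(np.concatenate((my_array, x)).shape)    # (8,): same single axis
###Output
_____no_output_____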
###Markdown
As you have seen, when we concatenate arrays we join them on an existing axis, and when we stack arrays we join them on a new axis. 12. Visualize `Numpy` arrays To visualize the content of a numpy array we can use the `matplotlib.pyplot` library, which lets us plot data and its distributions, among many other things.
###Code
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Let's specify an initial state for the Mersenne Twister number generator, a pseudo-random number generator:
###Code
rng = np.random.RandomState(10)
###Output
_____no_output_____
###Markdown
Now we generate random values of two normal distributions with different mean and standard deviation, a distribution of mean 0 and another of mean 5, stacking them in a single array horizontally:
###Code
a = np.hstack((rng.normal(size=1000),rng.normal(loc=5, scale=2, size=1000)))
a
###Output
_____no_output_____
###Markdown
Let's visualize the values drawn from the two distributions in a histogram, with the help of the `matplotlib.pyplot` library, which we have aliased as `plt`:
###Code
plt.hist(a, bins='auto')
plt.title("Histogram")
plt.show()
###Output
_____no_output_____
###Markdown
As can be seen, this graph shows the mixture of the two normal distributions with means 0 and 5. As an additional example, we create a meshgrid with `np.meshgrid()` from a `numpy` array with initial value -5, final value 5 (exclusive) and step 0.01. We then compute `z` as the distance from the origin, $\sqrt{x^2+y^2}$ (whose level sets are circles), to generate the graph shown below:
###Code
# Create an array
points = np.arange(-5, 5, 0.01)
# Make a meshgrid
xs, ys = np.meshgrid(points, points)
z = np.sqrt(xs ** 2 + ys ** 2)
# Display the image on the axes
plt.imshow(z, cmap=plt.cm.Reds)
# Draw a color bar
plt.colorbar()
# Show the plot
plt.show()
###Output
_____no_output_____
###Markdown
13. Save the numpy ndarray object to a file Finally, one of the most important parts of the entire analysis process: storing the results. For plain-text output we can use the `np.savetxt()` function (a sketch of the binary `.npy` route is added in the cell below):
###Code
import numpy as np
x = np.arange(0.0,5.0,1.0)
np.savetxt('test.txt', x, delimiter=',')
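# added sketch (not in the original): the binary .npy format mentioned in the
# section title uses np.save / np.load, and np.loadtxt reads the text file back
np.save('test.npy', x)                          # writes test.npy
x_npy = np.load('test.npy')                     # reloads the array
x_txt = np.loadtxt('test.txt', delimiter=',')   # reads the text file written above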
###Output
_____no_output_____ |
src/.ipynb_checkpoints/01-FNN-checkpoint.ipynb | ###Markdown
Data Preprocessing
###Code
import os
from glob import glob
import numpy as np
import pandas as pd
from PIL import Image
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
import torch.optim as optim
from torchvision import transforms
from torchvision.datasets import ImageFolder
from torchvision.utils import make_grid
# check if machine has gpu
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Running on",device)
# data path that training set is located
path = "../data/fruits/fruits-360/"
# this joins the path + folder and each files e.g. '../data/fruits/fruits-360/Training/Apple Braeburn/115_100.jpg'
files_training = glob(os.path.join(path,'Training', '*/*.jpg'))
num_images = len(files_training)
print('Number of images in Training file:', num_images)
# print how many images we have for each label, plus the minimum and average counts, in a nice format
min_images = 1000
im_cnt = []
class_names = []
print('{:18s}'.format('class'), end='')
print('Count:')
print('-' * 24)
for folder in os.listdir(os.path.join(path, 'Training')):
folder_num = len(os.listdir(os.path.join(path,'Training',folder)))
im_cnt.append(folder_num)
class_names.append(folder)
print('{:20s}'.format(folder), end=' ')
print(folder_num)
num_classes = len(class_names)
print("\nMinumum images per category:", np.min(im_cnt), 'Category:', class_names[im_cnt.index(np.min(im_cnt))])
print('Average number of Images per Category: {:.0f}'.format(np.array(im_cnt).mean()))
print('Total number of classes: {}'.format(num_classes))
# Just to guess pop_mean and pop_std
tensor_transform = transforms.Compose([transforms.ToTensor()])
training_data = ImageFolder(os.path.join(path, 'Training'), tensor_transform)
data_loader = torch.utils.data.DataLoader(training_data, batch_size=512, shuffle=True)
%time
# this part takes a bit long
pop_mean = [0.6840367,0.5786325,0.5037564] # precomputed channel means; originally this started as an empty list filled by the loop below
pop_std = [0.30334985,0.3599262,0.3913685]
# for i, data in tqdm(enumerate(data_loader, 0)):
# numpy_image = data[0].numpy()
# batch_mean = np.mean(numpy_image, axis=(0,2,3))
# batch_std = np.std(numpy_image, axis=(0,2,3))
# pop_mean.append(batch_mean)
# pop_std.append(batch_std)
# pop_mean = np.array(pop_mean).mean(axis=0)
# pop_std = np.array(pop_std).mean(axis=0)
# the loop above is commented out because it is slow to run; the precomputed values above are used instead
print(pop_mean)
print(pop_std)
np.random.seed(123)
shuffle = np.random.permutation(num_images)
# split validation images
split_val = int(num_images * 0.2)
print('Total number of images:', num_images)
print('Number images in validation set:',len(shuffle[:split_val]))
print('Number images in train set:',len(shuffle[split_val:]))
class FruitTrainDataset(Dataset):
def __init__(self, files, shuffle, split_val, class_names, transform=transforms.ToTensor()):
self.shuffle = shuffle
self.class_names = class_names
self.split_val = split_val
self.data = np.array([files[i] for i in shuffle[split_val:]])
self.transform=transform
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
img = Image.open(self.data[idx])
name = self.data[idx].split('/')[-2]
y = self.class_names.index(name)
img = self.transform(img)
return img, y
class FruitValidDataset(Dataset):
def __init__(self, files, shuffle, split_val, class_names, transform=transforms.ToTensor()):
self.shuffle = shuffle
self.class_names = class_names
self.split_val = split_val
self.data = np.array([files[i] for i in shuffle[:split_val]])
self.transform=transform
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
img = Image.open(self.data[idx])
name = self.data[idx].split('/')[-2]
y = self.class_names.index(name)
img = self.transform(img)
return img, y
class FruitTestDataset(Dataset):
def __init__(self, path, class_names, transform=transforms.ToTensor()):
self.class_names = class_names
self.data = np.array(glob(os.path.join(path, '*/*.jpg')))
self.transform=transform
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
img = Image.open(self.data[idx])
name = self.data[idx].split('/')[-2]
y = self.class_names.index(name)
img = self.transform(img)
return img, y
data_transforms = {
'train': transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize(pop_mean, pop_std) # These were the mean and standard deviations that we calculated earlier.
]),
'Test': transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(pop_mean, pop_std) # These were the mean and standard deviations that we calculated earlier.
]),
'valid': transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(pop_mean, pop_std) # These were the mean and standard deviations that we calculated earlier.
])
}
train_dataset = FruitTrainDataset(files_training, shuffle, split_val, class_names, data_transforms['train'])
valid_dataset = FruitValidDataset(files_training, shuffle, split_val, class_names, data_transforms['valid'])
test_dataset = FruitTestDataset("../data/fruits/fruits-360/Test", class_names, transform=data_transforms['Test'])
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=True)
dataloaders = {'train': train_loader,
'valid': valid_loader,
'Test': test_loader}
dataset_sizes = {
'train': len(train_dataset),
'valid': len(valid_dataset),
'Test': len(test_dataset)
}
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
inp = pop_std * inp + pop_mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001)
inputs, classes = next(iter(train_loader))
out = make_grid(inputs)
cats = ['' for x in range(len(classes))]
for i in range(len(classes)):
cats[i] = class_names[classes[i].item()]
imshow(out)
print(cats)
# just to check if shape of train and test sets match
for i,j in zip(train_loader,test_loader):
print(i[0].shape,j[0].shape)
break
###Output
_____no_output_____
###Markdown
Network
###Code
# just to start from a basic NN and observe how it performs on the data
# with horizontal and vertical flips we still have 3x100x100 inputs
# batch size was 64 and was reduced to 32 to get better performance
class Net(nn.Module):
def __init__(self):
super().__init__() # initialize the parent class methods
self.fc1 = nn.Linear(3*100*100, 64)
self.fc2 = nn.Linear(64, 64)
self.fc3 = nn.Linear(64, 64)
self.fc4 = nn.Linear(64, 131)
def forward(self,x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
return F.log_softmax(x,dim=1)
net = Net()
print(net)
# move network to GPU
net = Net().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.7)
# let's train the network, # regular way of training with in sample accuracy
def train(net):
for epoch in tqdm(range(10)):
print("epoch {}".format(epoch))
running_loss = 0.0
correct = 0
total = 0
for i,data in enumerate(train_loader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data[0].to(device), data[1].to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs.view(-1,3*100*100))
# in sample accuracy calculation
_, predicted = torch.max(outputs, 1)
a = predicted == labels
correct += np.count_nonzero(a.cpu())
total += len(a)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 100 == 99: # print every 100 mini-batches
print('[%d, %5d] loss: %.3f, in sample accuracy: %.3f' %(epoch, i + 1, running_loss / 100, correct/total))
running_loss = 0.0
correct = 0
total = 0
print('Finished Training')
train(net)
###Output
_____no_output_____
###Markdown
fnn_net is the first model; the second one is a different architecture; the third one uses a batch size of 32 instead of 64. The trained network was saved with: PATH = "../models/fnn_net_3.pth"; torch.save(net.state_dict(), PATH)
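###Code
# the save call embedded in the note above, written out as a runnable cell
PATH = "../models/fnn_net_3.pth"
torch.save(net.state_dict(), PATH)
###Output
_____no_output_____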
###Code
def test(net):
correct = 0
total = 0
with torch.no_grad():
for data in tqdm(test_loader):
images, labels = data[0].to(device), data[1].to(device)
outputs = net(images.view(-1,3*100*100))
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the test images: %d %%' % (
100 * correct / total))
test(net)
# label wise accuracy
class_correct = list(0. for i in range(131))
class_total = list(0. for i in range(131))
with torch.no_grad():
for data in tqdm(test_loader):
images, labels = data[0].to(device), data[1].to(device)
outputs = net(images.view(-1,3*100*100))
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(len(labels)):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(131):
print('Accuracy of %5s : %2d %%' % (
class_names[i], 100 * class_correct[i] / class_total[i]))
# just to show how saved models can be loaded back
PATH = "../models/fnn_net_3.pth"
net = Net().to(device)
net.load_state_dict(torch.load(PATH))
###Output
_____no_output_____ |
notebooks/api/examples/Protein Atlas.ipynb | ###Markdown
Protein Atlas API methods
###Code
import api_doc
api_doc.get_api_methods_by_tag('Protein Atlas')
###Output
_____no_output_____ |
5_Dataiku_Docker.ipynb | ###Markdown
contents Dataiku Docker
###Code
!mkdir -p dataiku_docker/dataiku/dss
!touch dataiku_docker/Dockerfile
! echo "FROM dataiku/dss" >> dataiku_docker/Dockerfile
!cat dataiku_docker/Dockerfile
###Output
FROM dataiku/dss
###Markdown
http://localhost:8888/edit/dataiku/Dockerfile
###Code
!cd dataiku_docker/ ; docker run -p 10000:10000 -v /Users/Bhill/git/Computer_Vision_Object_Detection/dataiku_docker/dataiku/dss:/home/dataiku/dss -d dataiku/dss
!docker ps -a
!cd dataiku/ ; docker build .
!touch dataiku_docker/docker-compose.yml
###Output
_____no_output_____
###Markdown
http://localhost:8888/edit/docker-compose.yml
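###Markdown
The compose file created above is empty; a minimal sketch of contents consistent with the `docker run` flags used earlier (image `dataiku/dss`, port 10000, and the local `dataiku/dss` volume) could be written from the notebook as below. The service name and file layout are assumptions, not taken from the original project:
###Code
%%writefile dataiku_docker/docker-compose.yml
version: "3"
services:
  dataiku:
    image: dataiku/dss
    ports:
      - "10000:10000"
    volumes:
      - ./dataiku/dss:/home/dataiku/dss
###Output
_____no_output_____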
###Code
!docker-compose up -d
!docker ps -a
!docker stop computer_vision_object_detection_dataiku_1
!docker rm computer_vision_object_detection_dataiku_1
###Output
computer_vision_object_detection_dataiku_1
|
NewYorkCity_taxi_case_stu_Web.ipynb | ###Markdown
New York City Taxi Ride Duration PredictionIn this case study, we will build a predictive model to predict the duration of taxi ride. We will do the following steps: * Install the dependencies * Load the data as pandas dataframe * Define the outcome variable - the variable we are trying to predict. * Build features with Deep Feature Synthesis using the [featuretools](https://featuretools.com) package. We will start with simple features and incrementally improve the feature definitions and examine the accuracy of the system. Allocate at least 2-3 hours to go through this case study end-to-end Install Dependencies If you have not done so already, download this repository from git. Once you have downloaded this archive, unzip it and cd into the directory from the command line. Next run the command ``./install_osx.sh`` if you are on a mac or ``./install_linux.sh`` if you are on linux. This should install all of the dependencies. If you are on a windows machine, open the requirements.txt folder and make sure to install each of the dependencies listed (featuretools, jupyter, pandas, sklearn, numpy) Once you have installed all of the dependencies, open this notebook. On Mac and Linux, navigate to the directory that you downloaded from git and run ``jupyter notebook`` to be taken to this notebook in your default web browser. When you open the NewYorkCity_taxi_case_study.ipynb file in the web browser, you can step through the code by clicking the ``Run`` button at the top of the page. If you have any questions for how to use Jupyter, refer to google or the discussion forum. Running the Code
###Code
import featuretools as ft
import utils
from utils import load_nyc_taxi_data, compute_features, preview, feature_importances
from sklearn.ensemble import GradientBoostingRegressor
from featuretools.primitives import (Weekend, Minute, Hour, Day, Week, Month,
Weekday, Weekend, Count, Sum, Mean, Median, Std, Min, Max)
import numpy as np
ft.__version__
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Step 1: Download and load the raw data as pandas dataframesIf you have not yet downloaded the data it can be downloaded from S3. Once you have downloaded the archive, unzip it and place the nyc-taxi-data folder in the same directory as this script.
###Code
trips, pickup_neighborhoods, dropoff_neighborhoods = load_nyc_taxi_data()
preview(trips, 10)
###Output
_____no_output_____
###Markdown
The ``trips`` table has the following fields* ``id`` which uniquely identifies the trip* ``vendor_id`` is the taxi cab company - in our case study we have data from three different cab companies* ``pickup_datetime`` the time stamp for pickup* ``dropoff_datetime`` the time stamp for drop-off* ``passenger_count`` the number of passengers for the trip* ``trip_distance`` total distance of the trip in miles * ``pickup_longitude`` the longitude for pickup* ``pickup_latitude`` the latitude for pickup* ``dropoff_longitude`` the longitude of dropoff * ``dropoff_latitude`` the latitude of dropoff* ``payment_type`` a numeric code signifying how the passenger paid for the trip. 1= Credit card 2= Cash 3= No charge 4= Dispute 5= Unknown 6= Voided* ``trip_duration`` this is the duration we would like to predict using other fields * ``pickup_neighborhood`` a one or two letter id of the neighborhood where the trip started* ``dropoff_neighborhood`` a one or two letter id of the neighborhood where the trip ended Step 2: Prepare the Data Let's create entities and relationships. The three entities in this data are * trips * pickup_neighborhoods* dropoff_neighborhoods This data has the following relationships* pickup_neighborhoods --> trips (one neighborhood can have multiple trips that start in it. This means pickup_neighborhoods is the ``parent_entity`` and trips is the child entity)* dropoff_neighborhoods --> trips (one neighborhood can have multiple trips that end in it. This means dropoff_neighborhoods is the ``parent_entity`` and trips is the child entity)In [featuretools (automated feature engineering software package)](https://www.featuretools.com/), we specify the list of entities and relationships as follows:
###Code
entities = {
"trips": (trips, "id", 'pickup_datetime' ),
"pickup_neighborhoods": (pickup_neighborhoods, "neighborhood_id"),
"dropoff_neighborhoods": (dropoff_neighborhoods, "neighborhood_id"),
}
relationships = [("pickup_neighborhoods", "neighborhood_id", "trips", "pickup_neighborhood"),
("dropoff_neighborhoods", "neighborhood_id", "trips", "dropoff_neighborhood")]
###Output
_____no_output_____
###Markdown
Next, we specify the cutoff time for each instance of the target_entity, in this case ``trips``.This timestamp represents the last time data can be used for calculating features by DFS. In this scenario, that would be the pickup time because we would like to make the duration prediction using data before the trip starts. For the purposes of the case study, we choose to only select trips that started after January 12th, 2016.
###Code
cutoff_time = trips[['id', 'pickup_datetime']]
cutoff_time = cutoff_time[cutoff_time['pickup_datetime'] > "2016-01-12"]
preview(cutoff_time, 10)
###Output
_____no_output_____
###Markdown
**Comments:*** The data contains trips with pickup_datetime from 2016/01/01 to 2016/06/30* Cutoff Time criteria: only select trips that started after January 12th, 2016* Cutoff times make sense. They are needed to make the duration prediction using data before the trip starts; that is why the pickup_datetime timestamp is used as cutoff times Step 3: Create baseline features using Deep Feature Synthesis Instead of manually creating features, such as "month of pickup datetime", we can let DFS come up with them automatically. It does this by * interpreting the variable types of the columns e.g. categorical, numeric and others * matching the columns to the primitives that can be applied to their variable types* creating features based on these matches Create transform features using transform primitives As we described in the video, features fall into two major categories, ``transform`` and ``aggregate``. In featuretools, we can create transform features by specifying ``transform`` primitives. Below we specify a ``transform`` primitive called ``weekend`` and here is what it does:* It can be applied to any ``datetime`` column in the data. * For each entry in the column, it assesses if it is a ``weekend`` and returns a boolean. In this specific data, there are two ``datetime`` columns ``pickup_datetime`` and ``dropoff_datetime``. The tool automatically creates features using the primitive and these two columns as shown below.
###Code
trans_primitives = [Weekend]
features = ft.dfs(entities=entities,
relationships=relationships,
target_entity="trips",
trans_primitives=trans_primitives,
agg_primitives=[],
ignore_variables={"trips": ["pickup_latitude", "pickup_longitude",
"dropoff_latitude", "dropoff_longitude"]},
features_only=True)
###Output
_____no_output_____
###Markdown
*If you're interested about parameters to DFS such as `ignore_variables`, you can learn more about these parameters [here](https://docs.featuretools.com/generated/featuretools.dfs.htmlfeaturetools.dfs)*Here are the features created.
###Code
print "Number of features: %d" % len(features)
features
###Output
Number of features: 13
###Markdown
Now let's compute the features.
###Code
feature_matrix = compute_features(features, cutoff_time)
preview(feature_matrix, 5)
###Output
_____no_output_____
###Markdown
Step 4: Build the Model To build a model, we* Separate the data into a portion for ``training`` (75% in this case) and a portion for ``testing`` * Get the log of the trip duration so that a more linear relationship can be found.* Train a model using a ``GradientBoostingRegressor``
###Code
# separates the whole feature matrix into train data feature matrix,
# train data labels, and test data feature matrix
X_train, y_train, X_test, y_test = utils.get_train_test_fm(feature_matrix,.75)
y_train = np.log(y_train+1)
y_test = np.log(y_test+1)
model = GradientBoostingRegressor(verbose=True)
model.fit(X_train, y_train)
model.score(X_test, y_test)
###Output
Iter Train Loss Remaining Time
1 0.4925 1.83m
2 0.4333 1.82m
3 0.3843 1.82m
4 0.3446 1.82m
5 0.3119 1.78m
6 0.2852 1.76m
7 0.2634 1.75m
8 0.2454 1.72m
9 0.2305 1.69m
10 0.2183 1.68m
20 0.1666 1.47m
30 0.1558 1.26m
40 0.1514 1.04m
50 0.1488 49.64s
60 0.1472 38.92s
70 0.1458 28.50s
80 0.1448 18.70s
90 0.1440 9.21s
100 0.1433 0.00s
###Markdown
Step 5: Adding more Transform Primitives* Add ``Minute``, ``Hour``, ``Week``, ``Month``, ``Weekday`` , etc primitives* All these transform primitives apply to ``datetime`` columns
###Code
trans_primitives = [Minute, Hour, Day, Week, Month, Weekday, Weekend]
features = ft.dfs(entities=entities,
relationships=relationships,
target_entity="trips",
trans_primitives=trans_primitives,
agg_primitives=[],
ignore_variables={"trips": ["pickup_latitude", "pickup_longitude",
"dropoff_latitude", "dropoff_longitude"]},
features_only=True)
print "Number of features: %d" % len(features)
features
###Output
Number of features: 25
###Markdown
Now let's compute the features.
###Code
feature_matrix = compute_features(features, cutoff_time)
preview(feature_matrix, 10)
###Output
_____no_output_____
###Markdown
Step 6: Build the new model
###Code
# separates the whole feature matrix into train data feature matrix,
# train data labels, and test data feature matrix
X_train, y_train, X_test, y_test = utils.get_train_test_fm(feature_matrix,.75)
y_train = np.log(y_train+1)
y_test = np.log(y_test+1)
model = GradientBoostingRegressor(verbose=True)
model.fit(X_train,y_train)
model.score(X_test,y_test)
###Output
Iter Train Loss Remaining Time
1 0.4925 2.44m
2 0.4333 2.42m
3 0.3843 2.38m
4 0.3444 2.36m
5 0.3117 2.35m
6 0.2848 2.36m
7 0.2620 2.32m
8 0.2435 2.30m
9 0.2282 2.27m
10 0.2152 2.23m
20 0.1588 2.02m
30 0.1415 1.71m
40 0.1332 1.42m
50 0.1283 1.14m
60 0.1252 52.78s
70 0.1227 38.70s
80 0.1207 25.44s
90 0.1191 12.51s
100 0.1177 0.00s
###Markdown
Step 7: Add Aggregation PrimitivesNow let's add aggregation primitives. These primitives will generate features for the parent entities ``pickup_neighborhoods``, and ``dropoff_neighborhood`` and then add them to the trips entity, which is the entity for which we are trying to make prediction.
###Code
trans_primitives = [Minute, Hour, Day, Week, Month, Weekday, Weekend]
aggregation_primitives = [Count, Sum, Mean, Median, Std, Max, Min]
features = ft.dfs(entities=entities,
relationships=relationships,
target_entity="trips",
trans_primitives=trans_primitives,
agg_primitives=aggregation_primitives,
ignore_variables={"trips": ["pickup_latitude", "pickup_longitude",
"dropoff_latitude", "dropoff_longitude"]},
features_only=True)
print "Number of features: %d" % len(features)
features
feature_matrix = compute_features(features, cutoff_time)
preview(feature_matrix, 10)
###Output
_____no_output_____
###Markdown
Step 8: Build the new model
###Code
# separates the whole feature matrix into train data feature matrix,
# train data labels, and test data feature matrix
X_train, y_train, X_test, y_test = utils.get_train_test_fm(feature_matrix,.75)
y_train = np.log(y_train+1)
y_test = np.log(y_test+1)
# note: this may take up to 30 minutes to run
model = GradientBoostingRegressor(verbose=True)
model.fit(X_train, y_train)
###Output
Iter Train Loss Remaining Time
1 0.4925 6.12m
2 0.4333 5.95m
3 0.3843 5.87m
4 0.3444 5.85m
5 0.3117 5.77m
6 0.2848 5.68m
7 0.2620 5.57m
8 0.2435 5.51m
9 0.2282 5.45m
10 0.2152 5.37m
20 0.1585 4.76m
30 0.1420 4.04m
40 0.1332 3.36m
50 0.1271 2.72m
60 0.1238 2.13m
70 0.1211 1.58m
80 0.1191 1.05m
90 0.1176 31.11s
100 0.1163 0.00s
###Markdown
Step 9: Evalute on test data
###Code
model.score(X_test,y_test)
###Output
_____no_output_____
###Markdown
we can also make predictions using our model
###Code
y_pred = model.predict(X_test)
y_pred = np.exp(y_pred) - 1 # undo the log we took earlier
y_pred[5:]
###Output
_____no_output_____
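###Markdown
To put these predictions in context, one quick additional check (a sketch using the arrays already defined above, not part of the original case study) is the mean absolute error in the original trip_duration units:
###Code
from sklearn.metrics import mean_absolute_error
# compare predictions (already back-transformed) against the true durations
mae = mean_absolute_error(np.exp(y_test) - 1, y_pred)
print("Mean absolute error: %.1f" % mae)
###Output
_____no_output_____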
###Markdown
Additional Analysis Let's look at how important each feature was for the model.
###Code
feature_importances(model, feature_matrix.columns, n=15)
###Output
1: Feature: trip_distance, 0.314
2: Feature: HOUR(pickup_datetime), 0.126
3: Feature: HOUR(dropoff_datetime), 0.089
4: Feature: WEEKDAY(pickup_datetime), 0.052
5: Feature: dropoff_neighborhoods.latitude, 0.046
6: Feature: dropoff_neighborhoods.longitude, 0.036
7: Feature: dropoff_neighborhoods.STD(trips.trip_distance), 0.027
8: Feature: dropoff_neighborhoods.MIN(trips.passenger_count), 0.022
9: Feature: dropoff_neighborhoods.MEDIAN(trips.trip_duration), 0.022
10: Feature: pickup_neighborhoods.MEDIAN(trips.trip_distance), 0.021
11: Feature: IS_WEEKEND(pickup_datetime), 0.021
12: Feature: WEEKDAY(dropoff_datetime), 0.020
13: Feature: WEEK(pickup_datetime), 0.019
14: Feature: dropoff_neighborhoods.MEAN(trips.trip_duration), 0.019
15: Feature: MONTH(dropoff_datetime), 0.018
|
2_2_ReverseAD_submit_29299675.ipynb | ###Markdown
Part 3: Reverse Mode Automatic DifferentiationDynamic Reverse mode AD can be implemented by declaring a class to represent a value and the child expressions that the value depends on. We've provided the implementation that was shown in the lecture slides as a basis below, but it's missing some parts that will make it useful.__Tasks:__- Addition (`__add__`) is incomplete - can you finish it? - Can you also implement division (`__truediv__`), subtraction (`__sub__`) and power (`__pow__`)?
###Code
import math
class Var:
def __init__(self, value):
self.value = value
self.children = []
self.grad_value = None
def grad(self):
if self.grad_value is None:
self.grad_value = sum(weight * var.grad()
for weight, var in self.children)
return self.grad_value
def __str__(self):
return str(self.value)
def __mul__(self, other):
z = Var(self.value * other.value)
self.children.append((other.value, z))
other.children.append((self.value, z))
return z
def __add__(self, other):
#TODO: finish me
# YOUR CODE HERE
z = Var(self.value + other.value)
self.children.append((1, z))
other.children.append((1, z))
return z
def __truediv__(self, other):
z = Var(self.value / other.value)
self.children.append((1/other.value, z))
other.children.append((-self.value/other.value**2, z))
return z
def __sub__(self, other):
z = Var(self.value - other.value)
self.children.append((1, z))
other.children.append((-1, z))
return z
    def __pow__(self, other):
        # supports both Var exponents and plain numbers (e.g. x ** 2 below)
        if isinstance(other, Var):
            z = Var(self.value ** other.value)
            self.children.append((other.value * self.value ** (other.value - 1), z))
            other.children.append((z.value * math.log(self.value), z))
        else:
            z = Var(self.value ** other)
            self.children.append((other * self.value ** (other - 1), z))
        return z
# TODO: add missing methods
# YOUR CODE HERE
# Tests
a=Var(1) + Var(1) / Var(1) - Var(1)**Var(1)
print(a)
###Output
1.0
###Markdown
Implementing math functionsJust like when we were looking at Forward Mode AD, we also need to implement some core math functions. Here's the sine function for a `Var`:
###Code
def sin(x):
z = Var(math.sin(x.value))
x.children.append((math.cos(x.value), z))
return z
###Output
_____no_output_____
###Markdown
__Task:__ can you implement the _cosine_ (`cos`), _tangent_ (`tan`), and _exponential_ (`exp`) functions in the code block below?
###Code
# TODO: implement additional math functions on dual numbers
def cos(x):
# YOUR CODE HERE
z=Var(math.cos(x.value))
x.children.append((-math.sin(x.value),z))
return z
def tan(x):
# YOUR CODE HERE
z=Var(math.tan(x.value))
x.children.append((1/math.cos(x.value)**2, z))
return z
def exp(x):
# YOUR CODE HERE
z=Var(math.exp(x.value))
x.children.append((math.exp(x.value),z))
return z
# Tests
assert cos(Var(0)).value == 1
assert tan(Var(0)).value == 0
assert exp(Var(0)).value == 1
###Output
_____no_output_____
###Markdown
Time to try it outWe're now in a position to try our implementation.__Tasks:__ - Try running the following code to compute the value of the function $z=x\cdot y+sin(x)$ given $x=0.5$ and $y=4.2$, together with the derivative $\partial z/\partial x$ at that point. - Verify that the result is correct by hand-differentiating the function.
###Code
x = Var(0.5)
y = Var(4.2)
z = x * y + sin(x)
print('z:', z)
z.grad_value = 1.0 #Note that we have to 'seed' the gradient of z to 1 (e.g. ∂z/∂z=1) before computing grads
print('∂z/∂x:',x.grad())
###Output
z: 2.579425538604203
∂z/∂x: 5.077582561890373
###Markdown
__Task:__ Now use the code block below to compute the derivative $\partial z/\partial y$ of the above expression (at the same point $x=0.5, y=4.2$ as above). Store the resultant gradient in the variable `dzdy`. Verify by hand that the result is correct.
###Code
# YOUR CODE HERE
# raise NotImplementedError()
dzdy=y.grad()
print('∂z/∂y:', dzdy)
###Output
∂z/∂y: 0.5
###Markdown
**Answer**:$\partial z/\partial y = x$ at point $x=0.5$ any $y=4.2$ is $0.5$
###Code
assert dzdy == 0.5
###Output
_____no_output_____
###Markdown
Differentiating AlgorithmsNow, let's look at doing something wacky: differentiate an algorithm. For this example, we'll use an algorithm that is in a sense static (in this particular case the upper limit of the for loop is predetermined). However, it is not difficult to see that AD is much more general, and could even be applied to stochastic algorithms (say if we replaced the upper limit of the loop below with `Math.floor(Math.random() * 10)` for example).__Task:__ Consider the following algorithm and in the box below it manually compute the value of $z$ and the gradient $\partial z/\partial x$ at the end of execution.
###Code
x = Var(0.5)
z = Var(1)
for i in range(0,2):
z = (z + Var(i)) * x * x
print("z:",z)
###Output
z: 0.25
z: 0.3125
###Markdown
When $i=0$ (with $z=1$ going in): $z = z\cdot x^2=0.25$ and $\partial z/\partial x =2xz=1.0$. When $i=1$ (with $z=0.25$ going in): $z=(z+1)x^2=0.3125$ and $\partial z/\partial x =2x\cdot 1.25=1.25$. __Task__: Now use the code block below to print out the gradient computed by our reverse AD by storing the result in a variable called `grad`. Does it match?
###Code
# YOUR CODE HERE
# raise NotImplementedError()
x = Var(0.5)
z = Var(1)
for i in range(0,2):
x = Var(0.5)
z = (z + Var(i)) * x * x
z.grad_value = 1.0
print("z:",z)
print("∂z/∂x:",x.grad())
grad=x.grad()
print("∂z/∂x:",grad)
# Tests
assert grad == 1.25
###Output
_____no_output_____
###Markdown
__Task:__ Finally, use the code block below to experiment and test the other math functions and methods you created.
###Code
x = Var(0.5)
z = x * x
print('z:', z)
z.grad_value = 1.0
print('∂z/∂x:',x.grad())
x = Var(0.5)
z = x ** 2
print('z:', z)
z.grad_value = 1.0
print('∂z/∂x:',x.grad())
x = Var(2)
y = Var(4)
z = x/y
print('z:', z)
z.grad_value = 1.0
print('∂z/∂x:',x.grad())
print('∂z/∂y:',y.grad())
x = Var(2)
y = Var(4)
z = x-y
print('z:', z)
z.grad_value = 1.0
print('∂z/∂x:',x.grad())
print('∂z/∂y:',y.grad())
x = Var(0.5)
z = cos(x)
print('z:', z)
z.grad_value = 1.0
print('∂z/∂x:',x.grad())
x = Var(0.5)
z = tan(x)
print('z:', z)
z.grad_value = 1.0
print('∂z/∂x:',x.grad())
x = Var(0.5)
z = exp(x)
print('z:', z)
z.grad_value = 1.0
print('∂z/∂x:',x.grad())
###Output
z: 1.6487212707001282
∂z/∂x: 1.6487212707001282
###Markdown
compute the value of the function $z=x/y+x^2+y^2+cos(x)+tan(x)-exp(x)$ given $x=1$ and $y=2$, together with the derivative $\partial z/\partial x$ and $\partial z/\partial y$ at that point.
###Code
# YOUR CODE HERE
# raise NotImplementedError()
x = Var(1)
y = Var(2)
z = x/y + x*x+ y*y+ cos(x)+ tan(x) - exp(x)
print('z:', z)
z.grad_value = 1.0
print('∂z/∂x:',x.grad())
print('∂z/∂y:',y.grad())
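# added hand check (not in the original): differentiating z = x/y + x^2 + y^2 + cos(x) + tan(x) - exp(x)
# dz/dx = 1/y + 2x - sin(x) + 1/cos(x)**2 - exp(x) ~ 0.5 + 2 - 0.8415 + 3.4255 - 2.7183 ~ 2.366 at x=1, y=2
# dz/dy = -x/y**2 + 2y = -0.25 + 4 = 3.75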
###Output
_____no_output_____ |
hajipata/chapter06/multiclass_classification.ipynb | ###Markdown
multiclass classification
###Code
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
def softmax(v):
return np.exp(v) / np.sum(np.exp(v))
# y: vector
def one_hot_encoder(y):
num_data = y.size
num_class = np.max(y) + 1
Y_one_hot = np.zeros((num_data, num_class))
Y_one_hot[np.arange(num_data), y] = 1
return (Y_one_hot, num_class)
# scalar
def cost(X, Y, W):
wx = np.dot(W, X.T) # k x m
normal_each_example = sum(np.exp(wx)) # 1 x m
hot_wx = wx * Y.T # k x m
numerators = sum(np.exp(hot_wx)) # 1 x m
pi_of_hot_class = numerators / normal_each_example # 1 x m
return -sum(np.log(pi_of_hot_class))
def cost_2(X, Y, W):
wx = np.dot(W, X.T) # k x m
e1 = -np.sum(wx * Y.T) # scalar
s_each_example = np.sum(np.exp(wx), axis = 0) # 1 x m
e2 = +sum(np.log(s_each_example)) # scalar
return e1 + e2
# matrix, its dimension = dim W (k x (n + 1))
def grad_cost(X, Y, W):
wx = np.dot(W, X.T) # k x m
exp_wx = np.exp(wx) # k x m
normal_each_example = sum(exp_wx) # 1 x m
pi_matrix = exp_wx / normal_each_example # k x m, columnwise div
return np.dot((pi_matrix - Y.T), X)
def gradient_decent(X, Y, W, cost_func, grad_cost_func, accuracy_func, step, alpha, epsilon):
costList = []
accuracyList = []
for i in range(step):
costList.append(cost_func(X, Y, W))
accuracyList.append(accuracy_func(X, Y, W, classify))
grad = grad_cost_func(X, Y, W)
W = W - alpha * grad
return W, costList, accuracyList
def classify(W, X):
wx = np.dot(X, W.T) # m x k
return np.argmax(wx, axis = 1) # m x 1
# x: examples
# y: classes
def plot_scatter_2d(x, y, i, j, feature_names = []):
plt.clf()
for k in np.unique(y):
x_k = x[np.argwhere(y == k).flatten()]
plt.scatter(x_k[:, i], x_k[:, j])
if feature_names != []:
plt.xlabel(feature_names[i])
plt.ylabel(feature_names[j])
plt.show()
def divide_training_test_data(X, Y, training_data_rate):
y = np.argmax(Y, axis = 1)
(num_data, num_feature) = X.shape
num_class = np.unique(y).size
sort_indeces = np.arange(num_data)
np.random.shuffle(sort_indeces)
X_sorted = X[sort_indeces]
Y_sorted = Y[sort_indeces]
y_sorted = y[sort_indeces]
X_train = np.empty((0, num_feature))
Y_train = np.empty((0, num_class))
X_test = np.empty((0, num_feature))
Y_test = np.empty((0, num_class))
for k in np.unique(y):
indeces_k = np.argwhere(y == k).flatten()
num_data_k = indeces_k.size
X_k = X_sorted[indeces_k]
Y_k = Y_sorted[indeces_k]
num_train_data_k = np.floor(num_data_k * training_data_rate).astype(int)
X_train = np.vstack((X_train, X_k[:num_train_data_k, :]))
Y_train = np.vstack((Y_train, Y_k[:num_train_data_k, :]))
X_test = np.vstack((X_test, X_k[num_train_data_k:, :]))
Y_test = np.vstack((Y_test, Y_k[num_train_data_k:, :]))
return (X_train, Y_train, X_test, Y_test)
def accuracy(X, Y, W, classify_func):
classified = classify_func(W, X)
is_true_list = classified == np.argmax(Y, axis = 1)
return np.argwhere(is_true_list).size / is_true_list.size
def normalize(X):
(num_data, num_feature) = X.shape
mu_X = sum(X) / num_data
sigma_X = sum((X - mu_X) ** 2) / num_data
return (X - mu_X) / sigma_X
# n: number of features
# m: number of examples
# k: number of classes
# X: m x (n + 1) matrix, each row is an example data, first column is bias (= 1)
# Y: m x k matrix
# W: k x (n + 1) matrix, parameters we want to know
iris = load_iris()
x = iris.data
y = iris.target
(m, n) = x.shape
#plot_scatter_2d(x, y, 2, 3, feature_names = iris.feature_names)
X = np.c_[np.ones((m, 1)), x]
#X = np.c_[np.ones((m, 1)), normalize(x)]
(Y, k) = one_hot_encoder(y)
# initial parameters
W = np.random.rand(k, n + 1) / 10
training_data_rate = 0.8
(X_train, Y_train, X_test, Y_test) = divide_training_test_data(X, Y, training_data_rate)
W, costList, accuracyList = gradient_decent(
X_train,
Y_train,
W,
cost_func = cost,
grad_cost_func = grad_cost,
accuracy_func = accuracy,
step = 1000,
alpha = 0.0005,
epsilon = 0)
# plot cost
plt.clf()
plt.plot(costList)
plt.title("cost")
plt.xlabel("step")
plt.ylabel("cost")
plt.show()
#plot accuracy
plt.clf()
plt.plot(accuracyList)
plt.title("accuracy")
plt.xlabel("step")
plt.ylabel("accuracy")
plt.show()
accuracy_train = accuracy(X_train, Y_train, W, classify)
accuracy_test = accuracy(X_test, Y_test, W, classify)
print("=========================")
print("number of examples: m = {}".format(m))
print("number of features: n = {}".format(n))
print("number of classes: k = {}".format(k))
print("accuracy of training data: {}".format(accuracy_train))
print("accuracy of test data: {}".format(accuracy_test))
###Output
_____no_output_____ |
SNstats.ipynb | ###Markdown
Poisson distribution The Poisson distribution is a discrete distribution of the number of events in a given time interval, given the mean number of events $\mu$ for that interval. The probability mass function is:$$Pr(x;\mu)=\frac{\mu ^x e^{-\mu}}{x!}$$The expected value and the variance of the distribution are:\begin{align}E[x]=\mu && var[x]=\sigma ^2=\mu\end{align} Example In a football match, $2.5$ goals are scored on average. What is the probability that $x$ goals are scored?
###Code
xx=np.linspace(0,8,9,dtype=int)
pr=stats.poisson.pmf(xx,mu=2.5)
plt.bar(xx,pr)
# for mu in np.linspace(0.5,2.5,4):
# pr=stats.poisson(mu).pmf(xx)
# plt.plot(xx,pr,label='$\mu = {:.2f}$'.format(mu))
plt.legend()
###Output
No handles with labels found to put in legend.
###Markdown
We will use the Poisson distribution to study the SN observed from Earth. From 185 AD until now (2019), 12 SN have been observed by naked eye (from Wikipedia).
###Code
T=2019-185
N=12
r=N/T
print(f'Rate of SN per year {r:.3}')
###Output
Rate of SN per year 0.00654
###Markdown
What is the probability of seeing zero, one or two SN in one year, given this rate?
###Code
xx=np.arange(0,3,1,dtype=int)
pr=stats.poisson(r).pmf(xx)
plt.bar(xx,pr,label='$\mu = {:.4f}$'.format(r))
plt.yscale('log');plt.legend()
###Output
_____no_output_____
###Markdown
It seems that we have not seen a SN since 1604; what is the probability of this happening?
###Code
xx=np.arange(0,6,1,dtype=int)
pr=stats.poisson(r*(2019-1604)).pmf(xx)
plt.bar(xx,pr,label='$\mu = {:.2f}$'.format(r*(2019-1604)))  # mu here is the rate times the 415-year window
#plt.yscale('log');
plt.legend()
plt.annotate('No SN from 1604 \n unitl 2019',
xy=(0, 0.07), xycoords='data',
xytext=(0., 0.2),
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='top')
###Output
_____no_output_____
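###Markdown
The bar at $x=0$ can also be checked in closed form (an added sanity check): with the rate $r \approx 0.0065$ per year estimated above and $T = 2019-1604 = 415$ years, $$P(0)=e^{-rT}\approx e^{-2.72}\approx 0.066$$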
###Markdown
Bayesian Inference
###Code
snt= np.array([185,369,386,393,437,827,902,1006,1054,1181,1572,1604])
tt= np.arange(185,2020,1)
sn=np.zeros(tt.shape)
for i,t in enumerate(tt):
if t in snt:
sn[i]=1
plt.plot(tt,sn)
plt.xlabel('Year');plt.ylabel('SN')
import emcee
import corner
from scipy.special import factorial
def norm(x,x0,s):
    return np.exp(-(x-x0)**2/(2*s**2))/np.sqrt(2*np.pi*s**2)  # Gaussian density (the minus sign in the exponent was missing)
def lnlike(theta, t, N):
r=theta
    return np.sum(np.log(r)*N - r - np.log(factorial(N)))  # Poisson log-likelihood; note log(N!), not N! (constant in r)
def lnprior(theta):
r=theta
if 0 < r < 1:
return 0#np.log(norm(r,1/50/2,1/50/4))
return -np.inf
def lnprob(theta, t, N):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, t, N)
ndim, nwalkers = 1, 256
r0=1e-2
pos = [[np.random.uniform(1e-7,1e-2)] for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(tt, sn))
res=sampler.run_mcmc(pos, 500)
samples = sampler.chain[:, 50:, :].reshape((-1, ndim))
fig = corner.corner(samples, labels=["r"],truths=[N/T])
###Output
_____no_output_____
###Markdown
Did something change in the SN rate from 185 until now?
###Code
def lnlike(theta, t, N):
r1,r2,tau=theta
    return np.nansum(np.where(t<tau, N*np.log(r1)-r1-np.log(factorial(N)), N*np.log(r2)-r2-np.log(factorial(N))))  # piecewise Poisson log-likelihood with change point tau
def lnprior(theta):
r1,r2,tau=theta
if (0 < r1 < 0.06) and (0 < r2 < 0.06) and (185 < tau < 2019):
return 0#np.log(norm(r,1/50/2,1/50/4))
return -np.inf
def lnprob(theta, t, N):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, t, N)
ndim, nwalkers = 3, 512
p0=[N/T,N/T,1000]
p0mi=[0,0,400]
p0ma=[0.1,0.1,2010]
pos = [np.random.uniform(p0mi,p0ma) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(tt, sn))
res=sampler.run_mcmc(pos, 5000)
samples = sampler.chain[:, 50:, :].reshape((-1, ndim))
fig = corner.corner(samples, labels=["r1",'r2','t'],truths=[12/T,12/T,1000])
disaster_data = np.array([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, np.nan, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, np.nan, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])
years = np.arange(1851, 1962)
plt.plot(years,disaster_data,'o')
from scipy.special import factorial
factorial([1,2,3])
lnlike([3,2,1940],years, disaster_data)
theta=np.random.uniform(p0mi,p0ma)
r1,r2,tau=theta
t=years
N=disaster_data
print(t[t<tau],N[t<tau]*np.log(r1)-r1-factorial(N[t<tau]))
np.nansum(np.where(t<tau,N*np.log(r1)-r1-factorial(N),N*np.log(r2)-r2-factorial(N)))
lnlike([5.68818417e-02, 1.84081966e+00, 1.85987260e+03],years, disaster_data)
def lnlike(theta, t, N):
r1,r2,tau=theta
#return np.sum(np.where(t<tau,np.log(r1**N*np.exp(-r1)/factorial(N)),np.log(r2**N*np.exp(-r2)/factorial(N))))
    return np.nansum(np.where(t<tau, N*np.log(r1)-r1-np.log(factorial(N)), N*np.log(r2)-r2-np.log(factorial(N))))  # same change-point Poisson log-likelihood as above
def lnprior(theta):
r1,r2,tau=theta
if (0 < r1 < 10) and (0 < r2 < 10) and (1851 < tau < 1962):
return 0
return -np.inf
def lnprob(theta, t, N):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, t, N)
ndim, nwalkers = 3, 1024
p0=[4,2,1890]
p0mi=[0,0,1855]
p0ma=[7,7,1960]
pos = [np.random.uniform(p0mi,p0ma) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(years, disaster_data))
sampler.run_mcmc(pos, 6000);
samples = sampler.chain[:, 50:, :].reshape((-1, ndim));
fig = corner.corner(samples, labels=["r1",'r2','t'],truths=p0)
pars=np.quantile(samples[100:,:],[0.5],axis=0).T
plt.plot(years,disaster_data,'o')
plt.plot(years[years<pars[2]],pars[0]*np.ones(years[years<pars[2]].shape[0]))
plt.plot(years[years>pars[2]],pars[1]*np.ones(years[years>pars[2]].shape[0]))
pars
###Output
_____no_output_____ |
notebook/RMarkdown_En.ipynb | ###Markdown
Group 5- **Le Thai** - *2170083*- **Tran Quang** - *2070426*- **Le Thai Duy** - *2070406*- **Le Nhu Chien** - *1970289*- **Phan Van Trung** - *2170440* 1. Data Collection 1.1. Description All data is collected from the HopAmChuan store in the Data Warehouse; the purpose is to analyze each user's playlist and recommend similar songs that the user might like. The Data Warehouse has the Entity Relationship Diagram shown below. Entity Relationship Diagram Pivot Table: aggregates the individual items to get the table for a specific user. SongID bollero ballad blue bossanova rock chachacha fox rhumba boston disco pop slow slowrock tango valse 19112410222111000 0 00 0 7073 1 5000000000 0 40 0 769 232205000110 9570 0 4618 1258132230047411614 94603910100301310 2100 0 3368732330021437113 21 0
###Code
require(FactoMineR)
file = 'https://hcmuteduvn-my.sharepoint.com/:t:/g/personal/tquang_sdh20_hcmut_edu_vn/EammePVOfsFGmUFLgxms85sBSGcCGzZVHMQ5k-YMKAmiiQ?download=1'
raw_dat = read.csv(file,header=T,row.names=1)
###Output
_____no_output_____
###Markdown
Correlation Analysis (CA)
###Code
#This line is used for JupyterNotebook Only to Zoom In the Graph
options(repr.plot.width = 8, repr.plot.height = 6, repr.plot.res = 200)
#R-Code-Lines:
res.dat<-CA(raw_dat)
res.dat$eig
###Output
Warning message:
"ggrepel: 30 unlabeled data points (too many overlaps). Consider increasing max.overlaps"
|
Bird_Sound_Recognition_Executable_Version.ipynb | ###Markdown
Load saved model
###Code
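# Imports assumed by the cells below; they are not defined anywhere in this
# executable version, so they are collected here (an addition, not original code):
import gc
import time
from time import sleep
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display
import keras
from keras.preprocessing.image import load_img, img_to_array
# `vgg16` used in test_single_image() is likewise assumed to be a pre-trained
# VGG16 feature extractor (include_top=False) loaded in the original notebook.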
model = keras.models.load_model('D:/C Drive Documents/Bird_Sound_Recognition/My_Model')
###Output
_____no_output_____
###Markdown
Testing on new images
###Code
def removeSilence(signal):
return signal[librosa.effects.split(signal)[0][0] : librosa.effects.split(signal)[0][-1]]
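# Spectrogram settings used by mel_spectogram_generator below. In this executable
# version they are only set inside predict_bird_sound(), which would leave them
# undefined here, so the same values are assumed at module level:
N_FFT = 1024
HOP_SIZE = 1024
N_MELS = 128
WIN_SIZE = 1024
WINDOW_TYPE = 'hann'
FEATURE = 'mel'
FMIN = 1400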
def mel_spectogram_generator(audio_name,signal,sample_rate,augmentation,target_path):
S = librosa.feature.melspectrogram(y=signal,sr=sample_rate,
n_fft=N_FFT,
hop_length=HOP_SIZE,
n_mels=N_MELS,
htk=True,
fmin=FMIN,
fmax=sample_rate/2)
plt.figure(figsize=(10, 4))
librosa.display.specshow(librosa.power_to_db(S**2,ref=np.max), fmin=FMIN,y_axis='linear')
plt.axis('off')
plt.savefig(target_path + augmentation + audio_name[:-4] + '.png',bbox_inches='tight',transparent=True, pad_inches=0)
plt.clf()
plt.close("all")
gc.collect()
def read_image(file_path):
print("[INFO] loading and preprocessing image...")
image = load_img(file_path, target_size=(558, 217))
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
image /= 255.
return image
def test_single_image(path):
birds = ['AshyPrinia',
'AsianKoel',
'BlackDrongo',
'CommonMyna',
'CommonTailorbird',
'GreaterCoucal',
'GreenBee-eater',
'IndianRobin',
'LaughingDove',
'White-throatedKingfisher']
images = read_image(path)
time.sleep(.5)
bt_prediction = vgg16.predict(images)
preds = model.predict_proba(bt_prediction)
for idx, bird, x in zip(range(0,10), birds , preds[0]):
print("ID: {}, Label: {} {}%".format(idx, bird, round(x*100,2) ))
print('Final Decision:')
time.sleep(.5)
for x in range(3):
print('.'*(x+1))
time.sleep(.2)
class_predicted = model.predict_classes(bt_prediction)
for idx, bird, x in zip(range(0,10), birds , preds[0]):
if idx == class_predicted[0]:
print("ID: {}, Label: {}".format(class_predicted[0], bird))
return load_img(path)
def predict_bird_sound(source_path,file_name, target_path = 'D:/'):
N_FFT = 1024
HOP_SIZE = 1024
N_MELS = 128
WIN_SIZE = 1024
WINDOW_TYPE = 'hann'
FEATURE = 'mel'
FMIN = 1400
augmentation = ''
signal, sample_rate = librosa.load(source_path + file_name,sr = None)
DNsignal = removeSilence(signal)
mel_spectogram_generator(file_name,DNsignal,sample_rate,'',target_path)
path = target_path + augmentation + file_name[:-4] + '.png'
test_single_image(path)
print("BIRD SOUND RECOGNITION APP - By Karthik Mandapaka")
sleep(1)
print("Welcome")
sleep(2)
while(1):
source_path = input("Please enter Source path: ")
sleep(2)
file_name = input("Please enter the audio file name: ")
sleep(2)
print("Recognizing bird sound")
sleep(0.5)
print('.')
sleep(0.5)
print('..')
sleep(0.5)
print('...')
predict_bird_sound(source_path,file_name)
cont = input("Do you want to identify another bird sound?(Enter 1 for Yes or 0 for No)")
if (cont == '0'): break
# predict_bird_sound('D:/C Drive Documents/Bird_Sound_Recognition/Data for each bird/data/xeno-canto-dataset/AsianKoel/','Eudynamys24591.wav','D:/')
###Output
[INFO] loading and preprocessing image...
ID: 0, Label: AshyPrinia 0.11%
ID: 1, Label: AsianKoel 0.3%
ID: 2, Label: BlackDrongo 0.22%
ID: 3, Label: CommonMyna 0.02%
ID: 4, Label: CommonTailorbird 2.31%
ID: 5, Label: GreaterCoucal 0.22%
ID: 6, Label: GreenBee-eater 96.79%
ID: 7, Label: IndianRobin 0.01%
ID: 8, Label: LaughingDove 0.02%
ID: 9, Label: White-throatedKingfisher 0.0%
Final Decision:
.
..
...
ID: 6, Label: GreenBee-eater
|
judy/.ipynb_checkpoints/SafetyRecommenders_may25-checkpoint.ipynb | ###Markdown
Safety Recommenders Link to the second dataset Address dataset: http://opendata.dc.gov/datasets/address-points Importing Main Libraries:
###Code
%matplotlib notebook
import IPython
from IPython.display import display
from sqlalchemy import create_engine
import psycopg2
import psycopg2.extras
import pandas as pd
import csv
from numpy import nan as NA
from datetime import datetime
import re
import sys
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import Normalizer
from sklearn import model_selection
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report
from pandas import *
###Output
_____no_output_____
###Markdown
Data Ingestion Modules:
###Code
class Ingestion(object):
"""This is the ingestion class to deal with csv directly from the same directory where the module is"""
def __init__(self, file, sep = ",", header = 0 ):
self.file = file
self.delimiter = sep
self.df = pd.read_csv(file, sep= sep, header=header, engine='python')
def file_csv(self):
return self.df
class IngestionDatabase(object):
""" This is the ingestion class to deal with postgress database """
def __init__(self, database, query):
self.engine = create_engine(database)
self.table_names = self.engine.table_names()
self.con = self.engine.connect()
self.rs = self.con.execute(query)
self.df = pd.DataFrame(self.rs.fetchmany(size=15))
def cols(self):
self.df.columns = self.rs.keys()
return self.df
###Output
_____no_output_____
###Markdown
Creating the Ingestion instances:
###Code
ingest = Ingestion('DC_Crime_Official.csv')
data = ingest.file_csv()
#data = concat(data, ignore_index=True)
###Output
_____no_output_____
###Markdown
Exploring the raw dataset:
###Code
data.head(2)
###Output
_____no_output_____
###Markdown
Initial Data Wrangling of Crime Dataset
###Code
class Wrangling(object):
def __init__(self, data = data):
self.df = data
# drop empty rows
def dropNA(self):
self.df = self.df.dropna(how='all') # this only drop rows with 100% NA
return self.df
def __offense_column(self, text1 ='theft/other', text2 ='theft f/auto', text3 = 'assault w/dangerous weapon',
repl1 = 'theft', repl2 = 'auto theft', repl3 = 'assault with weapon' ):
"""There are 9 categories of offenses here:
        This function will transform the categories into more readable text
for example : assault w/dangerous weapon = assault with dangerous weapon"""
self.df['offense_text'] = self.df['offense_text'].replace([text1, text2, # add the column name to the arguments.
text3], [repl1, repl2, repl3])
return self.df
def date_time_transformer(self, time = 'start_date', second_date = 'report_date', third_date = 'end_date'):
''' transform into datetime 64 object and eliminate the second date column'''
self.df[time] = pd.to_datetime(self.df['start_date'])
self.df.drop([second_date, third_date], axis = 1, inplace = True)
return self.df
def __latlong_cutter(self):
""" Reduce the presition of the lat long data by cutting them."""
self.newlat = []
self.newlon = []
for item in self.df['latitude']:
item = str(item)
item = float(item[0:6])
self.newlat.append(item)
self.df['latitude'] = self.newlat
for item in self.df['longitude']:
item = str(item)
item = float(item[0:7])
self.newlon.append(item)
self.df['longitude'] = self.newlon
return self.df
def lat_long_rounder(self, decimals = 3):
""" Reduce the presition of the lat long data by rounging decimals"""
self.df['latitude'] = self.df['latitude'].round(decimals = decimals)
self.df['longitude'] = self.df['longitude'].round(decimals = decimals)
return self.df
def adress_format_modifier(self):
"""This columns replace some of the content from the block columns to it is easy to parse it"""
self.splitted = []
        # creating the splitted column
        # this works, but it cannot be expressed with pandas' .replace because it also uses the split method
        # Note that the built-in .replace does not work well with integers or with large numbers of
        # replacements, so this explicit loop is used instead
for row in self.df['block']:
row = row.replace("block of ", "")
row = row.replace("street", "St")
row = row.replace("-", "")
row = row.split(' ', 1)
self.splitted.append(row)
self.df['splitted'] = self.splitted
return self.df
def block_parser(self):
""" This is the block parser that separate block in start and en blocks"""
self.startblock = []
self.endblock_1 = []
self.endblock = []
# create column 'startblock'
for row in self.df['splitted']:
row = row[0]
self.startblock.append(row)
self.df['startblock'] = self.startblock
# create column 'endblock_1'
for row in self.df['splitted']:
row = row[-1].lstrip() # enblock_1
row = row.split(' ',1)
self.endblock_1.append(row)
self.df['endblock_1'] = self.endblock_1
# create column 'endblock'
for row in self.df['endblock_1']:
row = row[0]
self.endblock.append(row)
self.df['endblock'] = self.endblock
return self.df
def street_parser(self):
self.street = []
#creating column 'street'
for row in self.df['endblock_1']:
row = row[1]
self.street.append(row)
self.df['street'] = self.street
return self.df
###Output
_____no_output_____
###Markdown
Creating the wrangling instances:
###Code
Wrangled = Wrangling()
Wrangled.dropNA()
Wrangled.date_time_transformer()
Wrangled.lat_long_rounder()
Wrangled.adress_format_modifier()
Wrangled.block_parser()
df = Wrangled.street_parser()
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 69204 entries, 0 to 69203
Data columns (total 31 columns):
neighborhood_cluster 68431 non-null object
census_tract 69051 non-null float64
offense_group 69204 non-null object
longitude 69204 non-null float64
offense_text 69204 non-null object
shift 69204 non-null object
yblock 69204 non-null float64
district 69181 non-null float64
ward 69204 non-null int64
year 69204 non-null int64
offense_key 69204 non-null object
bid 12302 non-null object
sector 69175 non-null object
psa 69175 non-null float64
ucrrank 69204 non-null int64
block_group 69051 non-null object
voting_precinct 69204 non-null object
xblock 69204 non-null float64
block 69204 non-null object
start_date 69204 non-null datetime64[ns]
cnn 69204 non-null int64
offense 69204 non-null object
anc 69204 non-null object
method 69204 non-null object
location 69204 non-null object
latitude 69204 non-null float64
splitted 69204 non-null object
startblock 69204 non-null object
endblock_1 69204 non-null object
endblock 69204 non-null object
street 69204 non-null object
dtypes: datetime64[ns](1), float64(7), int64(4), object(19)
memory usage: 16.9+ MB
###Markdown
Dropping repeated columns created during the first wrangling process:
###Code
df = df.drop(columns = ['location', 'endblock_1', 'splitted'])
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 69204 entries, 0 to 69203
Data columns (total 28 columns):
neighborhood_cluster 68431 non-null object
census_tract 69051 non-null float64
offense_group 69204 non-null object
longitude 69204 non-null float64
offense_text 69204 non-null object
shift 69204 non-null object
yblock 69204 non-null float64
district 69181 non-null float64
ward 69204 non-null int64
year 69204 non-null int64
offense_key 69204 non-null object
bid 12302 non-null object
sector 69175 non-null object
psa 69175 non-null float64
ucrrank 69204 non-null int64
block_group 69051 non-null object
voting_precinct 69204 non-null object
xblock 69204 non-null float64
block 69204 non-null object
start_date 69204 non-null datetime64[ns]
cnn 69204 non-null int64
offense 69204 non-null object
anc 69204 non-null object
method 69204 non-null object
latitude 69204 non-null float64
startblock 69204 non-null object
endblock 69204 non-null object
street 69204 non-null object
dtypes: datetime64[ns](1), float64(7), int64(4), object(16)
memory usage: 15.3+ MB
###Markdown
Separating datetime into different columns:
###Code
from datetime import datetime
df["start_date"] = pd.to_datetime(df["start_date"])
df["year"] =df["start_date"].dt.year
df["month"] =df["start_date"].dt.month
df["day"] =df["start_date"].dt.day
df["hour"] =df["start_date"].dt.hour
df["minute"] =df["start_date"].dt.minute
df["second"] =df["start_date"].dt.second
# Now, Eliminate the start_date column.
df = df.drop(columns = 'start_date')
###Output
_____no_output_____
###Markdown
Exploration of the dataset and preparation for predictions
Description of the dataset:
###Code
df.describe()
###Output
_____no_output_____
###Markdown
Preparing the X and y sets. Dropping remaining NA values:
###Code
df = df.dropna()
X = df.drop(columns = ['ucrrank'])
y = df['ucrrank']
###Output
_____no_output_____
###Markdown
Encoding the Categorical variables:
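The cell below label-encodes the object-typed columns in place. As an aside, a hedged alternative for nominal (unordered) categories is one-hot encoding, which avoids implying an ordering between category codes. A minimal sketch, not used elsewhere in this notebook (the name `X_onehot` is just for illustration):

```python
# Sketch only: one-hot encode the object-typed columns instead of label-encoding them.
X_onehot = pd.get_dummies(X, columns=X.select_dtypes(include='object').columns)
```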
###Code
from sklearn.preprocessing import LabelEncoder
# LabelEncoder only supports one-dimensional input, so encode the columns one at a time.
for colname, col in X.iteritems():
    if col.dtype == object:  # only transform the categorical (object-typed) columns
        X[colname] = LabelEncoder().fit_transform(col)
###Output
_____no_output_____
###Markdown
Study of the importance of the features: what is left to do here is to identify the features that best explain the variability of the dataset:
###Code
X.columns
from sklearn.decomposition import PCA
pca = PCA(n_components = 3) # input a number for feature extraction
features = X
X_ = pca.fit_transform(X)
explained_var = pca.explained_variance_ratio_
explained_var
# Here, one component explains most of the variance in the dataset
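# Illustrative follow-up (assumption: `pca` and `features` are the objects fitted above):
# inspect the component loadings to see which original columns drive each principal component.
loadings = pd.DataFrame(pca.components_, columns=features.columns,
                        index=['PC1', 'PC2', 'PC3'])
print(loadings.abs().idxmax(axis=1))  # column with the largest absolute weight per component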
###Output
_____no_output_____
###Markdown
Creating the Training and test set:
###Code
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size = 0.2, random_state = 0)
###Output
_____no_output_____
###Markdown
Visual Exploration
###Code
#pd.plotting.scatter_matrix(X_train, figsize = (30, 30), marker ='o', hist_kwds = {'bins': 20},
# s = 60, alpha = 0.7)\
axis = X_train.values # Change to numpy array for performance
plt.boxplot(axis, manage_xticks = False)
plt.yscale("symlog")
plt.xlabel("Features")
plt.ylabel("Target Variable")
plt.show()
scaler = StandardScaler()
#scaler = MinMaxScaler()
#scaler = Normalizer()
X_train = scaler.fit(X_train).transform(X_train)
X_test = scaler.fit(X_test).transform(X_test)
###Output
_____no_output_____
###Markdown
Transforming the y labels into numpy arrays to increase computation performance:
###Code
y_train = y_train.values
y_test = y_test.values
plt.boxplot(X_train, manage_xticks = False)
plt.yscale("symlog")
plt.xlabel("Features")
plt.ylabel("Target Variable")
plt.show()
###Output
_____no_output_____
###Markdown
Fitting models and performing predictions on the crime dataset
KNN:
###Code
knn = KNeighborsClassifier(n_neighbors = 10, metric = 'manhattan', weights = 'uniform', algorithm = 'auto')
knn.fit(X_train, y_train)
predicted_knn = knn.predict(X_test)
print("Predictions: {}".format(predicted_knn))
###Output
Predictions: [6 7 6 ... 6 6 7]
###Markdown
Cross Validation:
###Code
scores = cross_val_score(knn, X = X_train, y = y_train)
print ("Cross Validation Scores: {}".format(scores))
report = classification_report(y_test, predicted_knn)
print (report)
###Output
precision recall f1-score support
1 0.00 0.00 0.00 2
2 1.00 0.13 0.24 15
3 0.99 0.99 0.99 77
4 0.88 0.99 0.93 103
5 0.97 0.90 0.93 70
6 0.94 0.99 0.96 1438
7 0.92 0.84 0.88 591
8 0.91 0.67 0.77 93
avg / total 0.93 0.93 0.93 2389
###Markdown
Finding the best parameters for KNN
In this case, GridSearchCV and a simple loop will be used in order to find the optimal hyperparameters for KNN.
###Code
type(y_train)
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
params2 = [{'n_neighbors': [1,10,50,100], 'algorithm': ['auto','ball_tree','kd_tree' ],
'weights': ['uniform', 'distance'], 'metric': ['minkowski', 'manhattan']}]
grid_search = GridSearchCV(estimator = knn, param_grid = params2, scoring = 'accuracy', cv = 5, n_jobs = -1)
grid_search = grid_search.fit(X_train, y_train)
accuracy = grid_search.best_score_
best_params = grid_search.best_params_
print(accuracy)
print(best_params)
train_accuracy = []
test_accuracy = []
neighbors = range(1,100,10)
algorithms = ['auto', 'ball_tree', 'kd_tree']
weights = ['uniform', 'distance']
for i in neighbors:
knn = KNeighborsClassifier(n_neighbors = i, metric = 'manhattan', weights = 'distance', algorithm = 'auto')
knn.fit(X_train, y_train)
train_accuracy.append(knn.score(X_train, y_train))
test_accuracy.append(knn.score(X_test, y_test))
plt.plot(neighbors, train_accuracy, label = 'Train set accuracy')
plt.plot(neighbors, test_accuracy, label = 'Test set accuracy')
plt.ylabel("Accuracy")
plt.xlabel("Number of neighbors")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Kernel SVC:
###Code
from sklearn.svm import SVC
svm = SVC(C = 1000, kernel = 'rbf', gamma = 1)
svm.fit(X_train, y_train)
predicted = svm.predict(X_test)
#print("Predictions: {}".format(predicted))
scores = cross_val_score(svm, X = X_train, y = y_train)
report = classification_report(y_test, predicted)
print (report)
# print ("Cross Validation Scores: {}".format(scores))
###Output
precision recall f1-score support
1 0.00 0.00 0.00 2
2 0.00 0.00 0.00 15
3 1.00 0.01 0.03 77
4 1.00 0.01 0.02 103
5 0.00 0.00 0.00 70
6 0.63 1.00 0.77 1438
7 1.00 0.16 0.27 591
8 1.00 0.01 0.02 93
avg / total 0.74 0.64 0.53 2389
###Markdown
Finding the best parameters for Kernel SVC:
###Code
params = [{'C': [1, 10, 30, 100], 'kernel': ['rbf'], 'gamma': [1, 0.1, 0.01, 0.001]}]
grid_search = GridSearchCV(estimator = svm, param_grid = params, scoring = 'accuracy', cv = 5, n_jobs = -1)
grid_search = grid_search.fit(X_train, y_train)
accuracySVC = grid_search.best_score_
best_paramsSVC = grid_search.best_params_
print(accuracySVC)
print(best_paramsSVC)
train_accuracy = []
test_accuracy = []
Ci = [1,10, 50, 100]
for i in Ci:
svm = SVC(C = i, kernel = 'rbf', gamma = 0.001) # try rbf, linear and poly
svm.fit(X_train, y_train)
train_accuracy.append(svm.score(X_train, y_train))
test_accuracy.append(svm.score(X_test, y_test))
plt.plot(Ci, train_accuracy, label = 'Train set accuracy')
plt.plot(Ci, test_accuracy, label = 'Test set accuracy')
plt.ylabel("Accuracy")
plt.xlabel("C")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Merging two datasets
###Code
df1 = data[['latitude', 'longitude', 'ucrrank']] # selecting the relevant labels from the wrangled crime dataset (df).
df1.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 69204 entries, 0 to 69203
Data columns (total 3 columns):
latitude 69204 non-null float64
longitude 69204 non-null float64
ucrrank 69204 non-null int64
dtypes: float64(2), int64(1)
memory usage: 1.6 MB
###Markdown
Since there are no missing values here, we can use all rows from the crime dataset.
###Code
# ingesting new dataset
ingest2 = Ingestion('Address_Points.csv', sep = ',', header = 0)
data2 = ingest2.file_csv()
data2.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 147181 entries, 0 to 147180
Data columns (total 52 columns):
X 147181 non-null float64
Y 147181 non-null float64
OBJECTID_12 147181 non-null int64
SITE_ADDRESS_PK 147181 non-null int64
ADDRESS_ID 147181 non-null int64
STATUS 147181 non-null object
SSL 146816 non-null object
TYPE_ 147181 non-null object
ENTRANCETYPE 147181 non-null object
ADDRNUM 145278 non-null float64
ADDRNUMSUFFIX 2258 non-null object
STNAME 147181 non-null object
STREET_TYPE 147181 non-null object
QUADRANT 147181 non-null object
CITY 147181 non-null object
STATE 147181 non-null object
FULLADDRESS 145278 non-null object
SQUARE 146822 non-null object
SUFFIX 7304 non-null object
LOT 146819 non-null object
NATIONALGRID 147181 non-null object
ASSESSMENT_NBHD 147120 non-null object
ASSESSMENT_SUBNBHD 120298 non-null object
CFSA_NAME 147179 non-null object
HOTSPOT 0 non-null float64
CLUSTER_ 144055 non-null object
POLDIST 147179 non-null object
ROC 147179 non-null object
PSA 147179 non-null object
SMD 147179 non-null object
CENSUS_TRACT 147177 non-null float64
VOTE_PRCNCT 147179 non-null object
WARD 147179 non-null object
ZIPCODE 147162 non-null float64
ANC 147179 non-null object
NEWCOMMSELECT06 1012 non-null object
NEWCOMMCANDIDATE 1870 non-null object
CENSUS_BLOCK 147177 non-null object
CENSUS_BLOCKGROUP 147177 non-null object
FOCUS_IMPROVEMENT_AREA 0 non-null float64
SE_ANNO_CAD_DATA 0 non-null float64
LATITUDE 147181 non-null float64
LONGITUDE 147181 non-null float64
ACTIVE_RES_UNIT_COUNT 147180 non-null float64
RES_TYPE 147181 non-null object
ACTIVE_RES_OCCUPANCY_COUNT 147180 non-null float64
WARD_2002 147179 non-null object
WARD_2012 147179 non-null object
ANC_2002 147179 non-null object
ANC_2012 147179 non-null object
SMD_2002 147179 non-null object
SMD_2012 147179 non-null object
dtypes: float64(12), int64(3), object(37)
memory usage: 58.4+ MB
###Markdown
Selecting relevant columns and transforming labels into lowercase for standardization:
###Code
df2 = data2[['LATITUDE', 'LONGITUDE']] # selecting the relevant columns.
df2.columns = df2.columns.str.lower() # transforming labels to lowercase
df2.head(5)
df1 = data[['latitude', 'longitude', 'ucrrank']] # selecting the relevant labels from the wrangled crime dataset (df).
df1.ucrrank.describe()
df1.head(5)
###Output
_____no_output_____
###Markdown
Rounding latitude and longitude values of the second dataset
###Code
Wrangled2 = Wrangling(df2)
# Wrangled.dropNA()
df2 = Wrangled2.lat_long_rounder()
###Output
/home/franco/.local/lib/python3.6/site-packages/ipykernel_launcher.py:48: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
/home/franco/.local/lib/python3.6/site-packages/ipykernel_launcher.py:49: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
###Markdown
Before merging, we need a way to eliminate from the Address_Points dataset all the rows whose locations match the crime dataset.
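As an aside, a more direct way to express this would be an anti-join with `pandas.merge`. This is a sketch only (the loop below is what the notebook actually uses, and the name `no_crime` is just for illustration):

```python
# Sketch: keep only the address points whose rounded (latitude, longitude) never appears in the crime data.
no_crime = df2.merge(df1[['latitude', 'longitude']].drop_duplicates(),
                     on=['latitude', 'longitude'], how='left', indicator=True)
no_crime = no_crime[no_crime['_merge'] == 'left_only'].drop(columns='_merge')
```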
###Code
lat = []
long = []
for i, j, k, l in zip(df1['latitude'], df2['latitude'], df1['longitude'], df2['longitude']):
if str(j) in str(i) and str(l) in str(k):
lat.append(j)
long.append(l)
###Output
_____no_output_____
###Markdown
This means that we only have 27 blocks where no crime occurred in Washington DC during the last 2 years. It would be interesting to see where those blocks are:
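A quick way to see them is to scatter the matched coordinates. A sketch, assuming `matplotlib.pyplot` is available as `plt` (as used earlier) and using the `lat` and `long` lists built above:

```python
plt.scatter(long, lat, c='green', s=20)
plt.xlabel('longitude')
plt.ylabel('latitude')
plt.title('Blocks with no recorded crime')
plt.show()
```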
###Code
# Creating the new dataframe:
second_dataframe = {'latitude': lat, 'longitude': long}
second_dataframe = pd.DataFrame(second_dataframe)
second_dataframe['ucrrank'] = 10
second_dataframe
frames = [df1, second_dataframe]
df_merged = pd.concat(frames, sort = False)
df_merged.head(5)
df_merged.info()
X = df_merged.drop(columns = ['ucrrank'])
y = df_merged['ucrrank']
# no need to encode the labels in this case... no categorical variables:
###Output
_____no_output_____
###Markdown
Creating the Training and test set:
###Code
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size = 0.2, random_state = 0)
###Output
_____no_output_____
###Markdown
Scaling the features:
###Code
scaler = StandardScaler()
#scaler = MinMaxScaler()
#scaler = Normalizer()
X_train = scaler.fit(X_train).transform(X_train)
X_test = scaler.fit(X_test).transform(X_test)
###Output
_____no_output_____
###Markdown
Transforming to numpy array:
###Code
y_test = y_test.values
y_train = y_train.values
###Output
_____no_output_____
###Markdown
Performing simple KNN on the merged dataset
###Code
knn = KNeighborsClassifier(n_neighbors = 10, metric = 'manhattan', weights = 'uniform', algorithm = 'auto')
knn.fit(X_train, y_train)
predicted_knn = knn.predict(X_test)
print("Predictions: {}".format(predicted_knn))
scores = cross_val_score(knn, X = X_train, y = y_train)
print ("Cross Validation Scores: {}".format(scores))
report = classification_report(y_test, predicted_knn)
print (report)
###Output
precision recall f1-score support
1 0.00 0.00 0.00 54
2 0.09 0.01 0.01 131
3 0.20 0.16 0.17 810
4 0.15 0.06 0.08 997
5 0.17 0.05 0.08 698
6 0.55 0.69 0.61 5683
7 0.49 0.58 0.53 4449
8 0.21 0.05 0.08 1019
10 0.00 0.00 0.00 6
avg / total 0.43 0.49 0.45 13847
###Markdown
Finding the best parameters for KNN on the merged dataset:
###Code
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
params2 = [{'n_neighbors': [1,10,50,100], 'algorithm': ['auto','ball_tree','kd_tree' ],
'weights': ['uniform', 'distance'], 'metric': ['minkowski', 'manhattan']}]
grid_search = GridSearchCV(estimator = knn, param_grid = params2, scoring = 'accuracy', cv = 5, n_jobs = -1)
grid_search = grid_search.fit(X_train, y_train)
accuracy = grid_search.best_score_
best_params = grid_search.best_params_
print(accuracy)
print(best_params)
###Output
0.5144265491838799
{'algorithm': 'ball_tree', 'metric': 'manhattan', 'n_neighbors': 50, 'weights': 'uniform'}
###Markdown
Kernel SVC on merged dataset:
###Code
svm = SVC(C = 10, kernel = 'rbf', gamma = 0.1)
svm.fit(X_train, y_train)
predicted = svm.predict(X_test)
#print("Predictions: {}".format(predicted))
scores = cross_val_score(svm, X = X_train, y = y_train)
report = classification_report(y_test, predicted)
print (report)
# print ("Cross Validation Scores: {}".format(scores))
###Output
precision recall f1-score support
1 0.00 0.00 0.00 54
2 0.00 0.00 0.00 131
3 0.00 0.00 0.00 810
4 0.00 0.00 0.00 997
5 0.00 0.00 0.00 698
6 0.42 0.92 0.57 5683
7 0.38 0.11 0.17 4449
8 0.00 0.00 0.00 1019
10 0.00 0.00 0.00 6
avg / total 0.29 0.41 0.29 13847
###Markdown
Apparently, kernel SVC is not a very good tool for dealing with large datasets.
Finding the best parameters for Kernel SVC on the merged dataset:
###Code
params = [{'C': [1, 10, 30, 100], 'kernel': ['rbf'], 'gamma': [1, 0.1, 0.01, 0.001]}]
grid_search = GridSearchCV(estimator = svm, param_grid = params, scoring = 'accuracy', cv = 5, n_jobs = -1)
grid_search = grid_search.fit(X_train, y_train)
accuracySVC = grid_search.best_score_
best_paramsSVC = grid_search.best_params_
print(accuracySVC)
print(best_paramsSVC)
###Output
_____no_output_____
###Markdown
ANN test on merged dataset
###Code
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.utils import to_categorical
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from keras.utils import np_utils
###Output
/home/franco/.local/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
###Markdown
Converting X and y to NumPy arrays:
###Code
X = X.values
y = y.values
###Output
_____no_output_____
###Markdown
Encoding the target variable into binary (one-hot) values:
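An equivalent, arguably simpler route is `pd.get_dummies`, which produces the same binary indicator matrix with one column per class. A sketch only (the name `y1_alt` is just for illustration):

```python
y1_alt = pd.get_dummies(pd.Series(y))  # one indicator column per ucrrank class
```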
###Code
onehotencoder = OneHotEncoder()
y1 = y.reshape(-1, 1)
y1 = onehotencoder.fit_transform(y1).toarray()
y1 = pd.DataFrame(y1)
y1.columns = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y1, test_size = 0.2, random_state = 0)
y_train = y_train.values
y_test = y_test.values
model = Sequential()
model.add(Dense(units=2, input_dim=2))
model.add(Activation('relu'))
model.add(Dense(units=2))
model.add(Activation('relu'))
model.add(Dense(units=4))
model.add(Activation('relu'))
model.add(Dense(units=4))
model.add(Activation('relu'))
model.add(Dense(units=4))
model.add(Activation('relu'))
model.add(Dense(units=4))
model.add(Activation('relu'))
model.add(Dense(units=4))
model.add(Activation('relu'))
model.add(Dense(units=4))
model.add(Activation('relu'))
model.add(Dense(units=4))
model.add(Activation('relu'))
model.add(Dense(units=4))
model.add(Activation('relu'))
model.add(Dense(units=4))
model.add(Activation('relu'))
model.add(Dense(units=4))
model.add(Activation('relu'))
model.add(Dense(units=4))
model.add(Activation('relu'))
model.add(Dense(units=4))
model.add(Activation('relu'))
model.add(Dense(units=10))
model.add(Activation('softmax'))
#
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
model.fit(X_train, y_train, batch_size = 20, epochs = 100)
from sklearn.neural_network import MLPClassifier
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,
hidden_layer_sizes=(1500,), random_state=1, max_iter = 1000)
clf.fit(X_train, y_train)
predicted = clf.predict(X_test)
report = classification_report(y_test, predicted)
print (report)
###Output
precision recall f1-score support
0 0.00 0.00 0.00 54
1 0.00 0.00 0.00 131
2 0.00 0.00 0.00 810
3 0.00 0.00 0.00 997
4 0.00 0.00 0.00 698
5 0.32 0.03 0.06 5683
6 0.00 0.00 0.00 4449
7 0.00 0.00 0.00 1019
8 0.00 0.00 0.00 0
9 0.00 0.00 0.00 6
avg / total 0.13 0.01 0.02 13847
###Markdown
The ANN is not working well in this case.
Other models: Random Forest Classifier:
###Code
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
clf = RandomForestClassifier(max_depth= None, min_samples_split = 2, random_state=0, criterion='entropy')
clf.fit(X_train, y_train)
predicted = clf.predict(X_test)
report = classification_report(y_test, predicted)
print (report)
###Output
precision recall f1-score support
0 0.00 0.00 0.00 54
1 0.00 0.00 0.00 131
2 0.22 0.04 0.07 810
3 0.21 0.04 0.06 997
4 0.16 0.02 0.04 698
5 0.66 0.61 0.63 5683
6 0.58 0.48 0.52 4449
7 0.17 0.04 0.07 1019
8 0.00 0.00 0.00 0
9 1.00 0.17 0.29 6
avg / total 0.50 0.41 0.44 13847
###Markdown
KNN and the Random Forest classifier reported the highest accuracy. This dataset contains only 2 independent features: latitude and longitude. This means that there are more features that directly influence the dependent variable:
###Code
###Output
_____no_output_____ |
CodeBlog_FrozenLake.ipynb | ###Markdown
FrozenLake environment
I created this notebook to explore solutions to the OpenAI FrozenLake environment using Temporal Difference control methods, in the context of studying Reinforcement Learning. In __Frozen Lake__ an agent controls the movement of a character in a [4x4](https://gym.openai.com/envs/FrozenLake-v0/) or [8x8](https://gym.openai.com/envs/FrozenLake8x8-v0/) gridworld simulating a frozen lake. Some of the grids are walkable frozen surfaces (F) and others have holes (H). The agent starts in S and must get to the goal position G. If the agent reaches the __goal__, it receives a __reward of 1__. No rewards are given for any other actions. This notebook can be used for both the 4x4 and 8x8 versions of FrozenLake.
We start by defining a function (*visualise_terrain*) for visualizing the terrain generated by a particular instantiation of the environment. We also create a dictionary which specifies action identities from their integer codes.
###Code
def visualise_terrain(envmap, shape=4, colormap='jet'):
env_map = np.zeros((shape, shape), dtype=float)
linear_map = [s for s in envmap if s in ['S', 'F', 'H', 'G']]
number_map = np.zeros((shape**2))
for s in range(len(linear_map)):
if linear_map[s] == 'S':
number_map[s] = 0.7
elif linear_map[s] == 'G':
number_map[s] = 1
elif linear_map[s] == 'H':
number_map[s] = 0
else:
number_map[s] = 0.3
for r in range(shape):
env_map[r,:] = number_map[shape*r:shape*r+shape]
return env_map, colormap
actions = {}
actions[0] = 'L'
actions[1] = 'D'
actions[2] = 'R'
actions[3] = 'U'
###Output
_____no_output_____
###Markdown
Agent
Next, we define our Agent. We provide it with attributes **nA** (number of actions available to it), **nS** (number of observables in this environment), **epsilon and epsilon decay rate**, **alpha** (how much to weight information from new episodes when updating) and **gamma** (how much to discount future rewards). We also bake in a **minimum value for epsilon**, to ensure it keeps a baseline level of explorative behaviour in the long run.
We additionally define functions for selecting actions (**select_action**) and updating the agent's internal state (**agent_update**). The former uses an epsilon-greedy policy and the latter uses Q-learning as the method for updating the State-Action Value table Q.
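For reference, these two rules as implemented in `select_action` and `agent_update` below can be written as

$$\pi(a \mid s) = \begin{cases} 1 - \epsilon + \epsilon/|\mathcal{A}| & \text{if } a = \arg\max_{a'} Q(s, a') \\ \epsilon/|\mathcal{A}| & \text{otherwise,} \end{cases}$$

$$Q(S_t, A_t) \leftarrow (1 - \alpha)\, Q(S_t, A_t) + \alpha \big( R_{t+1} + \gamma \max_{a} Q(S_{t+1}, a) \big).$$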
###Code
class Agent:
def __init__(self, nA, nS, epsilon=1, epsilon_decay = 0.5, min_epsilon = 0.001, alpha = 0.8,
alpha_decay = 0.99, gamma = 0.95, alpha_init = 0.8):
self.nA = nA
self.nS = nS
self.Q = np.full((nS, nA), 0.01)
self.epsilon = epsilon
self.epsilon_decay = epsilon_decay
self.min_epsilon = min_epsilon
self.alpha_init = alpha_init
self.alpha = alpha_init
self.gamma = gamma
self.alpha_decay = alpha_decay
def select_action(self, state):
policy_p = ((np.ones(self.nA)*self.epsilon)/self.nA)
greedy_Q = np.argmax(self.Q[state])
policy_p[greedy_Q] = 1 - self.epsilon + (self.epsilon/self.nA)
action = np.random.choice(self.nA, p=policy_p)
return action
def agent_update(self, state, action, reward, next_state, done):
next_action = self.select_action(next_state)
self.Q[state][action] = (1 - self.alpha) * self.Q[state][action] + self.alpha * (reward + self.gamma * np.max(self.Q[next_state]))
def agent_update_MC(self, states, actions, rewards):
discounts = np.array([self.gamma**i for i in range(len(rewards)+1)])
for i, state in enumerate(states):
previous_Q = self.Q[state][actions[i]]
self.Q[state][actions[i]] = previous_Q + self.alpha*(sum(rewards[i:]*discounts[:-(1+i)]) - previous_Q)
def interact_MC(env, agent, num_episodes, window = 100, alpha_tune=False):
windowed_rewards = []
working_rewards = deque(maxlen=window)
goals = 0
for i_episode in range(1, num_episodes+1):
ep_states, ep_actions, ep_rewards = [], [], []
state = env.reset()
agent.epsilon = max(agent.epsilon*agent.epsilon_decay, agent.min_epsilon)
while True:
action = agent.select_action(state)
next_state, reward, done, _ = env.step(action)
ep_states.append(state)
ep_actions.append(action)
ep_rewards.append(reward)
state = next_state
if done:
agent.agent_update_MC(ep_states, ep_actions, ep_rewards)
working_rewards.append(sum(ep_rewards))
if sum(ep_rewards) >=1:
goals += 1
if alpha_tune:
agent.alpha = min(agent.alpha + 0.05*goals, 0.999)
break
if i_episode % window ==0:
windowed_rewards.append(100*np.mean(working_rewards))
print('Episode {}: goal reached in {} of last {} episodes'.format(i_episode, goals, window))
goals = 0
#agent.alpha = 0.1
return windowed_rewards, agent.Q
###Output
_____no_output_____
###Markdown
Agent-Environment Interaction
Lastly, before we train our agent, we define an interaction function which structures the way our agent interacts with the environment and provides a readout of how well it's doing.
**interact** trains the agent for a number of episodes, keeps track of the reward achieved per episode and whether the agent achieved the goal state or not, and averages rewards obtained over a window of (default) 100 episodes for assessment. During training, it prints the % of episodes over the last 100 in which the agent succeeded, and finally returns the average reward obtained over the defined window of episodes, as well as the Q-table.
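The rolling window of recent episode rewards is handled by `collections.deque` with `maxlen`, which silently discards the oldest entry once full. A tiny, self-contained illustration (not part of the training code):

```python
from collections import deque

recent = deque(maxlen=3)
for r in [0, 1, 1, 0, 1]:
    recent.append(r)          # only the 3 most recent rewards are kept
print(list(recent), sum(recent) / len(recent))  # [1, 0, 1] 0.666...
```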
###Code
def interact(env, agent, num_episodes, window = 100):
windowed_rewards = []
working_rewards = deque(maxlen=window)
#actions = []
goals = 0
for i_episode in range(1, num_episodes+1):
state = env.reset()
episode_r = 0
while True:
action = agent.select_action(state)
next_state, reward, done, _ = env.step(action)
agent.agent_update(state, action, reward, next_state, done)
episode_r += reward
state = next_state
#actions.append(action)
#print(action)
#env.render()
if done:
working_rewards.append(episode_r)
if episode_r ==1:
goals += 1
break
agent.epsilon = agent.min_epsilon + 0.99*np.exp(-agent.epsilon_decay*i_episode)
if i_episode % window==0:
windowed_rewards.append(100*np.mean(working_rewards))
if i_episode % (window*10)==0:
print('Episode {}: goal reached in {} of last {} episodes'.format(i_episode, goals, window*10))
goals = 0
return windowed_rewards, agent.Q
###Output
_____no_output_____
###Markdown
There is a small trick in the **interact** function code: if an episode terminates with reward, the agent decreases its alpha (learning rate). Therefore, over time, as the Q-table becomes a better and better estimate of the State-Value function, the agent will increasingly weight it over immediate experience when updating its estimate of the State-Value function.
Testing
Finally, we will instantiate our chosen environment, call **visualise_terrain** to render our terrain map, instantiate the agent and run the **interact** function to train it. We will start with the (ballpark-reasonable) default agent parameters and run for 5,000 episodes to make sure everything is running OK and the agent is learning something, then visualise its performance over time, in terms of the percentage of episodes terminating in success.
###Code
env = gym.make('FrozenLake-v0')
env_map, colormap = visualise_terrain(env.render(mode='ansi'), shape=int(math.sqrt(env.observation_space.n)))
agent = Agent(nA=env.action_space.n, nS=env.observation_space.n)
rewards, Q_table = interact(env, agent, num_episodes=5000)
plt.plot(range(len(rewards)), rewards)
plt.title('Agent Performance in FrozenLake');
plt.ylabel('% Successful Episodes');
plt.xlabel('Episode batch # (batches of 100 episodes)');
###Output
Episode 1000: goal reached in 230 of last 1000 episodes
Episode 2000: goal reached in 581 of last 1000 episodes
Episode 3000: goal reached in 632 of last 1000 episodes
Episode 4000: goal reached in 569 of last 1000 episodes
Episode 5000: goal reached in 593 of last 1000 episodes
###Markdown
We now visualise its Q-table (left), where each row is a state and each column an action (**l**eft, **d**own, **r**ight, **u**p), with the value of each cell color-coded from dark blue (0) to deep red (1). To the right, we can see a map of the terrain the agent trained on (start = orange, goal = burgundy, frozen surface = light blue, hole = deep blue), with the most valuable action according to the Q-table superimposed on each grid cell.
###Code
## Obtain most valuable actions per state and reshape into terrain conformation.
shape = int(math.sqrt(env.observation_space.n))
p_map = [actions[i] for i in np.argmax(Q_table, axis=1)]
p_map = np.reshape(np.array(p_map), (shape,shape))
## Generate subplots.
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=False, sharey=False)
# Color-coded Q-table
ax1.imshow(Q_table, cmap='jet')#, vmin=0, vmax=1)
ax1.set_xticks(ticks=[0,1,2,3])
ax1.set_xticklabels(['l', 'd', 'r', 'u'])
ax1.set_yticks(np.arange(0,shape**2+1,3));
ax1.set_yticklabels(np.arange(1,shape**2+2,3));
ax1.set_ylabel('State')
ax1.set_xlabel('Action')
ax1.set_title('Q-table');
# Terrain-policy
ax2.imshow(env_map, vmin = 0, vmax = 1, cmap=colormap)
ax2.axis('off')
for r in range(shape):
for c in range(shape):
if env_map[c,r] not in [0.0, 1.0]:
ax2.text(r,c, p_map[c,r], c='white', weight='bold')
ax2.set_title('Most likely action per grid of terrain');
###Output
_____no_output_____
###Markdown
Developing & Improving
Not bad, but can we do better? Let's do some simple parameter exploration, trying out 3 different values each for epsilon, minimum epsilon, alpha and gamma. These parameters do interact with each other, so parameter-wise iteration evidently provides limited insight, but i) I expect even this simplified approach can yield some improvement relative to the initial parameters & insight into agent performance in this task, and ii) exploring the full combinatorial parameter space (3^4 = 81 parameter combinations) would be very time consuming.
Let's run 5,000 episodes per exploration and explore 3 values for each of the 4 parameters. We will store the % successes over the past 100 episodes and the Q-table for each value/parameter and plot that for assessment.
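For reference, the full grid could be enumerated with `itertools.product`. This is a sketch only, assuming the four value lists (`eps_iter`, `eps_min_iter`, `alpha_iter`, `gamma_iter`) defined in the next cell:

```python
from itertools import product

full_grid = list(product(eps_iter, eps_min_iter, alpha_iter, gamma_iter))
print(len(full_grid))  # 3**4 = 81 (epsilon, min_epsilon, alpha, gamma) combinations
```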
###Code
num_eps_iter = 5000
####
eps_iter = [1, 0.1, 0.001]
rewards_eps = np.empty((num_eps_iter//100, len(eps_iter)))
Qtable_eps = np.empty((env.observation_space.n, env.action_space.n * len(eps_iter)))
print('Exploring epsilon range.')
env = gym.make('FrozenLake-v0')
for eps in eps_iter:
agent = Agent(nA=env.action_space.n, nS=env.observation_space.n, epsilon=eps)
i = eps_iter.index(eps)
print('Iteration {}, epsilon value {}.'.format(i+1, eps))
rewards_eps[:, i], Qtable_eps[:, i*env.action_space.n:i*env.action_space.n+env.action_space.n] = interact(env, agent, num_episodes=num_eps_iter)
print('Done!')
####
####
eps_min_iter = [0.01, 0.00000001, 0.0000000001]
rewards_epsmin = np.empty((num_eps_iter//100, len(eps_iter)))
Qtable_epsmin = np.empty((env.observation_space.n, env.action_space.n * len(eps_iter)))
print('Exploring epsilon min range.')
env = gym.make('FrozenLake-v0')
for eps in eps_min_iter:
agent = Agent(nA=env.action_space.n, nS=env.observation_space.n, min_epsilon=eps)
i = eps_min_iter.index(eps)
print('Iteration {}, epsilon min value {}.'.format(i+1, eps))
rewards_epsmin[:, i], Qtable_epsmin[:, i*env.action_space.n:i*env.action_space.n+env.action_space.n] = interact(env, agent, num_episodes=num_eps_iter)
print('Done!')
####
####
alpha_iter = [0.9, 0.8, 0.1]
rewards_alpha = np.empty((num_eps_iter//100, len(eps_iter)))
Qtable_alpha = np.empty((env.observation_space.n, env.action_space.n * len(eps_iter)))
print('Exploring alpha range.')
env = gym.make('FrozenLake-v0')
for alpha in alpha_iter:
agent = Agent(nA=env.action_space.n, nS=env.observation_space.n, alpha=alpha)
i = alpha_iter.index(alpha)
print('Iteration {}, alpha value {}.'.format(i+1, alpha))
rewards_alpha[:, i], Qtable_alpha[:, i*env.action_space.n:i*env.action_space.n+env.action_space.n] = interact(env, agent, num_episodes=num_eps_iter)
print('Done!')
####
####
gamma_iter = [0.9999, 0.995, 0.95]
rewards_gamma = np.empty((num_eps_iter//100, len(eps_iter)))
Qtable_gamma = np.empty((env.observation_space.n, env.action_space.n * len(eps_iter)))
print('Exploring gamma range.')
env = gym.make('FrozenLake-v0')
for gamma in gamma_iter:
agent = Agent(nA=env.action_space.n, nS=env.observation_space.n, gamma=gamma)
i = gamma_iter.index(gamma)
print('Iteration {}, gamma value {}.'.format(i+1, gamma))
rewards_gamma[:, i], Qtable_gamma[:, i*env.action_space.n:i*env.action_space.n+env.action_space.n] = interact(env, agent, num_episodes=num_eps_iter)
print('Done!')
######
######
print('Parameter exploration completed.')
###Output
Exploring epsilon range.
Iteration 1, epsilon value 1.
Episode 1000: goal reached in 260 of last 1000 episodes
Episode 2000: goal reached in 632 of last 1000 episodes
Episode 3000: goal reached in 628 of last 1000 episodes
Episode 4000: goal reached in 635 of last 1000 episodes
Episode 5000: goal reached in 574 of last 1000 episodes
Iteration 2, epsilon value 0.1.
Episode 1000: goal reached in 248 of last 1000 episodes
Episode 2000: goal reached in 586 of last 1000 episodes
Episode 3000: goal reached in 653 of last 1000 episodes
Episode 4000: goal reached in 631 of last 1000 episodes
Episode 5000: goal reached in 615 of last 1000 episodes
Iteration 3, epsilon value 0.001.
Episode 1000: goal reached in 275 of last 1000 episodes
Episode 2000: goal reached in 605 of last 1000 episodes
Episode 3000: goal reached in 631 of last 1000 episodes
Episode 4000: goal reached in 649 of last 1000 episodes
Episode 5000: goal reached in 679 of last 1000 episodes
Done!
Exploring epsilon min range.
Iteration 1, epsilon min value 0.01.
Episode 1000: goal reached in 306 of last 1000 episodes
Episode 2000: goal reached in 651 of last 1000 episodes
Episode 3000: goal reached in 653 of last 1000 episodes
Episode 4000: goal reached in 621 of last 1000 episodes
Episode 5000: goal reached in 602 of last 1000 episodes
Iteration 2, epsilon min value 1e-08.
Episode 1000: goal reached in 306 of last 1000 episodes
Episode 2000: goal reached in 725 of last 1000 episodes
Episode 3000: goal reached in 670 of last 1000 episodes
Episode 4000: goal reached in 709 of last 1000 episodes
Episode 5000: goal reached in 702 of last 1000 episodes
Iteration 3, epsilon min value 1e-10.
Episode 1000: goal reached in 204 of last 1000 episodes
Episode 2000: goal reached in 545 of last 1000 episodes
Episode 3000: goal reached in 596 of last 1000 episodes
Episode 4000: goal reached in 631 of last 1000 episodes
Episode 5000: goal reached in 645 of last 1000 episodes
Done!
Exploring alpha range.
Iteration 1, alpha value 0.9.
Episode 1000: goal reached in 179 of last 1000 episodes
Episode 2000: goal reached in 614 of last 1000 episodes
Episode 3000: goal reached in 606 of last 1000 episodes
Episode 4000: goal reached in 607 of last 1000 episodes
Episode 5000: goal reached in 627 of last 1000 episodes
Iteration 2, alpha value 0.8.
Episode 1000: goal reached in 278 of last 1000 episodes
Episode 2000: goal reached in 596 of last 1000 episodes
Episode 3000: goal reached in 662 of last 1000 episodes
Episode 4000: goal reached in 668 of last 1000 episodes
Episode 5000: goal reached in 629 of last 1000 episodes
Iteration 3, alpha value 0.1.
Episode 1000: goal reached in 22 of last 1000 episodes
Episode 2000: goal reached in 121 of last 1000 episodes
Episode 3000: goal reached in 250 of last 1000 episodes
Episode 4000: goal reached in 304 of last 1000 episodes
Episode 5000: goal reached in 294 of last 1000 episodes
Done!
Exploring gamma range.
Iteration 1, gamma value 0.9999.
Episode 1000: goal reached in 93 of last 1000 episodes
Episode 2000: goal reached in 363 of last 1000 episodes
Episode 3000: goal reached in 414 of last 1000 episodes
Episode 4000: goal reached in 486 of last 1000 episodes
Episode 5000: goal reached in 403 of last 1000 episodes
Iteration 2, gamma value 0.995.
Episode 1000: goal reached in 325 of last 1000 episodes
Episode 2000: goal reached in 657 of last 1000 episodes
Episode 3000: goal reached in 693 of last 1000 episodes
Episode 4000: goal reached in 680 of last 1000 episodes
Episode 5000: goal reached in 714 of last 1000 episodes
Iteration 3, gamma value 0.95.
Episode 1000: goal reached in 248 of last 1000 episodes
Episode 2000: goal reached in 563 of last 1000 episodes
Episode 3000: goal reached in 645 of last 1000 episodes
Episode 4000: goal reached in 656 of last 1000 episodes
Episode 5000: goal reached in 588 of last 1000 episodes
Done!
Parameter exploration completed.
###Markdown
Let's plot the results and think about how we can improve performance.
###Code
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharex=True, sharey=True, figsize=(15,10))
for c in range(3):
ax1.plot(rewards_eps[:,c], lw=0.7)
ax1.set_title('Epsilon')
ax1.legend(eps_iter,loc='lower right')
ax1.set_ylabel('Successful episodes over last 100', fontsize=12)
ax2.plot(rewards_epsmin[:,c], lw=0.7)
ax2.set_title('Minimum Epsilon')
ax2.legend(eps_min_iter ,loc='lower right')
ax3.plot(rewards_alpha[:,c], lw=0.7)
ax3.set_title('Alpha')
ax3.legend(alpha_iter,loc='lower right')
ax4.plot(rewards_gamma[:,c], lw=0.7)
ax4.set_title('Gamma')
ax4.legend(gamma_iter,loc='lower right')
fig.text(0.5, 0.07, 'Episode batch # (100 episodes per batch)', ha='center', fontsize=12);
#fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharex=False, sharey=False, figsize=(12,8))
fig, (axes) = plt.subplots(1, 4, sharex=False, sharey=False, figsize=(15,10))
axes = axes.ravel()
axes[0].imshow(Qtable_eps, cmap='jet', vmin=0.01, vmax=1)
axes[0].set_title('Q-table: Epsilon')
axes[1].imshow(Qtable_epsmin, cmap='jet', vmin=0.01, vmax=1)
axes[1].set_title('Q-table: Minimum Epsilon')
axes[2].imshow(Qtable_alpha, cmap='jet', vmin=0.01, vmax=1)
axes[2].set_title('Q-table: Alpha')
axes[3].imshow(Qtable_gamma, cmap='jet', vmin=0.01, vmax=1)
axes[3].set_title('Q-table: Gamma')
params = [eps_iter, eps_min_iter, alpha_iter, gamma_iter]
for a in range(4):
axes[a].vlines(3.5, ymin=0, ymax=15, color='w')
axes[a].vlines(7.5, ymin=0, ymax=15, color='w')
axes[a].set_xticks(ticks=range(12))
axes[a].set_xticklabels(['l', 'd', 'r', 'u']*3)
axes[a].set_yticks(np.arange(0,shape**2+1,3));
axes[a].set_yticklabels(np.arange(1,shape**2+2,3));
axes[a].set_ylabel('State')
axes[a].set_xlabel('Action')
axes[a].text(1.5, 18, params[a][0], ha='center');
axes[a].text(5.5, 18, params[a][1], ha='center');
axes[a].text(9.5, 18, params[a][2], ha='center');
###Output
_____no_output_____
###Markdown
Let's now store the parameters that performed best in our exploration and train a new agent using them.
###Code
nA = env.action_space.n
nS = env.observation_space.n
good_epsilon = 0.001
good_epsilon_min = 0.0000000001
good_alpha = 0.8
good_gamma = 0.995
agent = Agent(nA=nA, nS=nS, epsilon=good_epsilon, min_epsilon=good_epsilon_min, alpha=good_alpha, gamma=good_gamma)
rewards_improved, Q_table_improved = interact(env, agent, num_episodes=5000)
###Output
Episode 1000: goal reached in 317 of last 1000 episodes
Episode 2000: goal reached in 713 of last 1000 episodes
Episode 3000: goal reached in 761 of last 1000 episodes
Episode 4000: goal reached in 761 of last 1000 episodes
Episode 5000: goal reached in 734 of last 1000 episodes
###Markdown
Does performance improve?
###Code
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex=False, sharey=False, figsize=(15,10))
## Success episodes over training time default vs tested parameters
ax1.plot(range(len(rewards)), rewards, label='default parameters')
ax1.set_title('Default vs Tested parameters');
ax1.set_ylabel('% Successful Episodes');
ax1.set_xlabel('Episode batch # (batches of 100 episodes)');
ax1.plot(range(len(rewards_improved)), rewards_improved, label='tested parameters')
ax1.legend();
## Q-table default vs tested parameters
ax2.imshow(Q_table, cmap='jet')#, vmin=0, vmax=1)
ax2.set_xticks(ticks=[0,1,2,3])
ax2.set_xticklabels(['l', 'd', 'r', 'u'])
ax2.set_yticks(np.arange(0,shape**2+1,3));
ax2.set_yticklabels(np.arange(1,shape**2+2,3));
ax2.set_ylabel('State')
ax2.set_xlabel('Action')
ax2.set_title('Q-table default');
ax3.imshow(Q_table_improved, cmap='jet')#, vmin=0, vmax=1)
ax3.set_xticks(ticks=[0,1,2,3])
ax3.set_xticklabels(['l', 'd', 'r', 'u'])
ax3.set_yticks(np.arange(0,shape**2+1,3));
ax3.set_yticklabels(np.arange(1,shape**2+2,3));
ax3.set_ylabel('State')
ax3.set_xlabel('Action')
ax3.set_title('Q-table tested');
## Terrain-action maps with most valuable action for default vs tested parameters
p_map_improved = [actions[i] for i in np.argmax(Q_table_improved, axis=1)]
p_map_improved = np.reshape(np.array(p_map_improved), (shape,shape))
fig, (ax1, ax2) = plt.subplots(1,2, sharex=True, sharey=True, figsize=(15,10))
ax1.imshow(env_map, vmin = 0, vmax = 1, cmap=colormap)
ax1.axis('off')
ax2.imshow(env_map, vmin = 0, vmax = 1, cmap=colormap)
ax2.axis('off')
for r in range(shape):
for c in range(shape):
if env_map[c,r] not in [0.0, 1.0]:
ax1.text(r,c, p_map[c,r], c='white', weight='bold')
ax1.set_title('Terrain-action default');
for r in range(shape):
for c in range(shape):
if env_map[c,r] not in [0.0, 1.0]:
ax2.text(r,c, p_map_improved[c,r], c='white', weight='bold')
ax2.set_title('Terrain-action tested');
###Output
_____no_output_____
###Markdown
A round of simple parameter exploration has improved performance considerably, in the range of 10-20% more successful episodes. Inspecting the terrain-action maps, we can see that the agent's policy only changed for the 3 top blocks of the 3rd column.
Some notes on interpretation
The greedy policy learnt by the agent can seem a bit odd, judging just from the terrain-action maps. But there are a couple of points about the task environment that explain it. The documentation on FrozenLake isn't super detailed, so I've mainly inferred these points from agent behaviour & playing around with FrozenLake (I could be wrong in my assumptions), but it's worth considering:
- There is no negative reward to incentivise the agent to exit the lake quickly & under Q-learning without this negative reward, every action taken in a state will increase Q(S,A) very slightly;
- There is no penalty for falling in a hole;
- Taking an action that would bring the agent off-grid just keeps the agent in place;
- The environment is stochastic - for every action there is a probability p that the agent will 'slip' and move in a direction different from that intended.
Taken together, this means the agent is actually **encouraged** to meander along the terrain.
One puzzling aspect is the high value of action 'down' in State 15 (to the left of Goal). Due to environment stochasticity, I do expect every action at State 15 to have a high value, as there is a non-zero probability that an action that is 'not Right' will result in the Goal State. However, I expected that to mean all actions would have a high value, but action 'Right' would be the highest.
Closer inspection of 'default' and 'tested' parameter terrain maps suggests a possibility: **the agent learns to get stuck in a safe loop near the Goal state**. Recall the differences between the two maps are all in Column 3. And the result of these differences is that now, starting from State 11 (directly above Goal), there is a loop of actions that leads right into State 15 again:
- State 11 (L) --> State 10 (D) --> State 14 (R) --> State 15
- Recall the probability p that the agent slips and moves in a random different direction from intended. Once in State 15, the action 'Down' means the agent will either a) with probability p move into a loop entry point ('Up' into State 11 or 'Left' into State 14) or the Goal State; or b) with probability 1-p it will attempt 'Down', which is illegal, and stay in place safely, which doesn't terminate the episode.
If this is true, I would expect that **state-action pairs which maintain the loop should have higher values than usual**. Let us test this hypothesis below by comparing the Q(S,A) values for each of them relative to the remaining actions, with relative Q(S,A) values for 50,000 state-action pairs drawn randomly from the Q-table for comparison.
###Code
# Define loop states, actions and their values relative to the other actions available in that state
loop_states = [11, 10, 14, 15]
loop_actions = [0, 1, 2, 1]
loop_rel_value = []
for s,a in zip(loop_states, loop_actions):
loop_rel_value.append(Q_table_improved[s-1,a]/sum(Q_table_improved[s-1]) )
# Bootstrap by obtaining 50,000 relative values for random state-action pairs
i = 0
boot = []
while i <50000:
for s,a in zip(random.sample(range(1,shape**2),1), random.sample(range(0,3),1)):
#print('State {}, action {}: relative value {}'.format(s, actions[a], round(Q_table[s-1,a]/sum(Q_table[s-1,:]) ,2) ))
boot.append( round(Q_table_improved[s-1,a]/sum(Q_table_improved[s-1,:]) ,2))
i += 1
pcent_larger = [round(100*len(np.where(np.array(boot) <= t)[0])/50000,2) for t in loop_rel_value]
for s,a,v,p in zip(loop_states, loop_actions, loop_rel_value, pcent_larger):
print('State {}, action {}: relative value {} is larger than {}% of 50,000 cases bootstrapped from Q-table.'\
.format(s, actions[a], round(v ,2),p ))
###Output
State 11, action L: relative value 0.46 is larger than 95.61% of 50,000 cases bootstrapped from Q-table.
State 10, action D: relative value 0.41 is larger than 95.61% of 50,000 cases bootstrapped from Q-table.
State 14, action R: relative value 0.35 is larger than 91.43% of 50,000 cases bootstrapped from Q-table.
State 15, action D: relative value 0.3 is larger than 86.93% of 50,000 cases bootstrapped from Q-table.
###Markdown
The data above supports the loop hypothesis. **For the states contained in the loop, actions that maintain the agent in the loop are 'preferred' to their alternatives at a rate higher than preference rates between >90% of 50,000 randomly-drawn S-A pairs**.
Let's now train an agent with default parameters in a second environment which **is not slippery**, to confirm that the loop learnt by the agent is a consequence of environmental dynamics and not a mistake made in my programming or the setting of a weird combination of parameters.
###Code
env_slipless = gym.make('FrozenLake-v0', is_slippery=False)
agent = Agent(nA=nA, nS=nS)#, epsilon=good_epsilon, min_epsilon=good_epsilon_min, alpha=good_alpha, gamma=good_gamma)
rewards_slipless, Q_table_slipless = interact(env_slipless, agent, num_episodes=10000)
env_map_slipless, colormap = visualise_terrain(env_slipless.render(mode='ansi'), shape=int(math.sqrt(env_slipless.observation_space.n)))
shape = int(math.sqrt(env_slipless.observation_space.n))
p_map_slipless = [actions[i] for i in np.argmax(Q_table_slipless, axis=1)]
p_map_slipless = np.reshape(np.array(p_map_slipless), (shape,shape))
plt.plot(range(len(rewards_slipless)), rewards_slipless)
plt.title('Agent Performance in Slipless FrozenLake');
plt.ylabel('% Successful Episodes');
plt.xlabel('Episode batch # (batches of 100 episodes)');
## Generate subplots.
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=False, sharey=False)
# Color-coded Q-table
ax1.imshow(Q_table_slipless, cmap='jet')#, vmin=0, vmax=1)
ax1.set_xticks(ticks=[0,1,2,3])
ax1.set_xticklabels(['l', 'd', 'r', 'u'])
ax1.set_yticks(np.arange(0,shape**2+1,3));
ax1.set_yticklabels(np.arange(1,shape**2+2,3));
ax1.set_ylabel('State')
ax1.set_xlabel('Action')
ax1.set_title('Q-table without slipping');
# Terrain-policy
ax2.imshow(env_map_slipless, vmin = 0, vmax = 1, cmap=colormap)
ax2.axis('off')
for r in range(shape):
for c in range(shape):
if env_map_slipless[c,r] not in [0.0, 1.0]:
ax2.text(r,c, p_map_slipless[c,r], c='white', weight='bold')
ax2.set_title('Most likely action per grid of non-slippery terrain');
###Output
_____no_output_____
###Markdown
There is an important stochastic element to training, since not all initializations will lead to the agent figuring out the task. When they do, it achieves near ~100% performance. We can now see that if we take away the slippery dynamic, the policy map resolves into the expected layout, containing no loops. Finally, let's also train an agent with the Monte Carlo control variant (**interact_MC**) on the original slippery environment:
###Code
env = gym.make('FrozenLake-v0', is_slippery=True)
nA = env.action_space.n
nS = env.observation_space.n
MC_epsilon = 1
MC_epsilon_min = 0.001
MC_alpha = 0.01
MC_gamma = 1
MC_epsilon_d = 0.9
env_map_MC, colormap = visualise_terrain(env.render(mode='ansi'), shape=int(math.sqrt(env.observation_space.n)))
agent = Agent(nA=nA, nS=nS, epsilon=MC_epsilon,
min_epsilon=MC_epsilon_min, alpha_init=MC_alpha,
gamma=MC_gamma, epsilon_decay=MC_epsilon_d)
rewards_MC, Q_table_MC = interact_MC(env, agent, num_episodes=10000, window=100, alpha_tune=True)
plt.plot(range(len(rewards_MC)), rewards_MC)
plt.title('Agent Performance in FrozenLake');
plt.ylabel('% Successful Episodes');
plt.xlabel('Episode batch # (batches of 100 episodes)');
## Obtain most valuable actions per state and reshape into terrain conformation.
shape = int(math.sqrt(env.observation_space.n))
p_map = [actions[i] for i in np.argmax(Q_table_MC, axis=1)]
p_map = np.reshape(np.array(p_map), (shape,shape))
## Generate subplots.
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=False, sharey=False)
# Color-coded Q-table
ax1.imshow(Q_table_MC, cmap='jet')#, vmin=0, vmax=1)
ax1.set_xticks(ticks=[0,1,2,3])
ax1.set_xticklabels(['l', 'd', 'r', 'u'])
ax1.set_yticks(np.arange(0,shape**2+1,3));
ax1.set_yticklabels(np.arange(1,shape**2+2,3));
ax1.set_ylabel('State')
ax1.set_xlabel('Action')
ax1.set_title('Q-table');
# Terrain-policy
ax2.imshow(env_map_MC, vmin = 0, vmax = 1, cmap=colormap)
ax2.axis('off')
for r in range(shape):
for c in range(shape):
if env_map_MC[c,r] not in [0.0, 1.0]:
ax2.text(r,c, p_map[c,r], c='white', weight='bold')
ax2.set_title('Most likely action per grid of terrain');
###Output
_____no_output_____ |
tests/testing.ipynb | ###Markdown
Inspect workspace
###Code
from survos2.server.config import cfg
survos.run_command('workspace', 'add_session', uri=None, workspace=workspace_name, session='roi1')
survos.run_command('workspace', 'list_sessions', uri=None, workspace=workspace_name)
# add data to workspace
survos.run_command('annotations', 'get_labels', uri=None, workspace=workspace_name, level='001_level')
# add data to workspace
survos.run_command('annotations', 'get_levels', uri=None, workspace=workspace_name)
survos.run_command('features', 'existing', uri=None, workspace=workspace_name)
survos.run_command('superregions', 'existing', uri=None, workspace=workspace_name)
survos.run_command('annotations', 'get_levels', uri=None, workspace=workspace_name)
###Output
[34m[1mDEBUG - Using client <hug.use.Local object at 0x000001DE55E4F900> [0m[32m - survos2.survos[0m:[36mrun_command[0m:[36m111[0m
[34m[1mDEBUG - get request to client: get_levels [0m[32m - survos2.survos[0m:[36mrun_command[0m:[36m114[0m
[34m[1mDEBUG - Local client gave response Response(data={'data': [{'kind': 'level', 'labels': {'2': {'color': '#4339d4', 'idx': 2, 'name': 'Label', 'visible': True}, '3': {'color': '#9d289d', 'idx': 3, 'name': 'Label', 'visible': True}, '4': {'color': '#ee0004', 'idx': 4, 'name': 'Label', 'visible': True}, '5': {'color': '#23c436', 'idx': 5, 'name': 'Label', 'visible': True}}, 'modified': [1], 'name': '001 Level', 'id': '001_level'}, {'kind': 'level', 'labels': {'2': {'color': '#bb3c6f', 'idx': 2, 'name': 'Label', 'visible': True}, '3': {'color': '#af3131', 'idx': 3, 'name': 'Label', 'visible': True}, '4': {'color': '#6f4883', 'idx': 4, 'name': 'Label', 'visible': True}}, 'modified': [1], 'name': '002 Level', 'id': '002_level'}, {'kind': 'level', 'labels': {'2': {'color': '#2f2c99', 'idx': 2, 'name': 'Label', 'visible': True}, '3': {'color': '#98912c', 'idx': 3, 'name': 'Label', 'visible': True}}, 'modified': [1], 'name': '003 Level', 'id': '003_level'}], 'error': False}, status_code=200, headers={'content-type': 'application/json'}) [0m[32m - survos2.survos[0m:[36mrun_command[0m:[36m116[0m
###Markdown
Test create workspace
###Code
import h5py
original_data = h5py.File("D:\\datasets\\mcd_s10_Nuc_Cyt_r1.h5", 'r')
roi_data = original_data['data'][0:100,0:100,0:100]
workspace_config = {'dataset_name': "data",
'datasets_dir': "D:/datasets/",
'vol_fname': 'mcd_s10_Nuc_Cyt_r1.h5',
'workspace_name' : 'test_hunt12'}
from survos2.frontend.main import init_ws, roi_ws
init_ws(workspace_config)
roi_ws(roi_data, 'roi2')
test_workspace_name = 'tinyhunt'
DataModel.g.current_workspace = test_workspace_name
src = DataModel.g.dataset_uri('001_level', group='annotations')
dst = DataModel.g.dataset_uri('009_gaussian_blur', group='features')
src, dst
with DatasetManager(src, out=src, dtype='uint16', fillvalue=0) as DM:
src_dataset = DM.sources[0]
src_arr = src_dataset[:]
print(src_dataset)
#DM.out[:] = testvol
src_dataset.metadata()
src_dataset.set_attr('somethingelse', [1,2,3])
src_dataset.set_attr('geometrydf', pd.DataFrame(np.array([1,2,3])))
# add data to workspace
survos.run_command('features', 'existing', uri=None, workspace=test_workspace_name,
dtype='float32')
# add data to workspace
test_workspace_name = "roi1@epfl_256c"
survos.run_command('features', 'create', uri=None, workspace=test_workspace_name, feature_type='gaussian_blur')
src = DataModel.g.dataset_uri('006_gaussian', group='features')
dst = DataModel.g.dataset_uri('009_gaussian_blur', group='features')
src, dst
with DatasetManager(src, out=dst, dtype='float32', fillvalue=0) as DM:
src_dataset = DM.sources[0]
src_arr = src_dataset[:]
print(src_dataset)
#DM.out[:] = testvol
# add data to workspace
survos.run_command('features',
'create',
uri=None,
workspace=test_workspace_name,
feature_type='frangi')
src_dataset.get_metadata()
import numpy as np
x = np.array([1,2,3])
np.append(x, 4)
with DatasetManager(src, out=dst, dtype='uint32', fillvalue=0) as DM:
src_dataset = DM.sources[0]
src_arr = src_dataset[:]
DM.out[:] = testvol
with DatasetManager(src, out=dst, dtype='uint32', fillvalue=0) as DM:
out_dataset = DM.out
out_dataset[:] = test
###Output
C:/work/diam/data
###Markdown
Test filtering
###Code
workflow_name = './tests/workflows/feature_set.yaml'
#workflow_name = 'tests/workflows/superegion.yaml'
all_params, params = run_workflow(workflow_name)
# with DatasetManager(src, out=dst, dtype='uint32', fillvalue=0) as DM:
# src_dataset = DM.sources[0]
# src_arr = src_dataset[:]
# DM.out[:] = testvol
###Output
_____no_output_____
###Markdown
Superregion workflow
###Code
workflow_name = './tests/workflows/supervoxel_pipeline.yaml'
all_params, params = run_workflow(workflow_name)
arr = view_dataset('002_gaussian_blur', 'features', 100)
arr = view_dataset('002_supervoxels', 'regions', 10)
###Output
_____no_output_____
###Markdown
Generating test data
###Code
testvol = np.random.random((99,256,256))# astype(np.uint8)
testvol = np.array([[[0.1761602 , 0.6701295 , 0.13151232, 0.95726678],
[0.4795476 , 0.48114134, 0.0410548 , 0.29893265],
[0.49127266, 0.70298447, 0.42751211, 0.08101552],
[0.73805652, 0.83111601, 0.36852477, 0.38732476]],
[[0.2847222 , 0.96054574, 0.25430756, 0.35403861],
[0.54439093, 0.65897414, 0.1959487 , 0.90714872],
[0.84462152, 0.90754182, 0.02455657, 0.26180662],
[0.1711208 , 0.40122666, 0.54562598, 0.01419861]],
[[0.59280376, 0.42706895, 0.86637913, 0.87831645],
[0.57991401, 0.31989204, 0.85869799, 0.6333411 ],
[0.21539274, 0.63780214, 0.64204493, 0.74425482],
[0.1903691 , 0.81962537, 0.31774673, 0.34812628]],
[[0.40880077, 0.595773 , 0.28856063, 0.19316746],
[0.03195766, 0.62475541, 0.50762591, 0.34700798],
[0.98913461, 0.07883111, 0.96534233, 0.57697606],
[0.71496714, 0.70764578, 0.92294417, 0.91300531]]])
import h5py
test_datadir = "C:\\work\\diam\\b6\\SuRVoS2\\tmp\\"
# add dataset to workspace from file, so save array to file
map_fullpath = os.path.join(test_datadir,"testvol_4x4x4e.h5")
with h5py.File(map_fullpath, 'w') as hf:
hf.create_dataset("data", data=testvol)
testvol = testvol.astype(np.float32)
testvol = testvol - np.min(testvol)
testvol = testvol / np.max(testvol)  # normalise to [0, 1]
map_fullpath
###Output
_____no_output_____
###Markdown
Gaussian blur
###Code
test_workspace_name = "testing123e"
# create new workspace
survos.run_command("workspace", "create", uri=None, workspace=test_workspace_name)
# add data to workspace
survos.run_command('workspace', 'add_data', uri=None, workspace=test_workspace_name,
data_fname=map_fullpath,
dtype='float32')
# run gaussian_blur
DataModel.g.current_workspace = test_workspace_name
src = DataModel.g.dataset_uri('__data__', None)
dst = DataModel.g.dataset_uri('001_gaussian_blur', group='features')
survos.run_command('features', 'gaussian_blur', uri=None,
src=src,
dst=dst)
with DatasetManager(src, out=dst, dtype='float32', fillvalue=0) as DM:
print(DM.sources[0].shape)
src_dataset = DM.sources[0]
dst_dataset = DM.out
src_arr = src_dataset[:]
dst_arr = dst_dataset[:]
src_arr
dst_arr
%%run_pytest[clean] -qq
test_datadir = "D:\\datasets"
test_workspace_name = "testvol_24"
# make test vol
map_fullpath = os.path.join(test_datadir,"testvol_4x4x4b.h5")
#testvol = np.random.random((4,4,4))# astype(np.uint8)
#with h5py.File(map_fullpath, 'w') as hf:
# hf.create_dataset("data", data=testvol)
# create new workspace
survos.run_command("workspace", "create", uri=None, workspace=test_workspace_name)
# add data to workspace
survos.run_command('workspace', 'add_data', uri=None, workspace=test_workspace_name,
data_fname=map_fullpath,
dtype='float32')
## add dataset to workspace
# run gaussian_blur
DataModel.g.current_workspace = test_workspace_name
src = DataModel.g.dataset_uri('__data__', None)
dst = DataModel.g.dataset_uri('001_gaussian_blur', group='features')
survos.run_command('features', 'gaussian_blur', uri=None,
src=src,
dst=dst)
with DatasetManager(src, out=dst, dtype='float32', fillvalue=0) as DM:
print(DM.sources[0].shape)
src_dataset = DM.sources[0]
dst_dataset = DM.out
src_arr = src_dataset[:]
dst_arr = dst_dataset[:]
def test_feature_shape():
assert dst_arr.shape == (4,4,4)
def test_feature_src():
assert_allclose(src_arr, np.array([[[0.41955446, 0.65139688, 0.50626089, 0.47356243],
[0.5397072 , 0.88715651, 0.57358875, 0.17841908],
[0.84062367, 0.42927081, 1. , 0.601415 ],
[0.22624536, 0.61382118, 0.81198787, 0.45563817]],
[[0.26604622, 0.98411002, 0.9910637 , 0.04614431],
[0.91235452, 0.50873271, 0.9090851 , 0.55183262],
[0.69766631, 0.34353716, 0.79863059, 0.81746442],
[0.69540008, 0.25363482, 0. , 0.98832664]],
[[0.41824034, 0.2947538 , 0.823542 , 0.02814557],
[0.22670235, 0.86729335, 0.28522538, 0.31510756],
[0.25549214, 0.1451409 , 0.30383666, 0.74032794],
[0.50077333, 0.51566668, 0.30102867, 0.72429019]],
[[0.6533611 , 0.66302082, 0.55634748, 0.71185593],
[0.94052145, 0.61666328, 0.9143069 , 0.12840489],
[0.12144252, 0.84725333, 0.92758133, 0.49322578],
[0.54037618, 0.051214 , 0.25104931, 0.87874488]]]))
def test_feature_dst():
assert_allclose(dst_arr, np.array([[[0.1666268 , 0.2257143 , 0.21892974, 0.14923 ],
[0.22236633, 0.30317545, 0.29989442, 0.21237083],
[0.21442178, 0.29576921, 0.29921022, 0.22003345],
[0.14868982, 0.20833325, 0.21602006, 0.16522714]],
[[0.22320336, 0.30302912, 0.29496408, 0.20176643],
[0.29272896, 0.40102041, 0.39890146, 0.28483036],
[0.27765584, 0.38559583, 0.39318019, 0.2930665 ],
[0.18990593, 0.26771539, 0.2803525 , 0.21866953]],
[[0.21965253, 0.29812005, 0.29064476, 0.19943923],
[0.28396353, 0.39044347, 0.38968769, 0.27962744],
[0.26546711, 0.37147775, 0.38107473, 0.28643546],
[0.17921498, 0.25512561, 0.26955697, 0.21300924]],
[[0.16236468, 0.21962528, 0.21409421, 0.14734329],
[0.20654736, 0.28440154, 0.28422096, 0.20433155],
[0.18955445, 0.26714209, 0.27526605, 0.20778845],
[0.12524115, 0.18044972, 0.19238257, 0.15347627]]]))
import h5py
#from survos2.improc.utils import DatasetManager
from torch.testing import assert_allclose
import numpy as np
testvol = np.random.random((4,4,4))# astype(np.uint8)
testvol
###Output
_____no_output_____
###Markdown
Test superregions
###Code
# def supervoxels(
# src: DataURIList,
# dst: DataURI,
# n_segments: Int = 10,
# compactness: Float = 20,
# spacing: FloatList = [1, 1, 1],
# multichannel: SmartBoolean = False,
# enforce_connectivity: SmartBoolean = False,
# ):
from survos2.api.regions import supervoxels
result = survos.run_command('regions', 'create', uri=None, workspace=workspace_name)
result
features_src = DataModel.g.dataset_uri("002_gaussian_blur", group="features")
dst = DataModel.g.dataset_uri(result[0]['id'], group='regions')
arr = view_dataset('002_gaussian_blur', 'features', 10)
result = supervoxels([features_src],
dst,
n_segments=100,
compactness=0.5,
spacing=[1,1,1],
multichannel=False,
enforce_connectivity=False)
result
%matplotlib inline
arr = view_dataset('002_supervoxels', 'regions', 40)
###Output
_____no_output_____
###Markdown
Test prediction
###Code
from survos2.server.superseg import sr_predict
# Preview of the sr_predict call; the actual inputs (supervoxel_image, anno_image,
# features, superseg_cfg) are assembled in the cells below before the real call.
# sr_predict(
#     supervoxel_image,
#     anno_image,
#     feature_images,
#     refine=False,
#     lam=1.0,
#     num_components=0)
anno_id = "002_level"
region_id = "001_supervoxels"
feature_ids = ["002_gblur", "001_raw"]
classifier_type = "rf"
projection_type = None
refine = False
lam = 1.0
num_components = 0
# get anno
src = DataModel.g.dataset_uri(anno_id, group="annotations")
with DatasetManager(src, out=None, dtype="uint16", fillvalue=0) as DM:
src_dataset = DM.sources[0]
anno_image = src_dataset[:] & 15
# get superregions
src = DataModel.g.dataset_uri(region_id, group="regions")
with DatasetManager(src, out=None, dtype="uint32", fillvalue=0) as DM:
src_dataset = DM.sources[0]
supervoxel_image = src_dataset[:]
# get features
features = []
for feature_id in feature_ids:
src = DataModel.g.dataset_uri(feature_id, group="features")
logger.debug(f"Getting features {src}")
with DatasetManager(src, out=None, dtype="float32", fillvalue=0) as DM:
src_dataset = DM.sources[0]
logger.debug(f"Adding feature of shape {src_dataset.shape}")
features.append(src_dataset[:])
superseg_cfg = cfg.pipeline
superseg_cfg["type"] = classifier_type
superseg_cfg["predict_params"]["proj"] = projection_type
superseg_cfg
logger.debug(
f"sr_predict with {len(features)} features and anno of shape {anno_image.shape} and sr of shape {supervoxel_image.shape}"
)
segmentation = sr_predict(
supervoxel_image,
anno_image,
features,
superseg_cfg,
refine,
lam,
num_components,
)
plt.imshow(segmentation[12,:])
segmentation.shape
###Output
_____no_output_____
###Markdown
Label parenting
###Code
survos.run_command('annotations', 'get_levels', uri=None, workspace=workspace_name)
result = survos.run_command('annotations', 'add_level', uri=None, workspace=workspace_name)
result
params = dict(level="002_level")
result = survos.run_command('annotations', 'add_label', uri=None, workspace=workspace_name, **params)
result
survos.run_command('annotations', 'get_labels', uri=None, workspace=workspace_name, level='002_level')
label = dict(
idx=2,
name="Labelname",
color="#FF0000",
visible=True,
)
params = dict(level="002_level", )
result = survos.run_command('annotations', 'update_label', uri=None, workspace=workspace_name, **params, **label)
survos.run_command('annotations', 'get_labels', uri=None, workspace=workspace_name, level='002_level')
survos.run_command('annotations', 'get_labels', uri=None, workspace=workspace_name, level='002_level')
survos.run_command('annotations', 'set_label_parent', uri=None, workspace=workspace_name, level='002_level')
from survos2.utils import encode_numpy_bytes, encode_numpy2
import numpy as np
a = np.array([1.0,2.0,3.0,4.0,5.0])
encode_numpy2(a)
encode_numpy_bytes(a)
src = DataModel.g.dataset_uri('001_level', group='annotations')
survos.run_command('roi','pull_anno', uri=None, workspace=workspace_name, roi_fname='vf_down2_roi_0_82_128_256_120_256')
roi_fname='vfcrop4_roi_20_148_20_148_20_148'
roi_parts = roi_fname.split("_")
z_min = roi_parts[2]
z_max = roi_parts[3]
x_min = roi_parts[4]
x_max = roi_parts[5]
y_min = roi_parts[6]
y_max = roi_parts[7]
from survos2.entity.sampler import (sample_bvol,
generate_random_points_in_volume,
viz_bvols,
centroid_to_bvol,
offset_points,
grid_of_points,
sample_marked_patches)
img_vol = np.ones((10,10,10))
result = sample_bvol(img_vol, (2,8,2,8,2,8))
result.shape
assert(result.shape == (6,6,6))
img_vol = np.ones((10,10,10))
result = generate_random_points_in_volume(img_vol, 10, border=(0,0,0))
assert result.shape[0] == 10 and result.shape[1] == 4
points = np.array([[10,10,10,0],[10,20,20,0],[10,30,30,0],[10,40,40,0],[10,50,50,0]])
result = centroid_to_bvol(points)
assert result.shape == (5,6)
points
result = offset_points(points, (10,10,10))
assert result[0][0] == points[0][0] + 10
img_vol = np.ones((32,32,32))
result = grid_of_points(img_vol, (4,4,4), (2,2,2))
assert result[0][0] == 4
assert result.shape[0] == 8
img_vol = np.ones((32,32,32))
result = grid_of_points(img_vol, (4,4,4), (4,4,4))
assert result.shape[0] == 64
img_vol = np.ones((64,64,64))
pts = grid_of_points(img_vol, (4,4,4), (32,32,32))
img_volume = np.random.random((64,64,64))
#padded_anno = (np.random((32,32,32)) > 0.5) * 1.0
locs = np.array([[10,10,10,0],[10,20,20,0],[10,30,30,0],[10,40,40,0],[10,50,50,0]])
result = sample_marked_patches(img_volume, locs, pts, patch_size=(4, 4, 4))
assert result.vols.shape == (5,4,4,4)
result.vols_pts.shape[0] == 5
result.vols_pts[0][0] == [1,1,1,0]
from survos2.entity.entities import (make_bounding_vols, make_entity_mask, make_entity_df, uncrop_pad)
result = make_entity_df(points)
import pandas as pd
assert isinstance(result, pd.DataFrame)
assert result.shape == (5,4)
img_vol.shape
result = uncrop_pad(img_vol, (96,96,96), (16,80,16,80,16,80))
assert result.shape == (96,96,96)
assert result[0][0][0] == 0.0
img_vol = np.ones((128,128,128))
points = np.array([[32,32,32,0],[32,42,42,0],[32,52,52,0],[32,62,62,0],[32,72,72,0]])
result = centroid_to_bvol(points)
result = make_entity_mask(img_vol, points,bvol_dim=(4,4,4))
# returns a padded volume
assert result[0].shape == (136,136,136)
%matplotlib inline
plt.imshow(result[0][32,:])
from survos2.entity.components import measure_components, filter_proposal_mask, filter_small_components, measure_regions, measure_big_blobs
from matplotlib import pyplot as plt
import numpy as np
img = np.zeros((32,32,32))
img[8:12,8:12,8:12] = 1
img[8:12, 24:28,24:28] = 1
result = measure_components(img)
assert result.shape == (2,11)
result = measure_big_blobs([img])
plt.imshow(img[8,:])
result.shape
result = filter_proposal_mask(img, num_erosions=0, num_dilations=3, num_medians=0)
assert result.shape == (32,32,32)
assert np.sum(result) > np.sum(img)
img = np.zeros((32,32,32))
img[8:12,8:12,8:12] = 1
img[8:12, 24:28,24:28] = 1
img[8:12,16:18,16:18] = 1
result= filter_small_components([img], min_component_size=16)[0]
assert np.sum(result) < np.sum(img)
plt.imshow(img[8,:])
img = np.zeros((32,32,32))
img[8:12,8:12,8:12] = 1
result = measure_regions([img.astype(np.uint32)])
assert result[0].shape == (1,11)
###Output
_____no_output_____ |
data/NRELDATA/wind/wind_data_processing.ipynb | ###Markdown
Processes the Accumulated Wind Data for Several PlacesThe data was downloaded from the NREL Wind Integration National Database (WIND) Toolkit
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import glob
import os
# the list of locations
wind_locations = ['san_bernadino',
'santa_fe',
'dallas',
'lincoln',
'mansfield',
'syracuse']
solar_locations = ['san_bernadino',
'santa_fe',
'dallas',
'champaign',
'mansfield',
'syracuse']
# get the list of files
wind_files_location = {}
solar_files_location = {}
for loc in wind_locations:
files = glob.glob("./"+loc+"/*.csv")
wind_files_location[loc] = files
for i in wind_files_location.values():
i.sort()
wind_files_location
for loc in wind_files_location:
# print(loc)
print(f"Description of data for {loc.capitalize()}")
for i, file in enumerate(wind_files_location[loc]):
# print(i, file)
if i == 0:
df = pd.read_csv(file,
skiprows=1,)
df.index = pd.to_datetime(df[['Year', 'Month', 'Day', 'Hour', 'Minute']])
df.drop(['Year', 'Month', 'Day', 'Hour', 'Minute'], axis=1, inplace=True)
# print(df.head())
else:
tmp_df = pd.read_csv(file,
skiprows=1,)
tmp_df.index = pd.to_datetime(tmp_df[['Year', 'Month', 'Day', 'Hour', 'Minute']])
tmp_df.drop(['Year', 'Month', 'Day', 'Hour', 'Minute'], axis=1, inplace=True)
# print(tmp_df.head())
df = pd.concat([df, tmp_df], axis=0)
df.to_csv('./'+loc+'_complete.csv')
print(df.describe())
###Output
Description of data for San_bernadino
wind speed at 100m (m/s) air temperature at 100m (C)
count 631296.000000 631296.000000
mean 6.348973 18.521221
std 4.068911 8.758513
min 0.020000 -2.700000
25% 3.150000 11.380000
50% 5.460000 17.790000
75% 9.160000 25.550000
max 26.470000 42.060000
Description of data for Santa_fe
wind speed at 100m (m/s) air temperature at 100m (C)
count 631296.000000 631296.000000
mean 6.481125 9.371302
std 3.921077 8.956797
min 0.020000 -23.150000
25% 3.270000 1.910000
50% 5.970000 9.930000
75% 9.210000 17.020000
max 27.770000 28.430000
Description of data for Dallas
wind speed at 100m (m/s) air temperature at 100m (C)
count 631296.000000 631296.000000
mean 6.352682 19.185832
std 3.012923 9.757566
min 0.030000 -23.150000
25% 4.050000 11.840000
50% 6.220000 20.270000
75% 8.490000 26.730000
max 26.140000 40.490000
Description of data for Lincoln
wind speed at 100m (m/s) air temperature at 100m (C)
count 631296.000000 631296.000000
mean 6.940412 11.844361
std 3.242788 11.799541
min 0.020000 -25.210000
25% 4.410000 2.110000
50% 6.880000 13.220000
75% 9.280000 21.690000
max 39.530000 37.780000
Description of data for Mansfield
wind speed at 100m (m/s) air temperature at 100m (C)
count 631296.000000 631296.000000
mean 7.145276 9.993185
std 3.394068 11.174791
min 0.010000 -23.720000
25% 4.570000 0.610000
50% 6.840000 11.230000
75% 9.460000 19.560000
max 27.230000 34.970000
Description of data for Syracuse
wind speed at 100m (m/s) air temperature at 100m (C)
count 631296.000000 631296.000000
mean 6.000699 9.392877
std 3.412044 11.053535
min 0.030000 -24.330000
25% 3.340000 0.710000
50% 5.650000 10.270000
75% 8.230000 18.720000
max 29.630000 34.900000
|
Chapter02/2 FFNN.ipynb | ###Markdown
FFNN using numpy
###Code
%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns; sns.set()
from sklearn.metrics import roc_auc_score
# Creating the data
# initiating random number
np.random.seed(11)
data_xor = pd.DataFrame({'x': [0, 1, 0, 1], 'y':[0, 0, 1, 1], 'type': ['0', '1', '1', '0']})
ax = sns.scatterplot(x="x", y="y", hue="type",
data=data_xor)
data_xor.pivot(index='x', columns='y')
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import mean_squared_error
import matplotlib
matplotlib.use("TkAgg")
# initiating random number
np.random.seed(11)
#### Creating the dataset
# mean and standard deviation for the x belonging to the first class
mu_x1, sigma_x1 = 0, 0.1
# constant to make the second distribution different from the first
# x1_mu_diff, x2_mu_diff, x3_mu_diff, x4_mu_diff = 0.5, 0.5, 0.5, 0.5
x1_mu_diff, x2_mu_diff, x3_mu_diff, x4_mu_diff = 0, 1, 0, 1
# creating the first distribution
d1 = pd.DataFrame({'x1': np.random.normal(mu_x1, sigma_x1, 1000) + 1,
'x2': np.random.normal(mu_x1, sigma_x1, 1000) + 1,
'type': 0})
d2 = pd.DataFrame({'x1': np.random.normal(mu_x1, sigma_x1, 1000) + 1,
'x2': np.random.normal(mu_x1, sigma_x1, 1000) - 1,
'type': 1})
d3 = pd.DataFrame({'x1': np.random.normal(mu_x1, sigma_x1, 1000) - 1,
'x2': np.random.normal(mu_x1, sigma_x1, 1000) - 1,
'type': 0})
d4 = pd.DataFrame({'x1': np.random.normal(mu_x1, sigma_x1, 1000) - 1,
'x2': np.random.normal(mu_x1, sigma_x1, 1000) + 1,
'type': 1})
data = pd.concat([d1, d2, d3, d4], ignore_index=True)
ax = sns.scatterplot(x="x1", y="x2", hue="type",
data=data)
# Splitting the dataset in training and test set
msk = np.random.rand(len(data)) < 0.8
# Roughly 80% of data will go in the training set
train_x, train_y = data[['x1', 'x2']][msk], data[['type']][msk].values
# Everything else will go into the test set
test_x, test_y = data[['x1', 'x2']][~msk], data[['type']][~msk].values
def sigmoid(s):
# Activation function
return 1 / (1 + np.exp(-s))
def sigmoid_prime(s):
# Derivative of the sigmoid
return sigmoid(s) * (1 - sigmoid(s))
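# sigmoid_prime uses the identity d/ds sigmoid(s) = sigmoid(s) * (1 - sigmoid(s));
# the backward pass below calls it when converting errors into weight-update deltas.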
class FFNN(object):
def __init__(self, input_size=2, hidden_size=2, output_size=1):
# Adding 1 as it will be our bias
self.input_size = input_size + 1
self.hidden_size = hidden_size + 1
self.output_size = output_size
self.o_error = 0
self.o_delta = 0
self.z1 = 0
self.z2 = 0
self.z3 = 0
self.z2_error = 0
# The whole weight matrix, from the inputs till the hidden layer
self.w1 = np.random.randn(self.input_size, self.hidden_size)
# The final set of weights from the hidden layer till the output layer
self.w2 = np.random.randn(self.hidden_size, self.output_size)
def forward(self, X):
# Forward propagation through our network
X['bias'] = 1 # Adding 1 to the inputs to include the bias in the weight
self.z1 = np.dot(X, self.w1) # dot product of X (input) and first set of 3x2 weights
self.z2 = sigmoid(self.z1) # activation function
self.z3 = np.dot(self.z2, self.w2) # dot product of hidden layer (z2) and second set of 3x1 weights
o = sigmoid(self.z3) # final activation function
return o
def backward(self, X, y, output, step):
# Backward propagation of the errors
X['bias'] = 1 # Adding 1 to the inputs to include the bias in the weight
self.o_error = y - output # error in output
self.o_delta = self.o_error * sigmoid_prime(output) * step # applying derivative of sigmoid to error
self.z2_error = self.o_delta.dot(
self.w2.T) # z2 error: how much our hidden layer weights contributed to output error
self.z2_delta = self.z2_error * sigmoid_prime(self.z2) * step # applying derivative of sigmoid to z2 error
self.w1 += X.T.dot(self.z2_delta) # adjusting first of weights
self.w2 += self.z2.T.dot(self.o_delta) # adjusting second set of weights
def predict(self, X):
return self.forward(X)
def fit(self, X, y, epochs=10, step=0.05):
for epoch in range(epochs):
X['bias'] = 1 # Adding 1 to the inputs to include the bias in the weight
output = self.forward(X)
self.backward(X, y, output, step)
my_network = FFNN()
my_network.fit(train_x, train_y, epochs=10000, step=0.001)
pred_y = test_x.apply(my_network.forward, axis=1)
test_y_ = [i[0] for i in test_y]
pred_y_ = [i[0] for i in pred_y]
print('MSE: ', mean_squared_error(test_y_, pred_y_))
print('AUC: ', roc_auc_score(test_y_, pred_y_))
threshold = 0.5
pred_y_binary = [0 if i > threshold else 1 for i in pred_y_]
cm = confusion_matrix(test_y_, pred_y_binary, labels=[0, 1])
print(pd.DataFrame(cm,
index=['True 0', 'True 1'],
columns=['Predicted 0', 'Predicted 1']))
# If we want binary predictions we can do:
# data['predicted'] = [0 if i > threshold else 1 for i in pred_y]
ax = sns.scatterplot(x="x1", y="x2", hue=pred_y_binary,
data=data[~msk])
###Output
_____no_output_____
###Markdown
FFNN using KerasWe will now see how it's possible to use Keras to create a simple FFNN.
###Code
train_x[['x1', 'x2']]
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from sklearn.metrics import mean_squared_error
import os
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping, TensorBoard
model = Sequential()
model.add(Dense(2, input_dim=2))
model.add(Activation('tanh'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
sgd = SGD(lr=0.1)
model.compile(loss='mse', optimizer=sgd)
model.fit(train_x[['x1', 'x2']], train_y,batch_size=1, epochs=2)
pred = model.predict_proba(test_x)
print('MSE: ', mean_squared_error(test_y, pred))
basedir = '..'
logs = os.path.join(basedir, 'logs')
tbCallBack = TensorBoard(
log_dir=logs, histogram_freq=0, write_graph=True, write_images=True)
callbacks_list = [tbCallBack]
model.fit(train_x[['x1', 'x2']], train_y, batch_size=1, epochs=10, callbacks=callbacks_list)
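# The TensorBoard logs written under `logs` can be viewed with the standard CLI,
# e.g. `tensorboard --logdir ../logs` (the path follows `basedir` defined above).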
tbCallBack = TensorBoard(
log_dir=logs, histogram_freq=0, write_graph=True, write_images=True)
filepath = "weights-improvement-{epoch:02d}-{accuracy:.2f}.hdf5"
checkpoint = ModelCheckpoint(
filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [tbCallBack]
###Output
_____no_output_____
###Markdown
It's possible to save the model while training using checkpoints.
###Code
filepath = "checkpoint-{epoch:02d}-{acc:.2f}.hdf5"
checkpoint = ModelCheckpoint(
filepath, monitor='accuracy', verbose=1, save_best_only=False, mode='max')
callbacks_list = [tbCallBack, checkpoint]
model.compile(loss='mse', optimizer=sgd, metrics=['accuracy'])
history = model.fit(train_x[['x1', 'x2']], train_y, batch_size=1, epochs=10, callbacks=callbacks_list)
###Output
Epoch 1/10
3183/3183 [==============================] - 3s 1ms/step - loss: 2.0732e-04 - acc: 1.0000
Epoch 00001: saving model to checkpoint-01-1.00.hdf5
Epoch 2/10
3183/3183 [==============================] - 3s 953us/step - loss: 1.9125e-04 - acc: 1.0000
Epoch 00002: saving model to checkpoint-02-1.00.hdf5
Epoch 3/10
3183/3183 [==============================] - 3s 1ms/step - loss: 1.7748e-04 - acc: 1.0000
Epoch 00003: saving model to checkpoint-03-1.00.hdf5
Epoch 4/10
3183/3183 [==============================] - 3s 862us/step - loss: 1.6555e-04 - acc: 1.0000
Epoch 00004: saving model to checkpoint-04-1.00.hdf5
Epoch 5/10
3183/3183 [==============================] - 3s 929us/step - loss: 1.5510e-04 - acc: 1.0000
Epoch 00005: saving model to checkpoint-05-1.00.hdf5
Epoch 6/10
3183/3183 [==============================] - 3s 928us/step - loss: 1.4589e-04 - acc: 1.0000
Epoch 00006: saving model to checkpoint-06-1.00.hdf5
Epoch 7/10
3183/3183 [==============================] - 3s 916us/step - loss: 1.3771e-04 - acc: 1.0000
Epoch 00007: saving model to checkpoint-07-1.00.hdf5
Epoch 8/10
3183/3183 [==============================] - 4s 1ms/step - loss: 1.3038e-04 - acc: 1.0000
Epoch 00008: saving model to checkpoint-08-1.00.hdf5
Epoch 9/10
3183/3183 [==============================] - 3s 961us/step - loss: 1.2379e-04 - acc: 1.0000
Epoch 00009: saving model to checkpoint-09-1.00.hdf5
Epoch 10/10
3183/3183 [==============================] - 3s 877us/step - loss: 1.1783e-04 - acc: 1.0000
Epoch 00010: saving model to checkpoint-10-1.00.hdf5
|
43_Intro_to_BERT/43_Intro_to_BERT.ipynb | ###Markdown
**Introduction to BERT**Today we will fine-tune BERT to perform sentiment analysis on a dataset of plain-text IMDB movie reviews.In addition to training a model, you will learn how to preprocess text into an appropriate format. **Regular Transformers in NLP.** **BERT**In this workshop, you will:- Load the IMDB dataset- Load a BERT model from TensorFlow Hub- Build your own model by combining BERT with a classifier- Train your own model, fine-tuning BERT as part of that- Save your model and use it to classify sentencesIf you're new to working with the IMDB dataset, please see [Basic text classification](https://www.tensorflow.org/tutorials/keras/text_classification) for more details. About BERT[BERT](https://arxiv.org/abs/1810.04805) and other Transformer encoder architectures have been wildly successful on a variety of tasks in NLP (natural language processing). They compute vector-space representations of natural language that are suitable for use in deep learning models. The BERT family of models uses the Transformer encoder architecture to process each token of input text in the full context of all tokens before and after, hence the name: Bidirectional Encoder Representations from Transformers. BERT models are usually pre-trained on a large corpus of text, then fine-tuned for specific tasks. Setup
###Code
# A dependency of the preprocessing for BERT inputs
!pip install -q tensorflow-text
###Output
[K |████████████████████████████████| 3.4MB 21.2MB/s
[?25h
###Markdown
You will use the AdamW optimizer from [tensorflow/models](https://github.com/tensorflow/models).
###Code
!pip install -q tf-models-official
import os
import shutil
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text
from official.nlp import optimization # to create AdamW optimizer
import matplotlib.pyplot as plt
tf.get_logger().setLevel('ERROR')
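# Illustrative sketch: the AdamW optimizer from official.nlp.optimization is
# typically built with create_optimizer. The learning rate and step counts below
# are placeholders; in practice they come from the dataset size and the number
# of training epochs.
example_num_train_steps = 1000    # placeholder: steps_per_epoch * epochs
example_num_warmup_steps = 100    # placeholder: roughly 10% of the training steps
example_optimizer = optimization.create_optimizer(init_lr=3e-5,
                                                  num_train_steps=example_num_train_steps,
                                                  num_warmup_steps=example_num_warmup_steps,
                                                  optimizer_type='adamw')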
###Output
_____no_output_____
###Markdown
Sentiment AnalysisThis notebook trains a sentiment analysis model to classify movie reviews as *positive* or *negative*, based on the text of the review.You'll use the [Large Movie Review Dataset](https://ai.stanford.edu/~amaas/data/sentiment/) that contains the text of 50,000 movie reviews from the [Internet Movie Database](https://www.imdb.com/). Download the IMDB datasetLet's download and extract the dataset, then explore the directory structure.
###Code
url = 'https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
dataset = tf.keras.utils.get_file('aclImdb_v1.tar.gz', url,
untar=True, cache_dir='.',
cache_subdir='')
dataset_dir = os.path.join(os.path.dirname(dataset), 'aclImdb')
train_dir = os.path.join(dataset_dir, 'train')
# remove unused folders to make it easier to load the data
remove_dir = os.path.join(train_dir, 'unsup')
shutil.rmtree(remove_dir)
###Output
Downloading data from https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
84131840/84125825 [==============================] - 4s 0us/step
###Markdown
Next, you will use the `text_dataset_from_directory` utility to create a labeled `tf.data.Dataset`.The IMDB dataset has already been divided into train and test, but it lacks a validation set. Let's create a validation set using an 80:20 split of the training data by using the `validation_split` argument below.Note: When using the `validation_split` and `subset` arguments, make sure to either specify a random seed, or to pass `shuffle=False`, so that the validation and training splits have no overlap.
###Code
AUTOTUNE = tf.data.AUTOTUNE
batch_size = 32
seed = 42
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
'aclImdb/train',
batch_size=batch_size,
validation_split=0.2,
subset='training',
seed=seed)
class_names = raw_train_ds.class_names
train_ds = raw_train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = tf.keras.preprocessing.text_dataset_from_directory(
'aclImdb/train',
batch_size=batch_size,
validation_split=0.2,
subset='validation',
seed=seed)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = tf.keras.preprocessing.text_dataset_from_directory(
'aclImdb/test',
batch_size=batch_size)
test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE)
###Output
Found 25000 files belonging to 2 classes.
Using 20000 files for training.
Found 25000 files belonging to 2 classes.
Using 5000 files for validation.
Found 25000 files belonging to 2 classes.
###Markdown
Let's take a look at a few reviews.
###Code
for text_batch, label_batch in train_ds.take(1):
for i in range(3):
print(f'Review: {text_batch.numpy()[i]}')
label = label_batch.numpy()[i]
print(f'Label : {label} ({class_names[label]})')
###Output
Review: b'"Pandemonium" is a horror movie spoof that comes off more stupid than funny. Believe me when I tell you, I love comedies. Especially comedy spoofs. "Airplane", "The Naked Gun" trilogy, "Blazing Saddles", "High Anxiety", and "Spaceballs" are some of my favorite comedies that spoof a particular genre. "Pandemonium" is not up there with those films. Most of the scenes in this movie had me sitting there in stunned silence because the movie wasn\'t all that funny. There are a few laughs in the film, but when you watch a comedy, you expect to laugh a lot more than a few times and that\'s all this film has going for it. Geez, "Scream" had more laughs than this film and that was more of a horror film. How bizarre is that?<br /><br />*1/2 (out of four)'
Label : 0 (neg)
Review: b"David Mamet is a very interesting and a very un-equal director. His first movie 'House of Games' was the one I liked best, and it set a series of films with characters whose perspective of life changes as they get into complicated situations, and so does the perspective of the viewer.<br /><br />So is 'Homicide' which from the title tries to set the mind of the viewer to the usual crime drama. The principal characters are two cops, one Jewish and one Irish who deal with a racially charged area. The murder of an old Jewish shop owner who proves to be an ancient veteran of the Israeli Independence war triggers the Jewish identity in the mind and heart of the Jewish detective.<br /><br />This is were the flaws of the film are the more obvious. The process of awakening is theatrical and hard to believe, the group of Jewish militants is operatic, and the way the detective eventually walks to the final violent confrontation is pathetic. The end of the film itself is Mamet-like smart, but disappoints from a human emotional perspective.<br /><br />Joe Mantegna and William Macy give strong performances, but the flaws of the story are too evident to be easily compensated."
Label : 0 (neg)
Review: b'Great documentary about the lives of NY firefighters during the worst terrorist attack of all time.. That reason alone is why this should be a must see collectors item.. What shocked me was not only the attacks, but the"High Fat Diet" and physical appearance of some of these firefighters. I think a lot of Doctors would agree with me that,in the physical shape they were in, some of these firefighters would NOT of made it to the 79th floor carrying over 60 lbs of gear. Having said that i now have a greater respect for firefighters and i realize becoming a firefighter is a life altering job. The French have a history of making great documentary\'s and that is what this is, a Great Documentary.....'
Label : 1 (pos)
###Markdown
Loading models from TensorFlow HubHere you can choose which BERT model you will load from TensorFlow Hub and fine-tune. There are multiple BERT models available. - [BERT-Base](https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3), [Uncased](https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3) and [seven more models](https://tfhub.dev/google/collections/bert/1) with trained weights released by the original BERT authors. - [Small BERTs](https://tfhub.dev/google/collections/bert/1) have the same general architecture but fewer and/or smaller Transformer blocks, which lets you explore tradeoffs between speed, size and quality. - [ALBERT](https://tfhub.dev/google/collections/albert/1): four different sizes of "A Lite BERT" that reduces model size (but not computation time) by sharing parameters between layers. - [BERT Experts](https://tfhub.dev/google/collections/experts/bert/1): eight models that all have the BERT-base architecture but offer a choice between different pre-training domains, to align more closely with the target task. - [Electra](https://tfhub.dev/google/collections/electra/1) has the same architecture as BERT (in three different sizes), but gets pre-trained as a discriminator in a set-up that resembles a Generative Adversarial Network (GAN). - BERT with Talking-Heads Attention and Gated GELU [[base](https://tfhub.dev/tensorflow/talkheads_ggelu_bert_en_base/1), [large](https://tfhub.dev/tensorflow/talkheads_ggelu_bert_en_large/1)] has two improvements to the core of the Transformer architecture.The model documentation on TensorFlow Hub has more details and references to the research literature. Follow the links above, or click on the [`tfhub.dev`](http://tfhub.dev) URL printed after the next cell execution.The suggestion is to start with a Small BERT (with fewer parameters) since they are faster to fine-tune. If you like a small model but with higher accuracy, ALBERT might be your next option. If you want even better accuracy, choose one of the classic BERT sizes or their recent refinements like Electra, Talking Heads, or a BERT Expert.Aside from the models available below, there are [multiple versions](https://tfhub.dev/google/collections/transformer_encoders_text/1) of the models that are larger and can yield even better accuracy but they are too big to be fine-tuned on a single GPU. You will be able to do that on the [Solve GLUE tasks using BERT on a TPU colab](https://www.tensorflow.org/tutorials/text/solve_glue_tasks_using_bert_on_tpu).You'll see in the code below that switching the tfhub.dev URL is enough to try any of these models, because all the differences between them are encapsulated in the SavedModels from TF Hub.
###Code
#@title Choose a BERT model to fine-tune
bert_model_name = 'small_bert/bert_en_uncased_L-4_H-512_A-8' #@param ["bert_en_uncased_L-12_H-768_A-12", "bert_en_cased_L-12_H-768_A-12", "bert_multi_cased_L-12_H-768_A-12", "small_bert/bert_en_uncased_L-2_H-128_A-2", "small_bert/bert_en_uncased_L-2_H-256_A-4", "small_bert/bert_en_uncased_L-2_H-512_A-8", "small_bert/bert_en_uncased_L-2_H-768_A-12", "small_bert/bert_en_uncased_L-4_H-128_A-2", "small_bert/bert_en_uncased_L-4_H-256_A-4", "small_bert/bert_en_uncased_L-4_H-512_A-8", "small_bert/bert_en_uncased_L-4_H-768_A-12", "small_bert/bert_en_uncased_L-6_H-128_A-2", "small_bert/bert_en_uncased_L-6_H-256_A-4", "small_bert/bert_en_uncased_L-6_H-512_A-8", "small_bert/bert_en_uncased_L-6_H-768_A-12", "small_bert/bert_en_uncased_L-8_H-128_A-2", "small_bert/bert_en_uncased_L-8_H-256_A-4", "small_bert/bert_en_uncased_L-8_H-512_A-8", "small_bert/bert_en_uncased_L-8_H-768_A-12", "small_bert/bert_en_uncased_L-10_H-128_A-2", "small_bert/bert_en_uncased_L-10_H-256_A-4", "small_bert/bert_en_uncased_L-10_H-512_A-8", "small_bert/bert_en_uncased_L-10_H-768_A-12", "small_bert/bert_en_uncased_L-12_H-128_A-2", "small_bert/bert_en_uncased_L-12_H-256_A-4", "small_bert/bert_en_uncased_L-12_H-512_A-8", "small_bert/bert_en_uncased_L-12_H-768_A-12", "albert_en_base", "electra_small", "electra_base", "experts_pubmed", "experts_wiki_books", "talking-heads_base"]
map_name_to_handle = {
'bert_en_uncased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3',
'bert_en_cased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/3',
'bert_multi_cased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/3',
'small_bert/bert_en_uncased_L-2_H-128_A-2':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/1',
'small_bert/bert_en_uncased_L-2_H-256_A-4':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-256_A-4/1',
'small_bert/bert_en_uncased_L-2_H-512_A-8':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-512_A-8/1',
'small_bert/bert_en_uncased_L-2_H-768_A-12':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-768_A-12/1',
'small_bert/bert_en_uncased_L-4_H-128_A-2':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-128_A-2/1',
'small_bert/bert_en_uncased_L-4_H-256_A-4':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-256_A-4/1',
'small_bert/bert_en_uncased_L-4_H-512_A-8':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1',
'small_bert/bert_en_uncased_L-4_H-768_A-12':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-768_A-12/1',
'small_bert/bert_en_uncased_L-6_H-128_A-2':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-128_A-2/1',
'small_bert/bert_en_uncased_L-6_H-256_A-4':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-256_A-4/1',
'small_bert/bert_en_uncased_L-6_H-512_A-8':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-512_A-8/1',
'small_bert/bert_en_uncased_L-6_H-768_A-12':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-768_A-12/1',
'small_bert/bert_en_uncased_L-8_H-128_A-2':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-128_A-2/1',
'small_bert/bert_en_uncased_L-8_H-256_A-4':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-256_A-4/1',
'small_bert/bert_en_uncased_L-8_H-512_A-8':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-512_A-8/1',
'small_bert/bert_en_uncased_L-8_H-768_A-12':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-768_A-12/1',
'small_bert/bert_en_uncased_L-10_H-128_A-2':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-128_A-2/1',
'small_bert/bert_en_uncased_L-10_H-256_A-4':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-256_A-4/1',
'small_bert/bert_en_uncased_L-10_H-512_A-8':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-512_A-8/1',
'small_bert/bert_en_uncased_L-10_H-768_A-12':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-768_A-12/1',
'small_bert/bert_en_uncased_L-12_H-128_A-2':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-128_A-2/1',
'small_bert/bert_en_uncased_L-12_H-256_A-4':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-256_A-4/1',
'small_bert/bert_en_uncased_L-12_H-512_A-8':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-512_A-8/1',
'small_bert/bert_en_uncased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-768_A-12/1',
'albert_en_base':
'https://tfhub.dev/tensorflow/albert_en_base/2',
'electra_small':
'https://tfhub.dev/google/electra_small/2',
'electra_base':
'https://tfhub.dev/google/electra_base/2',
'experts_pubmed':
'https://tfhub.dev/google/experts/bert/pubmed/2',
'experts_wiki_books':
'https://tfhub.dev/google/experts/bert/wiki_books/2',
'talking-heads_base':
'https://tfhub.dev/tensorflow/talkheads_ggelu_bert_en_base/1',
}
map_model_to_preprocess = {
'bert_en_uncased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'bert_en_cased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_cased_preprocess/3',
'small_bert/bert_en_uncased_L-2_H-128_A-2':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-2_H-256_A-4':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-2_H-512_A-8':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-2_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-4_H-128_A-2':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-4_H-256_A-4':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-4_H-512_A-8':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-4_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-6_H-128_A-2':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-6_H-256_A-4':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-6_H-512_A-8':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-6_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-8_H-128_A-2':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-8_H-256_A-4':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-8_H-512_A-8':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-8_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-10_H-128_A-2':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-10_H-256_A-4':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-10_H-512_A-8':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-10_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-12_H-128_A-2':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-12_H-256_A-4':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-12_H-512_A-8':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'bert_multi_cased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_multi_cased_preprocess/3',
'albert_en_base':
'https://tfhub.dev/tensorflow/albert_en_preprocess/3',
'electra_small':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'electra_base':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'experts_pubmed':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'experts_wiki_books':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'talking-heads_base':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
}
tfhub_handle_encoder = map_name_to_handle[bert_model_name]
tfhub_handle_preprocess = map_model_to_preprocess[bert_model_name]
print(f'BERT model selected : {tfhub_handle_encoder}')
print(f'Preprocess model auto-selected: {tfhub_handle_preprocess}')
###Output
BERT model selected : https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1
Preprocess model auto-selected: https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3
###Markdown
The preprocessing modelText inputs need to be transformed to numeric token ids and arranged in several Tensors before being input to BERT. TensorFlow Hub provides a matching preprocessing model for each of the BERT models discussed above, which implements this transformation using TF ops from the TF.text library. It is not necessary to run pure Python code outside your TensorFlow model to preprocess text.The preprocessing model must be the one referenced by the documentation of the BERT model, which you can read at the URL printed above. For BERT models from the drop-down above, the preprocessing model is selected automatically.Note: You will load the preprocessing model into a [hub.KerasLayer](https://www.tensorflow.org/hub/api_docs/python/hub/KerasLayer) to compose your fine-tuned model. This is the preferred API to load a TF2-style SavedModel from TF Hub into a Keras model.
###Code
bert_preprocess_model = hub.KerasLayer(tfhub_handle_preprocess)
###Output
_____no_output_____
###Markdown
Let's try the preprocessing model on some text and see the output:
###Code
text_test = ['this is such an amazing movie!']
text_preprocessed = bert_preprocess_model(text_test)
print(f'Keys : {list(text_preprocessed.keys())}')
print(f'Shape : {text_preprocessed["input_word_ids"].shape}')
print(f'Word Ids : {text_preprocessed["input_word_ids"][0, :12]}')
print(f'Input Mask : {text_preprocessed["input_mask"][0, :12]}')
print(f'Type Ids : {text_preprocessed["input_type_ids"][0, :12]}')
###Output
Keys : ['input_word_ids', 'input_mask', 'input_type_ids']
Shape : (1, 128)
Word Ids : [ 101 2023 2003 2107 2019 6429 3185 999 102 0 0 0]
Input Mask : [1 1 1 1 1 1 1 1 1 0 0 0]
Type Ids : [0 0 0 0 0 0 0 0 0 0 0 0]
###Markdown
As you can see, now you have the 3 outputs from the preprocessing that a BERT model would use (`input_word_ids`, `input_mask` and `input_type_ids`).Some other important points:- The input is truncated to 128 tokens. The number of tokens can be customized and you can see more details on the [Solve GLUE tasks using BERT on a TPU colab](https://www.tensorflow.org/tutorials/text/solve_glue_tasks_using_bert_on_tpu).- The `input_type_ids` only have one value (0) because this is a single sentence input. For a multiple sentence input, it would have one number for each input.Since this text preprocessor is a TensorFlow model, it can be included in your model directly. Using the BERT modelBefore putting BERT into your own model, let's take a look at its outputs. You will load it from TF Hub and see the returned values.
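A quick sanity check on these tensors (a sketch, assuming the `text_preprocessed` dict from the cell above): `input_mask` is 1 for real tokens and 0 for padding, so summing it recovers the number of real tokens, including the special [CLS] (id 101) and [SEP] (id 102) tokens visible in the word ids.
```
# Sketch: count real (non-padding) tokens via the mask.
n_real = int(tf.reduce_sum(text_preprocessed['input_mask']))
n_pad = int(text_preprocessed['input_word_ids'].shape[-1]) - n_real
print(n_real, n_pad)  # 9 real tokens and 119 padding positions for the example above
```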
###Code
bert_model = hub.KerasLayer(tfhub_handle_encoder)
bert_results = bert_model(text_preprocessed)
print(f'Loaded BERT: {tfhub_handle_encoder}')
print(f'Pooled Outputs Shape:{bert_results["pooled_output"].shape}')
print(f'Pooled Outputs Values:{bert_results["pooled_output"][0, :12]}')
print(f'Sequence Outputs Shape:{bert_results["sequence_output"].shape}')
print(f'Sequence Outputs Values:{bert_results["sequence_output"][0, :12]}')
###Output
Loaded BERT: https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1
Pooled Outputs Shape:(1, 512)
Pooled Outputs Values:[ 0.7626284 0.9928099 -0.18611862 0.36673862 0.15233698 0.6550447
0.9681154 -0.94862705 0.00216154 -0.98777324 0.0684273 -0.97630596]
Sequence Outputs Shape:(1, 128, 512)
Sequence Outputs Values:[[-0.28946292 0.3432122 0.3323146 ... 0.2130091 0.7102076
-0.05771127]
[-0.28742066 0.31980988 -0.23018472 ... 0.58454984 -0.21329743
0.7269208 ]
[-0.6615696 0.688769 -0.87432975 ... 0.1087725 -0.26173288
0.47855526]
...
[-0.22561064 -0.28925598 -0.07064444 ... 0.4756608 0.832771
0.40025362]
[-0.2982425 -0.27473134 -0.05450555 ... 0.48849788 1.0955352
0.18163432]
[-0.44378024 0.00930739 0.07223781 ... 0.1729011 1.1833246
0.0789801 ]]
###Markdown
The BERT models return a map with 3 important keys: `pooled_output`, `sequence_output`, `encoder_outputs`:- `pooled_output` to represent each input sequence as a whole. The shape is `[batch_size, H]`. You can think of this as an embedding for the entire movie review.- `sequence_output` represents each input token in the context. The shape is `[batch_size, seq_length, H]`. You can think of this as a contextual embedding for every token in the movie review.- `encoder_outputs` are the intermediate activations of the `L` Transformer blocks. `outputs["encoder_outputs"][i]` is a Tensor of shape `[batch_size, seq_length, H]` with the outputs of the i-th Transformer block, for `0 <= i < L`. The last value of the list is equal to `sequence_output`.For the fine-tuning you are going to use the `pooled_output` array. Define your modelYou will create a very simple fine-tuned model, with the preprocessing model, the selected BERT model, one Dense and a Dropout layer.Note: for more information about the base model's input and output you can just follow the model's URL for documentation. Here specifically you don't need to worry about it because the preprocessing model will take care of that for you.
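Before moving on to the classifier, you can verify the claim that the last entry of `encoder_outputs` equals `sequence_output` (a sketch using the `bert_results` dict from the previous cell):
```
# Sketch: the final Transformer block's activations should match sequence_output.
last_block = bert_results['encoder_outputs'][-1]
print(tf.reduce_max(tf.abs(last_block - bert_results['sequence_output'])).numpy())  # expected ~0.0
```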
###Code
def build_classifier_model():
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
preprocessing_layer = hub.KerasLayer(tfhub_handle_preprocess, name='preprocessing')
encoder_inputs = preprocessing_layer(text_input)
encoder = hub.KerasLayer(tfhub_handle_encoder, trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
net = outputs['pooled_output']
net = tf.keras.layers.Dropout(0.1)(net)
net = tf.keras.layers.Dense(1, activation=None, name='classifier')(net)
return tf.keras.Model(text_input, net)
###Output
_____no_output_____
###Markdown
Let's check that the model runs with the output of the preprocessing model.
###Code
classifier_model = build_classifier_model()
bert_raw_result = classifier_model(tf.constant(text_test))
print(tf.sigmoid(bert_raw_result))
###Output
tf.Tensor([[0.406224]], shape=(1, 1), dtype=float32)
###Markdown
The output is meaningless, of course, because the model has not been trained yet.Let's take a look at the model's structure.
###Code
tf.keras.utils.plot_model(classifier_model)
###Output
_____no_output_____
###Markdown
Model trainingYou now have all the pieces to train a model, including the preprocessing module, BERT encoder, data, and classifier. Loss functionSince this is a binary classification problem and the model outputs a single raw score (a single-unit layer with no activation), you'll use the `losses.BinaryCrossentropy` loss function with `from_logits=True`.
###Code
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
metrics = tf.metrics.BinaryAccuracy()
###Output
_____no_output_____
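As a small illustration of `from_logits=True` (a sketch with made-up toy values): the loss applies the sigmoid internally, so feeding raw logits to this loss matches feeding sigmoid-activated values to a plain `BinaryCrossentropy`.
```
# Sketch: from_logits=True applies the sigmoid inside the loss.
toy_labels = tf.constant([[1.0], [0.0]])
toy_logits = tf.constant([[0.3], [-1.2]])
print(loss(toy_labels, toy_logits).numpy())
print(tf.keras.losses.BinaryCrossentropy()(toy_labels, tf.sigmoid(toy_logits)).numpy())  # same value
```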
###Markdown
OptimizerFor fine-tuning, let's use the same optimizer that BERT was originally trained with: the "Adaptive Moments" (Adam). This optimizer minimizes the prediction loss and does regularization by weight decay (not using moments), which is also known as [AdamW](https://arxiv.org/abs/1711.05101).For the learning rate (`init_lr`), we use the same schedule as BERT pre-training: linear decay of a notional initial learning rate, prefixed with a linear warm-up phase over the first 10% of training steps (`num_warmup_steps`). In line with the BERT paper, the initial learning rate is smaller for fine-tuning (best of 5e-5, 3e-5, 2e-5).
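For concreteness, a sketch of the warm-up arithmetic (the 625 steps/epoch figure comes from the training log further below, so treat it as illustrative):
```
# Sketch: 625 steps/epoch * 5 epochs = 3125 steps, 10% warm-up ≈ 312 steps.
print(int(0.1 * 625 * 5))
```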
###Code
epochs = 5
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
num_train_steps = steps_per_epoch * epochs
num_warmup_steps = int(0.1*num_train_steps)
init_lr = 3e-5
optimizer = optimization.create_optimizer(init_lr=init_lr,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
optimizer_type='adamw')
###Output
_____no_output_____
###Markdown
Loading the BERT model and trainingUsing the `classifier_model` you created earlier, you can compile the model with the loss, metric and optimizer.
###Code
classifier_model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
###Output
_____no_output_____
###Markdown
Note: training time will vary depending on the complexity of the BERT model you have selected.
###Code
print(f'Training model with {tfhub_handle_encoder}')
history = classifier_model.fit(x=train_ds,
validation_data=val_ds,
epochs=epochs)
###Output
Training model with https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1
Epoch 1/5
625/625 [==============================] - 161s 249ms/step - loss: 0.5901 - binary_accuracy: 0.6449 - val_loss: 0.3865 - val_binary_accuracy: 0.8376
Epoch 2/5
625/625 [==============================] - 154s 246ms/step - loss: 0.3609 - binary_accuracy: 0.8334 - val_loss: 0.3668 - val_binary_accuracy: 0.8430
Epoch 3/5
625/625 [==============================] - 152s 243ms/step - loss: 0.2744 - binary_accuracy: 0.8818 - val_loss: 0.3902 - val_binary_accuracy: 0.8510
Epoch 4/5
625/625 [==============================] - 152s 243ms/step - loss: 0.2047 - binary_accuracy: 0.9180 - val_loss: 0.4471 - val_binary_accuracy: 0.8522
Epoch 5/5
625/625 [==============================] - 152s 243ms/step - loss: 0.1608 - binary_accuracy: 0.9403 - val_loss: 0.4771 - val_binary_accuracy: 0.8546
###Markdown
Evaluate the modelLet's see how the model performs. Two values will be returned: loss (a number which represents the error; lower values are better) and accuracy.
###Code
loss, accuracy = classifier_model.evaluate(test_ds)
print(f'Loss: {loss}')
print(f'Accuracy: {accuracy}')
###Output
782/782 [==============================] - 83s 105ms/step - loss: 0.4670 - binary_accuracy: 0.8536
Loss: 0.46696871519088745
Accuracy: 0.8536400198936462
###Markdown
Plot the accuracy and loss over timeBased on the `History` object returned by `model.fit()`. You can plot the training and validation loss for comparison, as well as the training and validation accuracy:
###Code
history_dict = history.history
print(history_dict.keys())
acc = history_dict['binary_accuracy']
val_acc = history_dict['val_binary_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
fig = plt.figure(figsize=(10, 6))
fig.tight_layout()
plt.subplot(2, 1, 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'r', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
# plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
###Output
dict_keys(['loss', 'binary_accuracy', 'val_loss', 'val_binary_accuracy'])
###Markdown
In this plot, the red lines represent the training loss and accuracy, and the blue lines are the validation loss and accuracy. Export for inferenceNow you just save your fine-tuned model for later use.
###Code
dataset_name = 'imdb'
saved_model_path = './{}_bert'.format(dataset_name.replace('/', '_'))
classifier_model.save(saved_model_path, include_optimizer=False)
###Output
WARNING:absl:Found untraced functions such as restored_function_body, restored_function_body, restored_function_body, restored_function_body, restored_function_body while saving (showing 5 of 310). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as restored_function_body, restored_function_body, restored_function_body, restored_function_body, restored_function_body while saving (showing 5 of 310). These functions will not be directly callable after loading.
###Markdown
Let's reload the model so you can try it side by side with the model that is still in memory.
###Code
reloaded_model = tf.saved_model.load(saved_model_path)
###Output
_____no_output_____
###Markdown
Here you can test your model on any sentence you want, just add to the examples variable below.
###Code
def print_my_examples(inputs, results):
result_for_printing = \
[f'input: {inputs[i]:<30} : score: {results[i][0]:.6f}'
for i in range(len(inputs))]
print(*result_for_printing, sep='\n')
print()
examples = [
'this is such an amazing movie!', # this is the same sentence tried earlier
'The movie was great!',
'The movie was meh.',
'The movie was okish.',
'The movie was terrible...'
]
reloaded_results = tf.sigmoid(reloaded_model(tf.constant(examples)))
original_results = tf.sigmoid(classifier_model(tf.constant(examples)))
print('Results from the saved model:')
print_my_examples(examples, reloaded_results)
print('Results from the model in memory:')
print_my_examples(examples, original_results)
###Output
Results from the saved model:
input: this is such an amazing movie! : score: 0.999418
input: The movie was great! : score: 0.997077
input: The movie was meh. : score: 0.968174
input: The movie was okish. : score: 0.051995
input: The movie was terrible... : score: 0.000687
Results from the model in memory:
input: this is such an amazing movie! : score: 0.999418
input: The movie was great! : score: 0.997077
input: The movie was meh. : score: 0.968174
input: The movie was okish. : score: 0.051995
input: The movie was terrible... : score: 0.000687
###Markdown
If you want to use your model on [TF Serving](https://www.tensorflow.org/tfx/guide/serving), remember that it will call your SavedModel through one of its named signatures. In Python, you can test them as follows:
###Code
serving_results = reloaded_model \
.signatures['serving_default'](tf.constant(examples))
serving_results = tf.sigmoid(serving_results['classifier'])
print_my_examples(examples, serving_results)
###Output
input: this is such an amazing movie! : score: 0.999418
input: The movie was great! : score: 0.997077
input: The movie was meh. : score: 0.968174
input: The movie was okish. : score: 0.051996
input: The movie was terrible... : score: 0.000687
|
jupyter_notebooks/check_tensorflow_device.ipynb | ###Markdown
Check which device Tensorflow is using
###Code
import tensorflow as tf
from tensorflow.python.client import device_lib
###Output
_____no_output_____
###Markdown
List the locally available devices
###Code
device_lib.list_local_devices()
###Output
_____no_output_____
###Markdown
Show GPU device name
###Code
tf.test.gpu_device_name()
###Output
_____no_output_____
###Markdown
Is the GPU available?
###Code
tf.test.is_gpu_available(cuda_only=False, min_cuda_compute_capability=None)
###Output
_____no_output_____
###Markdown
What is the Tensorflow Version?
###Code
tf.__version__
###Output
_____no_output_____ |
Python-Notebooks/2.Loops and Recursions.ipynb | ###Markdown
Loops and Recursions**By Arpit Omprakash, Byte Sized Code** Loops While LoopsWhile loops are blocks of code that run repeatedly as long as a certain condition evaluates to true. They are more or less defined in the same way as `if` statements. ```while condition_evaluates_to_true: do_something```
###Code
x = 0
while x < 5:
print("Not there yet, x= " + str(x))
x = x + 1
###Output
Not there yet, x= 0
Not there yet, x= 1
Not there yet, x= 2
Not there yet, x= 3
Not there yet, x= 4
###Markdown
We can shorten the assignment statement by using other **assignment** operators that python provides. **Assignment Operators**- x += 1 is the same as x = x + 1- x \*= 10 is the same as x = x * 10 ... You get the idea
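A few more of these operators in action (a short sketch; the values are arbitrary):
```
# Sketch: other augmented assignment operators work the same way.
n = 10
n -= 3   # n = n - 3  -> 7
n *= 2   # n = n * 2  -> 14
n //= 4  # n = n // 4 -> 3
n **= 2  # n = n ** 2 -> 9
print(n)
```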
###Code
def attempts(n):
x = 1
while x <= n:
print("Attempt " + str(x))
x += 1
print("Done")
attempts(5)
###Output
Attempt 1
Attempt 2
Attempt 3
Attempt 4
Attempt 5
Done
###Markdown
While loops are traditionally used in cases where a certain condition is required to be met before proceeding. For example:
###Code
name = ""
while name != "arpit":
name = input("Enter your name: ")
print("Name = " + name )
###Output
Enter your name: omprakash
Enter your name: elvis
Enter your name: matt
Enter your name: arpit
Name = arpit
###Markdown
**Common Errors while writing While Loops** - Forgetting to initialize the variable
###Code
while my_var < 5:
print(my_var)
my_var += 1
x = 1
_sum = 0
while x < 10:
_sum += x
x += 1
product = 1
while x < 10:
product *= x
x += 1
print(_sum, product)
###Output
45 1
###Markdown
In the second case, we forgot to initialize the value of x before the second while loop. Thus, the second while loop is never executed. The second error may be difficult to catch as python doesn't give us an error. - Infinite LoopsInfinite loops are the most dreaded problem that one can encounter with a loop. They generally happen when you forget to track your variable and the condition in the `while` loop never evaluates to be false. Thus, the loop continues forever.
###Code
x = 0
while x < 10:
print("ok")
x -= 1
x = 1
while x < 5:
print(x)
###Output
_____no_output_____
###Markdown
However sometimes infinite loops are desirable. For example, if you have ever used the `ping` command in Linux or `ping -t` command in Windows, you might have noticed that the tool runs till it is stopped manually by the user.Even in those cases we need to break the loop at some time:```while True: do_something_cool() if user_requested_to_stop(): break```A **break** statement is used to exit an infinite loop when a certain condition is met. The break statement can also be used to exit a loop early if the code has achieved its objective.
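A concrete, runnable version of that pattern looks like this (a sketch with a hypothetical stop condition standing in for user input):
```
# Sketch: an intentionally infinite loop that exits via break.
attempts = 0
while True:
    attempts += 1
    if attempts == 3:  # stand-in for user_requested_to_stop()
        break
print("Stopped after " + str(attempts) + " attempts")
```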
###Code
x = 0
while x < 5:
if x == 3:
break
print(x)
x += 1
###Output
0
1
2
###Markdown
For LoopsFor loops are used to iterate over a given sequence of values. The syntax is as follows:```for item in iterable: do_something_with_item```
###Code
for x in range(5):
print(x)
###Output
0
1
2
3
4
###Markdown
The range function returns an iterable sequence of numbers. - `range(n)` generates values from `0` to `n-1`- `range(m, n)` generates values from `m` to `n-1`- `range(m, n, p)` generates values from `m` to `n-1` in steps of `p`
###Code
for i in range(5, 10):
print(i)
for i in range(5, 10, 2):
print(i)
###Output
5
7
9
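A negative step also works and counts down (sketch):
```
# Sketch: counting down with a negative step.
for i in range(10, 0, -2):
    print(i)  # prints 10, 8, 6, 4, 2
```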
###Markdown
You might be wondering why we have a separate kind of loop when we could write the previous loops as `while` loops. The answer lies in the power of `for` loops to work with any iterable item, including lists, dictionaries, and strings.
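For example, iterating over a dictionary visits its keys (a quick sketch):
```
# Sketch: for loops over a dictionary iterate over its keys.
grades = {"Alice": 70, "Tom": 80, "Steven": 95}
for name in grades:
    print(name, grades[name])
# or get key/value pairs directly
for name, grade in grades.items():
    print(name, grade)
```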
###Code
friends = ["Chandler", "Monica", "Ross", "Rachel", "Phoebe", "Joey"]
for friend in friends:
print("How you doing " + friend + "?")
def to_celsius(x):
return (x - 32) * 5 / 9
for x in range(0, 101, 10):
print(x, to_celsius(x))
###Output
0 -17.77777777777778
10 -12.222222222222221
20 -6.666666666666667
30 -1.1111111111111112
40 4.444444444444445
50 10.0
60 15.555555555555555
70 21.11111111111111
80 26.666666666666668
90 32.22222222222222
100 37.77777777777778
###Markdown
**Nested For Loops**We can use nested for loops to iterate over two iterable items simultaneously and perform some function with both of them. Here's an example:
###Code
adj = ["big", "tasty", "fresh"]
fruits = ["apple", "cherry", "orange"]
for adjective in adj:
for fruit in fruits:
print(adjective + " " + fruit)
###Output
big apple
big cherry
big orange
tasty apple
tasty cherry
tasty orange
fresh apple
fresh cherry
fresh orange
###Markdown
**Common Errors when writing For Loops** - Trying to iterate over something that is not iterable This is a frequent error for beginners as they often confuse data types.
###Code
for x in 25:
print(x)
###Output
_____no_output_____
###Markdown
- Iterating over the wrong data typeLet's again greet our friends
###Code
def greet_friends(friends):
for friend in friends:
print("Hi " + friend)
greet_friends(friends)
###Output
Hi Chandler
Hi Monica
Hi Ross
Hi Rachel
Hi Phoebe
Hi Joey
###Markdown
What if we just want to say hi to Chandler? Let's try putting his name in the function.
###Code
greet_friends("chandler")
###Output
Hi c
Hi h
Hi a
Hi n
Hi d
Hi l
Hi e
Hi r
###Markdown
What's the problem here? The for loop here iterates over the string that we supplied, thus, we have to enclose the single string in a list before presenting it to the function.
###Code
greet_friends(["chandler"])
###Output
Hi chandler
###Markdown
RecursionThe repeated application of the same procedure to a smaller problem. It lets us tackle complex problems by reducing the problem to a simpler one. In programming, recursion is a way of doing a repetitive task by having a function call itself. A recursive function calls itself usually with a modified parameter till it reaches a specific condition. This is called the base case.Lets dive in to the most classic example of recursion.
###Code
def factorial(n):
# base case
if n == 1:
return 1
# call the same function with a smaller value
return n * factorial(n-1)
print(factorial(10))
###Output
3628800
###Markdown
Let's dissect the function above to understand what's happening under the hood.
###Code
def _factorial(n):
print("Factorial called with " + str(n))
if n == 1:
print("Base case evaluated. Returning 1")
return 1
result = n * _factorial(n-1)
print("Returning " + str(result) + " for factorial of " + str(n))
return result
print(_factorial(5))
###Output
Factorial called with 5
Factorial called with 4
Factorial called with 3
Factorial called with 2
Factorial called with 1
Base case evaluated. Returning 1
Returning 2 for factorial of 2
Returning 6 for factorial of 3
Returning 24 for factorial of 4
Returning 120 for factorial of 5
120
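As a closing note, if the base case is never reached, Python eventually stops the recursion with a `RecursionError` (minimal sketch below):
```
# Sketch: recursion with no base case exceeds the maximum recursion depth.
def countdown(n):
    return countdown(n - 1)  # no base case!
try:
    countdown(5)
except RecursionError as err:
    print("Caught:", err)
```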
|
Maximum_Entropy-with-VaR-CVaR.ipynb | ###Markdown
CVaR
###Code
var_level = 95
var_95 = np.percentile(pnl, 100 - var_level)
cvar_95 = pnl[pnl <= var_95].mean()
cvar_95
CVaR_port =cvar_95*Portfolio_value
CVaR_port
var_level2 = 99
var_99 = np.percentile(pnl, 100 - var_level2)
cvar_99 = pnl[pnl <= var_99].mean()
CVaR_port99 =cvar_99*Portfolio_value
CVaR_port99
output = [['Portfolio Value', Portfolio_value], ['Daily_VaR_95', Daily_VaR95],['Monthly_VaR95', Monthly_VaR95],['Daily_VaR_99', Daily_VaR99],['Monthly_VaR99', Monthly_VaR99], ['Daily_CVAR_95', CVaR_port],['Daily_CVAR_99', CVaR_port99]]
output2 = pd.DataFrame(output, columns=['Details', " Amount in Mn"])
output2
###Output
_____no_output_____ |
Rename-Resize Images.ipynb | ###Markdown
This function renames files in a directory and if specified will resize the images rename (source_dir, snum, new_ext, resize) where: source_dir is the full path to the directory containing the image files to be processed snum is an integer. If set to 0 the file names will not be changed but the files can still be converted to a new image format and also be resized if snum is a non 0 integer the files are renamed in numerical sequence starting with the value of snum. note files are renumbered with zero padding so that files are processed by python functions based on their numerical order. new_ext is a string specifying the new image format the files should be converted to. note only these extensions are allowed 'jpg', 'jpe', 'jpeg', 'png', 'bmp', 'tiff' if new_ext='same' the files will be kept in their original format resize specifies the new size for the images. If resize='same' the images are not resized if not set to 'same' resize must be a tuple of the form (width,height) where width and height are integers. To use this function cv2 and tqdm must be installed in your working environment Note only files of the type listed under extensions above can be processed. If a file in the source_dir is not of the type that can be processed by cv2.imread, an exception occurs and a message is printed saying the file has been deleted from the directory.
###Code
import os
import cv2
from tqdm import tqdm
def rename (source_dir, snum, new_ext, resize):
ext_list=['jpg', 'jpe', 'jpeg', 'png', 'bmp', 'tiff']
if new_ext not in ext_list and new_ext !='same': # make sure new extension if specified is in acceptable list
msg='\nthe new extension you specified {0} is not in the list of acceptable extensions '
print(msg.format(new_ext))
print('The list of valid extensions is ', ext_list, ' **** program terminated ****')
return
if resize !='same':
if type(resize) is not tuple:
msg='\ the entry for resize {0} is not a proper tuple '
print(msg.format(resize))
print('resize must either same or of the form (width, height) where width and height are integer **** program terminated ****')
return
f_list=[]
if new_ext !='same':
new_ext=new_ext.lower()
source_list=os.listdir(source_dir)
for f in source_list:
f_path=os.path.join(source_dir, f)
if os.path.isdir(f_path)==False:
f_list.append(f) # list of file names in source directory
fc=len(f_list) # determine number of d files to process
pad=0
mod = 10
for i in range(1, fc + 1): # skip i=0 because 0 modulo anything is 0 and we don't want to increment pad
if i % mod == 0:
pad=pad+1
mod =mod * 10
good=0
i=0
for i in tqdm(range(fc)):
f=f_list[i]
f_path=os.path.join(source_dir,f) #full path to the file
filename=os.path.basename(f_path)
index=f_path.rfind('.') # find location of last . in file name
fname=f_path[:index] # name of file - no extension
ext=f_path[index + 1:] # this does not include the period
if ext=='jfif': # jfif files are the same as jpg files so change the extension to jpg
ext='jpg'
if snum !=0: # check if need to rename file
fnew= str(i + snum).zfill(pad+1) # rename the files name with leading zeros
else:
fnew=fname # use original file name
new_path=os.path.join(source_dir, fnew)
if new_ext != 'same': # check if files will have a new extension
fnew=fnew +'.' + new_ext
else:
fnew=fnew + '.' + ext # use old extension
new_path= os.path.join (source_dir, fnew) # full path for the newly renamed file
try:
img=cv2.imread(f_path)
dummy=img.shape
if resize !='same':
img=cv2.resize(img, resize)
os.remove(f_path)
cv2.imwrite(new_path, img)
good=good + 1
except:
print ('image ' ,filename, ' is an invalid image and will be deleted')
os.remove(f_path)
if new_ext=='same':
msg=' {0} files were renamed and saved with the original extensions'.format(good)
else:
msg='{0} files were renamed and saved with extension {1}'.format(good, new_ext)
print (msg)
return
source_dir=r'C:\Temp\BIRDS\predictor test set'
snum=1
new_ext='bmp'
resize='same'
rename (source_dir, snum, new_ext, resize )
###Output
100%|█████████████████████████████████████████████████████████████████████████████████| 20/20 [00:00<00:00, 152.68it/s] |
extra/Operation playground.ipynb | ###Markdown
Thinking in tensorsA hands-on training by [Piotr Migdał](https://p.migdal.pl) (2019) Extra notebook: Operation playground Open in Colab: https://colab.research.google.com/github/stared/thinking-in-tensors-writing-in-pytorch/blob/master/extra/extra%20Operation%20playground.ipynb
###Code
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
###Output
_____no_output_____
###Markdown
What is a neuron
###Code
x_in = torch.tensor([0.5, -1., 3.])
A_single = torch.tensor([[1.], [1.], [0.]])
x_in.matmul(A_single)
A_layer = torch.tensor([[1., 0.2], [1., 0.5], [0., -.1]])
x_in.matmul(A_layer)
A_layer_2 = torch.tensor([[1.], [-1.]])
x_in.matmul(A_layer).matmul(A_layer_2)
x_in.matmul(A_layer).sigmoid().matmul(A_layer_2).sigmoid()
z = torch.tensor([0.5, -2., 1.5])
z.max(dim=0)
z.exp() / z.exp().sum()
F.softmax(z, dim=0)
###Output
_____no_output_____
###Markdown
And now, with `torch.nn` module.
###Code
x = torch.randn(2, 1, 4, 4)
x
###Output
_____no_output_____
###Markdown
Flatten
###Code
x.view(x.size(0), -1)
###Output
_____no_output_____
###Markdown
Activation functions* Element-wise
###Code
x.relu()
F.relu(x)
relu = nn.ReLU()
relu(x)
X = torch.arange(-3, 3, step=0.2)
plt.plot(X.numpy(), X.relu().numpy(), label="ReLU")
plt.plot(X.numpy(), X.sigmoid().numpy(), label="Sigmoid")
plt.plot(X.numpy(), X.tanh().numpy(), label="Tanh")
plt.ylim([-1.5, 1.5])
plt.legend()
###Output
_____no_output_____
###Markdown
Pooling operation
###Code
x
maxpool = nn.MaxPool2d((2, 2))
maxpool(x)
avgpool = nn.AvgPool2d((2, 2))
avgpool(x)
###Output
_____no_output_____
###Markdown
Convolutions
###Code
conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3)
conv(x)
conv = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=3, padding=1)
conv(x)
conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=1, padding=1)
conv(x)
###Output
_____no_output_____
###Markdown
DropoutDuring the training phase it randomly "switches off" a fraction of neurons. This prevents the network from relying only on a few neurons. See:* [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://jmlr.org/papers/volume15/srivastava14a.old/srivastava14a.pdf)* [torch.nn.Dropout](https://pytorch.org/docs/stable/nn.html#dropout-layers)
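One detail worth noting (sketch below): `nn.Dropout` uses inverted dropout, so surviving activations are scaled by 1/(1-p) during training and the expected value of the output stays roughly equal to the input.
```
# Sketch: surviving values are scaled by 1/(1-p), so the mean is roughly preserved.
drop = nn.Dropout(p=0.5)
v = torch.ones(10000)
print(v.mean().item(), drop(v).mean().item())  # both close to 1.0 (the second is noisy)
```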
###Code
dropout = nn.Dropout(p=0.5)
dropout(x)
dropout.eval()
dropout(x)
###Output
_____no_output_____
###Markdown
Batch norm
###Code
bn = nn.BatchNorm2d(num_features=1)
bn(x)
bn(x[:1])
bn(x[:1]).mean(dim=[2, 3])
###Output
_____no_output_____ |
InfoPlease.ipynb | ###Markdown
InfoPlease
###Code
url = "https://www.infoplease.com/culture-entertainment/music/500-songs-shaped-rock"
savename = "infoplease/rock.p"
if not isFile(savename):
data, code = downloadURL(url)
saveFile(idata=data, ifile=savename)
sleep(3)
url = "https://www.infoplease.com/culture-entertainment/music/must-have-recordings"
savename = "infoplease/musthavealbums.p"
if not isFile(savename):
data, code = downloadURL(url)
saveFile(idata=data, ifile=savename)
sleep(3)
def getArtistData(bsdata):
artistData = {}
for ul in bsdata.findAll("ul"):
lis = ul.findAll("li")
for li in lis:
b = li.find('b')
if b is None:
continue
title = li.find('b').text
title = title[1:-1]
b.extract()
artist = li.text.strip()
if artist.endswith(","):
artist = artist[:-1]
#print(artist,'\t',title)
if artistData.get(artist) is None:
artistData[artist] = []
artistData[artist].append(title)
return artistData
bsdata = getHTML("infoplease/rock.p")
artistData = getArtistData(bsdata)
saveFile(idata=artistData, ifile="infoplease/artistData_rock.p", debug=True)
bsdata = getHTML("infoplease/musthavealbums.p")
artistData = getArtistData(bsdata)
saveFile(idata=artistData, ifile="infoplease/artistData_musthavealbums.p", debug=True)
###Output
Saving data to infoplease/artistData_rock.p
--> This file is 12.9kB.
Saved data to infoplease/artistData_rock.p
--> This file is 12.9kB.
Saving data to infoplease/artistData_musthavealbums.p
--> This file is 6.2kB.
Saved data to infoplease/artistData_musthavealbums.p
--> This file is 6.2kB.
###Markdown
MusicOutfitters
###Code
for year in range(1950,2021):
url = "http://www.musicoutfitters.com/topsongs/{0}.htm".format(year)
savename = "musicoutfitters/topsongs_{0}.p".format(year)
if not isFile(savename):
data, code = downloadURL(url)
saveFile(idata=data, ifile=savename)
sleep(3)
break
###Output
Now Downloading http://www.musicoutfitters.com/topsongs/1950.htm
###Markdown
Digital Dream Door
###Code
for year in range(40, 100):
url="https://digitaldreamdoor.com/pages/bg_hits/bg_hits_{0}.html".format(year)
savename="digitaldreamdoor/19{0}.p".format(year)
if not isFile(savename):
data, code = downloadURL(url)
saveFile(idata=data, ifile=savename)
sleep(3)
for year in range(2000, 2020):
url="https://digitaldreamdoor.com/pages/bg_hits/bg_hits_{0}.html".format(year)
savename="digitaldreamdoor/{0}.p".format(year)
if not isFile(savename):
data, code = downloadURL(url)
saveFile(idata=data, ifile=savename)
sleep(3)
for year in range(1960, 2020):
if year == 1999:
continue
url="https://digitaldreamdoor.com/pages/albums_by_year/albums_{0}.html".format(year)
savename="digitaldreamdoor/{0}_albums.p".format(year)
if not isFile(savename):
data, code = downloadURL(url)
saveFile(idata=data, ifile=savename)
sleep(3)
for decade in ["1920", "1930", "1940"]:
url="https://digitaldreamdoor.com/pages/best_songs-{0}s.html".format(decade)
savename="digitaldreamdoor/{0}s_bestsongs.p".format(year)
if not isFile(savename):
data, code = downloadURL(url)
saveFile(idata=data, ifile=savename)
sleep(3)
url="https://digitaldreamdoor.com/pages/music0.html"
savename="digitaldreamdoor.p".format(year)
if not isFile(savename):
data, code = downloadURL(url)
saveFile(idata=data, ifile=savename)
sleep(3)
bsdata = getHTML("digitaldreamdoor.p")
divs = bsdata.findAll("div", {"class": "mus"})
for div in divs:
refs = div.findAll("a")
for ref in refs:
print(ref.text,'\t',ref.attrs['href'])
url = "https://digitaldreamdoor.com/pages/{0}".format(ref.attrs['href'])
savename = "digitaldreamdoor/{0}.p".format(ref.attrs['href'].replace("/", "_"))
if not isFile(savename):
try:
data, code = downloadURL(url)
except:
sleep(10)
saveFile(idata=data, ifile=savename)
sleep(3)
from searchUtils import findExt
files = findExt("digitaldreamdoor", ".p")
files
###Output
_____no_output_____
###Markdown
PopRadioTop20
###Code
url="https://www.popradiotop20.com/Year/RR.htm"
savename = "popradiotop20.p"
if not isFile(savename):
data, code = downloadURL(url)
saveFile(idata=data, ifile=savename)
sleep(3)
bsdata = getHTML(savename)
for table in bsdata.findAll("table"):
for tr in table.findAll("tr"):
for td in tr.findAll("td"):
ref = td.find("a")
if ref is None:
continue
href = ref.attrs['href']
url = "https://www.popradiotop20.com/Year/{0}".format(href)
savename = "popradiotop20/{0}.p".format(href.replace(".htm", ""))
if not isFile(savename):
data, code = downloadURL(url)
saveFile(idata=data, ifile=savename)
sleep(3)
from searchUtils import findExt
popradioData = {}
files = findExt("popradiotop20", ".p")
for ifile in files:
bsdata = getHTML(ifile)
retval = getPopRadioData(bsdata)
chartName = retval["Chart"]
year = retval["Year"]
songs = retval["Songs"]
if popradioData.get(chartName) is None:
popradioData[chartName] = {}
popradioData[chartName][year] = songs
print("{0: <40}{1: <40}{2: <10}{3}".format(ifile,chartName,year,len(songs)))
bsdata = getHTML("popradiotop20/MB-ALT-2017.p")
#bsdata
retval = getPopRadioData(bsdata, debug=True)
retval
prt20 = popRadioData()
prt20.parse()
###Output
Found 373 files.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Triple_A.p
--> This file is 20.3kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Triple_A.p
--> This file is 20.3kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_AC.p
--> This file is 17.3kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_AC.p
--> This file is 17.3kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Alternative.p
--> This file is 19.3kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Alternative.p
--> This file is 19.3kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Active_Rock.p
--> This file is 18.6kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Active_Rock.p
--> This file is 18.6kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Christian_AC.p
--> This file is 13.1kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Christian_AC.p
--> This file is 13.1kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Country.p
--> This file is 19.7kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Country.p
--> This file is 19.7kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Hot_AC.p
--> This file is 19.9kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Hot_AC.p
--> This file is 19.9kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Smooth_Jazz.p
--> This file is 1.5kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Smooth_Jazz.p
--> This file is 1.5kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Smooth_AC.p
--> This file is 6.2kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Smooth_AC.p
--> This file is 6.2kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Top_40.p
--> This file is 20.8kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Top_40.p
--> This file is 20.8kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Rhythmic.p
--> This file is 20.8kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Rhythmic.p
--> This file is 20.8kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Mainstream_Rock.p
--> This file is 11.2kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Mainstream_Rock.p
--> This file is 11.2kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Rock.p
--> This file is 2.6kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Rock.p
--> This file is 2.6kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Urban_AC.p
--> This file is 17.9kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Urban_AC.p
--> This file is 17.9kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Urban.p
--> This file is 20.4kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Mediabase_Urban.p
--> This file is 20.4kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Adult_Alternative.p
--> This file is 12.2kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Adult_Alternative.p
--> This file is 12.2kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Triple_A.p
--> This file is 12.3kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Triple_A.p
--> This file is 12.3kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Pop_Adult.p
--> This file is 10.4kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Pop_Adult.p
--> This file is 10.4kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Adult_Contemporary.p
--> This file is 11.2kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Adult_Contemporary.p
--> This file is 11.2kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Adult_Contemporary.p
--> This file is 4.9kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Adult_Contemporary.p
--> This file is 4.9kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_AC.p
--> This file is 40.4kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_AC.p
--> This file is 40.4kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Alternative.p
--> This file is 26.6kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Alternative.p
--> This file is 26.6kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Active_Rock.p
--> This file is 21.6kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Active_Rock.p
--> This file is 21.6kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Christian_AC.p
--> This file is 6.3kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Christian_AC.p
--> This file is 6.3kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Top-40.p
--> This file is 8.4kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Top-40.p
--> This file is 8.4kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Contemporary_Hit_Radio.p
--> This file is 17.3kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Contemporary_Hit_Radio.p
--> This file is 17.3kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_CHR.p
--> This file is 16.2kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_CHR.p
--> This file is 16.2kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Country.p
--> This file is 64.6kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Country.p
--> This file is 64.6kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Hot_AC.p
--> This file is 24.8kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Hot_AC.p
--> This file is 24.8kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_NAC.p
--> This file is 5.0kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_NAC.p
--> This file is 5.0kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_NAC_Smooth_Jazz.p
--> This file is 9.7kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_NAC_Smooth_Jazz.p
--> This file is 9.7kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Smooth_Jazz.p
--> This file is 11.4kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Smooth_Jazz.p
--> This file is 11.4kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_CHR_Pop.p
--> This file is 25.6kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_CHR_Pop.p
--> This file is 25.6kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_CHR_Rhythmic.p
--> This file is 25.8kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_CHR_Rhythmic.p
--> This file is 25.8kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_AOR.p
--> This file is 23.4kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_AOR.p
--> This file is 23.4kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Rock.p
--> This file is 26.1kB.
Saved data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Rock.p
--> This file is 26.1kB.
Saving data to /Volumes/Piggy/Charts/data/popradiotop20/results/Radio___Records_Urban_AC.p
###Markdown
UDiscoverMusic
###Code
txt="""
10cc
2Pac
50 Cent
A
A Thousand Horses
ABBA
ABC
Aerosmith
Agnetha Fältskog
Alan Jackson
Albert King
Alice Cooper
Alison Krauss
The All-American Rejects
The Allman Brothers Band
Amy Winehouse
Andre Rieu
Andrea Bocelli
Andrew W.K.
Anthrax
Antonio Carlos Jobim
Apache Indian
Arcade Fire
Ariana Grande
Arrested Development
Ashley Campbell
Astrud Gilberto
Aswad
Atlanta Rhythm Section
Audioslave
B
B.B. King
Badfinger
The Band
Barclay James Harvest
Barry White
The Beach Boys
Beastie Boys
The Beatles
Beck
Bee Gees
Belinda Carlisle
Ben Harper
Ben Howard
Benny Andersson
Big Country
Big Star
Bill Evans
Billie Holiday
Billy Currington
Billy Fury
Billy Preston
Björk
Black Eyed Peas
Black Sabbath
Black Uhuru
Blind Faith
Blink-182
Blondie
Blue Cheer
Bo Diddley
Bob Dylan
Bob Marley
Bon Jovi
Bonnie Raitt
Booker T
Boyz II Men
Brantley Gilbert
Brenda Holloway
Brian Eno
The Brothers Johnson
Bruce Springsteen
Bryan Adams
Bryan Ferry
Buddy Guy
Buddy Holly
Burning Spear
Burt Bacharach
C
The Cadillac Three
Camel
Canned Heat
Captain Beefheart
Caravan
Carpenters
Cat Stevens
Charlie Parker
Cheap Trick
The Chemical Brothers
Cher
Chris Cornell
Chris Stapleton
Chuck Berry
Cinderella
The Clash
Climax Blues Band
Coleman Hawkins
Commodores
Common
The Common Linnets
Corinne Bailey Rae
Count Basie
Counting Crows
Craig Armstrong
The Cranberries
Cream
Creedence Clearwater Revival
Crowded House
Culture Club
The Cure
Cutting Crew
D
D’Angelo
DMX
The Damned
Daniel Hope
Danny Wilson & Gary Clark
David Bowie
Dean Martin
Debarge
Deep Purple
Def Leppard
Demi Lovato
Demis Roussos
Derek And The Dominos
Desmond Dekker
Diana Krall
Diana Ross
Diana Ross & The Supremes
Dierks Bentley
Dinah Washington
Dio
Dire Straits
Disclosure
Don Henley
Donna Summer
The Doors
Dr Dre
Drake
Duke Ellington
Dusty Springfield
E
EELS
EPMD
Eagles
Eagles Of Death Metal
Eazy-E
Eddie Cochran
Elbow
Ella Fitzgerald
Elliott Smith
Elton John
Elvis Costello
Elvis Presley
Emeli Sandé
Eminem
Enigma
Eric B. & Rakim
Eric Church
Eric Clapton
Etta James
Evanescence
Eve
Extreme
F
Fairport Convention
Fats Domino
Faust
Fergie
Florence + The Machine
The Flying Burrito Brothers
Four Tops
Foxy Brown
Frank Sinatra
Frank Zappa
Frankie Goes To Hollywood
Freddie Mercury
Free
Frida Lyngstad
G
The Game
Gang Starr
Gary Moore
Gene Krupa
Gene Vincent
Genesis
Gentle Giant
George Benson
George Harrison
George Michael
George Strait
George Thorogood
Georgie Fame
Ghostface Killah
Ginger Baker
Glen Campbell
Gong
Grace Jones
Graham Parker
Grand Funk Railroad
Gregory Isaacs
Gregory Porter
Guns N’ Roses
Gwen Stefani
H
Hank Williams
Heart
Heaven 17
Helmet
Herbie Hancock
Hoobastank
Howlin Wolf
Hoyt Axton
Huey Lewis & The News
The Human League
Humble Pie
I
INXS
Ice Cube
Iggy Pop
Imagine Dragons
Iron Maiden
Isaac Hayes
The Isley Brothers
It Bites
J
J.J. Cale
Jack Bruce
Jack Johnson
Jackson 5
Jacques Brel
Jadakiss
The Jam
James
James Bay
James Blake
James Brown
James Morrison
James Taylor
Jane’s Addiction
Janet Jackson
Japan & David Sylvian
Jay-Z
Jeezy
Jeru the Damaja
Jessie J
Jimi Hendrix
Jimmy Buffett
Jimmy Cliff
Jimmy Eat World
Jimmy Ruffin
Jimmy Smith
Joan Armatrading
Joan Baez
Joe Cocker
Joe Jackson
Joe Sample
Joe Walsh / The James Gang
John Coltrane
John Fogerty
John Lee Hooker
John Lennon
John Martyn
John Mayall
John Mellencamp
John Williams
Johnny Cash
Johnny Gill
Joni Mitchell
Jonny Lang
Joss Stone
Jr. Walker & The All Stars
Julie London
Jurassic 5
Justin Bieber
K
Kacey Musgraves
Kaiser Chiefs
Kanye West
Kate Bush
Katy Perry
Keane
Keith Jarrett
Keith Richards
Keith Urban
Kendrick Lamar
Kenny Burrell
Kevin Coyne
The Killers
Killing Joke
Kim Carnes
The Kinks
Kip Moore
Kiss
The Kooks
Kool And The Gang
L
LL Cool J
Lady A
Lady GaGa
Lana Del Rey
Laura Marling
Led Zeppelin
Lee ‘Scratch’ Perry
Lenny Kravitz
Leon Russell
Lester Young
Level 42
The Libertines
Lightnin’ Hopkins
Lil Wayne
Linton Kwesi Johnson
Lionel Richie
Little Big Town
Little Richard
Lloyd Cole
Lorde
Louis Armstrong
Lucinda Williams
Ludacris
Ludovico Einaudi
Luke Bryan
Lulu
The Lumineers
Lynyrd Skynyrd
M
Maddie & Tae
Madonna
Magazine
The Mamas & The Papas
Marc Almond
Marilyn Manson
Mark Knopfler
Maroon 5
Martha Reeves & The Vandellas
The Marvelettes
Marvin Gaye
Mary Hopkin
Mary J. Blige
Mary Wells
Massive Attack
Master P
The Mavericks
Maxi Priest
McCoy Tyner
Meat Loaf
Megadeth
Melody Gardot
Metallica
Method Man
Michael Jackson
Michael Nyman
Mike & the Mechanics
Mike Oldfield
Miles Davis
Minnie Riperton
The Moody Blues
Morrissey
Motörhead
Muddy Waters
Mumford & Sons
Mötley Crüe
N
N.W.A
Nanci Griffith
Nas
Nat King Cole
Nazareth
Ne-Yo
Neil Diamond
Neil Young
Nelly
Neneh Cherry
New Edition
New York Dolls
Nick Drake
Nicki Minaj
Nik Kershaw
Nina Simone
Nine Inch Nails
Nirvana
The Nitty Gritty Dirt Band
No Doubt
Norah Jones
O
OMD
Ocean Colour Scene
OneRepublic
Onyx
Oscar Peterson
Otis Redding
The Ozark Mountain Daredevils
P
PJ Harvey
Papa Roach
Pat Benatar
Pato Banton
Patsy Cline
Patty Griffin
Paul McCartney and Wings
Paul Simon
Paul Weller
Peaches & Herb
Pearl Jam
Peggy Lee
Pete Townshend
Peter Frampton
Phil Collins
Phil Manzanera
PiL (Public Image Ltd)
Pink Floyd
Placebo
Poco
Poison
The Police
Portishead
Prince
Public Enemy
Pulp
Q
Queen
Queens Of The Stone Age
Quicksilver Messenger Service
Quincy Jones
R
R.E.M.
Rainbow
Rammstein
Ray Charles
Reba McEntire
Red Hot Chili Peppers
Redman
Richie Havens
Rick James
Rick Nelson
Rick Ross
Rick Wakeman
The Righteous Brothers
Rihanna
Ringo Starr
Rise Against
Rob Zombie
Robbie Williams
Robert Cray
Robert Glasper
Robert Palmer
Robert Plant
Rod Stewart
Roger Daltrey
The Rolling Stones
Ronnie Lane
Ronnie Wood
Rory Gallagher
The Roots
Rosanne Cash
Roxy Music
Roy Orbison
Ruff Ryders
Rufus Wainwright
Rush
The Ruts
S
Saint Etienne
Salt-n-Pepa
Sam Cooke
Sam Hunt
Sam Smith
Sammy Hagar
Sandy Denny
Schiller
Scorpions
Scott Walker
Secret Garden
Sensational Alex Harvey Band
Serge Gainsbourg
Sergio Mendes
Sex Pistols
Shaggy
Sham 69
Shania Twain
Sheryl Crow
Simple Minds
Siouxsie & The Banshees
Slayer
Slick Rick
Sly & Robbie
Small Faces
The Smashing Pumpkins
Smokey Robinson
Smokey Robinson & The Miracles
Snoop Dogg
Snow Patrol
Soft Cell
Sonic Youth
Sonny Boy Williamson
Soul II Soul
Soundgarden
Spandau Ballet
Sparks
Spice Girls
Stan Getz
The Statler Brothers
Status Quo
Steel Pulse
Steely Dan
Steppenwolf
Stereo MCs
Stereophonics
Steve Earle
Steve Hackett
Steve Hillage
Steve Miller Band
Steve Winwood
Steven Tyler
Stevie Wonder
Sting
The Style Council
Styx
Sublime
Sum 41
Supertramp
Suzanne Vega
T
T-Bone Walker
T. Rex
Take That
Tammi Terrell
Tangerine Dream
Taylor Swift
Tears For Fears
Teena Marie
Temple Of The Dog
The Temptations
Tesla
Texas
Thelma Houston
Thelonious Monk
Thin Lizzy
Thomas Rhett
Three Dog Night
Tim McGraw
Toby Keith
Tom Jones
Tom Petty
Tom Waits
Toots & The Maytals
Tori Amos
Traffic
Traveling Wilburys
The Tubes
U
U2
UB40
Ultravox
Underworld
V
Van der Graaf Generator
Vangelis
The Velvet Underground
The Verve
Vince Gill
W
The Walker Brothers
Weezer
Wes Montgomery
Wet Wet Wet
will.i.am
Whitesnake
The Who
William Orbit
Willie Nelson
Wilson Pickett
Wishbone Ash
Wolfmother
Y
Yeah Yeah Yeahs
Yello
Yes
Z
Zucchero""".split("\n")
from string import ascii_uppercase
data = []
for line in txt:
artist = line
artist = artist.split(" (born")[0]
artist = artist.split(" (1")[0]
artist = artist.split(" (b")[0]
artist = artist.split(" (c")[0]
artist = artist.split(" (p")[0]
artist = artist.split(", ")[0]
artist = artist.split(" – ")[0]
artist = artist.split(" - ")[0]
artist = artist.replace("(band)", "").strip()
artist = artist.replace("(singer)", "").strip()
artist = artist.replace("(group)", "").strip()
artist = artist.replace("(rapper)", "").strip()
if artist in ascii_uppercase:
continue
data.append(artist)
from pandas import Series, DataFrame
udiscovermusic = DataFrame(Series(data))
udiscovermusic.columns = ["Artists"]
udiscovermusic.head()
saveFile(idata=udiscovermusic, ifile="udiscovermusic/artists.p", debug=True)
###Output
Saving data to udiscovermusic/artists.p
--> This file is 7.0kB.
Saved data to udiscovermusic/artists.p
--> This file is 7.0kB.
###Markdown
MusixMatch
###Code
url="https://www.musixmatch.com/artist/La-Vida-Moderna"
savename = "musixmatch.p"
if not isFile(savename):
data, code = downloadURL(url)
saveFile(idata=data, ifile=savename)
sleep(3)
bsdata = getHTML("musixmatch.p")
bsdata
from fsUtils import isFile
url="https://www.musixmatch.com/search/Dave%20Matthews%20Band"
savename = "musixmatch_search.p"
if not isFile(savename):
data, code = downloadURL(url)
saveFile(idata=data, ifile=savename)
sleep(3)
from webUtils import getHTML
bsdata = getHTML(savename)
bsdata
div = bsdata.find("div", {"id": "search-results"})
if div is not None:
artists = div.find("div", {"class": "search-results"})
print(artists)
from fsUtils import isFile
url="https://www.musixmatch.com/search/Dave%20Matthews%20Band/artists"
savename = "musixmatch_searchartist.p"
if not isFile(savename):
data, code = downloadURL(url)
saveFile(idata=data, ifile=savename)
sleep(3)
bsdata = getHTML(savename)
bsdata
div = bsdata.find("div", {"id": "search-results"})
if div is not None:
artists = div.find("div", {"class": "search-results"})
for ul in artists.findAll("ul"):
for li in ul.findAll("li"):
h2 = li.find("h2")
if h2 is None:
continue
ref = h2.find('a')
if ref is None:
continue
href = ref.attrs['href']
name = ref.text
print("{0: <100}{1}".format(name,href))
url="https://www.musixmatch.com/artist/Dave-Matthews-Band"
savename = "musixmatch_dmb.p"
if not isFile(savename):
data, code = downloadURL(url)
saveFile(idata=data, ifile=savename)
sleep(3)
bsdata = getHTML(savename)
bsdata
for ul in bsdata.findAll("ul"):
for li in ul.findAll("li"):
h2 = li.find("h2")
if h2 is None:
continue
ref = h2.find('a')
if ref is None:
continue
href = ref.attrs['href']
name = ref.text
print("{0: <100}{1}".format(name,href))
url="https://www.musixmatch.com/artist/Dave-Matthews-Band/albums"
savename = "musixmatch_dmbalbums.p"
if not isFile(savename):
data, code = downloadURL(url)
saveFile(idata=data, ifile=savename)
sleep(3)
bsdata = getHTML(savename)
for ul in bsdata.findAll("ul"):
for li in ul.findAll("li"):
h2 = li.find("h2")
if h2 is None:
continue
ref = h2.find('a')
if ref is None:
continue
href = ref.attrs['href']
name = ref.text
print("{0: <100}{1}".format(name,href))
###Output
Come Tomorrow /album/Dave-Matthews-Band/Come-Tomorrow
Again And Again /album/Dave-Matthews-Band/Again-and-Again
That Girl Is You /album/Dave-Matthews-Band/That-Girl-Is-You
Samurai Cop (Oh Joy Begin) /album/Dave-Matthews-Band/Samurai-Cop-Oh-Joy-Begin
Live Trax Vol. 30: The Muse, Nantucket, MA /album/Dave-Matthews-Band/Live-Trax-Vol-30-The-Muse-Nantucket-MA
Live Trax Vol. 29: Blossom Music Center, Cuyahoga Falls, OH (Live) /album/Dave-Matthews-Band/Live-Trax-Vol-29-Blossom-Music-Center-Cuyahoga-Falls-OH
Live Trax Vol. 28: John Paul Jones Arena /album/Dave-Matthews-Band/Live-Trax-Vol-28-John-Paul-Jones-Arena
Live Trax Vol. 27: Luna Park, Buenos Aires, Argentina (Live) /album/Dave-Matthews-Band/Live-Trax-Vol-27-Luna-Park-Buenos-Aires-Argentina
Live Trax Vol. 26: Sleep Train Amphitheater /album/Dave-Matthews-Band/Live-Trax-Vol-26-Sleep-Train-Amphitheater-Live
Live Trax Vol. 23: Whittemoore Center Arena /album/Dave-Matthews-Band/Live-Trax-Vol-23-Whittemoore-Center-Arena
Away From the World /album/Dave-Matthews-Band/Away-From-the-World-3
If Only /album/Dave-Matthews-Band/If-Only
The Collection /album/Dave-Matthews-Band/The-Collection
Mercy /album/Dave-Matthews-Band/Mercy
Live Trax Vol. 22: Montage Mountain /album/Dave-Matthews-Band/Live-Trax-Vol-22-Montage-Mountain
Live Trax, Vol. 21: SOMA (Live) /album/Dave-Matthews-Band/Live-Trax-Vol-21-SOMA
Live Trax, Vol. 20: Wetlands Preserve /album/Dave-Matthews-Band/Live-Trax-Vol-20-Wetlands-Preserve
Live Trax Vol. 20: Wetlands Preserve /album/Dave-Matthews-Band/Live-Trax-Vol-20-Wetlands-Preserve-2
Live At Wrigley Field /album/Dave-Matthews-Band/Live-At-Wrigley-Field-2
Live Trax Vol. 19: Vivo Rio /album/Dave-Matthews-Band/Live-Trax-Vol-19-Vivo-Rio
Live In New York City /album/Dave-Matthews-Band/Live-In-New-York-City-2
Dave Matthews Band (Live In Rio) /album/Dave-Matthews-Band/Dave-Matthews-Band-Live-In-Rio
Dave Matthews Band - Live in Rio /album/283/21269460
Live Trax Vol. 18: Virginia Beach Amphitheatre /album/Dave-Matthews-Band/Live-Trax-Vol-18-Virginia-Beach-Amphitheatre
Live Trax, Vol. 18: Virginia Beach Amphitheater /album/Dave-Matthews-Band/Live-Trax-Vol-18-Virginia-Beach-Amphitheater
Europe 2009 /album/Dave-Matthews-Band/Europe-2009-2
Live Trax Vol. 17: Shoreline Amphitheatre /album/Dave-Matthews-Band/Live-Trax-Vol-17-Shoreline-Amphitheatre-2
Live Trax, Vol. 17: Shoreline Amphitheatre /album/Dave-Matthews-Band/Live-Trax-Vol-17-Shoreline-Amphitheatre
Dave Matthews Band Live In Europe /album/Dave-Matthews-Band/Dave-Matthews-Band-Live-In-Europe
Live Trax, Vol. 16: Riverbend Music Center /album/Dave-Matthews-Band/Live-Trax-Vol-16-Riverbend-Music-Center
Live Trax Vol. 16: Riverbend Music Center /album/Dave-Matthews-Band/Live-Trax-Vol-16-Riverbend-Music-Center-2
Time Bomb /album/283/21269469
You & Me /album/283/21269470
Beach Ball /album/283/21269472
Alligator Pie /album/283/21269473
Write A Song /album/283/21269480
Shake Me Like a Monkey /album/283/21269475
Lying in the Hands of God /album/283/21269474
Why I Am /album/283/21269479
Live Trax Vol. 15: Alpine Valley Music Theatre /album/Dave-Matthews-Band/Live-Trax-Vol-15-Alpine-Valley-Music-Theatre-2
Live Trax, Vol.15: Alpine Valley Music Theatre /album/Dave-Matthews-Band/Live-Trax-Vol-15-Alpine-Valley-Music-Theatre
Big Whiskey and the GrooGrux King /album/Dave-Matthews-Band/Big-Whiskey-and-the-GrooGrux-King-3
Funny the Way It Is /album/283/21269478
Live Trax Vol. 14: Nissan Pavilion /album/Dave-Matthews-Band/Live-Trax-Vol-14-Nissan-Pavilion
Live Trax, Vol. 14: Nissan Pavilion At Stone Ridge /album/Dave-Matthews-Band/Live-Trax-Vol-14-Nissan-Pavilion-At-Stone-Ridge
Live Trax, Vol. 13: Busch Stadium /album/Dave-Matthews-Band/Live-Trax-Vol-13-Busch-Stadium
DMB Live Trax Vol. 13 /album/283/20942133
Live At Mile High Music Festival /album/Dave-Matthews-Band/Live-At-Mile-High-Music-Festival-2
Live Trax 2008 /album/Dave-Matthews-Band/Live-Trax-2008
Live Trax, Vol. 11: SPAC /album/Dave-Matthews-Band/Live-Trax-Vol-11-SPAC
Live Trax Vol. 13: Busch Stadium /album/Dave-Matthews-Band/Live-Trax-Vol-13-Busch-Stadium-3
Live Trax Vol. 11: SPAC /album/Dave-Matthews-Band/Live-Trax-Vol-11-SPAC-2
Live Trax, Vol. 12: L.B. Day Amphitheater /album/Dave-Matthews-Band/Live-Trax-Vol-12-L-B-Day-Amphitheater
Live Trax Vol. 12: L.B. Day Amphitheater /album/Dave-Matthews-Band/Live-Trax-Vol-12-L-B-Day-Amphitheater-2
Louisiana Bayou /album/Dave-Matthews-Band/Louisiana-Bayou
Live At Piedmont Park /album/Dave-Matthews-Band/Live-At-Piedmont-Park-3
Live Trax, Vol. 6: Fenway Park /album/Dave-Matthews-Band/Live-Trax-Vol-6-Fenway-Park
Live Trax, Vol. 10: Pavilion Atlantico /album/Dave-Matthews-Band/Live-Trax-Vol-10-Pavilion-Atlantico
Live Trax Vol. 10: Pavilion Atlantico /album/Dave-Matthews-Band/Live-Trax-Vol-10-Pavilion-Atlantico-2
Live Trax, Vol. 9: MGM Grand Garden Arena /album/Dave-Matthews-Band/Live-Trax-Vol-9-MGM-Grand-Garden-Arena
Live Trax Vol. 9: MGM Grand Garden Arena /album/Dave-Matthews-Band/Live-Trax-Vol-9-MGM-Grand-Garden-Arena-2
Live Trax, Vol. 8: Alpine Valley Music Theatre /album/Dave-Matthews-Band/Live-Trax-Vol-8-Alpine-Valley-Music-Theatre
Live Trax Vol. 8: Alpine Valley Music Theatre /album/Dave-Matthews-Band/Live-Trax-Vol-8-Alpine-Valley-Music-Theatre-2
Live Trax Vol. 7: Hampton Coliseum /album/Dave-Matthews-Band/Live-Trax-Vol-7-Hampton-Coliseum-2
Live Trax, Vol. 7: Hampton Coliseum /album/Dave-Matthews-Band/Live-Trax-Vol-7-Hampton-Coliseum
Dave Matthews Band - The Best Of What's Around - Vol.1 /album/283/21269463
The Best of What's Around, Vol. 1 /album/Dave-Matthews-Band/The-Best-of-What-s-Around-Vol-1
Dave Matthews Band - The Best of What's Around, Vol.1 /album/Dave-Matthews-Band/Dave-Matthews-Band-The-Best-of-What-s-Around-Vol-1
Live Trax, Vol. 5: Meadow Brook Music Festival /album/Dave-Matthews-Band/Live-Trax-Vol-5-Meadow-Brook-Music-Festival
Live Trax Vol. 5: Meadow Brook Music Festival /album/Dave-Matthews-Band/Live-Trax-Vol-5-Meadow-Brook-Music-Festival-2
Live Trax Vol. 6: Fenway Park /album/Dave-Matthews-Band/Live-Trax-Vol-6-Fenway-Park-3
Weekend On The Rocks /album/Dave-Matthews-Band/Weekend-on-the-Rocks
Live Trax, Vol. 4: Classic Amphitheatre /album/Dave-Matthews-Band/Live-Trax-Vol-4-Classic-Amphitheatre
Live Trax Vol. 4: Classic Amphitheatre /album/Dave-Matthews-Band/Live-Trax-Vol-4-Classic-Amphitheatre-2
Stand Up /album/Dave-Matthews-Band/Stand-Up
American Baby /album/Dave-Matthews-Band/American-Baby-2
Live Trax Vol. 3: Meadows Music Theatre /album/Dave-Matthews-Band/Live-Trax-Vol-3-Meadows-Music-Theatre
Live Trax, Vol. 2: Golden Gate Park /album/Dave-Matthews-Band/Live-Trax-Vol-2-Golden-Gate-Park
Live Trax Vol. 2: Golden Gate Park /album/Dave-Matthews-Band/Live-Trax-Vol-2-Golden-Gate-Park-2
Live Trax Vol. 1: Centrum Centre /album/Dave-Matthews-Band/Live-Trax-Vol-1-Centrum-Centre-2
Live Trax, Vol. 1: Centrum Centre /album/Dave-Matthews-Band/Live-Trax-Vol-1-Centrum-Centre
The Gorge /album/283/21269465
The Central Park Concert /album/Dave-Matthews-Band/The-Central-Park-Concert-2
Live At Folsom Field Boulder Colorado /album/Dave-Matthews-Band/Live-At-Folsom-Field-Boulder-Colorado-4
Live At Folsom Field, Boulder, Colorado /album/Dave-Matthews-Band/Live-At-Folsom-Field-Boulder-Colorado
Busted Stuff /album/Dave-Matthews-Band/Busted-Stuff
Live In Chicago 12/19/98 /album/Dave-Matthews-Band/Live-In-Chicago-12-19-98
Live In Chicago 12.19.98 - At the United Center /album/Dave-Matthews-Band/Live-In-Chicago-12-19-98-At-the-United-Center
Live In Chicago 12.19.98 at The United Center /album/Dave-Matthews-Band/Live-In-Chicago-12-19-98-at-The-United-Center-2
Everyday /album/Dave-Matthews-Band/Everyday
Listener Supported [Live] /album/Dave-Matthews-Band/Listener-Supported-Live-2
Listener Supported /album/Dave-Matthews-Band/Listener-Supported
|
notebooks/tide_demo.ipynb | ###Markdown
Compute tide corrections on pointCollection data objects
###Code
import glob
import re
import numpy as np
import pointCollection as pc
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from pyTMD.compute_tide_corrections import compute_tide_corrections
###Output
_____no_output_____
###Markdown
Compute tides for a series of points
###Code
xlimits = np.array([-740000,520000])
ylimits = np.array([-1430000,-300000])
x=np.linspace(xlimits[0],xlimits[1],24)
y=np.linspace(ylimits[0],ylimits[1],24)
t=np.zeros((24))*3600
D = pc.data().from_dict({'x':x,'y':y,'t':t})
print(D)
D.tide = compute_tide_corrections(D.x,D.y,D.t,
DIRECTORY='/Volumes/ice1/tyler/tide_models',MODEL='CATS2008',
EPOCH=(2000,1,1,0,0,0), TYPE='drift', TIME='utc')
projection = ccrs.Stereographic(central_longitude=0.0,
central_latitude=-90.0, true_scale_latitude=-71.0)
fig,(ax1,ax2) = plt.subplots(ncols=2,subplot_kw=dict(projection=projection))
ax1.scatter(D.x, D.y, c=D.t, transform=projection)
ax2.scatter(D.x, D.y, c=D.tide, transform=projection)
ax1.coastlines('10m')
ax2.coastlines('10m')
###Output
_____no_output_____
###Markdown
Compute tides for an image
###Code
x = np.arange(xlimits[0],xlimits[1]+10000,10000)
y = np.arange(ylimits[1],ylimits[0]-10000,-10000)
xgrid,ygrid = np.meshgrid(x,y)
t = 0.0
G = pc.data().from_dict({'x':xgrid,'y':ygrid,'t':t})
G.tide = compute_tide_corrections(G.x,G.y,G.t,
DIRECTORY='/Volumes/ice1/tyler/tide_models',MODEL='CATS2008',
EPOCH=(2000,1,1,0,0,0), TYPE='grid', TIME='utc')
fig,ax3 = plt.subplots(num=2,subplot_kw=dict(projection=projection))
ax3.imshow(G.tide[:,:,0],extent=(xlimits[0],xlimits[1],ylimits[0],ylimits[1]),
origin='upper', interpolation='nearest')
ax3.coastlines('10m')
###Output
_____no_output_____ |
downloaded_kernels/university_rankings/kernel_171.ipynb | ###Markdown
###Code
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
import matplotlib as plt
import seaborn as sns
%matplotlib inline
df = pd.read_csv('../input/timesData.csv')
odf_cp1 = df.copy()
odf_cp1['international_students'].replace('',np.nan, inplace=True) # replace all empty cells with NaN
odf_cp1['international_students'] = odf_cp1['international_students'].str.replace('%', '')
odf_cp1['international_students'] = odf_cp1['international_students'].astype(np.float)
print ("The mean of international student percentage:"
+ str(odf_cp1['international_students'].mean())+"%")
print ("The university with highest percent of international students:")
odf_cp1[odf_cp1['international_students'] == odf_cp1['international_students'].max()]
print ("The universities with lowest percent of international students:")
odf_cp1[odf_cp1['international_students'] == odf_cp1['international_students'].min()].head()
print("The mean of student_staff_ratio is:")
df['student_staff_ratio'].mean()
print("The world_ranking of those universities with student_staff_ratio under 3.0 is:")
df[df['student_staff_ratio']<3].loc[:, ['student_staff_ratio',
'world_rank', 'university_name']]
print ("""To solve: How do I show all at once without writing code for each year?""")
print ("\nThe amount of universities from each country in top 100 world_rank (per year)")
g = df[['country', 'year', 'university_name']][df['world_rank'].str.len() < 3 ]
g.groupby(['year', 'country']).count()
print ('Average university enrollment does not seem to be increasing')
odf_cp2 = df.copy()
odf_cp2['num_students'] = odf_cp2['num_students'].str.replace(',','')
odf_cp2['num_students'] = odf_cp2['num_students'].astype(np.float)
odf_cp2.groupby('year')['num_students'].mean()
"""Cleaning female_male_ratio column"""
odf_cp3 = df.copy()
odf_cp3 = odf_cp3[odf_cp3['female_male_ratio'].str.len()>0] #keep only cells that are not empty
odf_cp3['female_male_ratio'] = odf_cp3['female_male_ratio'].str.replace('-','0')
odf_cp3['female_male_ratio'] = odf_cp3['female_male_ratio']\
.str.split(':', expand=True)#'expand' returns a dataframe
#instead of a list
odf_cp3['female_male_ratio'] = odf_cp3['female_male_ratio']\
.str[0:2] #grabs first 2 characters of the string in cell
odf_cp3['female_male_ratio'] = odf_cp3['female_male_ratio'].astype(np.float)
print('The university with highest percentage of female students')
odf_cp3[odf_cp3['female_male_ratio']==odf_cp3['female_male_ratio'].max()]
print('The percentage of female students has not increased.')
odf_cp3.groupby('year')['female_male_ratio'].mean()
print ('There is no correlation between rank of university and student to staff ratio')
odf_cp5 = df.copy()
# convert world rank columns to float (where necessary)
f = lambda x: int((int(x.split('-')[0]) + int(x.split('-')[1])) / 2) if len(str(x).strip()) > 3 else x
odf_cp5['world_rank'] = odf_cp5['world_rank'].str.replace('=','').map(
f).astype('float')
vis1 = sns.lmplot(data=odf_cp5, x='student_staff_ratio', y='world_rank', \
fit_reg=False, hue='year', size=7, aspect=1)
print('Correlation between university rank and score.')
odf_cp4 = df.copy()
odf_cp4 = odf_cp4[odf_cp4['total_score'].str.len()>1] #cell with values '-' will be dropped
odf_cp4['total_score'] = odf_cp4['total_score'].astype(np.float)
# convert world rank columns to float (where necessary)
f = lambda x: int((int(x.split('-')[0]) + int(x.split('-')[1])) / 2) if len(str(x).strip()) > 3 else x
odf_cp4['world_rank'] = odf_cp4['world_rank'].str.replace('=','').map(
f).astype('float')
vis2 = sns.lmplot(data=odf_cp4, x='total_score', y='world_rank', \
fit_reg=False, hue='year', size=7, aspect=1)
###Output
_____no_output_____ |
callback.ipynb | ###Markdown
###Code
import tensorflow as tf
import numpy as np

class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        # Stop training once the tracked accuracy passes 80%
        if logs.get('accuracy', 0) > 0.8:
            print('Accuracy Achieved, Training Stopped...')
            self.model.stop_training = True

callbacks = myCallback()

# Deep Learning Model
model = tf.keras.Sequential([tf.keras.layers.Dense(units=1,
                                                   input_shape=[1])])

# The model must be compiled (tracking 'accuracy') before fit() will run;
# XTrain/YTrain are illustrative placeholders, since the notebook never defines them.
model.compile(optimizer='sgd', loss='mean_squared_error', metrics=['accuracy'])
XTrain = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=float)
YTrain = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=float)

# Model Training
model.fit(XTrain, YTrain, epochs=10, callbacks=[callbacks])
###Output
_____no_output_____ |
notebooks/Week6_Exercises.ipynb | ###Markdown
Introduction to Programming in Python: Functions

This practical is the final one in our exploration of basic concepts in programming. It builds on the previous three. It is based on an amalgamation of courses and examples, including the following (which you are welcome to explore on your own!):
1. Introduction to programming for Geoscientists (with Python) by Gerard Gorman and Christian Jacobs: http://ggorman.github.io/Introduction-to-programming-for-geoscientists/lecture_series/
2. Introduction to scientific programming in Python by the UCL graduate school: http://www.cs.ucl.ac.uk/scipython/index.html
3. Programming with Python by Software Carpentry: http://swcarpentry.github.io/python-novice-inflammation/
4. CS For All: Introduction to Computer Science and Python Programming by HarveyMuddX at edX: https://www.edx.org/course/cs-all-introduction-computer-science-harveymuddx-cs005x-0

**Recommended Reading**: *Think Python*, Sections [3.1](http://greenteapress.com/thinkpython/html/thinkpython004.htmltoc24), [3.5](http://greenteapress.com/thinkpython/html/thinkpython004.htmltoc28)-[3.9](http://greenteapress.com/thinkpython/html/thinkpython004.htmltoc32), [6.1](http://greenteapress.com/thinkpython/html/thinkpython007.htmltoc66) and *SciPy Lecture Notes*, Sections [1.2.4.1](http://www.scipy-lectures.org/intro/language/functions.htmlfunction-definition)-[1.2.4.3](http://www.scipy-lectures.org/intro/language/functions.htmlparameters)

MAKE SURE TO EXECUTE THIS CELL BEFORE YOU START YOUR WORK!
###Code
import numpy
import matplotlib.pyplot as pyplot
%matplotlib inline
###Output
_____no_output_____
###Markdown
EXERCISE 1a
The function we defined in the main notebook (**`F_to_C`**) is copied below. First, execute the function cell so that the function has been defined in this notebook.
###Code
def F_to_C(Temp_in_F):
Temp_in_C = (5.0/9.0)*(Temp_in_F-32)
return Temp_in_C
###Output
_____no_output_____
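###Markdown
As a quick illustration (this is not one of the exercise answers), a single call to the function above looks like this; any Fahrenheit value works:
###Code
# Example call: convert the boiling point of water (212 F) to Celsius
boiling_C = F_to_C(212)
print(boiling_C)   # prints 100.0
###Output
_____no_output_____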
###Markdown
Now, convince yourself that you understand how to call a function by using the function we have already defined (**`F_to_C`**) to calculate the temperature in degrees Celsius when the temperature in Fahrenheit is:
- 0
- 32
- 99

Try using different variable names each time. ***You do not need to redefine the function. Just use it with different input values for F.*** Check your answer using Google.

EXERCISE 1b
Let's take a closer look at the syntax of function definitions by - as usual - finding out what kind of errors we get when we make a mistake. Try out each of the examples below without changing anything so you see the error message, then fix the error. Also add a comment explaining what the error was.

**i. Original**:
###Code
# Define the function
def F_to_C(Temp_in_F)
Temp_in_C = (5.0/9.0)*(Temp_in_F-32)
return Temp_in_C
# Test the function
print( F_to_C(100) )
###Output
_____no_output_____
###Markdown
**i. Fixed**:

**ii. Original**:
###Code
# Define the function
def F_to_C(Temp_in_F):
Temp_in_C = (5.0/9.0)*(F_Temp-32)
return Temp_in_C
# Test the function
print( F_to_C(100) )
###Output
_____no_output_____
###Markdown
**ii. Fixed**:

**iii. Original**:
###Code
# Define the function
def F_to_C(Temp_in_F):
Temp_in_C = (5.0/9.0)*(Temp_in_F-32)
return Temp_in_C
# Test the function
print( F_to_C_conversion(100) )
###Output
_____no_output_____
###Markdown
**iii. Fixed**:

**iv. Original**:
###Code
# Define the function
def F_to_C(Temp_in_F):
Temp_in_C = (5.0/9.0)*(Temp_in_F-32)
return
# Test the function
print( F_to_C(100) )
###Output
_____no_output_____
###Markdown
**iv. Fixed**:

**v. Original**:
###Code
# Define the function
def F_to_C(Temp_in_F):
Temp_in_C = (5.0/9.0)*(Temp_in_F-32)
return Temp_in_C
# Test the function
print( F_to_C(100) )
###Output
_____no_output_____
###Markdown
**v. Fixed**:

EXERCISE 2a
Recall that once we know the solar flux $S$ at a given distance $r$, we can also calculate the effective temperature of a planet $T_e$ in Kelvin (in the absence of an atmosphere):

$$T_e(r) = \left (\dfrac{S}{4\times\sigma}\times(1-A) \right )^{0.25}$$

where:
- $\sigma$ is the Stefan-Boltzmann constant, equal to $5.67\times 10^{-8}$ W/m$^2$/K$^4$
- $A$ is the albedo (the amount of solar energy that is reflected by the planet's surface), and is unitless. For now, you can assume $A$ = 0.3.

1. Adapt the function `solar_flux` (copied below), so that ***after*** it calculates $S$, it then calculates $T_e$. (**Hint**: leave the existing lines intact but add an extra line after the line that starts `S=`...)
2. Give the function a new name (suggested name: effective_temperature), and make sure that it takes $r$ as input and returns $T_e$ as output.
3. Test your function by **calling** (i.e. using) it for $r$ values between 1 and 20. Remember, after you ***define*** the function you need to ***use*** the function to get output!
4. Plot the result with $T_e$ on the y-axis and $r$ on the x-axis.

**Hint**: We programmed this equation in the Week 2 practical. If you want to save yourselves a headache now, copy the function from that practical. *(Side note: when you're programming, you should always look for shortcuts like this that reduce the risk of errors.)*

**Extra**: You can actually call one function within another function - so you could leave the function `solar_flux` intact and use it inside your new `effective_temperature` function.
###Code
# Function to compute the solar flux at a given distance
def solar_flux(r):
# Define constant variables
r0 = 1.00 # Distance from sun to Earth in AU
S0 = 1366 # Solar flux at Earth
# Add Albedo and sigma
A = 0.3 # albedo, unitless
sigma = 5.67e-8 # Stefan-Boltzmann constant, in W/m2/K4
# Use the equation to calculate the solar flux
S = S0*(r0/r)**2
# return solar flux S
return S
# Call the function
# Plot the result
###Output
_____no_output_____
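###Markdown
One possible shape for the adapted function is sketched below. This is only an illustration of the structure Exercise 2a asks for (it reuses the constants from `solar_flux` and the $T_e$ equation above, and assumes the setup cell with `numpy` and `pyplot` has been run); your own version may differ.
###Code
def effective_temperature(r):
    # Constants, as in solar_flux above
    r0 = 1.00           # Distance from sun to Earth in AU
    S0 = 1366           # Solar flux at Earth, in W/m2
    A = 0.3             # albedo, unitless
    sigma = 5.67e-8     # Stefan-Boltzmann constant, in W/m2/K4
    # Solar flux at distance r, then effective temperature from the equation above
    S = S0*(r0/r)**2
    Te = ((S/(4*sigma))*(1-A))**0.25
    return Te

# Call the sketch for r between 1 and 20 AU and plot Te against r
r = numpy.linspace(1, 20, 20)
Te = effective_temperature(r)
pyplot.plot(r, Te)
pyplot.xlabel('Distance (AU)')
pyplot.ylabel('Effective temperature (K)')
###Output
_____no_output_____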
###Markdown
EXERCISE 2b
1. Copy your function from Exercise 2a and adapt it so that it returns **both $S$ and $T_e$** as outputs. (**Hint**: You only need to change ONE line!)
2. Call (use) your function with $r$ values between 1 and 20, storing the $S$ and $T_e$ output in **two** new variables.
3. Plot the output with $S$ on the x-axis and $T_e$ on the y-axis.
###Code
# Copy and adapt the function
# Call the function
# Plot the result
###Output
_____no_output_____
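###Markdown
A minimal sketch of the change Exercise 2b describes: returning two values from one function simply means listing them both after `return`, and the call then unpacks them into two variables. Treat this as an illustration of the pattern rather than the required answer.
###Code
def flux_and_temperature(r):
    r0, S0 = 1.00, 1366          # AU, W/m2
    A, sigma = 0.3, 5.67e-8      # unitless, W/m2/K4
    S = S0*(r0/r)**2
    Te = ((S/(4*sigma))*(1-A))**0.25
    return S, Te                 # returning two values

# Unpack both outputs, then plot Te against S
S, Te = flux_and_temperature(numpy.linspace(1, 20, 20))
pyplot.plot(S, Te)
pyplot.xlabel('Solar flux (W/m2)')
pyplot.ylabel('Effective temperature (K)')
###Output
_____no_output_____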
###Markdown
EXERCISE 2c
1. Copy your function from Exercise 2b and adapt it so that the albedo (`A` in the equation) becomes an additional input argument (instead of being set as a constant in the function).
2. Call (use) your function with $A$ = 0.3 and $r$ values between 1 and 20, storing the $S$ and $T_e$ output and plotting $T_e$ as a function of $r$ (this should look just like the plot in 2a, with $T_e$ on the y-axis and $r$ on the x-axis).
3. Repeat step 2, but this time with A=0.9 (an icy planet!). Does the temperature look different? (**Hint**: it should -- check the y-axis values!)
###Code
# Copy and adapt the function
# Call the function with A = 0.3 and plot the result
# Call the function with A = 0.9 and plot the result
###Output
_____no_output_____
###Markdown
EXERCISE 2d
1. Copy your function from Exercise 2c and adapt it so that the albedo (`A`) has a default value of 0.3.
2. Call (use) the function for $r$ values between 1 and 20, and plot temperature ($T_e$) as a function of distance ($r$). Do this three different times:
   A. Specify in your call that A = 0.3
   B. Do not specify a value for A
   C. Specify that A = 0.6

The temperatures that you find for A and B should be the same. The temperature that you find for C should be colder!
###Code
# Copy and adapt the function
# A. Call the function specifying A = 0.3 and plot the result
# B. Call the function without specifying A and plot the result
# C.Call the function specifying A = 0.6 and plot the result
###Output
_____no_output_____
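###Markdown
The new idea in Exercise 2d is the default value in the function signature. A minimal sketch of that pattern (again, an illustration rather than the required answer):
###Code
def effective_temperature_default(r, A=0.3):
    # A defaults to 0.3 whenever the caller does not specify it
    r0, S0, sigma = 1.00, 1366, 5.67e-8
    S = S0*(r0/r)**2
    Te = ((S/(4*sigma))*(1-A))**0.25
    return Te

r = numpy.linspace(1, 20, 20)
print(effective_temperature_default(r, A=0.3)[0])   # A given explicitly
print(effective_temperature_default(r)[0])          # same result: default A = 0.3
print(effective_temperature_default(r, A=0.6)[0])   # higher albedo, colder planet
###Output
_____no_output_____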
###Markdown
EXERCISE 2e
Copy the code you used to make the final plot from Exercise 2d here and modify it so that it:
- Uses any color **except** red or blue (bonus points for creativity!).
- Shows points and the line through them
- Zooms in on the region between 5 and 15 AU.

EXERCISE 3: BRINGING IT ALL TOGETHER
This final exercise is designed to give you a chance to practice everything you have learned so far today (and in the last few weeks). Make sure to spend time thinking about what your answers **mean**.

This week, we will continue developing an understanding of Daisyworld (look back to the previous practicals and/or the textbook if you don't remember!). Two weeks ago, we made a plot that showed how the temperature of the Daisyworld planet changed as the number of daisies increased. Last week, we made a plot that showed how daisy growth changes as temperature increases. This time we will start putting these two ideas together.

EXERCISE 3a
We will start by writing a **function** that calculates the temperature of the planet when the fraction covered by daisies is provided as an input.

Luckily, we have already written most of the code! Back in Week 3 (Exercise 5), we determined that **if we know what fraction of the planet is covered by flowers (`frac_flower`) we can calculate the temperature (`Te`)**:
###Code
# Define the albedos of flowers and the albedo of soil (unitless)
albedo_flower = 0.75
albedo_soil = 0.4
# The fraction covered by soil is the rest of the planet, or 1.0 - the fraction covered by flowers
frac_soil = 1.0 - frac_flower
# The albedo is the sum of the two albedos after each has been multiplied by its fractional coverage.
albedo = frac_soil * albedo_soil + frac_flower * albedo_flower
# Define the solar flux on Daisyworld, in W/m2
S = 3700.0
# Define the Stefan-Boltzmann constant, in W/m2/K4
sigma = 5.67e-8
# Define the temperature as a function of the albedo
Te = ((S*(1-albedo))/(4*sigma))**0.25
###Output
_____no_output_____
###Markdown
**For this exercise:**
1. Copy the code above and modify it so that it becomes a function that takes the fraction of the planet covered by daisies as an **input** and returns the temperature as an **output.** (**Hint**: what does a function need? Think about function names, indentations, colons, arguments and return values...)
2. To test that it is working, call (use) the function to find the temperature when:
   a. 50% of the planet is covered by daisies (i.e., the fraction covered is 0.5).
   b. 80% of the planet is covered by daisies (i.e., the fraction covered is 0.8).

**Hint:** If in step 2 above a and b give you the same value, or you get more than one temperature value as output, then you will know something has gone wrong.

***Do not delete the %reset in the cell below*** (this makes sure your program isn't just "remembering" something you did before).
###Code
%reset -s -f
import numpy
import matplotlib.pyplot as pyplot
%matplotlib inline
# Define your function here
# Call your function here
###Output
_____no_output_____
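###Markdown
A minimal sketch of what Exercise 3a asks for - the Week 3 snippet above wrapped in a function (the name `planet_temperature` is just an illustration; yours may differ):
###Code
def planet_temperature(frac_flower):
    # Albedos of flowers and of bare soil (unitless), as in the snippet above
    albedo_flower = 0.75
    albedo_soil = 0.4
    frac_soil = 1.0 - frac_flower
    albedo = frac_soil * albedo_soil + frac_flower * albedo_flower
    S = 3700.0          # Solar flux on Daisyworld, in W/m2
    sigma = 5.67e-8     # Stefan-Boltzmann constant, in W/m2/K4
    Te = ((S*(1-albedo))/(4*sigma))**0.25
    return Te

print(planet_temperature(0.5))   # roughly 289 K
print(planet_temperature(0.8))   # roughly 269 K
###Output
_____no_output_____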
###Markdown
EXERCISE 3b
Now we will write a **function** that calculates the growth rate of daisies when the temperature of the planet is provided as an input.

Recall from Week 4 that the growth rate of the ***population*** of flowers (in units of *fraction of the planet covered by flowers / year*) is defined as:

$$1-0.005 \times (295.5-T_e)^2$$

and **can never be below zero** (this would represent daisy death, which we will deal with separately).

Good news! We already wrote most of this code in Week 4 (Exercise 5). **If we know the temperature of the planet in Kelvin (`Te`), we can calculate the growth rate (`growth_rate`)**:
###Code
# Test condition for temperature and calculate growth rate
if Te <= 281:
growth_rate=0
elif Te < 310:
growth_rate=1-0.005*(295.5-Te)**2
else:
growth_rate=0
###Output
_____no_output_____
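###Markdown
And a matching sketch for Exercise 3b: the if/elif block above wrapped in a function of temperature (illustrative only). Chaining it with the temperature sketch reproduces the comparison against the constant 0.3/year death rate used in the next exercise.
###Code
def flower_growth_rate(Te):
    # Growth rate in fraction of the planet covered per year; zero outside roughly 281-310 K
    if Te <= 281:
        growth_rate = 0
    elif Te < 310:
        growth_rate = 1 - 0.005*(295.5 - Te)**2
    else:
        growth_rate = 0
    return growth_rate

print(flower_growth_rate(289))   # about 0.79: warm enough to grow
print(flower_growth_rate(269))   # 0: too cold to grow

# Chaining the two sketches: coverage -> temperature -> growth
print(flower_growth_rate(planet_temperature(0.5)))   # about 0.76, above the 0.3/year death rate
print(flower_growth_rate(planet_temperature(0.8)))   # 0, below the death rate
###Output
_____no_output_____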
###Markdown
(Some of you might notice this is simpler than the version we used before - that is because we were working with a whole array of temperature values, but now we will just need to use one at a time.)

**For this exercise:**
1. Copy the code above and modify it so that it becomes a function that takes the temperature of the planet as an **input** and returns the growth rate as an **output.**
2. To test that it is working, call (use) the function to find the growth rate when:
   a. the temperature is 289 K
   b. the temperature is 269 K
3. At which temperature are the daisies growing faster, and why?

**Hint:** If the two different temperature inputs give you the same value, or you get more than one value as output, then you will know something has gone wrong.
###Code
# Define your function here
# Call your function here
# Answer the question here
###Output
_____no_output_____
###Markdown
EXERCISE 3c
You might remember from last week that **flowers die at a constant rate of 0.3** (fraction/year).

Using your two functions, you are now going to experiment with different values for the fraction of the planet covered by daisies. For each experiment you should:
1. First calculate the temperature of the planet.
2. Use that temperature to calculate the flower growth rate.

Note that the way we have set up these functions, you can only use ONE input value -- they won't work with arrays. So here you are just going to pick some random values for the fraction of the planet covered by flowers and use the functions you've already written to test them, one at a time.

Here's an example, using what we've already found in 3a and 3b. First, we wanted to understand what happens when we start with 0.5 of the planet covered by flowers. In 3a, we used the function defined in 3a with 0.5 as input to calculate the effective temperature. We found that the effective temperature in that scenario was about 289 K. Then in 3b we used the function defined in 3b with 289 K as input to calculate the growth rate. We found the growth rate was about 0.79. In other words, by using our two functions one after another, we found that with 0.5 fractional coverage, the temperature is 289 K and flowers are growing at a rate of 0.79.

We did the same thing starting with a fraction of 0.8. We called the first function with 0.8 as input (3a), found an effective temperature of 269 K, and used that as input to the second function (3b) to find a growth rate of 0. At 0.8 fractional coverage, the temperature is 269 K and the growth rate is 0 (i.e. flowers are not growing).

Your job is to repeat this process choosing different numbers as your starting point. We already know that at 0.5 coverage flowers are growing rapidly, and at 0.8 coverage they are not growing at all. Test out some different numbers (your choice!) to answer the following questions:
1. For what range of initial daisy coverage values are the flowers **increasing** in area (i.e. growth > death)? Why?
2. In that range (when flowers are increasing), is the temperature of the planet likely to get warmer or colder?
3. For what range of daisy coverage values are the flowers **decreasing** in area? Why? What does that mean for the temperature?

Use comments to show your thinking as you experiment. You can add more cells if you like.

**Hint**: DO NOT re-define your functions. All you need to do now is call them with different input values.
###Code
# Call your functions here.
# Extra cell to run more experiments if desired
# Extra cell to run more experiments if desired
# Extra cell to run more experiments if desired
# Extra cell to run more experiments if desired
# To add even more, use the + symbol above (underneath "File")
# Answer the questions here
# Q1
#---
#
#
#
#
# Q2
#---
#
#
#
#
# Q3
#---
#
#
#
#
###Output
_____no_output_____ |
DATA CRAWLING_CH/News Crawling ( The Scotsman).ipynb | ###Markdown
Def (News content)
###Code
def news_content(news_url,a,b):
c=[]
c1=[]
res = requests.get(news_url, headers = headers)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'lxml')
#print(soup)
for articles in soup.select(a):
if(len(articles.select(b)) > 5):
for j in range(5):
ab=articles.select(b)[j].text
c.append(ab)
print(ab)
return(c)
else:
for i in range(len(articles.select(b))):
ab1=articles.select(b)[i].text
c1.append(ab1)
print(ab1)
return(c1)
###Output
_____no_output_____
###Markdown
Def (Time)
###Code
def news_time(news_url,a):
res = requests.get(news_url, headers = headers)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'lxml')
#print(soup)
for i in range(len(soup.select('.article-meta__timestamp-item span'))):
print(soup.select('.article-meta__timestamp-item span')[i].text)
def timeS(url,a):
article_time=[]
r = requests.get(url, headers = headers)
content = r.text
#print(content)
soup = BeautifulSoup(content, 'lxml')
for i in range(len(soup.select(a))):
time=soup.select(a)[i].text
article_time.append(time)
return(article_time)
###Output
_____no_output_____
###Markdown
Def (Crawling process)
###Code
# NOTE: this function also relies on `soup` holding the parsed front page,
# which is fetched in a cell that is not included in this extract.
def News(datasetname):
client = pymongo.MongoClient(host='localhost', port=27017)
db_News=client.News
collection_News= db_News[datasetname]
article_url=[]
article_title=[]
article_content=[]
article_time=[]
for i in range(len(soup.select('.teaser-matrix__link-primary , .teaser-lead__link-primary , .teaser-splash__link-primary , .teaser__link-primary , .teaser-hero__link-primary'))):
url1=soup.select('.teaser-matrix__link-primary , .teaser-lead__link-primary , .teaser-splash__link-primary , .teaser__link-primary , .teaser-hero__link-primary')[i]['href']
article_url.append(soup.select('.teaser-matrix__link-primary , .teaser-lead__link-primary , .teaser-splash__link-primary , .teaser__link-primary , .teaser-hero__link-primary')[i]['href'])
r = requests.get(url1, headers = headers)
content = r.text
soup1 = BeautifulSoup(content, 'lxml')
title=soup1.select('.article-header__title')
article_time.append(timeS(url1,'.article-meta__timestamp-item span'))
if (len(title)>0):
title=soup1.select('.article-header__title')[0].text
article_title.append(title)
article_content.append(news_content(url1,'.article','p'))
for i in range(len(article_url)):
d = {'article_time' :article_time[i],'article_title': article_title[i], 'article_content' :article_content[i], 'article_url' : article_url[i]}
result=collection_News.insert_one(d)
print(result)
return True
###Output
_____no_output_____
###Markdown
Start the crawling process with the database's name
###Code
News('TheScotsman1210_5')
###Output
Like the crucifix earring hanging from his left ear lobe, Claudio Caniggia is jangling. Or at least his nerves are.
It’s a magnificently surreal scene. A visibly agitated legend of the world game is heading out the theatre door for a smoke. But this is not the world stage, the sort he has graced in the past. This is a stage in Dundee – specifically, the city’s west end, where, following his own appearance, a production of Dick Whittington is due to begin a run.
Caniggia’s already made a request for the pre-show music to be turned up rather than down in an effort to reduce the anxiety he says he is feeling before facing several hundred Dundee fans at a long awaited An Audience With A Global Superstar event.
It seems this superstar functions best amid din and discord.
Perhaps this should be expected of someone once stationed at the Bombonera, home to Boca Juniors and somewhere reckoned to be among the most raucous grounds in world football. Unusually, Caniggia once also turned out every second weekend at River Plate’s Estadio Monumental, another must-visit stadium known for its ear-splitting volume and the inhabitants’ fierce dislike of Boca Juniors. This was displayed in shameful fashion when the Boca Juniors team bus was attacked before the second leg of the Copa Libertadores final at the end of last month.
Edinburgh did the needful last night, albeit opponents Newcastle had one eye on this match and the other on their precarious perch at the foot of the Premiership.
The contest offered any number of match-ups. If the Fijians were having a carrying contest Edinburgh’s Bill Mata beat Tevita Cavubati, and his pass out the back of his hand for Blair Kinghorn’s late try was Michael Jordan-esque.
Home No 9 Henry Pyrgos probably shaded the scrum-half kicking battle, threading a beauty into the south west corner even if he did overcook a box kick a little later. And the front row won their own battle within the wider war, comprehensively, even if they did wait until Edinburgh went 10-7 behind on the scoreboard before making it count with the first scrum penalty of the match. In the second half it was a penalty try directly from a set scrum that finally put some distance between Edinburgh and the visitors, who clung on for longer than anyone expected.
But the most intriguing head-to-head on show was the contest between the two opensides. Hamish Watson is Scotland’s acknowledged starter, Gary Graham the man who would be king.
This wasn’t comparing apples with apples, more apples with apricots.
Kilmarnock go into Saturday’s clash with Celtic top of the league. Joel Sked looks at Steve Clarke’s influence and how they achieved such a feat.
• READ MORE: Celtic boss Brendan Rodgers: No reason why Kilmarnock can’t win league title
“The past few years we’ve suffered, crowds have dwindled through the years, always fighting relegation has only brought along more fears.”
So the song goes around Rugby Park or when the passionate and noisy Kilmarnock travelling support roll into town. It suggests a dispirited and bleak future as the fans revel in fatalism. Except it is the first three lines of a more buoyant and prosperous chant about a forward-looking and upbeat football club.
Suffered. Dwindled. Fears. Three words that set the scene. Three lines to tell a story. A story of a flailing club. One which was lacking in direction and imagination. One without an identity.
Sorry, we're having problems with our video player at the moment, but are working to fix it as soon as we can
Waiting for Video...
A list of the poorest refereeing decisions by SFA officials to befall top flight teams so far this season
READ MORE - Chris Sutton: The standard of refereeing in Scotland is ‘rotten’
Before we begin it’s worth pointing out that the worst decision across the whole of Scottish football this season has already been decided. Even though there are over six months left, it has got to be No.1 because it’s the worst decision we’ve seen for many seasons, maybe even since Les Mottram’s infamous “ghost goal” call at Firhill way back in the early 90s. Which is ironic, because it occurred at the same ground, and at the same end!
Sorry, we're having problems with our video player at the moment, but are working to fix it as soon as we can
Waiting for Video...
Ryan Christie was named November’s player of the month in the Ladbrokes Premiership. Joel Sked looks at the player’s transformation into a key player at Celtic.
• READ MORE: Ryan Christie savouring every moment of career turnaround at Celtic
One of the key turning points in Stuart Armstrong’s career can be traced back to a day in October 2016. The 23rd of October to be precise.
Sorry, we're having problems with our video player at the moment, but are working to fix it as soon as we can
Waiting for Video...
Amber Rudd and her colleagues are deluding themselves with their Brexit plan B, writes Lesley Riddoch
Scots stand today, surveying the collapse of British governance, like folk on lifeboats watching the mighty Titanic start to slip beneath the waves.
Most of us do not vote Conservative and never will. Most of us voted to Remain in the EU in 2016. And now most of us would opt for independence in the EU if a self-harming Brexit goes ahead. And no matter which withdrawal deal is advocated by desperate members of both main political parties south of the Border, each one is more damaging than simply staying in the EU.
Sorry, we're having problems with our video player at the moment, but are working to fix it as soon as we can
Waiting for Video...
There is far more to worry about in the Prime Minister’s plans than just Northern Ireland, writes Brian Monteith
It does look likely that the vote on the Prime Minister’s Withdrawal Agreement will go ahead tomorrow and will be heavily defeated.
There is a large exercise in expectation management emanating from Downing Street, with the possible scale of a defeat being talked up so that if it happens to be less than a hundred it can then be portrayed as not as bad as it might have been and this will justify Theresa May carrying on to try and get some concessions from the EU.
If all goes to plan, and the Prime Minister does not have another rush of blood to the head, I shall be rising to my feet this evening in the House of Commons to explain how I shall be voting on the EU withdrawal deal, and why, writes Liberal Democrat MP Christine Jardine.
I think that, in this most momentous decision for my generation, it is also appropriate to explain it here.
By that I do not mean the party position or the politics of it, but how, as MP for Edinburgh West, I have made my decision.
The debate over our future relationship with the European Union has dominated my elected career – it was the Prime Minister’s decision to go to the country over Brexit which led to my election.
In fact, I was elected on a platform of keeping Scotland in the UK, the UK at the heart of Europe and calling for the public to have the final say on whatever deal was negotiated.
Sorry, we're having problems with our video player at the moment, but are working to fix it as soon as we can
Waiting for Video...
In the foreign country of early 2016, those campaigning in favour of the United Kingdom’s departure from the European Union spoke confidently about how painless the process would be. There was no downside to Brexit, they said, only dividend after dividend.
Along with outlandish claims about funding for the NHS, and simplistic slogans about “taking back control”, Brexiteers insisted the UK’s former partners in the EU would bend over backwards to agree a deal that was good for Britain. Two and a half years after the UK voted by a small margin in favour of Brexit, the words of Eurosceptic campaigners ring hollow.
The EU, unsurprisingly, has put the interests of its remaining members before those of the UK during lengthy and often fractious negotiations.
Boots Riley wraps a devastating critique of capitalism and racism in the US in a funny and entertaining cautionary tale, while Robert Redford bows out in style in his final screen role
Sorry To Bother You (15) *****
The Old Man & the Gun (12A) ****
Released in the US without too much fanfare this past summer, musician-turned-filmmaker Boots Riley’s debut feature Sorry To Bother You has become a genuine phenomenon; a punky Sundance outlier transformed by word-of-mouth into the buzziest movie of the year. That’s appropriate given one of the many interlinked themes in this wild and audacious satirical side-swipe at racial politics and unfettered capitalism in 21st century America is the power of grassroots movements. But it’s also a mark of Riley’s sheer narrative chutzpah that the film never feels didactic as it crams a semester’s worth of radical politics into its swift 100-minute running time.
Starting as a wry take-down of prejudice in the work place, the film introduces us to its broke black protagonist, Cassius “Cash” Green (Lakeith Stanfield), as he’s living in his uncle’s garage with his artist/activist girlfriend Detroit (Creed’s Tessa Thompson). Politically aware, but not politically active, he’s the sort of feckless millennial who’ll push back against demands for rent with half-hearted rhetoric about greedy landlords, even though the person demanding it is a charitable family member going above and beyond the call of duty by tolerating his post-adolescent existential funk. But with said uncle (played by Terry Crews) also facing eviction, Cash is soon forced to get a job. This being a heightened alternate version of contemporary Oakland – where opportunities for non-coders are thin on the ground and gentrification is making it too expensive to live – he ends up working in a call centre that will literally hire anyone who can operate a phone and stick to the script.
Rave reviews for Richard Madden’s lead performance in BBC hit series Bodyguard have secured the Scottish actor his first Golden Globe nomination.
Madden, 32, received a nod in the best actor in a TV drama category. Bodyguard was also nominated for best drama series where it will go up against fellow BBC series Killing Eve.
The recognition comes with some of Britain’s biggest stars set to go head to head for gongs.
Olivia Colman and Emily Blunt were both nominated in the best actress in a comedy or musical category for their turns in The Favourite and Mary Poppins Returns respectively.
Claire Foy will compete against Rachel Weisz in the supporting actress category for their turns in First Man and The Favourite, where Weisz will also take on co-star Emma Stone.
George RR Martin has described Fire And Blood as his equivalent of JRR Tolkien’s The Silmarillion. It is a backstory, telling the events that happened in Westeros 300 years before the events in A Song Of Ice And Fire. But there are significant differences. Tolkien’s work was written before The Lord Of The Rings, submitted for publication, rejected and published posthumously. In addition, it dealt with god-like entities and creation myths. The rejection of The Silmarillion (“too Celtic” according to the publisher; actually ersatz Norse) spurred him into telling a different story in the world he had built. Martin’s work is an extension to the stories already told in various spin-off books – The World Of Ice And Fire, the various short stories in Rogues and Dangerous Women. It is as if, having built his sandpit and played with his toys for a time, he now has set himself to engrave, elaborately, the railway-sleepers of the framework. Most fans will pounce on the book; more would rather they have The Winds Of Winter. But with the television series having outstripped the novels, and with HBO announcing a prequel series, at least they will have a good deal of material with which to work.
And it is work. With the novels comprising A Song Of Ice And Fire, Martin cleverly used a tightly focalised form, with the reader perched on the shoulder of different characters. It meant that readers developed an emotional attachment to what happened to Ned Stark, or Samwell Tarly, or Brienne of Tarth. Fire And Blood tacks in the opposite direction, and the best parts of it are about the history of the Targaryen dynasty. But could any reader feel any twinge of sympathy with sentences like “The lords and knights who came were largely westermen and riverlords; the Lords Tarbeck, Roote, Vance, Charlton, Frey, Paege, Parren, Farman, and Westerlings were among them too, together with Lord Corbray of the Vale, the Bastard of Barrowtown, and the fourth son of the Lord of Griffin’s Roost”. Tell me why I should care about what happens to any of them.
The style affects a cod-archaic: “oft”, “ne’er”, “dastards”, “pot shops”, “member”. (Yes, despite some speeches in favour of female emancipation, there is still a great deal of rape, brothels, women dying in childbirth and suchlike). Many characters will intone in echoes of the King James Bible. Parts of it seem like rather juvenile in-jokes – calling a character Ser Kermit Tully and then having someone else comment that he is “as green as spring grass” is not perhaps the book’s most serious or interesting moment. A reference to “William Stackspear” is perhaps supposed to be funny in a book which features, among other incidents, someone being drowned in a vat of ale. I should add that the book has many illustrations, and they all look like the bad tattoos a second division footballer would have of his new paramour.
The most successful parts of it are the sometimes comically conflicting accounts of previous histories; one by the pious Archmaester Munkun, the other by a court fool called Mushroom. There are two plot lines across this sprawling book which stand out, and neither involves dragons. The first is a recurring obsession with religious fanaticism; time and again politics is pitted against theology. The second is a section towards the end when a foreign princess invites her relatives to set up, in effect, the first Ponzi scheme in Westeros.
Towards the end I realised the game in which I was. This is a work of self-homage, and a sop to producers. We have a Stark coming to King’s Landing to solve a crime (Cregan, this time, not Ned). We have, in Mushroom, a cynical, lascivious, drunken and yet perceptive dwarf (Tyrion checked off). There is a brave, boisterous dragon-riding girl (that takes care of Arya and Daenarys). There is a sinister Lannister, wearing a silk mask to disguise his injuries, who is Hand of the King (can we call Charles Dance to see if he fancies a reprise? If not, at least we have a mask). One-handed knight – yup. Kings poisoned and royals who defenestrate themselves – all good to go. When I read the sentence that one of the supposed sources claims is “plausibly… the product of several hands for the style of the prose varies greatly from episode to episode”, I actually laughed aloud. But not in a good way. Readers keen to find out more about the mythology – the White Walkers, the Children of the Forest, what lies beyond the Sunset Sea, the Doom of Valyria – might find themselves disappointed.
Eduardo Paolozzi and Andy Warhol were contemporaries. Born in 1928, Warhol was just four years the junior. Both were sons of first generation immigrants, but though that doesn’t seem to be refelcted in Warhol’s work, it was definitive in Paolozzi’s. One of his major works, Manuscript of Monte Cassino, is, for instance, a profound rumination on time, displacement and the dislocation of war. It is also monumental. There is nothing remotely comparable in Warhol’s art which seems instead determinedly to reflect the transience and immediacy of contemporary culture. That doesn’t make it any less significant, but it does suggest how much difference there is between the two. Nevertheless the exhibition Andy Warhol and Eduardo Paolozzi: I want to be a machine starts from the proposition that they had a lot in common. Elaborating this idea, it is certainly a rich show of the work of both.
Andy Warhol and Eduardo Paolozzi – I want to be a machine, Scottish National Gallery of Modern Art, Edinburgh ****
There are unfamiliar early drawings by Warhol, for instance, with the smooth and practised lines of fashion plate design. Paolozzi’s early drawings, also well represented, are in contrast fierce and wild, building on the inspiration of Picasso and already using collage, the technique that defines him that he adopted from the Surrealists. He found in it a metaphor for the fragmented experience of the modern world. Warhol did something similar by constantly using the camera as the basis of his work, either translating a photographic image into a print or drawing, or just as it is, but either way stressing its detachment.
They did once exhibit together. It was in 1968 with three others in the Four Seasons Restaurant in New York. Both are also seen as originators of Pop Art, although Paolozzi always dismissed that suggestion with a characteristically derisive snort. He saw Pop Art as trivial. He may have been right. Nevertheless, Pop Art did evolve from an insight that, quite independently, he and Warhol shared: the old boundaries that separate Fine Art from all the other images we make are meaningless. In a kind of visual democracy, all our imagery is significant; art has to embrace that to be relevant.
For Paolozzi, however, the modern was still linked to the past. In several works here he makes the point by collaging diagrams of modern machinery onto images of classical sculpture. In Athena Lemnia von Phidias, for example, the cylinders and crank shaft of an engine are superimposed on a Greek Athena.
Scottish gin has never been more on trend, here are eight of the hottest new Scottish gins we recommend checking out next time you fancy a G and T.
With producers popping up all over the country from the Shetlands to the Borders, there’s never been a better time to be a gin fan.
Whether you’re still a regular club-goer or prefer a quiet night in these days, you’re sure to have fond memories of some of Glasgow’s legendary nightclubs.
From notorious boozers to flashy discos, Glasgow’s had it all. How many of these long-gone nightclubs do you remember?
Telecoms giant BT has appointed Jane Wood to lead its policy and public affairs teams in the UK devolved nations, English regions and Ireland.
Wood, who joins the group from Business in the Community, will also lead the public facing functions of BT Group in Scotland and sit on the BT Scotland board.
The appointment follows the retirement of Brendan Dick, formerly managing director of UK nations and director of BT Scotland. Dick is now chairman of the Openreach board in Scotland.
A former head of corporate affairs at Wallgreen Boots Alliance, Wood has also worked as a ministerial advisor on town centre regeneration, welfare reform and child poverty. She has championed small businesses and the role of social enterprises in communities.
Wood said: “It’s exciting to be joining BT at a time when digital society and innovation is at the core of everything we do, whether that be business, our public services or at home.
Sorry, we're having problems with our video player at the moment, but are working to fix it as soon as we can
Waiting for Video...
The Battle of Glenlivet was fought deep in Speyside less than a year after a decree was passed that Catholics must either give up their faith or emigrate.
The engagement was fought between Catholic forces led by George Gordon, 1st Marquess of Huntly, and Frances Hay, 9th Earl of Erroll against the Protestant army of Archibald Campbell, 7th Earl of Argyll.
READ MORE: Which was the most feared Highland clan?
Sorry, we're having problems with our video player at the moment, but are working to fix it as soon as we can
Waiting for Video...
A blockbuster historical drama telling the story of Scotland’s most famous king has received lukewarm reviews following its gala premiere.
Outlaw King, which tells the story of how Robert the Bruce claimed the Scottish throne, is one of the most-anticipated original productions by American TV streaming giant Netflix.
Starring Californian actor Chris Pine as the heroic king, the big-budget film was shot on location at a variety of Scottish landmarks including Mugdock country park, Linlithgow Palace and Blackness Castle.
It was a landmark property occupying a prime city centre location until a devastating fire reduced it to little more than its sandstone facade.
Recent events in Belfast, which saw the historic Bank Buildings go up in flames, bore more than a resemblance to the blaze which gutted the Mackintosh building at Glasgow School of Art in June.
The intensity of the fire, the shocked public reaction, and a safety cordon erected around the charred remains.
Now fire experts at a Scottish university have pointed to the “uncanny similarities” between the blazes in Glasgow, Belfast, and at the former Littlewoods building in Liverpool which was damaged last week.
In each case, heritage properties under refurbishment - or due to be redeveloped - suffered catastrophic damage sufficient to lead to doubt about the ability to save and preserve them.
Sorry, we're having problems with our video player at the moment, but are working to fix it as soon as we can
Waiting for Video...
Cherries target key Celtic player, Rangers boss in dig at SPFL delegate and which Hoops midfielder has turned down a new deal?
Cherries want McGregor
Bournemouth have earmarked Callum McGregor as a priorty target in the January transfer window, according to reports.
Fashion house Ted Baker has announced its embattled boss Ray Kelvin will take a voluntary leave of absence following fresh allegations about his conduct.
The business, which has its roots in Scotland, said its board had been made aware of “further serious allegations” about the behaviour of its founder and chief executive.
Kelvin had agreed “for the benefit of the business and the people who work in it” to leave his role while the allegations are investigated, said the firm.
This comes after the under-fire retailer yesterday appointed law firm Herbert Smith Freehills to conduct an independent external investigation into harassment allegations levelled against Kelvin, which had led to a petition calling on the fashion brand to take action and accused the executive of enforcing a “hugging” culture.
Kelvin is also accused of asking young female staff to “sit on his knee, cuddle him or let him massage their ears”.
Sorry, we're having problems with our video player at the moment, but are working to fix it as soon as we can
Waiting for Video...
Labour will accuse Theresa May of putting the UK at risk as debate on the Prime Minister’s Brexit deal in the House of Commons turns to the impact on the Union.
Shadow Scottish secretary Lesley Laird will accuse David Mundell of abandoning his “red lines” over the status of Northern Ireland under the Prime Minister’s deal, saying it “risks undermining the integrity of the UK”.
Her comments follow publication of a poll showing that most Scottish voters would prefer independence to either a no-deal Brexit or Mrs May’s deal, although most people said they would back the Union when asked how they would vote in a fresh independence referendum.
Sorry, we're having problems with our video player at the moment, but are working to fix it as soon as we can
Waiting for Video...
A shop owner has vowed to never employ another rail commuter because a key member of his staff has been repeatedly delayed by ScotRail disruption.
It comes as passengers today waited to see whether the train operator’s biggest timetable shake-up for 20 years would bring any improvement.
There have been widespread cancellations for weeks as staff were trained to operate new services.
Sorry, we're having problems with our video player at the moment, but are working to fix it as soon as we can
Waiting for Video...
Amber Rudd and her colleagues are deluding themselves with their Brexit plan B, writes Lesley Riddoch
Scots stand today, surveying the collapse of British governance, like folk on lifeboats watching the mighty Titanic start to slip beneath the waves.
Most of us do not vote Conservative and never will. Most of us voted to Remain in the EU in 2016. And now most of us would opt for independence in the EU if a self-harming Brexit goes ahead. And no matter which withdrawal deal is advocated by desperate members of both main political parties south of the Border, each one is more damaging than simply staying in the EU.
Sorry, we're having problems with our video player at the moment, but are working to fix it as soon as we can
Waiting for Video...
Royal Bank of Scotland has launched an investigation into bullying following claims from a whistleblower that harassment is rife at the state-backed lender.
Staff are subjected to persistent intimidation, threats and humiliation amid a “culture of bullying”, according to private emails seen by the Press Association.
The allegations are linked to the troubled Amethyst project, which has become the focus of a wide-ranging investigation by RBS, and involve two senior managers. It is claimed that workers on the project faced attacks based on personal intimidation and others in which they were forced to change the outcomes of cases to manipulate figures sent to the Financial Conduct Authority.
The composer behind the Downton Abbey soundtrack has said he had to “become English” in order to craft the score that evokes a lost time.
John Lunn, who is from Glasgow, has said the music accompanying the dramas of Downton resonates with audiences who are looking for a faded era.
The composer is set to showcase the soundtrack at a concert next year, held in the grounds of Highclere Castle, Hampshire, which served as the stately home in Downton.
He has agreed that the appeal of the show is tied to the wish for a lost epoch which the drama series conjured up.
Lunn said: “I did listen to music of the time, but music was so overwrought then. It’s not that I don’t like it, I do like Elgar and Vaughan Williams.
Sorry, we're having problems with our video player at the moment, but are working to fix it as soon as we can
Waiting for Video...
There is far more to worry about in the Prime Minister’s plans than just Northern Ireland, writes Brian Monteith
It does look likely that the vote on the Prime Minister’s Withdrawal Agreement will go ahead tomorrow and will be heavily defeated.
There is a large exercise in expectation management emanating from Downing Street, with the possible scale of a defeat being talked up so that if it happens to be less than a hundred it can then be portrayed as not as bad as it might have been and this will justify Theresa May carrying on to try and get some concessions from the EU.
Sorry, we're having problems with our video player at the moment, but are working to fix it as soon as we can
Waiting for Video...
Scotland’s First Minister branded Tory politicians “idiots” as she called on Scots to support independence.
Following an article in the Sunday Telegraph where allies of Boris Johnston likened him to Aslan in CS Lewis’s Narnia series ready to end he rule of the ice queen Theresa May, Nicola Sturgeon took to Twitter, to say Scottish voters could “opt to stay in the real world.”
She wrote: “It’s hard to know whether to laugh or cry. These idiots are actually revelling in the idea that they’re characters in a fantasy world. Scotland, we don’t have to stay in Narnia with them - we can opt to stay in the real world with #independence.”
Theresa May must begin no-deal preparations in earnest if she loses tomorrow’s vote on her Brexit deal, Conservative MPs who back the Prime Minister have said.
It comes as Mrs May was told by leadership rivals that they only way to save her job in the event of a likely defeat is to return to Brussels and demand the EU abandon the Irish border backstop.
Downing Street denied reports that tomorrow’s vote could be pushed back, and the Brexit Secretary Steven Barclay said the Prime Minister would not be going to Brussels before a summit scheduled for the end of the week despite the pressure to demand a renegotiation.
Boris Johnson said Mrs May can stay on as prime minister if her Brexit deal is defeated tomorrow – as long as she returns to Brussels and demands the EU abandons the Irish border backstop.
The former foreign secretary claimed the backstop measure left the UK open to “blackmail” by Brussels and was a “diabolical negotiating position” for a future trade deal.
Breaking club records has come naturally to Brendan Rodgers since he became Celtic’s manager in the summer of 2016, with an Invincibles’ campaign being followed by a double treble and racking up 12 derbies against Rangers without defeat.
Last weekend he became the first manager to bring seven consecutive trophies to Parkhead and, on Thursday, he has the opportunity to add becoming the first Celtic side to reach double figures in points in a European group stage to his list of achievements.
Succeeding in that task would also guarantee involvement in continental competition after Christmas. Having beaten Rosenborg home and away and prevailed against RB Leipzig in Glasgow, a draw against Red Bull Salzburg on Thursday will be sufficient to take them into the round of 32.
Given the quality of the opposition from Austria and the Bundesliga, Rodgers would regard that as a creditable outcome.
“I’d have taken this scenario at the start of the group, absolutely,” he said. “All you can ask for is to have it in your own hands. Thankfully, going into the last game, we have that. If we can come through, it would be absolutely brilliant for us. We’ve faced three good sides in the group – two champions of their country and a top Bundesliga side.
There is an ability to see the bigger picture about a European campaign that for Rangers has effectively come down to a single frame through the viewing lens of the club’s assistant manager Gary McAllister.
Although thoughts are currently focussed on the necessity not to lose further ground in the Premiership today at Dundee following the midweek defeat at home by Aberdeen, by late this afternoon attentions will be firmly trained on continental competition.
Victory over Rapid Vienna on Thursday is the requirement for Steven Gerrard’s side to claim a place in the last 32 of the Europa League. If that is achieved, they would be joined by Celtic in the event of Brendan Rodgers’ men avoiding defeat at home by Salzburg on the same evening.
Scotland hasn’t had two representatives in Europe post-Christmas since Celtic and Rangers were joined at that stage by Aberdeen in 2008. The efforts of the two Glasgow clubs in harvesting coefficient points this season – the total earned in this campaign greater than in any since that high point a decade ago – have already moved Scotland up five places in the Uefa rankings to sit 20th. That matters to 57-times capped Scotland international McAllister as he ponders a final Group G encounter that an entirely recast team have worked wonders to turn into an all-or-nothing clash.
“What we’ve created is a cup final, isn’t it? There’s no other way to look at it and that’s going to be the approach,” said the 53-year-old. “It’s big for the club, the kudos that comes with it, for the fans as well and thinking ahead to the coefficient as well for Scottish football will be big. If both of us could get through, it would be a big night for Scottish football, no doubt about that and it’s a massive game. I don’t know how many tickets we’ve got, is it a couple of thousand, but I think there might be more there. It will be a great game to be involved in as a player.”
Two people have been arrested for allegedly racially abusing a Motherwell player ahead of the Fir Park side’s Ladbrokes Premiership match with Hearts at Tynecastle.
Visiting player Christian Mbulu, who was an unused substitute during the match, was targeted as the teams performed their pre-match warm-up routines on the pitch.
Video footage which shows as many as 20 fans in the stadium’s main stand shouting comments at the 22-year-old has been handed over to Police Scotland.
Scotland Women will face England in their debut appearance at the World Cup in France next year.
Shelley Kerr’s side are making their first appearance at a Women’s World Cup, and will face the Auld Enemy in their opening game in Nice on June 9.
READ MORE: Scotland secure place at 2019 World Cup
The two British sides will be joined in Group D by 2011 champions Japan, who defeated England in the semi-finals of the previous tournament in 2015 before losing to the USA in the final.
Argentina, who qualified in 2003 and 2007 but have lost all of their games in the competition, are also in the group.
A competition that provides grants to accelerate growth in Scotland’s start-ups has awarded a record £1.8 million to two dozen promising businesses.
The winners of the 13th round of the Scottish Edge contest were confirmed yesterday with Edinburgh charity Turing Trust taking home the newly introduced Social Edge Award.
Scottish Edge recently announced a £1m funding increase from Scottish Enterprise and 24 fledgling businesses have now reaped the benefits with each taking home up to a six-figure share of the bumper prize pot.
Meanwhile, £200,000 has been set aside for two additional prizes of £100,000 each, which will be awarded in March to existing Edge winners seeking funding to scale-up or grow rapidly globally.
The winning businesses came from 39 finalists which pitched their business ideas to an expert panel of judges, chaired by Simon Hannah of Filshill and Kerry Sharp of the Scottish Investment Bank.
Edrington, the spirits group behind whiskies including Highland Park and The Macallan, today said chief executive Ian Curle would retire next year after holding the post for 15 years.
His successor has been named as Scott McCroskie – currently a member of the Edrington board and managing director of The Macallan brand.
Curle joined the business in 1986 through Edrington’s subsidiary Lang Brothers, becoming group operations director in 1997 before succeeding Sir Ian Good as chief executive in 2004. He has been chairman of the North British Distillery since 2002, is a former chairman of the Scotch Whisky Association, and is an advisor to the UK Board of Trade.
McCroskie will become chief executive with effect from 1 April.
Crawford Gillies, chairman of Edrington, said: “Ian has led Edrington to become one of the world’s leading international premium spirit companies. On behalf of the board of directors, I want to thank him for his 32 years of outstanding service, and particularly the 15 years in which he has been a wise and inspiring chief executive.”
Scotland’s brewing sector has the potential become a £1 billion industry by 2030 by building on its “precious” global reputation, says a report released today by the national agency for food and drink.
Trade association Scotland Food & Drink has unveiled an ambitious strategy which aims to double the brewing sector’s annual contribution to the Scottish economy by championing local beer over imported drinks and focusing on quality.
The Brewing Up A Storm report uses the findings by the Brewing Industry Leadership Group, a body formed this year which has been tasked with identifying the challenges of growing the Scottish brewing sector and supply chain.
The new strategy aims to establish new, high value employment opportunities, produce more Scottish beer and increase its value by making Scotland-brewed beer “the most desirable in the world”.
It forms part of the trade association’s wider goal to make the Scottish food and drink sector worth more than £30bn annually by 2030.
A Chinese state-owned engineering services firm has opened its first UK subsidiary, in Dundee, as it looks to tap into the multi-billion market for decommissioning of oil and gas infrastructure in the North Sea.
The China Ocean Engineering Shanghai Co (COES) has invested £500,000 in offices on Dundee’s waterfront and will initially employ up to 15 members of staff.
The operation will focus on both offshore decommissioning and renewables in the UK and Europe. The decommissioning of North Sea platforms and pipelines is estimated to be worth some £17.6 billion for companies in the sector. Up to 100 platforms and 7,500 kilometres of pipelines will need to be completely or partially removed over the next decade.
COES has invested around £400 million to develop offshore construction engineering vessels and equipment to be able to work on some of the largest contracts.
COES Caledonia (UK)’s director general, Norman McLennan, said: “There’s a significant volume of offshore decommissioning and renewables activity projected over the next decade and we’re looking forward to supporting the offshore sector from Dundee.”
The Ladbrokes Premiership has been entertaining and exciting from the opening day, with surprises aplenty, but how full have the stands been to witness the excitement?
Courtesy of data from Transfermarkt, we look at the average attendances and what percentage of the 12 top-flight stadiums have been full so far. Scroll through the pages, some clubs might surprise you...
Figures highlighted by Kieran Maguire, a lecturer in football finance at the University of Liverpool, show British football clubs owe more than £3.5billion to banks and owners.
Rangers lead the way in Scotland with borrowings of £22.5million as per their most recent set of accounts for the year 2017-2018.
It is in excess of £10million more than rivals Celtic whose borrowings are £10.8million.
These figures pale in comparison to Premier League clubs with Manchester United's nearly £500million. Brighton, Liverpool and Arsenal are all above £200million.
Elsewhere in Scotland Hibs owe £4million, while Motherwell (£1.9m), Dundee United (£1.9m) and Dundee (£1.3m) come in at over £1million.
Rangers face a rerun of the Billy Gilmour saga after rising star Dapo Mebude turned down the offer of a new deal at Ibrox.
The 17-year-old rejected the chance to extend his stay with the Gers and it’s understood the Light Blues fear losing the London-born forward to an English club.
Scotland Under-21 star Gilmour quit Rangers last year and joined Chelsea, and a number of English clubs are believed to be keeping tabs on Mebude’s situation.
A Celtic fan has posted a video on social media in which he confronts Rangers striker Alfredo Morelos in Glasgow city centre.
In the short clip, which has been shared on Twitter, the fan can be heard to say: “Look who it is... Alfredo. You got the time, Alfredo?”
The Colombian forward ignores the fan and walks down the street, but the camera pans round to show a branch of Western Union, with the Hoops supporter adding: “Must have been in there cashing a giro off Dave King.”
A man has died after falling from the top floor of one of Glasgow’s busiest shopping centres.
The incident happened at Buchanan Galleries about 4:20pm today.
It is understood the man plummeted from the area at the top of the centre’s elevators.
A caller from Edinburgh has gone viral after viewers noticed that he had used the ‘c word’ in a phone call placed to mid-morning cookery show Saturday Kitchen on the BBC.
‘Dan from Edinburgh’ was introduced as one of the viewers of the weekly show who wanted advice from a panel of chefs on how best to cook Christmas Dinner.
With an unmistakable Edinburgh accent, Dan told the hosts: “You ken what it’s like this time of year, every c***s on about parsnips n aw that, so wits a barry side for Christmas?”
Host Matt Tebbutt seemed to pick up on the profanity despite the thick accent as he looked as if he was struggling to hold back laughter as Dan posed his question.
Ever the pro, however, he quickly moved on and asked the resident chefs their thoughts on parsnips.
The European Court of Justice has ruled that the UK can unilaterally stop Brexit by revoking its Article 50 notification.
Judges in Luxembourg decided that the UK can stay in the EU "under terms that are unchanged" if it decides to change its mind on Brexit "through a democratic process".
It comes on the eve of a crucial vote in the House of Commons on Theresa May's Brexit deal. Scottish politicians who brought the case said "a bright light has switched on above an 'EXIT' sign" that meant a second EU referendum was "closer than ever before".
IT’S an opening night at the Traverse; and the great and good of Scottish theatre gather to see a new work by one of the leaders of the latest generation of Scottish playwrights.
Traverse Theatre, Edinburgh ****
In Kieran Hurley’s new play Mouthpiece, though, that’s not only what’s happening in real life, as Orla O’Loughlin delivers her final production as artistic director of the Traverse; it’s also what’s happening on stage, as this astonishing 90-minute two-handed drama powers to its riveting and challenging climax.
In a sense, the story of Hurley’s play is a simple one: forty-something writer Libby has returned home to live with her uncaring mother in Edinburgh, after her playwriting career in London dwindles and fails.
On Salisbury Crags, contemplating suicide, she encounters Declan, a 17-year-old refugee from a deprived city housing estate, who pulls her back from the brink, and is soon showing her his remarkable drawings. A strange friendship blossoms, and Libby begins to write again; but since Declan is her subject, and her new play composed largely of his words, an increasingly tense and desperate struggle ensues over this middle-class appropriation of working-class experience, culminating in a devastating showdown at the Traverse.
<pymongo.results.InsertOneResult object at 0x0000026B344A4488>
<pymongo.results.InsertOneResult object at 0x0000026B344A4688>
<pymongo.results.InsertOneResult object at 0x0000026B344A4D88>
<pymongo.results.InsertOneResult object at 0x0000026B344A4908>
|
jupyter-notebooks/TINC diskbuffer.ipynb | ###Markdown
This notebook is meant to be used in conjunction with the diskbuffer.cpp example in TINC.
###Code
from tinc import *
TincVersion()
tclient = TincClient()
[db.id for db in tclient.disk_buffers]
jsonBuffer = tclient.get_disk_buffer('json')
jsonBuffer
ncBuffer = tclient.get_disk_buffer('nc')
ncBuffer
imageBuffer = tclient.get_disk_buffer('image')
imageBuffer
###Output
_____no_output_____ |
Notebooks/TripletLoss.ipynb | ###Markdown
Applying different strategies of triplet loss
###Code
LOCAL = False
%load_ext autoreload
%autoreload 2
%load_ext skip_cell
%%skip $LOCAL
#Mounting the drive
import zipfile
from google.colab import drive
drive.mount('/content/drive/')
# %%skip $LOCAL
!cp -a "/content/drive/My Drive/triplets/" .
###Output
_____no_output_____
###Markdown
Setting up tensorboard for PyTorch in Colab Imports
###Code
import copy
import random
import time
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import numpy as np
from sklearn.manifold import TSNE
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
import torch
from torch import optim
from torch import nn
from torch.utils.data import random_split
from torch.utils.data.sampler import BatchSampler
from torchvision import transforms
from torchvision.datasets import SVHN
from torchvision.models import resnet18
from typing import Union
from PIL import Image
from triplets.datasets import TripletSVHN
from triplets.losses import TripletLoss, TripletSoftLoss, BatchAllTripletLoss, BatchHardTripletLoss, OnlineTripletLoss
from triplets.metrics import mean_average_precision
from triplets.nets import TripletNet
from triplets.train import train
from triplets.extractor import FeatureExtractor
from triplets.samplers import BalancedBatchSampler
from triplets.selectors import AllTripletSelector,HardestNegativeTripletSelector, RandomNegativeTripletSelector, \
SemihardNegativeTripletSelector # Strategies for selecting triplets within a minibatch
from triplets.utils import freeze_layers
from triplets.visualisation import plot_grad_flow
n_features = 512
n_classes = 10
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
SEED = 100
validation_split = 0.2
shuffle_dataset = True
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)
dataset = SVHN(root = 'data/', download=True, split='train')
train_size = int(0.8*len(dataset))
valid_size = len(dataset) - train_size
dataset_train, dataset_valid = random_split(dataset, [train_size, valid_size])
dataset_test = SVHN(root = 'data/', download=True, split='test');
###Output
Using downloaded and verified file: data/train_32x32.mat
Using downloaded and verified file: data/test_32x32.mat
###Markdown
Setting parameters for training model with triplet loss
###Code
batch_size = 32
num_triplets = 1
epochs = 20
model_base = resnet18(pretrained=True)
model_base.eval();
###Output
_____no_output_____
###Markdown
Defining the extractor using a custom class to extract features from the last convolutional layer of the pretrained model.
###Code
extractor = FeatureExtractor(model=model_base, n_remove_layers=1, n_features=n_features, device=device)
extracted_resnet = extractor.prepare_model()
#Freezing all but two last layers
extracted_resnet = freeze_layers(extracted_resnet, 2)
preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)])
###Output
_____no_output_____
###Markdown
Training with softmax triplet loss Taken from the paper titled "DEEP METRIC LEARNING USING TRIPLET NETWORK" https://arxiv.org/pdf/1412.6622.pdf
###Code
triplet_train= TripletSVHN(dataset, dataset_train.indices, dataset_valid.indices,
preprocess, 'train', SEED)
triplet_valid = TripletSVHN(dataset, dataset_train.indices, dataset_valid.indices,
preprocess, 'val', SEED)
dataloader_train = torch.utils.data.DataLoader(triplet_train, batch_size=batch_size)
dataloader_valid = torch.utils.data.DataLoader(triplet_valid, batch_size=batch_size)
dataloaders = {'train': dataloader_train, 'val': dataloader_valid}
model = TripletNet(extracted_resnet)
criterion = TripletSoftLoss()
# Observe that all parameters are being optimized
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Decay LR by a factor of 0.1 every 7 epochs
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
train(model, dataloaders, criterion, optimizer, scheduler, epochs, device)
###Output
_____no_output_____
###Markdown
Training with the triplet loss from the FaceNet paper We are going to use a custom triplet dataset, which makes it possible to test the model on a fixed sample and creates valid triplets in every iteration. (A minimal sketch of the margin-based triplet loss itself is added after the training cell below.)
###Code
triplet_train= TripletSVHN(dataset, dataset_train.indices, dataset_valid.indices,
preprocess, 'train', SEED)
triplet_valid = TripletSVHN(dataset, dataset_train.indices, dataset_valid.indices,
preprocess, 'val', SEED)
dataloader_train = torch.utils.data.DataLoader(triplet_train, batch_size=batch_size)
dataloader_valid = torch.utils.data.DataLoader(triplet_valid, batch_size=batch_size)
dataloaders = {'train': dataloader_train, 'val': dataloader_valid}
model = TripletNet(extracted_resnet)
criterion = TripletLoss()
# Observe that all parameters are being optimized
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Decay LR by a factor of 0.1 every 7 epochs
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
train(model, dataloaders, criterion, optimizer, scheduler, epochs, device)
###Output
_____no_output_____
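###Markdown
For reference, here is a minimal added sketch of the margin-based triplet loss used above, following the FaceNet formulation $L(a,p,n) = \max(d(a,p) - d(a,n) + \mathrm{margin},\ 0)$ with Euclidean distances between embeddings. It is an illustration of the idea, not the project's `triplets.losses.TripletLoss` implementation.
###Code
import torch
import torch.nn.functional as F

def triplet_margin_loss_sketch(anchor, positive, negative, margin=1.0):
    # Euclidean distances between anchor-positive and anchor-negative embedding pairs
    d_ap = F.pairwise_distance(anchor, positive, p=2)
    d_an = F.pairwise_distance(anchor, negative, p=2)
    # hinge on the margin: only triplets that violate the margin contribute to the loss
    return F.relu(d_ap - d_an + margin).mean()

# smoke test on random embeddings of the same size the network produces
loss_example = triplet_margin_loss_sketch(torch.randn(8, 512), torch.randn(8, 512), torch.randn(8, 512))
###Output
_____no_output_____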
###Markdown
Adding hard mining In order to improve triplet selection we are going to use online triplet mining, which lets us form candidate triplets inside every mini-batch, constructing up to $B^3$ triplets out of the $B$ computed **embeddings**. Most of them are not valid or not informative, so we use one of two selection strategies. Suppose you have a batch of faces as input of size $B = PK$, composed of $P$ different persons with $K$ images each (a typical value is $K = 4$). The two strategies are: **batch all**: select all the valid triplets and average the loss over the hard and semi-hard ones; a crucial point is to ignore the easy triplets (those with loss 0), as averaging over them would make the overall loss very small. This produces a total of $PK(K-1)(PK-K)$ triplets ($PK$ anchors, $K-1$ possible positives per anchor, $PK-K$ possible negatives). **batch hard**: for each anchor, select the hardest positive (biggest distance $d(a,p)$) and the hardest negative in the batch; this produces $PK$ triplets, and the selected triplets are the hardest in the batch. A rough sketch of the batch-hard selection idea is added after the setup code below. [1] https://omoindrot.github.io/triplet-loss
###Code
extractor = FeatureExtractor(model=model_base, n_remove_layers=1, n_features=n_features, device=device)
extracted_resnet = extractor.prepare_model()
extracted_resnet = freeze_layers(extracted_resnet, 2)
train_batch_sampler = BalancedBatchSampler(dataset.labels[dataset_train.indices], n_classes=10, n_samples=10)
dataset = SVHN(root = 'data/', download=True, split='train', transform=preprocess)
train_size = int(0.8*len(dataset))
valid_size = len(dataset) - train_size
dataset_train, dataset_valid = random_split(dataset, [train_size, valid_size])
dataset_test = SVHN(root = 'data/', download=True, split='test', transform=preprocess);
# We'll create mini batches by sampling labels that will be present in the mini batch and number of examples from each class
train_batch_sampler = BalancedBatchSampler(dataset.labels[dataset_train.indices], n_classes=10, n_samples=25)
valid_batch_sampler = BalancedBatchSampler(dataset.labels[dataset_valid.indices], n_classes=10, n_samples=25)
online_train_loader = torch.utils.data.DataLoader(dataset_train, batch_sampler=train_batch_sampler)
online_valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_sampler=valid_batch_sampler)
margin = 1.
lr = 1e-3
optimizer = optim.Adam(extracted_resnet.parameters(), lr=lr, weight_decay=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
n_epochs = 20
log_interval = 50
dataloaders = {'train': online_train_loader, 'val': online_valid_loader}
criterion = OnlineTripletLoss(margin, SemihardNegativeTripletSelector(margin))
# Observe that all parameters are being optimized
optimizer = optim.Adam(extracted_resnet.parameters(), lr=0.001)
# Decay LR by a factor of 0.1 every 7 epochs
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
###Output
_____no_output_____
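###Markdown
A rough added sketch of the batch-hard idea described above (an illustration, not the repository's selector or `OnlineTripletLoss` implementation): for every anchor in a labelled batch of embeddings, take the hardest positive and the hardest negative from the pairwise distance matrix.
###Code
import torch

def batch_hard_triplet_loss_sketch(embeddings, labels, margin=1.0):
    # embeddings: (B, d) outputs for one balanced batch; labels: (B,) integer class ids
    dist = torch.cdist(embeddings, embeddings, p=2)              # (B, B) pairwise distances
    same = labels.unsqueeze(0) == labels.unsqueeze(1)            # True where labels match
    pos_mask = same & ~torch.eye(len(labels), dtype=torch.bool)  # same label, excluding the anchor itself
    neg_mask = ~same
    hardest_pos = dist.masked_fill(~pos_mask, 0.0).max(dim=1).values           # farthest same-label sample
    hardest_neg = dist.masked_fill(~neg_mask, float('inf')).min(dim=1).values  # closest different-label sample
    return torch.relu(hardest_pos - hardest_neg + margin).mean()
###Output
_____no_output_____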
###Markdown
Optimizing the margin I decided to use the loss function with the online triplet mining technique in order to converge faster to a desirable solution. * The first step was to calculate the loss on both the training and validation sets, choosing the model with the lowest validation loss. * After that the kNN model was trained on the features obtained from training, with the hyperparameter (number of neighbors) found in the previous optimisation (done directly on CNN codes). * One can argue that this strategy is not optimal: we should probably tune both the margin and the number of neighbors at the same time (for example using random search or Bayesian optimisation); however, due to a lack of training resources we are going to stay with this grid search strategy.
###Code
epochs=20
margins = [0.01, 0.005, 0.1, 0.5, 1, 5]
for margin in margins:
criterion = OnlineTripletLoss(margin, AllTripletSelector())
semihard_trained_net, semihard_total_loss_train, semihard_total_loss_val = \
train(extracted_resnet, dataloaders, criterion, optimizer, scheduler, epochs,
device, model_name='hardmine_all')
extractor = FeatureExtractor(model=semihard_trained_net, n_remove_layers=0, n_features=n_features, device=device)
clf = KNeighborsClassifier(n_neighbors=25)
features = extractor.extract_features(dataset_train)
###Output
_____no_output_____ |
contributed/reDisClustersDC2_CL+SL_NanLi_HackurDC2.ipynb | ###Markdown
The distribution of Einstein Radii of massive galaxy clusters in cosmoDC2 This is a rough estimate of the distribution of Einstein Radii of massive galaxy clusters (> 10^14 Msol/h) in cosmoDC2. The calculation is based on analytic NFW (for DM halos) and SIS (for BCGs) models. There is plenty of room to improve the current results, such as including ellipticities, substructures, line-of-sight structures, or even particle data from the N-body simulation. Regardless, this is just a starting point showing how to build mass models of objects selected from cosmoDC2 and apply them to the scientific applications that interest you. Should you have any questions or suggestions, please do not hesitate to slack me @linan7788626. Hopefully, we can make this notebook useful for your projects. Done: - Created mass models of galaxy clusters with SIE and NFW models. - Calculated the Einstein Radii of the clusters above 10^14 Msol/h (MFOF), with the source plane fixed at zs=3.0. - Compared the distribution of Re with and without BCGs. - Predicted how many clusters in LSST data have Einstein Radii above 30 arcsec. ToDo: - Convert MFOF to M200, or obtain M200 and C200 from add-on catalogs for cosmoDC2. - Add scatter to the parameters of the mass models. - Explore other ways to build more realistic mass models from the information in cosmoDC2. - Take the redshift distribution of sources into account. - Compare with available observations if possible.
###Code
%matplotlib inline
import numpy as np
import pylab as pl
#--------------------------------------------------------------------
# Cosmology model
#
import pyccl as ccl
cosmo = ccl.Cosmology(Omega_c=0.264, Omega_b=0.045, h=0.71, A_s=2.1e-9, n_s=0.96)
vc = 2.998e5 # speed of light, km/s
G = 4.3011790220362e-09 # Gravity constant, Mpc/h (Msun/h)^-1 (km/s)^2
apr = 206269.43 # arcsec per rad
def Dc(z):
'''
Comoving distance from redshift 0 to redshift z.
'''
res = ccl.comoving_radial_distance(cosmo, 1/(1+z))*cosmo['h']
return res
def Dc2(z1,z2):
'''
Comoving distance from redshift z1 to redshift z2.
'''
Dcz1 = ccl.comoving_radial_distance(cosmo, 1/(1+z1))*cosmo['h']
Dcz2 = ccl.comoving_radial_distance(cosmo, 1/(1+z2))*cosmo['h']
res = Dcz2-Dcz1+1e-8
return res
def Da(z):
'''
Angular diameter distance from redshift 0 to redshift z.
'''
res = Dc(z)/(1+z)
return res
def Da2(z1,z2):
'''
Angular diameter distance from redshift z1 to redshift z2.
'''
res = (Dc(z2)-Dc(z1)+1e-8)/(1.0+z2)
return res
def Dl(z):
'''
Luminosity distance from redshift 0 to redshift z.
'''
res = ccl.luminosity_distance(cosmo, 1/(1+z))*cosmo['h']
return res
def SigmaCrit(z1,z2):
'''
Critical surface density for the case of lens plane at z1 and source plane at z2.
'''
res = (vc*vc/4.0/np.pi/G*Dc(z2)/(Dc(z1)/(1.0+z1))/Dc2(z1,z2))
return res
def rho_crit_matter(z):
#rho_crit_matter, M_sun Mpc^-3 *h*h
res = ccl.background.rho_x(cosmo, 1/(1+z), "matter")/cosmo['h']**2
return res
def dv(z):
Omz = ccl.background.omega_x(cosmo, 1/(1+z), "matter")
ov = 1.0/Omz-1.0
res = 18.8*np.pi*np.pi*(1.0+0.4093*ov**0.9052)
return res
def r200_m200(m,z):
# res = (3.0*m/4.0/np.pi/rho_crit_matter(z)/200.0)**(1.0/3.0)
res = (3.0*m/4.0/np.pi/rho_crit_matter(z)/dv(z))**(1.0/3.0)
return res
def mags_to_vd(mg,mr,zz):
'''
Calculate the velocity dispersion of a galaxy according its apparent magnitudes in g and r bands.
Faber et al. 2007
Parker et al. 2007, Table 1
--- any suggestions on involving more realistic models? ---
'''
Dlum = Dl(zz)
    Mabsr = mr-5.0*np.log10(Dlum/cosmo['h'])-25.0
mrsdss = Mabsr+0.024*(mg-mr)/0.871
mrsdss = mrsdss-0.11
mrstar = (-20.44)+(zz-0.1)*1.5
LbyLstar = 10.0**(-0.4*(mrsdss-mrstar))
res = 142.0*LbyLstar**(1./3.)
return res
def re_sv(sv,z1,z2):
'''
    Einstein Radius for a galaxy with velocity dispersion sv at redshift z1,
and the redshift of source plane is z2. The mass model of the galaxy is
Singular Isothermal Ellipsoid (Kormann et al. 1994).
'''
res = 4.0*np.pi*(sv**2.0/vc**2.0)*Da2(z1,z2)/Da(z2)*apr
return res
def sis_kappa(x,y,re,rc=0.0):
'''
convergence map of a SIS model,
inputs are Einstein Radius (re) and the core size (rc).
'''
r = np.sqrt(x*x+y*y)
res = re/(2.0*np.sqrt(r*r+rc*rc))
return res
#-----
def c200_m200_HChild2018(m, z):
'''
Concentration-Mass relation of dark matter halos, where the inputs are
    virial mass and the redshift of the halo.
Child et al. 2018, Table 2.
'''
aa = 68.4
dd = -0.347
mm = -0.083
res = aa*(1.0+z)**dd*m**mm
return res
def nfw_kappa(x1_in,x2_in,c,m,z1,z2):
'''
convergence map of a NFW halo,
inputs are:
concentration (c),
    virial mass (m),
redshift of lens (z1),
redshift of source (z2)
'''
r200 = r200_m200(m,z1)
rs = r200/c
r = np.sqrt(x1_in*x1_in+x2_in*x2_in)
xx = r*Da(z1)/apr/rs
rhos = rho_crit_matter(z1)*dv(z1)/3.0*c**3.0/(np.log(1.0+c)-c/(1+c))
kappas = rs*rhos/SigmaCrit(z1,z2)
x = np.abs(xx)
x1 = x*x-1.0
x2 = 2.0/np.sqrt(np.abs(1.0-x*x))
x3 = np.sqrt(np.abs(1.0-x)/(1+x))
func_f = x*0.0
idxa = x>0
idxb = x<1
idx1 = idxa&idxb
func_f[idx1]=1.0/x1[idx1]*(1.0-x2[idx1]*np.arctanh(x3[idx1]))
idx2 = x==1
func_f[idx2]=1.0/3.0
idx3 = x>1.0
func_f[idx3]=1.0/x1[idx3]*(1.0-x2[idx3]*np.arctan(x3[idx3]))
res = 2.0*kappas*func_f
return res
def cart2pol(x, y):
'''
convert cartesian coordinates to polar coordinates in a 2D map
'''
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def find_re(x1_in, x2_in, kappa_in):
'''
Find the Einstein Radius of a given convergence map.
    Definition: the averaged convergence within the radius equals 1.0.
https://arxiv.org/pdf/astro-ph/0304162.pdf
'''
rht, pht = cart2pol(x1_in, x2_in)
rht_r = rht.ravel()
pht_r = pht.ravel()
kap_r = kappa_in.ravel()
idx_pol = np.argsort(((rht_r+1e8)**2.0 + pht_r**2.0))
kap_sorted = kap_r[idx_pol]
rht_sorted = rht_r[idx_pol]
kap_c = np.cumsum(kap_sorted)
kap_b = kap_c/(np.arange(len(kap_c))+1.0)
if len(kap_b[np.where(kap_b>1.0)]) < 9:
# print "There is no Einstein Radii in this lens."
return 0.0
else:
idx_re = np.argmin(np.abs(kap_b-1.0))
res = rht_sorted[idx_re]
return res
def make_c_coor(bs,nc):
'''
Draw the mesh grids for a bs*bs box with nc*nc pixels
'''
ds = bs/nc
xx01 = np.linspace(-bs/2.0,bs/2.0-ds,nc)+0.5*ds
xx02 = np.linspace(-bs/2.0,bs/2.0-ds,nc)+0.5*ds
xi2,xi1 = np.meshgrid(xx01,xx02)
return xi1,xi2
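# (added) quick sanity check of the helpers above: for a pure SIS profile the recovered
# Einstein radius should come out close to the input re (here 5 arcsec on a 20 arcsec box).
_xi1, _xi2 = make_c_coor(20.0, 256)
print(find_re(_xi1, _xi2, sis_kappa(_xi1, _xi2, 5.0)))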
'''
Load in cosmoDC2
'''
import GCRCatalogs
# areav100=805.5121
# gc = GCRCatalogs.load_catalog('cosmoDC2_v1.0')
areav114=487.6296
gc = GCRCatalogs.load_catalog('cosmoDC2_v1.1.4_image')
%%time
'''
Grab the parameters for the calculation of Einstein Radii,
Selection functions is central galaxies with halo mass larger than 1e14 Msol/h.
'''
gals_data_dict = gc.get_quantities(['galaxyID',
'mag_true_g_lsst',
'mag_true_r_lsst',
'size_true',
'size_minor_true',
'position_angle_true',
'redshift_true',
'stellar_mass',
'halo_mass',
'is_central'],
filters=['halo_mass>=1e14',
'is_central==True'])
'''
print out how many halos with mass above 1e14 Msol/h
'''
print("There are", len(gals_data_dict['galaxyID']), "galaxy clusters in cosmoDC2 v1.1.4 above $10^{14} M_{\odot}/h$")
'''
convert tables to arrays, and set the redshift of sources at z = 3.0
'''
z_ref = 3.0
idx_sort = np.argsort(gals_data_dict['halo_mass'])[::-1]
m200_main = gals_data_dict['halo_mass'][idx_sort]
magg_main = gals_data_dict['mag_true_g_lsst'][idx_sort]
magr_main = gals_data_dict['mag_true_r_lsst'][idx_sort]
zl_main = gals_data_dict['redshift_true'][idx_sort]
vd_main = mags_to_vd(magg_main, magr_main, zl_main)
re_main = re_sv(vd_main, zl_main, z_ref)
r200_main = r200_m200(m200_main, zl_main)
c200_main = c200_m200_HChild2018(m200_main, zl_main)
'''
Calculate the Einstein Radii, the runtime is about one hour.
'''
nnn = 1024
re_sie = []
re_nfw = []
re_tot = []
for i in range(len(vd_main)):
# box size is set to be 0.5 viral radius (in the unites of arcsec)
bsx = r200_main[i]*0.5/Da(zl_main[i])*apr
xi1, xi2 = make_c_coor(bsx,nnn)
# calculate the Einstein radius of the BCG only
kappa_bcgs = sis_kappa(xi1,xi2,re_main[i],0.0)
re_sie_tmp = find_re(xi1, xi2, kappa_bcgs)
re_sie.append(re_sie_tmp)
# calculate the Einstein radius of the Dark matter halo only
kappa_halo = nfw_kappa(xi1,xi2,c200_main[i],m200_main[i],zl_main[i],z_ref)
re_nfw_tmp = find_re(xi1, xi2, kappa_halo)
re_nfw.append(re_nfw_tmp)
# calculate the Einstein Radius of the cluster with both BCG and DM halo
kappa_tot = kappa_bcgs + kappa_halo
re_tot_tmp = find_re(xi1, xi2, kappa_tot)
re_tot.append(re_tot_tmp)
re_tot_arr = np.array(re_tot)
re_nfw_arr = np.array(re_nfw)
re_sie_arr = np.array(re_sie)
'''
scale up to the survey area of LSST,
and print out how many clusters with Einstein Radii larger than 30 arcsec
'''
arealsst = 18000 # degree^2
print("In LSST data, there are roughly",
int(len(re_tot_arr[np.where(re_tot_arr>30)])*arealsst/areav114),
"galaxy cluster having Einstein Radii larger than 30 arcsec.")
'''
The Distribution of Einstein Radii in Linear Space
'''
import seaborn as sns;sns.set()
pl.figure(figsize=(10, 7))
sns.distplot(re_tot_arr[np.where(re_tot_arr>0.01)],
kde_kws={"color": "b",
"lw": 3,
"label": "DM+BCG"},
hist_kws={"histtype": "bar",
"linewidth": 0,
"alpha": 0.5,
"color": "b"})
sns.distplot(re_nfw_arr[np.where(re_nfw_arr>0.01)],
kde_kws={"color": "r",
"lw": 3,
"label": "DM Only"},
hist_kws={"histtype": "bar",
"linewidth": 0,
"alpha": 0.5,
"color": "r"})
pl.xlim(0.5,35);
'''
The distribution of Einstein Radii in logarithmic Space.
'''
bins = 10**(np.linspace(0,np.log10(35.), 20))
import seaborn as sns;sns.set()
pl.figure(figsize=(10, 7))
sns.distplot(re_tot_arr[np.where(re_tot_arr>0.5)], bins=bins,
kde_kws={"color": "b",
"lw": 3,
"label": "DM+BCG"},
hist_kws={"histtype": "bar",
"linewidth": 0,
"alpha": 0.5,
"color": "b"})
sns.distplot(re_nfw_arr[np.where(re_nfw_arr>0.5)], bins=bins,
kde_kws={"color": "r",
"lw": 3,
"label": "DM Only"},
hist_kws={"histtype": "bar",
"linewidth": 0,
"alpha": 0.5,
"color": "r"})
pl.xscale('log')
pl.xlim(1.0,35);
###Output
_____no_output_____ |
troubleshooting/pytorch_test.ipynb | ###Markdown
My first introduction to pytorch i am starting with 'what is torch.nn really?' (see [here](https://pytorch.org/tutorials/beginner/nn_tutorial.html)) since it is the most concise example code that i can find.
###Code
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
MNIST data setup
###Code
#this is just creating a data folder for the MNIST dataset
from pathlib import Path
import requests
DATA_PATH = Path("data")
PATH = DATA_PATH / "mnist"
PATH.mkdir(parents=True, exist_ok=True)
URL = "http://deeplearning.net/data/mnist/"
FILENAME = "mnist.pkl.gz"
if not (PATH / FILENAME).exists():
content = requests.get(URL + FILENAME).content
(PATH / FILENAME).open("wb").write(content)
###Output
_____no_output_____
###Markdown
let's open the appropriate gzip file in numpy array format (which is stored as a pickle)
###Code
import pickle, gzip
with gzip.open((PATH / FILENAME).as_posix(), "rb") as f:
((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding="latin-1")
print(x_train.shape, y_train.shape)
print(x_valid.shape, y_valid.shape)
###Output
(50000, 784) (50000,)
(10000, 784) (10000,)
###Markdown
so training data has 50000 28x28 images (stored as a flattened row) whereas validation has 10000 images. let's plot one of the images
###Code
from matplotlib import pyplot
import numpy as np
pyplot.imshow(x_train[0].reshape((28, 28)), cmap='gray')
###Output
_____no_output_____
###Markdown
convert to torch tensors...
###Code
import torch
x_train, y_train, x_valid, y_valid = map(
torch.tensor, (x_train, y_train, x_valid, y_valid)
)
n, c = x_train.shape
x_train, x_train.shape, y_train.min(), y_train.max()
print(x_train, y_train)
print(x_train.shape)
print(y_train.min(), y_train.max())
n, c
###Output
_____no_output_____
###Markdown
ok so these data are pretty readable Neural Network from scratch (no torch.nn)
###Code
import math
weights = torch.randn(784, 10) / math.sqrt(784) #normalizing a randomly initialized weight matrix
weights.requires_grad_(True) #then making it gradable
bias = torch.zeros(10, requires_grad=True) #making a linear bias term of zeros that is linear gradable
def log_softmax(x):  # log-softmax: subtract the log of the summed exponentials (log-sum-exp) from each logit
    return x - x.exp().sum(-1).log().unsqueeze(-1)
def model(xb):  # the @ operator matrix-multiplies the batch by the weights; the bias is broadcast across the batch
    return log_softmax(xb @ weights + bias)
bs = 64
xb = x_train[0:bs]
preds = model(xb)
preds[0], preds.shape
def nll(input, target):  # negative log-likelihood: average of the log-probabilities picked out at the target indices
    return -input[range(target.shape[0]), target].mean()
loss_func = nll
yb = y_train[0:bs]
loss_func(preds, yb)
def accuracy(out, yb):
preds = torch.argmax(out, dim=1)
return (preds == yb).float().mean()
print(accuracy(preds, yb))
###Output
tensor(0.0781)
###Markdown
that is quite shit. let's: select a mini-batch of data (of size bs); use the model to make a prediction; calculate the loss; call `loss.backward()` to compute the gradients; then update the weights with those gradients (inside `torch.no_grad()`)
###Code
from IPython.core.debugger import set_trace
lr = 0.5 # learning rate
epochs = 2 # how many epochs to train for
for epoch in range(epochs):
for i in range((n - 1) // bs + 1):
# set_trace()
start_i = i * bs
end_i = start_i + bs
xb = x_train[start_i:end_i]
yb = y_train[start_i:end_i]
pred = model(xb)
loss = loss_func(pred, yb)
loss.backward()
with torch.no_grad():
weights -= weights.grad * lr
bias -= bias.grad * lr
weights.grad.zero_()
bias.grad.zero_()
print(loss_func(model(xb), yb), accuracy(model(xb), yb))
###Output
tensor(0.0802, grad_fn=<NegBackward>) tensor(1.)
###Markdown
now we can refactor with `torch.nn.functional`
###Code
import torch.nn.functional as F
loss_func = F.cross_entropy
def model(xb):
return xb @ weights + bias
###Output
_____no_output_____
###Markdown
refactor with `nn.Module`
###Code
from torch import nn
class Mnist_Logistic(nn.Module):
def __init__(self):
super().__init__()
self.weights = nn.Parameter(torch.randn(784, 10) / math.sqrt(784))
self.bias = nn.Parameter(torch.zeros(10))
def forward(self, xb):
return xb @ self.weights + self.bias
model = Mnist_Logistic()
callable(model)
vars(model)
linmodel = nn.Linear(2, 3)
linmodel._parameters
with torch.no_grad():
linmodel._parameters['weight'].data = torch.ones(3,2)
linmodel._parameters
with torch.no_grad():
for parameter in linmodel.parameters():
parameter += torch.eye(3)
linmodel._parameters['weight']
params = [param for param in linmodel.parameters()]
with torch.no_grad():
params[0] += torch.eye(3)
params[0] += torch.eye(3)
a = torch.tensor([3.,4.,5.], requires_grad=True)
b = (2.*a**2).sum()
b.backward()
type(a.grad)
seq = nn.Sequential(nn.Linear(2,3), nn.Tanh(), nn.Linear(2,3))
print([param for param in seq.parameters()])
###Output
[Parameter containing:
tensor([[ 0.5388, -0.2660],
[ 0.1873, -0.0792],
[ 0.6674, -0.6481]], requires_grad=True), Parameter containing:
tensor([ 0.3123, -0.3795, 0.6036], requires_grad=True), Parameter containing:
tensor([[-0.2998, -0.5869],
[ 0.3183, -0.4068],
[ 0.4088, -0.5266]], requires_grad=True), Parameter containing:
tensor([-0.6193, -0.6013, 0.0169], requires_grad=True)]
###Markdown
skipping the following refactoring steps: refactoring using `nn.Linear`, refactoring using `nn.optim`. Refactor using Dataset
###Code
from torch.utils.data import TensorDataset
###Output
_____no_output_____
###Markdown
we can iterate over and slice `x_train` and `y_train` more easily with a `TensorDataset`
###Code
train_ds = TensorDataset(x_train, y_train)
xb, yb = train_ds[bs : bs + bs]
print(xb.shape, yb.shape)
###Output
torch.Size([64, 784]) torch.Size([64])
###Markdown
refactor using DataLoader
###Code
from torch.utils.data import DataLoader
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size = bs)
bs
###Output
_____no_output_____
###Markdown
looping over training data is a lot easier now...
###Code
for xb, yb in train_dl:
pass
###Output
_____no_output_____
###Markdown
add validation
###Code
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size = bs, shuffle=True) #shuffling prevents correlation between batches and overfitting
valid_ds = TensorDataset(x_valid, y_valid)
valid_dl = DataLoader(valid_ds, batch_size = bs * 2)
###Output
_____no_output_____
###Markdown
i can track validation as a function of every training epoch... skipping...- create `fit` and `get_data` functions- switch to `CNN`- `nn.Sequential`- wrapping `DataLoader`- using your `GPU` 1. Dummy N-dimensional controlling modules here, i'll attempt to make parameterizable `nn.Module` subclasses in order to parameterize an n-dimensional twisted sequential monte carlo sampler. since i am twisting an ULA sampler, i need to parameterize $A_0$, $b_0$, $c_0$, as well as their $t$ sequence counterparts. the only difference is that $A_t$ and $b_t$ are functions of $x_{t-1}$ since the twisted potentials at iterations $i>0$ are the _summation_ of previous twisting potentials, we need to make a function that aggregates twists...this should be easily implementable as a function that takes a class with a list of appropriate parameters, at which point, it adds the parameters and returns the output control, whether it is a matrix like $A$, a vector like $b$, or a scalar like $c$ making a dummy parameterization of a 1-dimensional function
###Code
from torch import nn
###Output
_____no_output_____
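###Markdown
Before the 1-dimensional dummy example, here is a rough added sketch of the twist-aggregation idea described above: a module that stores a list of per-iteration twist parameters and returns their sums as the aggregate controls $A$, $b$, $c$. The shapes and names are assumptions made for illustration, not the final sampler interface.
###Code
import torch
from torch import nn

class AggregatedTwist(nn.Module):
    """Hold a list of (A, b, c) twist parameters and expose their running sums."""
    def __init__(self, dim, num_twists):
        super().__init__()
        # one ParameterList per control; each entry corresponds to one twisting iteration
        self.As = nn.ParameterList([nn.Parameter(torch.zeros(dim, dim)) for _ in range(num_twists)])
        self.bs = nn.ParameterList([nn.Parameter(torch.zeros(dim)) for _ in range(num_twists)])
        self.cs = nn.ParameterList([nn.Parameter(torch.zeros(1)) for _ in range(num_twists)])

    def forward(self):
        # aggregate by summation, matching the additive structure of the twisted potentials
        A = torch.stack(list(self.As)).sum(dim=0)  # matrix control, (dim, dim)
        b = torch.stack(list(self.bs)).sum(dim=0)  # vector control, (dim,)
        c = torch.stack(list(self.cs)).sum(dim=0)  # scalar control
        return A, b, c

# e.g. AggregatedTwist(dim=2, num_twists=3)() returns the three summed controls
###Output
_____no_output_____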
###Markdown
define a model...
###Code
class quad_model(nn.Module):
"""
basic function to variationally perform regression on a 1d quadratic of the form y = a(x-b)**2 + c
"""
def __init__(self):
super().__init__()
self.a = nn.Parameter(torch.randn(1, dtype=torch.double))
self.b = nn.Parameter(torch.randn(1, dtype=torch.double))
self.c = nn.Parameter(torch.randn(1, dtype=torch.double))
#do a test...
test_a = torch.randn(10, 1)
a = torch.randn(1)
test_a**2 @ a
def forward(self, x):
summand = x-self.b
prod = summand**2
# print(prod)
# print(self.a)
mult = prod @ self.a
adder = mult + self.c
return adder
###Output
_____no_output_____
###Markdown
define a loss function (we'll do MSE since our data will have Gaussian noise)
###Code
import torch.nn.functional as F
loss_func = F.mse_loss
###Output
_____no_output_____
###Markdown
i'll make some training, validation, and test data
###Code
num_datapoints = 1000
xs = np.linspace(-1, 5, num_datapoints)
a = 0.1
b = 1.
c = -2.
y_perfect = a * (xs -b)**2 -c
ys_noisy = y_perfect + 0.2*np.random.randn(num_datapoints)
plt.plot(xs, y_perfect)
plt.scatter(xs, ys_noisy)
###Output
_____no_output_____
###Markdown
we should be able to refactor with `Datasets`
###Code
from torch.utils.data import TensorDataset, DataLoader
#xs = torch.reshape(torch.tensor(xs), (num_datapoints, 1))
#ys = torch.reshape(torch.tensor(ys_noisy), (num_datapoints, 1))
#make validation
validation_percent = 0.2
validation_indices = np.random.choice(num_datapoints, size=int(validation_percent*num_datapoints), replace=False)
test_indices = np.array([idx for idx in range(num_datapoints) if idx not in validation_indices])
x_test = torch.from_numpy(xs[test_indices])
y_test = torch.from_numpy(ys_noisy[test_indices])
x_valid = torch.from_numpy(xs[validation_indices])
y_valid = torch.from_numpy(ys_noisy[validation_indices])
###Output
_____no_output_____
###Markdown
let's load the data so we can minibatch!
###Code
train_ds = TensorDataset(x_test, y_test)
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
valid_ds = TensorDataset(x_valid, y_valid)
valid_dl = DataLoader(valid_ds, batch_size = bs*2)
valid_ds.tensors[0].size()
###Output
_____no_output_____
###Markdown
now that the data are appropriately loadable, let's import optim for training
###Code
from torch import optim
x_valid.size()
model = quad_model()
lr=1e-4
opt= optim.SGD(model.parameters(), lr=1e-3)
callable(opt)
num_epochs = 1000
sum_valid_losses = []
import tqdm
for epoch in tqdm.trange(num_epochs):
for _x, _y in train_dl:
__x = _x.reshape(len(_x), 1)
__y = _y.reshape(len(_x), 1)
pred = model(__x)
loss = loss_func(pred, _y)
loss.backward()
opt.step()
opt.zero_grad()
with torch.no_grad():
valid_loss = sum(loss_func(model(torch.reshape(qr, (len(qr), 1))), yr) for qr, yr in valid_dl)
sum_valid_losses.append(valid_loss)
plt.plot(sum_valid_losses)
fitted_a = model._parameters['a'].data.numpy()
fitted_b = model._parameters['b'].data.numpy()
fitted_c = model._parameters['c'].data.numpy()
plt.scatter(xs, ys_noisy)
torched_xs = torch.from_numpy(xs).reshape(len(xs), 1)
torched_model = model(torched_xs)
plt.plot(xs, torched_model.detach().numpy(), linewidth=4, color = 'k')
###Output
_____no_output_____
###Markdown
cool! so it looks like i know how to make a simple regressor to a curve...maybe in the future i can sample the posterior parameter space or something!! presumably, I will be using `Sequential` to build models, but i need to be able to set parameter values appropriately because stupid reasons
###Code
model = nn.Sequential(nn.Linear(2,2, bias=False), nn.ReLU(), nn.Linear(2,2, bias=False))
_data = []
with torch.no_grad():
for param in model.parameters():
#param*=0.
_data.append(param.data)
#param += torch.ones(2,2)
_data
with torch.no_grad():
for idx, param in enumerate(model.parameters()):
param*=0
param += _data[idx]
model[0]._parameters['weight']
###Output
_____no_output_____
###Markdown
Make twisting model for A, b, c, d in $\mathbf{R}^d$. can i make an nn.Sequential() that takes no arguments?
###Code
class Filler(nn.Module):
def __init__(self):
super().__init__()
pass
def forward(self, x):
return torch.zeros(1)
q = nn.Sequential(
Filler(),
nn.Linear(1,5),
nn.Tanh(),
nn.Linear(5,1)
)
r = [i for i in q.modules()]
q._modules.values()
q(input=None)
a = torch.randn(100,100)
%time
a.numpy()
%time
torch.mm(a.t(), a)
a = torch.tensor([[1., 2.]])
a.squeeze()
a.dot(b)
a.size()
b = torch.ones(1)
b = torch.tensor(1.)
b.numpy() + 2.
torch.mm(b, a.unsqueeze(1))
a_batch = torch.randn(4,3)
b_mult = torch.randn(3,1)
torch.matmul(a_batch, b_mult).squeeze().dot(torch.tensor([[1., 2., 3., 4.]]))
model = nn.Linear(5,3)
batch = torch.randn(10, 5)
torch.matmul(model(batch),torch.tensor([1., 2., 3.]))
#phi = xT A x + xT b + c + d
x = torch.randn(10, 24)
A = torch.randn(10, 24, 24)
b = torch.randn(10, 24)
c = torch.randn(10)
d= torch.randn(10)
%time
torch.matmul(x.reshape(10, 24, 1).transpose(1,2), torch.matmul(A, x.reshape(10, 24, 1))).squeeze()
%time
x.view(10, 24, 1).transpose(1,2) * (A * x.view(10, 24, 1))
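# (added) a rough helper pulling the pieces above together: batched phi(x) = x^T A x + x^T b + c + d,
# assuming x is (batch, dim), A is (batch, dim, dim), b is (batch, dim), c and d are (batch,)
def batched_quadratic_phi(x, A, b, c, d):
    xc = x.unsqueeze(-1)                                       # (batch, dim, 1)
    quad = torch.matmul(xc.transpose(1, 2), torch.matmul(A, xc)).squeeze(-1).squeeze(-1)  # x^T A x
    lin = (x * b).sum(dim=1)                                   # x^T b
    return quad + lin + c + d

batched_quadratic_phi(x, A, b, c, d).shape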
a = torch.randn(10, 1, 3)
b = torch.randn(10, 1, 3)
a.size() == b.size()
torch.sum(a*b, dim=1)
torch.sum(b*a, dim=1)
torch.matmul(A, x.reshape(10, 24, 1)).size()
# single-sample check of the quadratic form (first element of the batch): (1, 24) @ (24, 24) @ (24, 1) -> (1, 1)
torch.mm(x[0:1], torch.matmul(A[0], x[0:1].t()))
###Output
_____no_output_____ |
机器学习/深入理解集成学习XGBoost及LightGBM/EnsembleLearning-master/lightBGM.ipynb | ###Markdown
Applying LightGBM
###Code
import datetime
import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
%matplotlib inline
# Load the dataset
breast = load_breast_cancer()
# Get the feature values and the target
X,y = breast.data,breast.target
# Get the feature names
feature_name = breast.feature_names
# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# Convert to LightGBM's Dataset format
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
# Parameter settings
boost_round = 50  # number of boosting iterations
early_stop_rounds = 10  # stop early if the validation metric does not improve for early_stop_rounds rounds
params = {
    'boosting_type': 'gbdt',  # boosting type
    'objective': 'regression',  # objective function
    'metric': {'l2', 'auc'},  # evaluation metrics
    'num_leaves': 31,  # number of leaves
    'learning_rate': 0.05,  # learning rate
    'feature_fraction': 0.9,  # fraction of features used when building each tree
    'bagging_fraction': 0.8,  # fraction of samples used when building each tree
    'bagging_freq': 5,  # k means bagging is performed every k iterations
    'verbose': 1  # <0 fatal only, =0 errors (warnings), >0 info
}
# Train the model, with early stopping enabled
results = {}
gbm = lgb.train(params,
                lgb_train,
                num_boost_round= boost_round,
                valid_sets=(lgb_eval, lgb_train),
                valid_names=('validate','train'),
                # number of early-stopping rounds
                early_stopping_rounds = early_stop_rounds,
                evals_result= results)
# Predict using the best iteration found during training
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
y_pred
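# (added) a quick sketch: since the objective above is 'regression' on 0/1 labels, threshold the
# continuous scores at 0.5 to get hard class labels and evaluate with the accuracy_score imported earlier
y_pred_labels = (y_pred >= 0.5).astype(int)
print(accuracy_score(y_test, y_pred_labels))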
# Evaluate the model: plot the recorded training metrics
lgb.plot_metric(results)
plt.show()
# Plot feature importance
lgb.plot_importance(gbm,importance_type = "split")
plt.show()
###Output
_____no_output_____ |
.ipynb_checkpoints/land_expansion-checkpoint 8.ipynb | ###Markdown
Open the data from past notebooks and correct them to only include years that are common between the data structures (>1999).
###Code
with open('VariableData/money_data.pickle', 'rb') as f:
income_data, housing_data, rent_data = pickle.load(f)
with open('VariableData/demographic_data.pickle', 'rb') as f:
demographic_data = pickle.load(f)
with open('VariableData/endowment.pickle', 'rb') as f:
endowment = pickle.load(f)
with open('VariableData/expander.pickle', 'rb') as f:
expander = pickle.load(f)
endowment = endowment[endowment['FY'] > 1997].reset_index()
endowment.drop('index', axis=1, inplace=True)
demographic_data = demographic_data[demographic_data['year'] > 1999].reset_index()
demographic_data.drop('index', axis=1, inplace=True)
income_data = income_data[income_data['year'] > 1999].reset_index()
income_data.drop('index', axis=1, inplace=True)
housing_data = housing_data[housing_data['year'] > 1999].reset_index()
housing_data.drop('index', axis=1, inplace=True)
rent_data = rent_data[rent_data['year'] > 1999].reset_index()
rent_data.drop('index', axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
Read in the data on Harvard owned land and Cambridge's property records. Restrict the Harvard data to Cambridge, MA.
###Code
harvard_land = pd.read_excel("Spreadsheets/2018_building_reference_list.xlsx", header=3)
harvard_land = harvard_land[harvard_land['City'] == 'Cambridge']
cambridge_property = pd.read_excel("Spreadsheets/cambridge_properties.xlsx")
###Output
_____no_output_____
###Markdown
Restrict the Cambridge data to Harvard properties, and only use relevant columns.
###Code
cambridge_property = cambridge_property[cambridge_property['Owner_Name'].isin(['PRESIDENT & FELLOWS OF HARVARD COLLEGE', 'PRESIDENT & FELLOW OF HARVARD COLLEGE'])]
cambridge_property = cambridge_property[['Address', 'PropertyClass', 'LandArea', 'BuildingValue', 'LandValue', 'AssessedValue', 'SalePrice', 'SaleDate', 'Owner_Name']]
###Output
_____no_output_____
###Markdown
Fix the time data.
###Code
cambridge_property['SaleDate'] = pd.to_datetime(cambridge_property['SaleDate'], infer_datetime_format=True)
clean_property = cambridge_property.drop_duplicates(subset=['Address'])
clean_property.head()
clean_property.dtypes
print(type(datetime.date(2000, 1, 1)))
###Output
<class 'datetime.date'>
###Markdown
Only look at properties purchased after 2000.
###Code
recent_property = clean_property[clean_property['SaleDate'] > datetime.date(2000, 1, 1)]
property_numbers = recent_property[['LandArea', 'AssessedValue', 'SalePrice']]
num_recent = recent_property['Address'].count()
sum_properties = property_numbers.sum()
sum_properties
full_property_numbers = clean_property[['LandArea', 'AssessedValue', 'SalePrice']]
sum_full = full_property_numbers.sum()
delta_property = sum_properties / sum_full
delta_property
###Output
_____no_output_____
###Markdown
What can be gathered from above? Since the year 2000, Harvard has increased its presence in Cambridge by about 3%, corresponding to about 2% of its overall assessed value: an increase of 281,219 square feet and \$115,226,500. Although the increase in assessed value is that high, Harvard only paid \$57,548,900 for the property at the times of purchase. To make some adjustments for inflation: note that the inflation rate since 2000 is ~37.8% (https://data.bls.gov/timeseries/CUUR0000SA0L1E?output_view=pct_12mths). (A small compounding cross-check is added after the inflation-adjustment cell below.)
###Code
inflation_data = pd.read_excel("Spreadsheets/inflation.xlsx", header=11)
inflation_data = inflation_data[['Year', 'Jan']]
inflation_data['Year'] = pd.to_datetime(inflation_data['Year'], format='%Y')
inflation_data['CumulativeInflation'] = inflation_data['Jan'].cumsum()
inflation_data.rename(columns={'Year' : 'SaleDate'}, inplace=True)
recent_property['SaleDate'] = recent_property['SaleDate'].dt.year
inflation_data['SaleDate'] = inflation_data['SaleDate'].dt.year
recent_property = pd.merge(recent_property, inflation_data, how="left", on=['SaleDate'])
recent_property = recent_property.drop('Jan', 1)
recent_property['TodaySale'] = (1 + (recent_property['CumulativeInflation'] / 100)) * recent_property['SalePrice']
today_sale_sum = recent_property['TodaySale'].sum()
today_sale_sum
sum_properties['AssessedValue'] - today_sale_sum
###Output
_____no_output_____
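###Markdown
As an added cross-check (not part of the original analysis): the cell above accumulates inflation by summing the January year-over-year rates. Assuming the `Jan` column is a percentage rate, a compounded factor can also be computed; the two agree to first order for small rates.
###Code
# hedged sketch: compound the annual rates instead of summing them
inflation_data['CompoundFactor'] = (1 + inflation_data['Jan'] / 100).cumprod()
inflation_data[['SaleDate', 'CumulativeInflation', 'CompoundFactor']].tail()
###Output
_____no_output_____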
###Markdown
Hence, adjusted for inflation, the sale price of the property Harvard has acquired since 2000 is \$65,929,240. The difference between this value and the assessed value of the property (in 2018) is \$49,297,260, showing that Harvard's property has appreciated well beyond inflation (its 2018 assessed value is nearly twice the inflation-adjusted purchase price), illustrating a clearly advantageous dynamic for Harvard.
###Code
sorted_df = recent_property.sort_values(by=['SaleDate'])
sorted_df = sorted_df.reset_index().drop('index', 1)
sorted_df['CumLand'] = sorted_df['LandArea'].cumsum()
sorted_df['CumValue'] = sorted_df['AssessedValue'].cumsum()
sorted_df
###Output
_____no_output_____
###Markdown
Graph the results.
###Code
def fitter(x, y, regr_x):
"""
Use linear regression to make a best fit line for a set of data.
Args:
x (numpy array): The independent variable.
y (numpy array): The dependent variable.
regr_x (numpy array): The array used to extrapolate the regression.
"""
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
return (slope * regr_x + intercept)
years = sorted_df['SaleDate'].as_matrix()
cum_land = sorted_df['CumLand'].as_matrix()
cum_value = sorted_df['CumValue'].as_matrix()
regr = np.arange(2000, 2012)
line0 = fitter(years, cum_land, regr)
trace0 = go.Scatter(
x = years,
y = cum_land,
mode = 'markers',
name='Harvard Land\n In Cambridge',
marker=go.Marker(color='#601014')
)
fit0 = go.Scatter(
x = regr,
y = line0,
mode='lines',
marker=go.Marker(color='#D2232A'),
name='Fit'
)
data = [trace0, fit0]
layout = go.Layout(
title = "The Change In Harvard's Land in Cambridge Since 2000",
font = dict(family='Gotham', size=18),
yaxis=dict(
title='Land Accumulated Since 2000 (Sq. Feet)'
),
xaxis=dict(
title='Year')
)
fig = go.Figure(data=data, layout=layout)
iplot(fig, filename="land_changes")
graph2_df = pd.DataFrame(list(zip(regr, line0)))
graph2_df.to_csv('graph2.csv')
def grapher(x, y, city, title, ytitle, xtitle, filename):
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
fit = slope * x + intercept
trace0 = go.Scatter(
x = x,
y = y,
mode = 'markers',
name=city,
marker=go.Marker(color='#D2232A')
)
fit0 = go.Scatter(
x = x,
y = fit,
mode='lines',
marker=go.Marker(color='#AC1D23'),
name='Linear Fit'
)
data = [trace0, fit0]
layout = go.Layout(
title = title,
font = dict(family='Gotham', size=12),
yaxis=dict(
title=ytitle
),
xaxis=dict(
title=xtitle)
)
fig = go.Figure(data=data, layout=layout)
return iplot(fig, filename=filename)
len(line0)
###Output
_____no_output_____
###Markdown
Restrict the demographic data to certain years (up to 2012) in order to fit the data well.
###Code
demographic_data = demographic_data[demographic_data['year'] < 2011]
rent_data = rent_data[rent_data['year'] < 2011]
housing_data = housing_data[housing_data['year'] < 2011]
x = cum_land
y = pd.to_numeric(demographic_data['c_black']).as_matrix()
z1 = pd.to_numeric(rent_data['cambridge']).as_matrix()
z2 = pd.to_numeric(housing_data['cambridge']).as_matrix()
endow_black = grapher(x, y, "Cambridge", "The Correlation Between Harvard Land Change and Black Population", "Black Population of Cambridge", "Land Change (Sq. Feet)", "land_black")
X = CausalDataFrame({'x': x, 'y': y, 'z1': z1, 'z2': z2})
causal_land_black = X.zplot(x='x', y='y', z=['z1', 'z2'], z_types={'z1': 'c', 'z2': 'c'}, kind='line', color="#D2232A")
fig = causal_land_black.get_figure()
fig.set_size_inches(9, 5.5)
ax = plt.gca()
ax.set_frame_on(False)
ax.get_yaxis().set_visible(False)
ax.legend_.remove()
ax.set_title("The Controlled Correlation Between Land Use (Square Feet) and Black Population", fontproperties=gotham_black, size=10, color="#595959")
ax.set_xlabel("Land Use", fontproperties=gotham_book, fontsize=10, color="#595959")
for tick in ax.get_xticklabels():
tick.set_fontproperties(gotham_book)
tick.set_fontsize(10)
tick.set_color("#595959")
fig.savefig('images/black_land.svg', format='svg', dpi=2400, bbox_inches='tight')
z2
graph9_df = pd.DataFrame(X)
graph9_df.to_csv('graph9.csv')
y = pd.to_numeric(rent_data['cambridge']).as_matrix()
z1 = pd.to_numeric(housing_data['cambridge']).as_matrix()
X = CausalDataFrame({'x': x, 'y': y, 'z1': z1})
causal_land_rent = X.zplot(x='x', y='y', z=['z1'], z_types={'z1': 'c'}, kind='line', color="#D2232A")
fig = causal_land_rent.get_figure()
fig.set_size_inches(9, 5.5)
ax = plt.gca()
ax.set_frame_on(False)
ax.get_yaxis().set_visible(False)
ax.legend_.remove()
ax.set_title("The Controlled Correlation Between Land Use (Square Feet) and Rent", fontproperties=gotham_black, size=10, color="#595959")
ax.set_xlabel("Land Use", fontproperties=gotham_book, fontsize=10, color="#595959")
for tick in ax.get_xticklabels():
tick.set_fontproperties(gotham_book)
tick.set_fontsize(10)
tick.set_color("#595959")
fig.savefig('images/rent_land.svg', format='svg', dpi=1200, bbox_inches='tight')
###Output
_____no_output_____ |
WEEK 1:/BASIC BITWISE OPERATOR INTRO.ipynb | ###Markdown
Basics of bit manipulation Computers do not understand words and numbers the way we do. All the data they receive is encoded at the lowest level as a series of zeros and ones (0 and 1), and this is the only way they make sense of any command they are given. This series of 0s and 1s is known as **bits**. Operations on bits are used in data compression (data is compressed by converting it from one representation to another, to reduce the space) and exclusive-or encryption (an algorithm to encrypt data for safety). In order to encode, decode or compress files we have to work with the data at the bit level. Bitwise operations are faster and closer to the system and can sometimes optimize a program to a good level. We all know that 1 byte comprises 8 bits and that any int or char can be represented using bits in a computer, which we call its binary form. i) $(1101)_{2}$ is the binary form of **13**: $(13)_{10} = 1 \times 2^{3} + 1 \times 2^{2} + 0 \times 2^{1} + 1 \times 2^{0}$ Writing binary from int and vice versa in python
###Code
a=43
print(bin(43))#return a binary string
b="100101010"
print(int(b,2))#takes string of binary as input
k="0b10001101"
print(int(k,2))
print(type(a))
print(type(b)) # the type() function returns the object's type
###Output
298
141
<class 'int'>
<class 'str'>
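###Markdown
As a quick check of the expansion above, the small sketch below rebuilds 13 from its binary digits (purely illustrative).
###Code
# rebuild 13 from its bits: 1*2**3 + 1*2**2 + 0*2**1 + 1*2**0
bits = "1101"
value = sum(int(b) * 2**i for i, b in enumerate(reversed(bits)))
print(value)           # 13
print(bin(13))         # '0b1101' -- round trip with bin()
print(int("1101", 2))  # 13       -- and back again with int(..., 2)
###Output
_____no_output_____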
###Markdown
Bitwise operators in python &(and) operator
###Code
# &
print(13&12) #return integer
#print(bin(13)&bin(14)) #this will give you an error, since & is not defined for strings
#print(12.34&23.56) #same here, & is not defined for floats
print(-13&-17)
print(13&13)
print(13&0) #more about it later
print(0b10101&0b11111)
###Output
12
-29
13
0
21
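###Markdown
Two everyday uses of & , shown only as an illustration: testing the lowest bit to tell odd from even, and masking to keep just the low bits of a number.
###Code
# n & 1 looks at the lowest bit: 1 for odd numbers, 0 for even numbers
for n in (10, 13, 0, 7):
    print(n, "odd" if n & 1 else "even")
# masking with 0b1111 keeps only the lowest 4 bits
print(bin(0b110101 & 0b1111))  # 0b101
###Output
_____no_output_____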
###Markdown
& operator 1 & 1 = 11 & 0 = 00 & 1 = 00 & 0 = 0 |(or) operator
###Code
''' | if you're using it for the first time it might take time for you to see where it is on your keyboard
( just above your enter key )'''
print(13|14) #return integer
print(14|14)
print(14|0)#takes integer
###Output
15
14
14
###Markdown
| operator 1 | 1 = 11 | 0 = 10 | 1 = 10 | 0 = 0 ^(xor) operator
###Code
print(3^4)
print(0b10101^0b1111)
print(0b10000^0b11111)#can be used to find NOT (flipping the bits 0-->1 and 1-->0)
###Output
7
26
15
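###Markdown
A tiny loop that prints the full XOR truth table, plus a quick look at XOR as a bit toggler (illustrative only).
###Code
# print a ^ b for every combination of single bits
for a in (0, 1):
    for b in (0, 1):
        print(a, "^", b, "=", a ^ b)
# x ^ mask flips exactly the bits that are set in mask
print(bin(0b1010 ^ 0b0011))  # 0b1001
###Output
_____no_output_____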
###Markdown
^ operator 1 ^ 1 = 11 ^ 0 = 10 ^ 1 = 10 ^ 0 = 0 ~(Python Ones’ complement of a number ‘A’ is equal to -(A+1)) operator
###Code
'''present just above the tab key '''
print(~0)
print(~12)
print(bin(-12))
print(bin(~0))
###Output
-1
-13
-0b1100
-0b1
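###Markdown
The identity ~A == -(A+1) mentioned above can be verified directly; masking with 0xFF also shows what the flipped low byte looks like (a small illustrative sketch).
###Code
# check ~A == -(A+1) for a few values
for a in (0, 1, 12, -5):
    print(a, ~a, -(a + 1), ~a == -(a + 1))
# Python ints are unbounded, so to see ~ as "flip the bits" use a fixed-width mask
print(bin(~0b1100 & 0xFF))  # 0b11110011 -- the low 8 bits of ~12
###Output
_____no_output_____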
###Markdown
<<(left shift) operator The left shift operator is a binary operator which shifts the bits of the given bit pattern to the left by the requested number of positions, appending 0s at the right end. A left shift by k bits is equivalent to multiplying the value by $2^{k}$.
###Code
print(1<<2)
print(3<<0b101010)
print(1<<0b1)
print(2<<0b1)
###Output
4
13194139533312
2
4
###Markdown
1 << 1 = 2 = $2^{1}$, 1 << 2 = 4 = $2^{2}$, 1 << 3 = 8 = $2^{3}$, 1 << 4 = 16 = $2^{4}$, …, 1 << n = $2^{n}$ >>(right shift) operator The right shift operator is a binary operator which shifts the bits of the given bit pattern to the right by the requested number of positions, dropping the bits at the right end (for non-negative numbers, 0s are filled in on the left). A right shift by k bits is equivalent to floor-dividing the value by $2^{k}$.
###Code
print(4>>1)
print(20>>2)
###Output
2
5
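###Markdown
A short check that shifting matches multiplying or floor-dividing by powers of two (illustrative only).
###Code
x = 20
for k in (1, 2, 3):
    print(x << k, x * 2**k)   # left shift  == multiply by 2**k
    print(x >> k, x // 2**k)  # right shift == floor-divide by 2**k
###Output
_____no_output_____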
|
site/ko/hub/tutorials/image_enhancing.ipynb | ###Markdown
Copyright 2019 The TensorFlow Hub Authors. Licensed under the Apache License, Version 2.0 (the "License"); Created by @[Adrish Dey](https://github.com/captain-pool) for [Google Summer of Code](https://summerofcode.withgoogle.com/) 2019
###Code
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
###Output
_____no_output_____
###Markdown
Image super-resolution using ESRGAN This colab demonstrates the use of the TensorFlow Hub module for image enhancement based on the Enhanced Super-Resolution Generative Adversarial Network (*Xintao Wang et al.*) [[paper](https://arxiv.org/pdf/1809.00219.pdf)] [[code](https://github.com/captain-pool/GSOC/)]. *(Preferably use bicubically downsampled images.)* The model was trained on the DIV2K dataset (bicubically downsampled images) with image patches of size 128 x 128. **Preparing the environment**
###Code
import os
import time
from PIL import Image
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.pyplot as plt
os.environ["TFHUB_DOWNLOAD_PROGRESS"] = "True"
!wget "https://user-images.githubusercontent.com/12981474/40157448-eff91f06-5953-11e8-9a37-f6b5693fa03f.png" -O original.png
# Declaring Constants
IMAGE_PATH = "original.png"
SAVED_MODEL_PATH = "https://tfhub.dev/captain-pool/esrgan-tf2/1"
###Output
_____no_output_____
###Markdown
**Defining helper functions**
###Code
def preprocess_image(image_path):
""" Loads image from path and preprocesses to make it model ready
Args:
image_path: Path to the image file
"""
hr_image = tf.image.decode_image(tf.io.read_file(image_path))
# If PNG, remove the alpha channel. The model only supports
# images with 3 color channels.
if hr_image.shape[-1] == 4:
hr_image = hr_image[...,:-1]
hr_size = (tf.convert_to_tensor(hr_image.shape[:-1]) // 4) * 4
hr_image = tf.image.crop_to_bounding_box(hr_image, 0, 0, hr_size[0], hr_size[1])
hr_image = tf.cast(hr_image, tf.float32)
return tf.expand_dims(hr_image, 0)
def save_image(image, filename):
"""
Saves unscaled Tensor Images.
Args:
image: 3D image tensor. [height, width, channels]
filename: Name of the file to save to.
"""
if not isinstance(image, Image.Image):
image = tf.clip_by_value(image, 0, 255)
image = Image.fromarray(tf.cast(image, tf.uint8).numpy())
image.save("%s.jpg" % filename)
print("Saved as %s.jpg" % filename)
%matplotlib inline
def plot_image(image, title=""):
"""
Plots images from image tensors.
Args:
image: 3D image tensor. [height, width, channels].
title: Title to display in the plot.
"""
image = np.asarray(image)
image = tf.clip_by_value(image, 0, 255)
image = Image.fromarray(tf.cast(image, tf.uint8).numpy())
plt.imshow(image)
plt.axis("off")
plt.title(title)
###Output
_____no_output_____
###Markdown
Performing super-resolution on an image loaded from a path
###Code
hr_image = preprocess_image(IMAGE_PATH)
# Plotting Original Resolution image
plot_image(tf.squeeze(hr_image), title="Original Image")
save_image(tf.squeeze(hr_image), filename="Original Image")
model = hub.load(SAVED_MODEL_PATH)
start = time.time()
fake_image = model(hr_image)
fake_image = tf.squeeze(fake_image)
print("Time Taken: %f" % (time.time() - start))
# Plotting Super Resolution Image
plot_image(tf.squeeze(fake_image), title="Super Resolution")
save_image(tf.squeeze(fake_image), filename="Super Resolution")
###Output
_____no_output_____
###Markdown
Evaluating the performance of the model
###Code
!wget "https://lh4.googleusercontent.com/-Anmw5df4gj0/AAAAAAAAAAI/AAAAAAAAAAc/6HxU8XFLnQE/photo.jpg64" -O test.jpg
IMAGE_PATH = "test.jpg"
# Defining helper functions
def downscale_image(image):
"""
Scales down images using bicubic downsampling.
Args:
image: 3D or 4D tensor of preprocessed image
"""
image_size = []
if len(image.shape) == 3:
image_size = [image.shape[1], image.shape[0]]
else:
raise ValueError("Dimension mismatch. Can work only on single image.")
image = tf.squeeze(
tf.cast(
tf.clip_by_value(image, 0, 255), tf.uint8))
lr_image = np.asarray(
Image.fromarray(image.numpy())
.resize([image_size[0] // 4, image_size[1] // 4],
Image.BICUBIC))
lr_image = tf.expand_dims(lr_image, 0)
lr_image = tf.cast(lr_image, tf.float32)
return lr_image
hr_image = preprocess_image(IMAGE_PATH)
lr_image = downscale_image(tf.squeeze(hr_image))
# Plotting Low Resolution Image
plot_image(tf.squeeze(lr_image), title="Low Resolution")
model = hub.load(SAVED_MODEL_PATH)
start = time.time()
fake_image = model(lr_image)
fake_image = tf.squeeze(fake_image)
print("Time Taken: %f" % (time.time() - start))
plot_image(tf.squeeze(fake_image), title="Super Resolution")
# Calculating PSNR wrt Original Image
psnr = tf.image.psnr(
tf.clip_by_value(fake_image, 0, 255),
tf.clip_by_value(hr_image, 0, 255), max_val=255)
print("PSNR Achieved: %f" % psnr)
###Output
_____no_output_____
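###Markdown
As an optional extra check, SSIM can be computed alongside PSNR with `tf.image.ssim`; the sketch below assumes `hr_image` and `fake_image` from the cells above and that their spatial sizes match.
###Code
# Structural similarity between the original and the super-resolved image
ssim = tf.image.ssim(
    tf.clip_by_value(tf.squeeze(hr_image), 0, 255),
    tf.clip_by_value(fake_image, 0, 255),
    max_val=255)
print("SSIM Achieved: %f" % ssim)
###Output
_____no_output_____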
###Markdown
**Comparing the outputs side by side**
###Code
plt.rcParams['figure.figsize'] = [15, 10]
fig, axes = plt.subplots(1, 3)
fig.tight_layout()
plt.subplot(131)
plot_image(tf.squeeze(hr_image), title="Original")
plt.subplot(132)
fig.tight_layout()
plot_image(tf.squeeze(lr_image), "x4 Bicubic")
plt.subplot(133)
fig.tight_layout()
plot_image(tf.squeeze(fake_image), "Super Resolution")
plt.savefig("ESRGAN_DIV2K.jpg", bbox_inches="tight")
print("PSNR: %f" % psnr)
###Output
_____no_output_____ |
Bab3l.ipynb | ###Markdown
###Code
.--,-``-.
,---,. / / '. ,--,
,' .' \ ,---, / ../ ; ,--.'|
,---.' .' | ,---.'| \ ``\ .`- '| | :
| | |: | | | : \___\/ \ :: : '
: : : / ,--.--. : : : \ : || ' |
: | ; / \ : |,-. / / / ' | |
| : \.--. .-. || : ' | \ \ \ | | :
| | . | \__\/: . .| | / : ___ / : |' : |__
' : '; | ," .--.; |' : |: | / /\ / :| | '.'|
| | | ; / / ,. || | '/ :/ ,,/ ',- .; : ;
| : / ; : .' \ : |\ ''\ ; | , /
| | ,' | , .-./ \ / \ \ .' ---`-'
`----' `--`---' `-'----' `--`-,,-'
.
Results may vary. Godspeed.
A mobile-browser-friendly audio transcriber and text translator built using txtai
---
---
---
Hardware requirements: the ability to open a browser and an internet connection
(don't press play on this cell obv)
###Output
_____no_output_____
###Markdown
---------
###Code
#@title Press play and follow the prompts below. This will upload your audio file and convert filetype automatically. Proceed when finished. { display-mode: "form" }
#@markdown ---
from google.colab import files
import sys
import os
import re
import IPython
from IPython.display import clear_output
uploaded = files.upload()
filename = next(iter(uploaded))
print(uploaded)
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
#converting to 16khz .wav from input
!ffmpeg -i $filename -acodec pcm_s16le -ac 1 -ar 16000 out.wav
final_wav = 'out.wav'
#installing and initializing transcription and translation
!pip install git+https://github.com/neuml/txtai#egg=txtai[pipeline]
clear_output()
from txtai.pipeline import Transcription
from txtai.pipeline import Translation
# Create transcription model
transcribe = Transcription("facebook/wav2vec2-large-960h")
# Create translation model
translate = Translation()
clear_output()
#@title Please input the two letter language code for your desired translation output. { display-mode: "form" }
#@markdown Example: es for Espanol.
from IPython.display import Audio, display
text = transcribe("out.wav")
Language = "" #@param {type: "string"}
translated = translate(text, Language)
clear_output()
print('---------------------------------------------------------------')
print('---------------------------------------------------------------')
print('---------------------------------------------------------------')
display(Audio("out.wav"))
print('---------------------------------------------------------------')
print('---------------------------------------------------------------')
print('---------------------------------------------------------------')
print('Transcription:')
print('---------------------------------------------------------------')
print(text)
print('---------------------------------------------------------------')
print('---------------------------------------------------------------')
print('---------------------------------------------------------------')
print('Translated Text:')
print('---------------------------------------------------------------')
print(translated)
print('---------------------------------------------------------------')
print('---------------------------------------------------------------')
print('---------------------------------------------------------------')
print('---------------------------------------------------------------')
print('---------------------------------------------------------------')
#@title Press Play to download .txt file of transcription and translation to your device.
#@markdown This is recommended as translations are lost after browser closes. { display-mode: "form" }
with open('translated.txt', 'w') as f:
f.write('Original:\n')
f.write(text + "\n")
f.write('Translated:\n')
f.write(translated)
files.download('translated.txt')
###Output
_____no_output_____ |
Sentiment classification with Bi-LSTM.ipynb | ###Markdown
Making Word2Vec model from data
###Code
from konlpy.tag import Okt
import gensim
import torch
import torchvision
import numpy as np
import codecs
import os
os.chdir('C:\\Users\\korra\\Desktop\\BiLSTM')
def read_data(filename):
with open('./data/' + filename, encoding='utf-8') as f:
data = [line.split('\t') for line in f.read().splitlines()]
data = data[1:]
return data
train = read_data('ratings_train.txt')
test = read_data('ratings_test.txt')
tagger = Okt()
def tokenize(doc):
return ['/'.join(x) for x in tagger.pos(doc, norm=True, stem=True)]
# train Word2Vec model with skip-gram
tokens = [tokenize(row[1]) for row in train]
print(tokens)
model = gensim.models.Word2Vec(size=300, sg=1, min_alpha=0.025, seed=23)
model.build_vocab(tokens)
for epoch in range(30):
    model.train(tokens, total_examples=model.corpus_count, epochs=model.epochs)
model.alpha -= 0.002
model.min_alpha = model.alpha
model.save('Word2Vec.model')
model.most_similar('공포/Noun', topn=20)
###Output
_____no_output_____
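###Markdown
A quick illustrative sketch (same gensim API as above) showing how the saved model can be reloaded later and queried; the tagged token is simply the one used in the cell above.
###Code
# reload the saved skip-gram model and query it
loaded = gensim.models.Word2Vec.load('Word2Vec.model')
print('공포/Noun' in loaded.wv.vocab)               # membership check for a tagged token
print(loaded.wv.most_similar('공포/Noun', topn=5))  # nearest neighbours in the embedding space
###Output
_____no_output_____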
###Markdown
Sentiment analysis with Bi-directional LSTM The code below is written to be run in the Google Colab environment.
###Code
from google.colab import drive
drive.mount('/content/drive/')
## IMPORT MODELS
import gensim
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
import codecs
import os
import numpy as np
model = gensim.models.word2vec.Word2Vec.load(os.getcwd() + '/drive/My Drive/Colab Notebook/Word2Vec.model')
model.wv.most_similar('공포/Noun',topn = 20)
w2v = np.zeros((len(model.wv.vocab)-1, model.trainables.layer1_size))
with codecs.open("metadata.tsv",'w+',encoding='utf-8') as file_metadata:
for i,word in enumerate(model.wv.index2word[:len(model.wv.vocab)-1]):
w2v[i] = model.wv[word]
file_metadata.write(word + "\n")
sess = tf.InteractiveSession()
# Create embedding(2D tensor) which has our embeddings ##
with tf.device("/cpu:0"):
embedding = tf.Variable(w2v, trainable = False, name = 'embedding')
tf.global_variables_initializer().run()
path = 'word2vec'
saver = tf.train.Saver()
writer = tf.summary.FileWriter(path, sess.graph)
config = projector.ProjectorConfig()
embed = config.embeddings.add()
embed.tensor_name = 'embedding'
embed.metadata_path = os.getcwd() + '/drive/My Drive/Colab Notebook/data/metadata.tsv'
# Specify the width and height of a single thumbnail.
projector.visualize_embeddings(writer, config)
max_size = len(model.wv.vocab) - 1  # number of word vectors written to metadata.tsv above
saver.save(sess, path + '/model.ckpt', global_step=max_size)
###Output
_____no_output_____
###Markdown
Make Word2Vec and BiLSTM classes
###Code
# make Word2Vec as a class
class Word2Vec():
def tokenize(self, doc):
twitter = Okt()
return ['/'.join(t) for t in twitter.pos(doc, norm=True, stem=True)]
def read_data(self, filename):
with open(filename, 'r',encoding='utf-8') as f:
data = [line.split('\t') for line in f.read().splitlines()]
data = data[1:]
return data
def word2vec_model(self, model_name):
model = gensim.models.word2vec.Word2Vec.load(model_name)
return model
# Convert corpus to vectors
def convert2vec(self, model_name, doc):
word_vec = []
model = gensim.models.word2vec.Word2Vec.load(model_name)
for sent in doc:
sub = []
for word in sent:
if(word in model.wv.vocab):
sub.append(model.wv[word])
else:
sub.append(np.random.uniform(-0.25,0.25,300))
word_vec.append(sub)
return np.array(word_vec)
def zeropad(self, train_batch_X, batch_size, seq_maxlen, vector_size):
zero_pad = np.zeros((batch_size, seq_maxlen, vector_size))
for i in range(batch_size):
zero_pad[i,:np.shape(train_batch_X[i])[0],:np.shape(train_batch_X[i])[1]] = train_batch_X[i]
return zero_pad
def onehot(self, data):
index_dict = {value:index for index,value in enumerate(set(data))}
result = []
for value in data:
one_hot = np.zeros(len(index_dict))
index = index_dict[value]
one_hot[index] = 1
result.append(one_hot)
return np.array(result)
# make Bi-LSTM class
class Bi_LSTM():
def __init__(self, lstm_units, num_class, keep_prob):
self.lstm_units = lstm_units
# Define Bi_LSTM with tensorflow
with tf.variable_scope('forward', reuse = tf.AUTO_REUSE):
self.lstm_fw_cell = tf.nn.rnn_cell.LSTMCell(lstm_units, forget_bias=1.0, state_is_tuple=True)
self.lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(self.lstm_fw_cell, output_keep_prob = keep_prob)
with tf.variable_scope('backward', reuse = tf.AUTO_REUSE):
self.lstm_bw_cell = tf.nn.rnn_cell.LSTMCell(lstm_units, forget_bias=1.0, state_is_tuple=True)
self.lstm_bw_cell = tf.contrib.rnn.DropoutWrapper(self.lstm_bw_cell, output_keep_prob = keep_prob)
with tf.variable_scope('Weights', reuse = tf.AUTO_REUSE):
self.W = tf.get_variable(name="W", shape=[2 * lstm_units, num_class],
dtype=tf.float32, initializer = tf.contrib.layers.xavier_initializer())
self.b = tf.get_variable(name="b", shape=[num_class], dtype=tf.float32,
initializer=tf.zeros_initializer())
def logits(self, X, W, b, seq_len):
(output_fw, output_bw), states = tf.nn.bidirectional_dynamic_rnn(self.lstm_fw_cell, self.lstm_bw_cell,dtype=tf.float32,
inputs = X, sequence_length = seq_len)
# concat final states
outputs = tf.concat([states[0][1], states[1][1]], axis=1)
pred = tf.matmul(outputs, W) + b
return pred
def model_build(self, logits, labels, learning_rate = 0.001):
with tf.variable_scope("loss"):
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits = logits , labels = labels)) # Softmax loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss) # Adam Optimizer
return loss, optimizer
def graph_build(self):
self.loss = tf.placeholder(tf.float32)
self.acc = tf.placeholder(tf.float32)
tf.summary.scalar('Loss', self.loss)
tf.summary.scalar('Accuracy', self.acc)
merged = tf.summary.merge_all()
return merged
## For train data
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
W2V = Word2Vec()
train_data = W2V.read_data(os.getcwd() + "/drive/My Drive/Colab Notebook/data/ratings_train.txt")
test_data = W2V.read_data(os.getcwd() + "/drive/My Drive/Colab Notebook/data/ratings_test.txt")
# tokenize train and test data
print("="*10+"Start Tokenizing!\nPlease wait..."+"="*10)
train_tokens = [[W2V.tokenize(row[1]),int(row[2])] for row in train_data if W2V.tokenize(row[1]) != []]
train_tokens = np.array(train_tokens)
test_tokens = [[W2V.tokenize(row[1]),int(row[2])] for row in test_data if W2V.tokenize(row[1]) != []]
test_tokens = np.array(test_tokens)
print("="*10+"Tokenize Finished!"+"="*10)
train_X = train_tokens[:,0]
train_Y = train_tokens[:,1]
test_X = test_tokens[:,0]
test_Y = test_tokens[:,1]
train_Y_ = W2V.onehot(train_Y)
train_X_ = W2V.convert2vec(os.getcwd() + '/drive/My Drive/Colab Notebook/Word2Vec.model',train_X) ## import word2vec model where you have trained before
test_Y_ = W2V.onehot(test_Y)
test_X_ = W2V.convert2vec(os.getcwd() + '/drive/My Drive/Colab Notebook/Word2Vec.model',test_X) ## import word2vec model where you have trained before
# Define basic properties
batch_size = 32
vector_size = 300
train_seq_length = [len(x) for x in train_X]
test_seq_length = [len(x) for x in test_X]
max_seqlen = max(train_seq_length) ## 95
learning_rate = 0.001
lstm_units = 128
num_class = 2
training_epochs = 4
X = tf.placeholder(tf.float32, shape = [None, max_seqlen, vector_size], name = 'X')
Y = tf.placeholder(tf.float32, shape = [None, num_class], name = 'Y')
seq_len = tf.placeholder(tf.int32, shape = [None])
keep_prob = tf.placeholder(tf.float32, shape = None)
BiLSTM = Bi_LSTM(lstm_units, num_class, keep_prob)
with tf.variable_scope("loss", reuse = tf.AUTO_REUSE):
logits = BiLSTM.logits(X, BiLSTM.W, BiLSTM.b, seq_len)
loss, optimizer = BiLSTM.model_build(logits, Y, learning_rate)
prediction = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.global_variables_initializer()
total_batch = int(len(train_X) / batch_size)
test_batch = int(len(test_X) / batch_size)
print("Start training!")
model_name = os.getcwd() + '/drive/My Drive/Colab Notebook/BiLSTM_model.ckpt'
saver = tf.train.Saver()
train_acc = []
train_loss = []
test_acc = []
test_loss = []
with tf.Session(config = config) as sess:
start_time = time.time()
sess.run(init)
train_writer = tf.summary.FileWriter(os.getcwd() + '/drive/My Drive/Colab Notebook/data/Bidirectional_LSTM', sess.graph)
merged = BiLSTM.graph_build()
for epoch in range(training_epochs):
avg_acc, avg_loss = 0. , 0.
mask = np.random.permutation(len(train_X_))
train_X_ = train_X_[mask]
train_Y_ = train_Y_[mask]
for step in range(total_batch):
train_batch_X = train_X_[step*batch_size : step*batch_size+batch_size]
train_batch_Y = train_Y_[step*batch_size : step*batch_size+batch_size]
batch_seq_length = train_seq_length[step*batch_size : step*batch_size+batch_size]
train_batch_X = W2V.zeropad(train_batch_X, batch_size, max_seqlen, vector_size)
            sess.run(optimizer, feed_dict={X: train_batch_X, Y: train_batch_Y, seq_len: batch_seq_length, keep_prob : 0.75})
# Compute average loss
loss_ = sess.run(loss, feed_dict={X: train_batch_X, Y: train_batch_Y, seq_len: batch_seq_length, keep_prob : 0.75})
avg_loss += loss_ / total_batch
acc = sess.run(accuracy , feed_dict={X: train_batch_X, Y: train_batch_Y, seq_len: batch_seq_length, keep_prob : 0.75})
avg_acc += acc / total_batch
print("epoch : {:02d} step : {:04d} loss = {:.6f} accuracy= {:.6f}".format(epoch+1, step+1, loss_, acc))
summary = sess.run(merged, feed_dict = {BiLSTM.loss : avg_loss, BiLSTM.acc : avg_acc})
train_writer.add_summary(summary, epoch)
t_avg_acc, t_avg_loss = 0., 0.
print("Test batch could take few minutes")
for step in range(test_batch):
test_batch_X = test_X_[step*batch_size : step*batch_size+batch_size]
test_batch_Y = test_Y_[step*batch_size : step*batch_size+batch_size]
batch_seq_length = test_seq_length[step*batch_size : step*batch_size+batch_size]
test_batch_X = W2V.zeropad(test_batch_X, batch_size, max_seqlen, vector_size)
# Compute average loss
loss2 = sess.run(loss, feed_dict={X: test_batch_X, Y: test_batch_Y, seq_len: batch_seq_length, keep_prob : 1.0})
t_avg_loss += loss2 / test_batch
t_acc = sess.run(accuracy , feed_dict={X: test_batch_X, Y: test_batch_Y, seq_len: batch_seq_length, keep_prob : 1.0})
t_avg_acc += t_acc / test_batch
print("<Train> Loss = {:.6f} Accuracy = {:.6f}".format(avg_loss, avg_acc))
print("<Test> Loss = {:.6f} Accuracy = {:.6f}".format(t_avg_loss, t_avg_acc))
train_loss.append(avg_loss)
train_acc.append(avg_acc)
test_loss.append(t_avg_loss)
test_acc.append(t_avg_acc)
train_loss = pd.DataFrame({"train_loss":train_loss})
train_acc = pd.DataFrame({"train_acc":train_acc})
test_loss = pd.DataFrame({"test_loss":test_loss})
test_acc = pd.DataFrame({"test_acc":test_acc})
df = pd.concat([train_loss,train_acc,test_loss,test_acc], axis = 1)
df.to_csv(os.getcwd() + '/drive/My Drive/Colab Notebook/data/loss_accuracy.csv', sep =",", index=False)
train_writer.close()
duration = time.time() - start_time
minute = int(duration / 60)
second = int(duration) % 60
print("{}minutes {}seconds".format(minute,second))
save_path = saver.save(sess, model_name)
# For test data
test_size = len(test_X)
test_batch = int(test_size / batch_size)
# keep_prob is the dropout placeholder defined above; we feed 1.0 for it at evaluation time
model_name = os.getcwd() + "/drive/My Drive/Colab Notebook/BiLSTM_model.ckpt"
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(init)
saver.restore(sess, model_name) # load the variables from disk.
print("model restored")
total_acc = 0
for step in range(test_batch):
test_batch_X = test_X_[step*batch_size : step*batch_size+batch_size]
test_batch_Y = test_Y_[step*batch_size : step*batch_size+batch_size]
        batch_seq_length = test_seq_length[step*batch_size : step*batch_size+batch_size]
test_batch_X = W2V.zeropad(test_batch_X, batch_size, max_seqlen, vector_size)
        acc = sess.run(accuracy , feed_dict={X: test_batch_X, Y: test_batch_Y, seq_len: batch_seq_length, keep_prob : 1.0})
print("step :{0} Accuracy :{1}".format(step+1,acc))
total_acc += acc/test_batch
print("Total Accuracy : {}".format(total_acc))
###Output
_____no_output_____
###Markdown
Music generator
###Code
from subprocess import Popen, PIPE
import magenta
import os
class MagentaMusic(object):
def __init__(self, input_midi_dir, output_dir, model_name):
self.input_midi_dir = input_midi_dir
self.output_dir = output_dir
self.model_name = model_name
self.default_notesequences_dir = "/magenta/midi_result/notesequences_{0}.tfrecord".format(self.model_name)
def create_dataset(self):
magenta.scripts.convert_dir_to_note_sequences.convert_directory(self.input_midi_dir, self.default_notesequences_dir, recursive=True)
def train(self, config="attention_rnn", batch_size=64, rnn_layer_sizes=[64,64], num_training_steps=20000):
param = ["C://Anaconda3/envs/base_3.6/python", "C://Anaconda3/envs/base_3.6/Lib/site-packages/magenta/models/melody_rnn/melody_rnn_train.py",
"--config="+config,
"--run_dir=/magenta/melody_rnn/logdir/run_"+self.model_name,
"--sequence_example_file="+self.default_notesequences_dir,
"--hparams=batch_size={0},rnn_layer_sizes={1}".format(batch_size, str(rnn_layer_sizes).replace(" ", "")),
"--num_training_steps={0}".format(num_training_steps)
]
melody_rnn_train = Popen(param, shell=True, stdout=PIPE, stderr=PIPE)
(stdoutdata, stderrdata) = melody_rnn_train.communicate()
return stdoutdata.decode("cp949"), stderrdata.decode("cp949")
def generate(self, config="attention_rnn", num_outputs=10, num_steps=128, batch_size=64, rnn_layer_sizes=[64,64], primer_melody=[60]):
param = ["C://Anaconda3/envs/base_3.6/python", "C://Anaconda3/envs/base_3.6/Lib/site-packages/magenta/models/melody_rnn/melody_rnn_generate.py",
"--config="+config,
"--run_dir=/magenta/melody_rnn/logdir/run_"+self.model_name,
"--output_dir="+self.output_dir,
"--num_outputs={0}".format(num_outputs),
"--num_steps={0}".format(num_steps),
"--hparams=batch_size={0},rnn_layer_sizes={1}".format(batch_size, str(rnn_layer_sizes).replace(" ", "")),
"--primer_melody={0}".format(primer_melody)
]
melody_rnn_generate = Popen(param, shell=True, stdout=PIPE, stderr=PIPE)
(stdoutdata, stderrdata) = melody_rnn_generate.communicate()
return stdoutdata.decode("cp949"), stderrdata.decode("cp949")
###Output
_____no_output_____
###Markdown
Train MIDI
###Code
# init Magenta music
mm_low = MagentaMusic("/magenta/midi/scale/classic/low", "/magenta/melody_rnn/generated/low", "low")
mm_high = MagentaMusic("/magenta/midi/scale/classic/high", "/magenta/melody_rnn/generated/high", "high")
# change MIDI to notesequences
mm_low.create_dataset()
mm_high.create_dataset()
# train melodyRNN
mm_low.train()
mm_high.train()
###Output
_____no_output_____
###Markdown
Sentiment classification
###Code
sess = tf.Session()
sess.run(init)
saver.restore(sess, model_name)
def predict(sentence):
tokens = W2V.tokenize(sentence)
    embedding = W2V.convert2vec(os.getcwd() + "/drive/My Drive/Colab Notebook/Word2Vec.model", [tokens])  # wrap tokens as a one-sentence batch
    zero_pad = W2V.zeropad(embedding, 1, max_seqlen, vector_size)  # batch of a single example
    global sess
    result = sess.run(tf.argmax(prediction,1), feed_dict = {X: zero_pad, seq_len: [len(tokens)], keep_prob : 1.0})
if(result == 1):
print("Positive")
mm_high.generate(num_steps=256, primer_melody=[70])
else:
print("Negative")
mm_low.generate(num_steps=64, primer_melody=[45])
while True:
sentence = input("Enter sentence: ")
if(sentence == ''): break
else: predict(sentence)
mm = MagentaMusic("/magenta/midi/scale/classic/low", "/magenta/melody_rnn/generated/low", "low")
mm.train()
mm.generate()
###Output
_____no_output_____ |
spark-churn/Sparkify-local.ipynb | ###Markdown
Sparkify Project
###Code
# import libraries
from pyspark.sql import SparkSession, Window
import pyspark.sql.functions as F
from pyspark.sql.functions import col, udf
from pyspark.sql.types import IntegerType, FloatType
from pyspark.ml.stat import Correlation
from pyspark.ml import Pipeline
from pyspark.ml.feature import VectorAssembler, StandardScaler
from pyspark.ml.evaluation import BinaryClassificationEvaluator, Evaluator
from pyspark.ml.classification import RandomForestClassifier, GBTClassifier
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
import time
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# create a Spark session
spark = SparkSession.builder\
.master("local")\
.appName('Sparkify')\
.getOrCreate()
spark
###Output
_____no_output_____
###Markdown
Load and Clean Dataset We use a tiny subset (128MB) of the full dataset available (12GB) for local development. Mini dataset: `s3n://udacity-dsnd/sparkify/mini_sparkify_event_data.json`; Full dataset: `s3n://udacity-dsnd/sparkify/sparkify_event_data.json`
###Code
# Uncomment to download
# import requests
# url = 'https://udacity-dsnd.s3.amazonaws.com/sparkify/mini_sparkify_event_data.json'
# def download_file(url):
# local_filename = url.split('/')[-1]
# with requests.get(url, stream=True) as r:
# r.raise_for_status()
# with open(local_filename, 'wb') as f:
# for chunk in r.iter_content(chunk_size=8192):
# f.write(chunk)
# return local_filename
# download_file(url)
event_data = "mini_sparkify_event_data.json"
df = spark.read.json(event_data)
# Check schema and column types
df.printSchema()
df.head(5)
df.limit(5).toPandas()
print('Number of rows mini dataset', df.count())
print('Number of columns mini dataset', len(df.columns))
print('Number of duplicated rows in mini dataset', df.count() - df.dropDuplicates().count())
###Output
Number of rows mini dataset 286500
Number of columns mini dataset 18
Number of duplicated rows in mini dataset 0
###Markdown
Let's check our data for numerical columns:
###Code
numCols = [col[0] for col in df.dtypes if not col[1]=='string']; numCols
df.select(numCols).describe().show()
###Output
+-------+------------------+-----------------+--------------------+-----------------+------------------+--------------------+
|summary| itemInSession| length| registration| sessionId| status| ts|
+-------+------------------+-----------------+--------------------+-----------------+------------------+--------------------+
| count| 286500| 228108| 278154| 286500| 286500| 286500|
| mean|114.41421291448516|249.1171819778458|1.535358834084427...|1041.526554973822|210.05459685863875|1.540956889810483...|
| stddev|129.76726201140994|99.23517921058361| 3.291321616327586E9|726.7762634630741| 31.50507848842214|1.5075439608226302E9|
| min| 0| 0.78322| 1521380675000| 1| 200| 1538352117000|
| max| 1321| 3024.66567| 1543247354000| 2474| 404| 1543799476000|
+-------+------------------+-----------------+--------------------+-----------------+------------------+--------------------+
###Markdown
For some of the information above (`registration`, `sessionId`, `status`, `ts`) it doesn't make sense to analyze the values as plain numbers. We will dive deeper in the Exploratory Data Analysis section of this notebook. Let's check our data for categorical columns:
###Code
text_cols = [col[0] for col in df.dtypes if col[1]=='string']; text_cols
###Output
_____no_output_____
###Markdown
Although not expected from inspecting the data, `userId` is actually a string. We check other information about our text data (we are not interested in `firstName` and `lastName`):
###Code
dist_artists = df.select(F.countDistinct('artist').alias('numberOfDistinctArtists')).withColumn("id", F.monotonically_increasing_id())
dist_songs = df.select(['song','artist']).groupBy('song').agg(F.countDistinct('artist').alias('countDistinctArtists')).\
select(F.sum('countDistinctArtists').alias('numberOfDistinctSongs')).withColumn("id", F.monotonically_increasing_id())
dist_user_agents = df.select(F.countDistinct('userAgent').alias('numberOfuserAgents')).withColumn("id", F.monotonically_increasing_id())
dist_locations = df.select(F.countDistinct('location').alias('numberOfDistinctLocations')).withColumn("id", F.monotonically_increasing_id())
text_cols_info = dist_artists.join(dist_songs, "id", "outer")\
.join(dist_user_agents, "id", "outer")\
    .join(dist_locations, "id", "outer").drop('id')
text_cols_info.show()
###Output
+-----------------------+---------------------+------------------+-------------------------+
|numberOfDistinctArtists|numberOfDistinctSongs|numberOfuserAgents|numberOfDistinctLocations|
+-----------------------+---------------------+------------------+-------------------------+
| 17655| 65416| 56| 114|
+-----------------------+---------------------+------------------+-------------------------+
###Markdown
Sparkify's mini dataset contains 17,655 different artists, 65,416 songs, and users in 114 different locations who use 56 different types of devices/software to access the app.
###Code
# We calculate value counts for:
text_cols_value_counts = ['auth',
'gender',
'level',
'method',
'page',
'status']
for column in text_cols_value_counts:
df.select(column).groupBy(column).count().orderBy('count').show(30, truncate=False)
###Output
+----------+------+
|auth |count |
+----------+------+
|Cancelled |52 |
|Guest |97 |
|Logged Out|8249 |
|Logged In |278102|
+----------+------+
+------+------+
|gender|count |
+------+------+
|null |8346 |
|M |123576|
|F |154578|
+------+------+
+-----+------+
|level|count |
+-----+------+
|free |58338 |
|paid |228162|
+-----+------+
+------+------+
|method|count |
+------+------+
|GET |25436 |
|PUT |261064|
+------+------+
+-------------------------+------+
|page |count |
+-------------------------+------+
|Submit Registration |5 |
|Register |18 |
|Cancel |52 |
|Cancellation Confirmation|52 |
|Submit Downgrade |63 |
|Submit Upgrade |159 |
|Error |258 |
|Save Settings |310 |
|Upgrade |499 |
|About |924 |
|Settings |1514 |
|Help |1726 |
|Downgrade |2055 |
|Thumbs Down |2546 |
|Logout |3226 |
|Login |3241 |
|Roll Advert |3933 |
|Add Friend |4277 |
|Add to Playlist |6526 |
|Thumbs Up |12551 |
|Home |14457 |
|NextSong |228108|
+-------------------------+------+
+------+------+
|status|count |
+------+------+
|404 |258 |
|307 |26430 |
|200 |259812|
+------+------+
###Markdown
Loading and cleaning the dataset, and checking for invalid or missing data, for example records without userIds or sessionIds.
###Code
#Checking if there are NaNs
df.select([F.count(F.when(F.isnan(c), c)).alias(c+'IsNan') for c in df.columns]).toPandas()
###Output
_____no_output_____
###Markdown
No NaNs in the mini dataset.
###Code
#Checking if there are null values
df.select([F.count(F.when(col(c).isNull(), c)).alias(c+'IsNull') for c in df.columns]).toPandas()
###Output
_____no_output_____
###Markdown
Apparently the missing data is correlated (missing counts of columns consistently appear as either 8,346 or 58,392 null values). Let's check how the missing values are correlated:
###Code
# Check null values: 1 is null and 0 not null
df_is_null = df.select([F.when(col(c).isNull(), 1).otherwise(0).alias(c) for c in df.columns])
df_is_null_describe = df_is_null.describe()
df_is_null_describe = df_is_null_describe.filter(
(df_is_null_describe['summary']=='stddev') |
(df_is_null_describe['summary'] == 'max')
)
# Handle the std equals to zero (all values are the same) and without any null value
zero_std_max_cols = [col for col in df_is_null_describe.columns if df_is_null_describe.select(F.collect_list(col)).head().asDict()['collect_list('+col+')'] == ['0.0', '0']]
# Drop all columns with Standard Deviation equals zero and no missing values
df_is_null = df_is_null.drop(*zero_std_max_cols)
# Create vectors
assembler = VectorAssembler(inputCols=df_is_null.columns, outputCol='vector')
assembled = assembler.transform(df_is_null).drop(*df_is_null.columns)
# Calculate and print Pearson correlation matrix for missing values
pearson_corr = Correlation.corr(assembled, 'vector').head()
pearson_corr = pd.DataFrame(data=pearson_corr[0].toArray(), columns=df_is_null.columns, index=df_is_null.columns)
fig, ax = plt.subplots(figsize=(8, 10))
sns.heatmap(pearson_corr, ax=ax, annot=True);
###Output
_____no_output_____
###Markdown
When there's a null in the `artist` column, `length` and `song` are also null. Hence this data is likely related, and `length` appears to be the length of the song in seconds. Similarly, the user-related columns go missing together: when a null appears in any of `firstName`, `lastName`, `gender`, `location`, `userAgent` or `registration`, the others are null too. The `registration` column seems to be the timestamp of when a user registered in the application. Let's check `userId`:
###Code
# userId is string and shold be always with length greater than 0
df.select('userId', F.length(col('userId')).alias('userIdLength')).distinct().orderBy(col('userIdLength')).show(5)
# number of records with userId equal to ''
df.filter(df.userId=='').count()
###Output
_____no_output_____
###Markdown
In the mini dataset there are 8,346 records with `userId` equal to `''` (the length of the userId string is zero). Perhaps those records belong to users who have not signed up yet, or who are signed out and about to log in. We'll drop them from our dataframe (so that the analysis of individual users makes sense):
###Code
# Drop userId equals to ''
df = df.filter(df.userId!='')
# Mini dataset has 286,500 rows. Hence we expect 286,500 - 8,346 = 278,154
df.count()
print(f"There are {df.select('userId').distinct().count()} users in the mini dataset.")
###Output
There are 225 users in the mini dataset.
###Markdown
Exploratory Data Analysis Define ChurnWe create a column `Churn` to use as the label for our model. We choose the `Cancellation Confirmation` events to define the churn, which happen for both paid and free users. We also analyze the `Downgrade` events.
###Code
# Create Churn column
churner_ids = df.select('userId').where(col('page')=='Cancellation Confirmation').toPandas()
churner_ids = churner_ids.values.squeeze().tolist()
print('churner_ids:', churner_ids)
print('\nTotal churners in mini dataset:', len(churner_ids))
all_ids = df.select('userId').distinct().toPandas()
all_ids = all_ids.values.squeeze().tolist()
print('Total distinct users in mini dataset:', len(all_ids))
not_churner_ids = np.setdiff1d(all_ids, churner_ids).tolist()
print('not_churner_ids:', not_churner_ids)
print('\nTotal churners in mini dataset:', len(not_churner_ids))
is_churn = udf(lambda usrIdCol: 1 if usrIdCol in churner_ids else 0, IntegerType())
df = df.withColumn('churn', is_churn('userId'))
df.limit(5).toPandas()
###Output
_____no_output_____
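###Markdown
As a side note, the same flag can be computed without collecting the IDs to the driver, using a window aggregation over each user's events; a minimal sketch assuming `df` as defined above:
###Code
# Alternative churn flag: max of a 0/1 indicator over all events of each user
churn_window = Window.partitionBy('userId')
df_alt = df.withColumn(
    'churnAlt',
    F.max(F.when(col('page') == 'Cancellation Confirmation', 1).otherwise(0)).over(churn_window))
# Sanity check: both definitions should agree on every event (expected count: 0)
df_alt.filter(col('churn') != col('churnAlt')).count()
###Output
_____no_output_____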
###Markdown
Explore Data Analyzing the behavior of users who stayed vs. users who churned. We explore aggregates on these two groups of users, observing how much of a specific action they experienced per a certain time unit or number of songs played. Without knowing much about the business nor the actual application, the following are possible reasons why users cancel the service:
1. the user is not using Sparkify
2. Sparkify doesn't have specific songs/artists
3. bad recommendations
4. bad experience (streaming getting stuck, interface not intuitive in a version of the app, too many ads in the app, etc.)
5. bugs (commands not responding or not doing what they should, crashes in the app, etc.)
6. Sparkify is too expensive
7. the user doesn't have friends using Sparkify
8. external influences (news about the company benefits/harms its image, the country where users live imposes limits, cost increases, etc.)

From the hypotheses above:
- `1.` we can check by counting the number of interactions in a given timeframe
- `2.` we cannot check directly, since we don't have logs for search queries, which would be better (showing whether a search query returned exactly what the user wanted)
- `3.` we have the Add to Playlist, Thumbs Down and Thumbs Up `pages`, which could indicate the quality of recommendations
- `4.` and `5.` we don't have application logs, which could indicate loading times or interrupted streaming. However, `status` can give us some information about the application, along with the Error and Help `pages`. In addition, `userAgent` can also give us some information about Sparkify applications that are not behaving as expected (Windows, MacOS, specific browser, platform, etc.). The Roll Advert `page` indicates advertising events and whether they affect the user experience too much (whether we could reduce them or emphasize that the user can upgrade the plan)
- `6.` as "expensive" is an ambiguous definition that depends on many factors, with the given dataset we won't be able to infer anything
- `7.` the Add Friend `page` could indicate that friends are also using the app (in the provided dataset we don't have relationships between users, which would be better)
- `8.` again, we would need more data related to the business and context to infer anything here

We check the data to answer those questions and compare customers who churn with those who don't:
###Code
not_churn_users = df.filter(col('churn')==0)
churn_users = df.filter(col('churn')==1)
num_churn_users = churn_users.select('userId').distinct().count()
num_not_churn_users = not_churn_users.select('userId').distinct().count()
# Sanity check (there should be 225 users in total in the mini dataset)
print('Number of users who churn:', num_churn_users)
print("Number of users who don't churns", num_not_churn_users)
print('Total (should be 225 users):', num_churn_users + num_not_churn_users)
###Output
Number of users who churn: 52
Number of users who don't churn: 173
Total (should be 225 users): 225
###Markdown
As we see, there is class imbalance in the mini dataset, with ~25% of users who churn and ~75% of users who don't. Some analysis
###Code
# How many songs do users listen to on average between visiting our home page (calculation shown in the Udacity course)
fun = udf(lambda ishome : int(ishome == 'Home'), IntegerType())
user_window = Window \
.partitionBy('userID') \
.orderBy(F.desc('ts')) \
.rangeBetween(Window.unboundedPreceding, 0)
cusum_churn = churn_users.filter((col('page') == 'NextSong') | (col('page') == 'Home')) \
.select('userID', 'page', 'ts') \
.withColumn('homevisit', fun(col('page'))) \
.withColumn('period', F.sum('homevisit').over(user_window))
cusum_churn = cusum_churn.filter((col('page') == 'NextSong')) \
.groupBy('userID', 'period') \
.agg({'period':'count'}) \
.agg({'count(period)':'avg'}).withColumnRenamed('avg(count(period))', 'churnAvg(count(period))') \
.withColumn("id", F.monotonically_increasing_id())
cusum_not_churn = not_churn_users.filter((col('page') == 'NextSong') | (col('page') == 'Home')) \
.select('userID', 'page', 'ts') \
.withColumn('homevisit', fun(col('page'))) \
.withColumn('period', F.sum('homevisit').over(user_window))
cusum_not_churn = cusum_not_churn.filter((col('page') == 'NextSong')) \
.groupBy('userID', 'period') \
.agg({'period':'count'}) \
.agg({'count(period)':'avg'}).withColumnRenamed('avg(count(period))', 'notChurnAvg(count(period))') \
.withColumn("id", F.monotonically_increasing_id())
result = cusum_churn.join(cusum_not_churn, "id", "outer").drop('id')
result.show()
###Output
+-----------------------+--------------------------+
|churnAvg(count(period))|notChurnAvg(count(period))|
+-----------------------+--------------------------+
| 22.6612702366127| 23.79175974187143|
+-----------------------+--------------------------+
###Markdown
As we might have expected, in the mini dataset the number of songs users listen to on average between visits to our home page is higher in the not-churn group than in the group of users who churn (they use the Sparkify app more, which could indicate they like it more).
###Code
# Calculating number of songs played given time frame
def add_time_columns(df):
# Add hour column
get_hour = udf(lambda x: int(datetime.fromtimestamp(x / 1000.0).hour), IntegerType())
df_time = df.withColumn('hour', get_hour(df.ts))
songs_in_hour = df_time.filter(df_time.page == 'NextSong').groupby('hour').count().orderBy('hour')
songs_in_hour_pd = songs_in_hour.toPandas()
# Add weekday column
get_weekday = udf(lambda x: int(datetime.fromtimestamp(x / 1000.0).weekday()), IntegerType())
df_time = df_time.withColumn('weekday', get_weekday(df_time.ts))
songs_in_weekday = df_time.filter(df_time.page == 'NextSong').groupby('weekday').count().orderBy('weekday')
songs_in_weekday_pd = songs_in_weekday.toPandas()
songs_in_weekday_pd.weekday = songs_in_weekday_pd.weekday.map({0: 'Mon', 1: 'Tue', 2: 'Wed', 3: 'Thu', 4: 'Fri', 5: 'Sat', 6: 'Sun'})
return songs_in_hour_pd, songs_in_weekday_pd
churn_songs_hour, churn_songs_weekday = add_time_columns(churn_users)
not_churn_songs_hour, not_churn_songs_weekday = add_time_columns(not_churn_users)
fig, ax = plt.subplots(1,2, figsize=(12,5))
ax[0].scatter(churn_songs_hour['hour'], churn_songs_hour['count']/num_churn_users, label='churn')
ax[0].scatter(not_churn_songs_hour['hour'], not_churn_songs_hour['count']/num_not_churn_users, label='not churn')
ax[0].set_xlim(-1, 24)
ax[0].set_ylim(0, 1.2 * max(not_churn_songs_hour["count"]/num_not_churn_users))
ax[0].set_xlabel("Hour")
ax[0].set_ylabel("Songs played per user")
ax[0].set_title("Songs played by Hour")
ax[0].legend(loc='best')
ax[1].scatter(churn_songs_weekday['weekday'], churn_songs_weekday['count']/num_churn_users, label='churn')
ax[1].scatter(not_churn_songs_weekday['weekday'], not_churn_songs_weekday['count']/num_not_churn_users, label='not churn')
ax[1].set_xlim(-0.5, 6.5)
ax[1].set_ylim(0, 1.2 * max(not_churn_songs_weekday["count"]/num_not_churn_users))
ax[1].set_xlabel("Week day")
ax[1].set_ylabel("Songs played per user")
ax[1].set_title("Songs played by Week day")
ax[1].legend(loc='best')
fig.tight_layout();
###Output
_____no_output_____
###Markdown
Users that churn and those who don't behave similarly w.r.t. the time, however playing less songs in the churn group.
###Code
# How the number of interactions change over time, since user registrates
earliest = df.select('ts').orderBy(col('ts')).head()
latest = df.select('ts').orderBy(col('ts'), ascending=False).head()
print(f'Earliest record in mini dataset is {datetime.fromtimestamp(earliest.ts / 1000.0)}')
print(f'Latest record in mini dataset is {datetime.fromtimestamp(latest.ts / 1000.0)}')
# Count actions per user per day
# We randomly select 1 user who have churned and 1 user who haven't churned for comparison
churner = np.random.choice(churner_ids, size=1, replace=False).tolist()
not_churner = np.random.choice(not_churner_ids, size=1, replace=False).tolist()
def get_actions_by_day(df, ids):
actions = df.where(df.userId.isin(ids))\
.withColumn('day', F.date_trunc('day', F.from_unixtime(col('ts')/1000)))\
.groupBy('userId', 'day', 'page').agg({'page': 'count'})\
.orderBy('userId','day', 'page')
# We want each line to be a day, and columns for counts of each page action
actions = actions.groupBy(col('userId'), col('day')).pivot('page')\
.agg(F.first('count(page)')).drop('page').orderBy('userId','day')
    # In order to compare users, we convert the calendar day into a running day count since each user's first interaction
first_interactions = actions.select('userId', 'day').groupBy('userId').agg({'day': 'min'})
actions = actions.join(first_interactions, on='userId').withColumn('runningDaysFromFirstInteration', F.datediff('day',col('min(day)'))).drop('min(day)')
# Fill nulls with zeros (no actions of that type in the day)
actions = actions.fillna(0)
return actions
churner_actions = get_actions_by_day(df, churner)
not_churner_actions = get_actions_by_day(df, not_churner)
churner_actions_pd = churner_actions.toPandas()
not_churner_actions_pd = not_churner_actions.toPandas()
churner_actions_pd['churn'] = pd.Series(np.ones(churner_actions_pd.shape[0]))
not_churner_actions_pd['churn'] = pd.Series(np.zeros(not_churner_actions_pd.shape[0]))
churner_actions_pd.drop('day', axis=1, inplace=True)
not_churner_actions_pd.drop('day', axis=1, inplace=True)
actions = pd.concat([churner_actions_pd, not_churner_actions_pd])
cols = churner_actions_pd.columns[1:-2]
ax_i = int(np.ceil(cols.shape[0]/5))
ax_j = 5
fig, ax = plt.subplots(ax_i, ax_j, figsize=(26,16))
for i in range(ax_i):
for j in range(ax_j):
sns.lineplot(x='runningDaysFromFirstInteration', y=cols[j + 5*i], hue='churn', data=actions, ax=ax[i][j])
ax[i][j].set_title(f'Action "{cols[j + 5*i]}" vs day')
if (j + 5*i) == len(cols)-1:
break
fig.tight_layout();
# Users who Downgrade Their Accounts
# We find when users downgrade their accounts and then flag those log entries.
# Then we use a window function to create 2 phases (0 for pre-downgrade 1 for pos-downgrade) using a cumulative sum for each user.
flag_downgrade_event = udf(lambda x: 1 if x == 'Submit Downgrade' else 0, IntegerType())
df_downgraded = df.withColumn('downgraded', flag_downgrade_event('page'))
windowval = Window.partitionBy('userId').orderBy(F.desc('ts')).rangeBetween(Window.unboundedPreceding, 0)
df_downgraded = df_downgraded.withColumn('phase', F.sum('downgraded').over(windowval))
# Taking userId 12 as example
df_downgraded.select(["userId", "firstname", "ts", "page", "level", "downgraded", "phase"]).where(col('userId') == "12").sort(F.desc("ts"))\
.toPandas().iloc[750:765,:]
###Output
_____no_output_____
###Markdown
Feature Engineering We need to make each row of our dataset correspond to a user. Possible features (considering the whole account lifetime, not aggregations of recent vs. distant events):
- artist: distinct artists listened to
- song: distinct songs listened to
- length: average length of songs listened to
- gender: one-hot encode (M/F)
- itemInSession: average items in session
- sessionId: can be used to calculate the average length of sessions
- level: one-hot encode (Free/Paid)
- location: percentage of interactions of the user at a specific location
- page: counts by page type
- status: counts by status code
- userAgent: percentage of interactions of the user using a specific user agent (device, system information, platform, etc.)
- accountLifeTime: how long a user has had an account, from the first interaction until the present or the moment he/she cancels the account
###Code
# Helper functions for cleaning and calculating percentages
def get_mappings(cols, prefix):
mapping = dict()
for i, c in enumerate(cols):
mapping[prefix+str(i)] = c
return mapping
def calculate_perc(percentage_df , mappings):
for new_name, existing_name in mappings.items():
percentage_df = percentage_df.withColumnRenamed(existing_name, new_name)
percentage_df = percentage_df.withColumn(new_name, col(new_name)/col('userTotalInterations'))
percentage_df = percentage_df.drop('userTotalInterations')
return percentage_df
def compute_features(df):
'''
Function for computing features.
Parameters
----------
input: Spark dataframe with schema of raw dataset and data
Returns
-------
df_features: Spark dataframe with computed features
location_mappings: dict for encoded locations
sys_agent_mappings: dict for encoded system information from user-agent
plat_agent_mappings: dict for encoded platform information from user-agent
'''
# Create `day` column for aggregating days and keeping information about month and year
df_features = df
# print('df_features RAW shape (row, cow)', df_features.count(), len(df_features.columns))
df_features = df_features.withColumn('day', F.date_trunc('day', F.from_unixtime(col('ts')/1000)))
# Create `userAgentSystemInformation` and `userAgentPlatform` columns for retrieving separate information from `userAgent`
df_features = df_features.withColumn('userAgentSystemInformation', F.regexp_extract(col('userAgent'),'(?<=\().+?(?=\))',0))
df_features = df_features.withColumn('userAgentPlatform', F.regexp_extract(col('userAgent'),'(?<=\)).+',0))
df_features = df_features.drop('userAgent')
# Intermediate DF to calculate counts of actions by page type, per user
page_counts = df_features.groupBy('userId', 'page')\
.agg({'page': 'count'})\
.groupBy(col('userId')).pivot('page')\
.agg(F.first('count(page)')).drop('page')\
.fillna(0)
# Intermediate DF to calculate average length of user sessions
session_avg_length = df_features.groupby('userId', 'sessionId')\
.agg(
F.min(col('ts')).alias('startSession'),
F.max(col('ts')).alias('endSession')
)\
.groupby('userId')\
.agg(
F.avg(col('endSession')-col('startSession')).alias('avgSessionLength')
)
# Intermediate DF to calculate percentage of interactions at specific location, per user
location_percentage = df_features.groupBy('userId', 'location').agg({'location': 'count'})
total_interations = df_features.groupBy('userId').agg(F.count('userId').alias('userTotalInterations'))
location_percentage = location_percentage.groupBy(col('userId')).pivot('location')\
.agg(F.first('count(location)'))\
.fillna(0).join(total_interations,on='userId').drop('location')
location_cols = location_percentage.columns
location_cols.remove('userId')
location_cols.remove('userTotalInterations')
# Deal with bad column names
location_mappings = get_mappings(location_cols, 'location_')
location_percentage = calculate_perc(location_percentage , location_mappings)
# Intermediate DF to calculate percentage of interactions using specific user-agent system information, per user
countSysInfo = df_features.groupBy('userId', 'userAgentSystemInformation')\
.agg(
F.count('userAgentSystemInformation').alias('sysInfoCount')
)
total = df_features.groupBy('userId').agg(F.count('userId').alias('userTotalInterations'))
countSysInfo = countSysInfo.groupBy(col('userId')).pivot('userAgentSystemInformation')\
.agg(F.first('sysInfoCount'))\
.fillna(0).join(total,on='userId').drop('userAgentSystemInformation')
sys_cols = countSysInfo.columns
sys_cols.remove('userId')
sys_cols.remove('userTotalInterations')
# Deal with bad column names
sys_agent_mappings = get_mappings(sys_cols, 'sys_agent_')
percentage_sys_info = calculate_perc(countSysInfo , sys_agent_mappings)
# Intermediate DF to calculate percentage of interactions using specific user-agent platform information, per user
countPlat = df_features.groupBy('userId', 'userAgentPlatform')\
.agg(
F.count('userAgentPlatform').alias('platformCount')
)
countPlat = countPlat.groupBy(col('userId')).pivot('userAgentPlatform')\
.agg(F.first('platformCount'))\
.fillna(0).join(total,on='userId').drop('userAgentPlatform')
plat_cols = countPlat.columns
plat_cols.remove('userId')
plat_cols.remove('userTotalInterations')
# Deal with bad column names
plat_agent_mappings = get_mappings(plat_cols, 'plat_agent_')
percentage_plat = calculate_perc(countPlat , plat_agent_mappings)
# print('page_counts shape (row, cow)', page_counts.count(), len(page_counts.columns))
# print('session_avg_length shape (row, cow)', session_avg_length.count(), len(session_avg_length.columns))
# print('location_percentage shape (row, cow)', location_percentage.count(), len(location_percentage.columns))
# print('percentageSysInfo shape (row, cow)', percentage_sys_info.count(), len(percentage_sys_info.columns))
# print('percentagePlat shape (row, cow)', percentage_plat.count(), len(percentage_plat.columns))
df_features = df_features.groupby('userId')\
.agg(
F.countDistinct('artist').alias('distinctArtistsListened'),
F.countDistinct('song').alias('distinctSongsListened'), # simplification, disregarding songs with the same name and different artists
F.avg('length').alias('avgLength'),
F.first(F.when(col('gender') == 'M', 1).otherwise(0)).alias('isMale'),
F.avg('itemInSession').alias('avgItemsInSession'),
F.sum(F.when(col('level') == 'Paid', 1).otherwise(0)).alias('interactionsPaid'),
F.sum(F.when(col('level') == 'Free', 1).otherwise(0)).alias('interactionsFree'),
F.sum(F.when(col('status') == 307, 1).otherwise(0)).alias('statusCount307'),
F.sum(F.when(col('status') == 404, 1).otherwise(0)).alias('statusCount404'),
F.sum(F.when(col('status') == 200, 1).otherwise(0)).alias('statusCount200'),
F.min('day').alias('firstInteraction'),
F.max('day').alias('lastInteraction'),
)
# print('df_features after AGGs shape (row, cow)', df_features.count(), len(df_features.columns))
df_features = df_features.join(page_counts, on='userId')\
.join(session_avg_length, on='userId')\
.join(location_percentage, on='userId')\
.join(percentage_sys_info, on='userId')\
.join(percentage_plat, on='userId')
# print('df_features after JOINs shape (row, cow)', df_features.count(), len(df_features.columns))
df_features = df_features.withColumn('accountLifeTime', F.datediff( col('lastInteraction'), col('firstInteraction') ) )
df_features = df_features.drop('lastInteraction','firstInteraction')
# print('df_features after NEW COL shape (row, cow)', df_features.count(), len(df_features.columns))
# Handle NaNs, Nulls
df_features = df_features.fillna(0)
# print('df_features after FILLNA shape (row, cow)', df_features.count(), len(df_features.columns))
return df_features, location_mappings, sys_agent_mappings, plat_agent_mappings
start = time.time()
df_features, location_mappings, sys_agent_mappings, plat_agent_mappings = compute_features(df)
end = time.time()
print(f'Spent {end-start}s in feature computation')
# Add churn column as label
churn = df.select('userId','churn').distinct()
df_features_label = df_features.join(churn, on='userId')
df_features_label = df_features_label.withColumnRenamed('churn', 'label')
# Drop userId, as it's not a feature
df_features_label = df_features_label.drop('userId')
df_features_label.toPandas().head()
print('df_features_label shape:', df_features_label.count(), len(df_features_label.columns))
# Save features locally, partitioning by date
now = datetime.now()
path = f'processed/{now.year}/{now.month}/{now.day}/{now.hour}/{now.minute}'
print(f'Saving data in {path}...')
df_features_label.write.csv(path, mode='overwrite', header=True)
###Output
Saving data in processed/2020/7/1/21/8...
###Markdown
Modeling. We assemble our features with `VectorAssembler` and split the full dataset into train, validation, and test sets with the `randomSplit` DataFrame method:
###Code
num_cols = df_features_label.columns
num_cols.remove('isMale')
num_cols.remove('label')  # the target must not be assembled into the feature vector, otherwise it leaks into the model
len(num_cols)
# After feature calculations the only categorical variable is "isMale", related to gender. All the others are numeric:
num_assembler = VectorAssembler(inputCols = num_cols, outputCol = "numVector")
# Scale numeric features
scaler = StandardScaler(inputCol = "numVector", outputCol = "numScaled", withStd = True, withMean = True)
# Add categorical variable "isMale":
total_assembler = VectorAssembler(inputCols = ['isMale', 'numScaled'], outputCol = 'features')
# Split dataset
train_val_dataset, test_dataset = df_features_label.randomSplit([0.8, 0.2], seed = 0)
print('Number of train+validation dataset examples (should be 80%):', train_val_dataset.count())
print('Number of test dataset examples (should be 20%):', test_dataset.count())
print('Total number of dataset examples:', df_features_label.count())
###Output
Number of train+validation dataset examples (should be 80%): 175
Number of test dataset examples (should be 20%): 50
Total number of dataset examples: 225
###Markdown
A few observations: Although we'll be using tree-based ensemble methods, we standardize the numeric values anyway, so that models which are sensitive to feature scale can be tried later without extra work. In addition, feature selection is easier with these ensemble models (we have 204 features), since they provide built-in feature importance measures. We could also apply an explicit feature selection technique (e.g. correlation analysis or recursive feature elimination); a hedged sketch of a correlation check is added after the evaluator cell below. We create our custom evaluator for the F1 score metric (which isn't available in Spark 2.4.6):
###Code
# Since in PySpark 2.4.6 (version we are using) there's no F1 score metric in BinaryClassificationEvaluator, we create our own evaluator
class F1Evaluator(Evaluator):
def __init__(self, predictionCol = "prediction", labelCol="label"):
self.predictionCol = predictionCol
self.labelCol = labelCol
def _evaluate(self, dataset):
# Calculate F1 score
tp = dataset.where((dataset.label == 1) & (dataset.prediction == 1)).count()
tn = dataset.where((dataset.label == 0) & (dataset.prediction == 0)).count()
fp = dataset.where((dataset.label == 0) & (dataset.prediction == 1)).count()
fn = dataset.where((dataset.label == 1) & (dataset.prediction == 0)).count()
        # Add epsilon to avoid division by zero errors
eps = 1e-6
precision = tp / float(tp + fp + eps)
recall = tp / float(tp + fn + eps)
f1 = 2 * precision * recall / float(precision + recall + 0.00001)
return f1
def isLargerBetter(self):
return True
###Output
_____no_output_____
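###Markdown
As an illustration of the feature selection idea mentioned above (a sketch, not part of the original pipeline), the assembled feature vector can be inspected with a Pearson correlation matrix to spot redundant columns before training:
###Code
from pyspark.ml import Pipeline
from pyspark.ml.stat import Correlation
# Sketch only: reuse the assembler/scaler stages defined above to build the 'features' column,
# then compute pairwise Pearson correlations between the assembled features.
prep_pipeline = Pipeline(stages=[num_assembler, scaler, total_assembler])
prepped = prep_pipeline.fit(df_features_label).transform(df_features_label)
corr_matrix = Correlation.corr(prepped, 'features').head()[0]
print(corr_matrix.toArray().shape)
###Output
_____no_output_____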
###Markdown
We create two pipelines: one for RandomForestClassifier and another for GBTClassifier.
###Code
# create variable to verify if we are running locally. It will take too much time to run locally
spark_master = dict(spark.sparkContext.getConf().getAll())['spark.master']
print('spark_master:', spark_master)
def train_pipeline(model_type, dataset):
'''
Creates and trains pipeline doing Cross Validation.
Only supports RandomForestClassifier and GBTClassifier.
Parameters
----------
model_type: 'rf' or 'gb'. Strings for RandomForestClassifier and GBTClassifier, respectively.
dataset: Spark dataframe with features
Output
------
cv_model: trained model after cross validation
param_grid: param_grid used to train with grid search
'''
if model_type=='rf':
model = RandomForestClassifier()
else:
model = GBTClassifier()
print('Selected', model.__class__.__name__, 'model')
pipeline = Pipeline(stages = [num_assembler, scaler, total_assembler, model])
# We create a grid of parameters to search over with ParamGridBuilder
    # If cluster mode: this grid will have 2 x 2 x 2 = 8 parameter settings for CrossValidator to choose from.
    # If local mode: this grid will have 1 x 1 x 1 = 1 parameter setting for CrossValidator to choose from.
if spark_master=='local':
print('Training locally...')
num_folds=2
if model_type=='rf':
param_grid = ParamGridBuilder()\
.addGrid(
model.numTrees, [10]
).addGrid(
model.maxBins, [16]
).addGrid(
model.maxDepth, [3]
).build()
else:
param_grid = ParamGridBuilder()\
.addGrid(
model.maxIter, [10]
).addGrid(
model.stepSize, [0.3]
).addGrid(
model.maxDepth, [3]
).build()
else:
print('Training in cluster...')
num_folds=3
if model_type=='rf':
param_grid = ParamGridBuilder()\
.addGrid(
model.numTrees, [20, 30]
).addGrid(
model.maxBins, [16, 32]
).addGrid(
model.maxDepth, [3, 5]
).build()
else:
param_grid = ParamGridBuilder()\
.addGrid(
model.maxIter, [150, 300]
).addGrid(
model.stepSize, [0.1,0.3]
).addGrid(
model.maxDepth, [3, 5]
).build()
# We wrap the pipeline in CrossValidator instance and use our F1Evaluator
cv = CrossValidator(estimator=pipeline, estimatorParamMaps=param_grid, evaluator=F1Evaluator(), numFolds=num_folds)
# Run cross-validation, and choose the best set of parameters.
print('Training', model.__class__.__name__,'...')
cv_model = cv.fit(dataset)
return cv_model, param_grid
###Output
_____no_output_____
###Markdown
We train the Random Forest and Gradient-Boosted Trees models using the F1 score as the evaluator, tuning parameters with cross validation and grid search over the pipelines.
###Code
# Train Random Forest model
start = time.time()
trained_rf, param_grid_rf = train_pipeline('rf', train_val_dataset)
end = time.time()
print(f'Spent {end-start}s for training RandomForestClassifier')
# Save Random Forest model
trained_rf.bestModel.write().overwrite().save('models/mini_rf')
# Train Gradient-Boosted Trees model
start = time.time()
trained_gb, param_grid_gb = train_pipeline('gb', train_val_dataset)
end = time.time()
print(f'Spent {end-start}s for training GBTClassifier')
# Save Gradient-Boosted Trees model
trained_gb.bestModel.write().overwrite().save('models/mini_gb')
###Output
_____no_output_____
###Markdown
Training metrics for models:
###Code
# Average cross-validation metrics for each grid trained with 2 folds
for param_grid, model in [(param_grid_rf, trained_rf), (param_grid_gb, trained_gb)]:
print(f'Training metrics for {model.bestModel.stages[-1].__class__.__name__}:')
for params, metrics in zip(param_grid, model.avgMetrics):
print('Parameters:', params, '\nMetrics:', metrics)
###Output
Training metrics for RandomForestClassificationModel:
Parameters: {Param(parent='RandomForestClassifier_b7f63930a164', name='numTrees', doc='Number of trees to train (>= 1).'): 10, Param(parent='RandomForestClassifier_b7f63930a164', name='maxBins', doc='Max number of bins for discretizing continuous features. Must be >=2 and >= number of categories for any categorical feature.'): 16, Param(parent='RandomForestClassifier_b7f63930a164', name='maxDepth', doc='Maximum depth of the tree. (>= 0) E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.'): 3}
Metrics: 0.9999949541916686
Training metrics for GBTClassificationModel:
Parameters: {Param(parent='GBTClassifier_2f85eb99c843', name='maxIter', doc='max number of iterations (>= 0).'): 10, Param(parent='GBTClassifier_2f85eb99c843', name='stepSize', doc='Step size (a.k.a. learning rate) in interval (0, 1] for shrinking the contribution of each estimator.'): 0.3, Param(parent='GBTClassifier_2f85eb99c843', name='maxDepth', doc='Maximum depth of the tree. (>= 0) E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.'): 3}
Metrics: 0.9999949541916686
###Markdown
Since we are dealing with Random Forest and Gradient-Boosted Trees models, we can take advantage of the feature importances they calculate, which provide useful information for interpretability:
###Code
# Display feature importances for RF and GBT (there is a "featureImportances" attribute for both)
col_names = dict(enumerate(['isMale'] + num_cols))
for model in (trained_rf, trained_gb):
importances = model.bestModel.stages[-1].featureImportances
importances = dict(zip(importances.indices, importances.values))
sorted_importances = {k: v for k, v in sorted(importances.items(), key=lambda item: item[1], reverse=True)}
sorted_importances = {col_names[k]: [v] for k, v in sorted_importances.items() if col_names[k]!='label' }
importances_df = pd.DataFrame(data=sorted_importances)
plt.figure(figsize=(20,6))
plt.title(f'Importances for {model.bestModel.stages[-1].__class__.__name__}')
plt.xticks(rotation=30)
sns.barplot(data=importances_df)
print('plat_agent_21:', plat_agent_mappings['plat_agent_21']) # like Gecko
print('location_35:', location_mappings['location_35']) # 'Flint, MI'
print('location_25:', location_mappings['location_25'])# 'Corpus Christi, TX'
###Output
plat_agent_21: like Gecko
location_35: Flint, MI
location_25: Corpus Christi, TX
###Markdown
Interestingly, the models indicate that there are locations where users have a higher chance of churning (we could investigate further whether some external factor explains this). Features related to devices don't seem to be relevant, as `like Gecko` doesn't provide much information (it is present in most user-agent strings). We evaluate our models on the test dataset, for both F1 score and AUC:
###Code
# Test with f1 score and AUC score using the test dataset
f1_evaluator = F1Evaluator()
auc_evaluator = BinaryClassificationEvaluator()
for model in (trained_rf, trained_gb):
# Get predictions using the test dataset
predictions = model.transform(test_dataset)
f1_score = f1_evaluator.evaluate(predictions)
auc = auc_evaluator.evaluate(predictions)
print(f'Model {model.bestModel.stages[-1].__class__.__name__} F1 score on test set:', f1_score)
print(f'Model {model.bestModel.stages[-1].__class__.__name__} AUC score on test set:', auc)
###Output
Model RandomForestClassificationModel F1 score on test set: 0.9999948750250157
Model RandomForestClassificationModel AUC score on test set: 1.0
Model GBTClassificationModel F1 score on test set: 0.9999948750250157
Model GBTClassificationModel AUC score on test set: 1.0
|
Pymaceuticals/versions/pymaceuticals_code_robgauer_v4.ipynb | ###Markdown
Observations and Insights
###Code
#%matplotlib notebook
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results. Load in csv file.
mouse_metadata_df= pd.read_csv(mouse_metadata_path)
mouse_metadata_df
# Read the mouse data and the study results. Load in csv file.
study_results_df = pd.read_csv(study_results_path)
study_results_df
# Combine the data into a single dataset
combined_results_df=pd.merge(mouse_metadata_df,study_results_df,how="outer",on="Mouse ID")
combined_results_df
# Checking the number of mice in the DataFrame.
# mice_instances_combined=combined_results_df["Mouse ID"].count()
# mice_instances_combined
mouse_metadata_df.count()
## DUPLICATE MOUSE IDENTIFIED ##
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
duplicate_rows=combined_results_df[combined_results_df.duplicated()]
duplicate_rows
## Optional: Get all the data for the duplicate mouse ID. ##
duplicate_rows=combined_results_df[combined_results_df.duplicated(keep=False)]
print("All Duplicate Rows based on all data columns is :")
print(duplicate_rows)
# Checking the number of mice in the clean DataFrame before dropping duplicate records.
combined_results_df.count()
## REMOVE THE DUPLICATE MOUSE/MICE ##
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
#### LESSON PANDAS DAY 2 -01 ####
#clean_combined_results_df=combined_results_df.drop_duplicates(keep='first')
#print('Duplicate records dropped :\n', clean_combined_results_df)
combined_results_df.drop_duplicates(inplace=True)  # inplace=True modifies the DataFrame directly and returns None, so no reassignment is needed
#print(clean_combined_results_df)
# Test to validate that the duplicate record is dropped from the dataset.
duplicate_rows=combined_results_df[combined_results_df.duplicated(keep=False)]
print("All Duplicate Rows based on all data columns is :")
print(duplicate_rows)
# Checking the number of mice in the clean DataFrame.
combined_results_df.count()
###Output
_____no_output_____
###Markdown
Summary Statistics
###Code
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# This method is the most straighforward, creating multiple series and putting them all together at the end.
# For Tumor Volume only use necessary columns
tumor_volume_df=combined_results_df.loc[:,["Drug Regimen","Mouse ID","Timepoint","Tumor Volume (mm3)"]]
tumor_volume_df
# Generate a summary statistics table
drug_regimen_df=tumor_volume_df.groupby(["Drug Regimen"])
drug_regimen_df.describe()
## DRUG REGIMEN VS. TUMOR VOLUME & TIMEPOINT SUMMARY STATISTICS TABLE ##
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
tumor_volume_statistics_df=tumor_volume_df.groupby(["Drug Regimen","Timepoint"]).agg({"Tumor Volume (mm3)":["mean","median","var","std","sem"]})
tumor_volume_statistics_df
## DRUG REGIMEN VS. TUMOR VOLUME SUMMARY STATISTICS TABLE ##
tumor_volume_summary=pd.DataFrame(tumor_volume_df.groupby("Drug Regimen").count())
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
tumor_volume_summary=tumor_volume_df.groupby(["Drug Regimen"]).agg({"Tumor Volume (mm3)":["mean","median","var","std","sem"]})
#tumor_volume_summary2=tumor_volume_summary[["Mouse ID", "Mean", "Median", "Variance","Standard Deviation","SEM"]]
#tumor_volume_summary=tumor_volume_summary2.rename(columns={"Mouse ID":"Treatments"})
tumor_volume_summary
## DRUG REGIMEN VS. TUMOR VOLUME SUMMARY STATISTICS TABLE OUTPUT ##
#Use groupby to create summary stats by drug regime, add results into columns in summarystats
tumor_volume_summary_output=pd.DataFrame(tumor_volume_df.groupby("Drug Regimen").count())
tumor_volume_summary_output["Mean"] = pd.DataFrame(tumor_volume_df.groupby("Drug Regimen")["Tumor Volume (mm3)"].mean())
tumor_volume_summary_output["Median"] = pd.DataFrame(tumor_volume_df.groupby("Drug Regimen")["Tumor Volume (mm3)"].median())
tumor_volume_summary_output["Variance"] = pd.DataFrame(tumor_volume_df.groupby("Drug Regimen")["Tumor Volume (mm3)"].var())
tumor_volume_summary_output["Standard Deviation"] = pd.DataFrame(tumor_volume_df.groupby("Drug Regimen")["Tumor Volume (mm3)"].std())
tumor_volume_summary_output["SEM"] = pd.DataFrame(tumor_volume_df.groupby("Drug Regimen")["Tumor Volume (mm3)"].sem())
#Clean up columns and rename count column
tumor_volume_summary_output = tumor_volume_summary_output[["Mouse ID", "Mean", "Median", "Variance","Standard Deviation","SEM"]]
tumor_volume_summary_output = tumor_volume_summary_output.rename(columns={"Mouse ID":"Treatments"})
tumor_volume_summary_output
###Output
_____no_output_____
###Markdown
Bar Plots
###Code
bar_pandas_plot=combined_results_df
# Generate a bar plot showing the number of mice per time point for each treatment throughout the course of the study using pandas.
drug_regimen_timepoints_df=combined_results_df.groupby(["Drug Regimen"])
#drug_regimen_timepoints_df.head()
mice_count_df=drug_regimen_timepoints_df['Mouse ID'].count()
#mice_count_df
# Set x and y limits
#x_axis=np.arange(len(datapoints))
#tick_locations=[value for value in x_axis]
#plt.xlim(-0.75, len(x_axis)-.25)
# Chart the data
chart_mice_per_drugregimen_timepoint = mice_count_df.plot(kind="bar", title="Drug Regimen Mice Count Per Timepoint",color='b',legend=False)
#chart_mice_per_drugregimen_timepoint = drug_regimen_timepoints_df.plot(kind="bar", title="Drug Regimen Mice Count Per Timepoint")
chart_mice_per_drugregimen_timepoint.set_xlabel("Drug Regimen")
chart_mice_per_drugregimen_timepoint.set_ylabel("Count of Mice Per Timepoint")
plt.show()
plt.tight_layout()
#bar_plot_data=combined_results_df[["Drug Regimen"]]
#bar_plot_data
# Generate a bar plot showing the number of mice per time point for each treatment throughout the course of the study using pyplot.
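# A minimal pyplot sketch for the same bar chart (an added example; it reuses mice_count_df from the pandas cell above)
plt.figure(figsize=(10, 5))
plt.bar(mice_count_df.index, mice_count_df.values, color='b', align='center')
plt.title("Drug Regimen Mice Count Per Timepoint")
plt.xlabel("Drug Regimen")
plt.ylabel("Count of Mice Per Timepoint")
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()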
###Output
_____no_output_____
###Markdown
Pie Plots
###Code
#gender=combined_results_df.groupby('Sex')
gender_counts=combined_results_df["Sex"].value_counts()
gender_counts
## Generate a pie plot showing the distribution of female versus male mice using pandas - OUTPUT ##
#combined_results_df.groupby('Sex')["Mouse ID"].nunique().plot(kind='pie',title="Drug Regimen Gender Distribution",autopct='%1.1f%%',shadow=True, startangle=25)
combined_results_df.groupby('Sex')["Mouse ID"].nunique().plot(kind='pie',title="Drug Regimen Gender Distribution",autopct='%1.1f%%',shadow=True, startangle=25)
## Generate a pie plot showing the distribution of female versus male mice using pyplot - OUTPUT ##
#gender_counts.plot(kind='pie',title="Drug Regimen Gender Distribution",autopct='%1.1f%%',shadow=True,startangle=205)
#plt.title("Drug Regimen Gender Distribution")
plt.pie(gender_counts,autopct='%1.1f%%',shadow=True,startangle=205)
#plt.axis("equal")
#plt.show()
###Output
_____no_output_____
###Markdown
Quartiles, Outliers and Boxplots
###Code
# Calculate the final tumor volume of each mouse across four of the most promising treatment regimens. Calculate the IQR and quantitatively determine if there are any potential outliers.
# Grab just data for the 4 smallest mean tumor volume regimens
filtered_df = combined_results_df.loc[(combined_results_df["Drug Regimen"] == "Capomulin") | (combined_results_df["Drug Regimen"] == "Ramicane") | (combined_results_df["Drug Regimen"] == "Ceftamin") | (combined_results_df["Drug Regimen"] == "Propriva"), :]
# Sort by Timpepoints based on the latest values
filtered_df = filtered_df.sort_values("Timepoint", ascending = False)
# Dropping duplicates, keeping first value, should be the latest timepoint per mouse
filtered_df = filtered_df.drop_duplicates(subset="Mouse ID", keep='first')
# Determine quartiles
quartiles = filtered_df['Tumor Volume (mm3)'].quantile([.25,.5,.75])
lowerq = quartiles[0.25]
upperq = quartiles[0.75]
iqr = upperq-lowerq
# Determine upper and lower bounds
lower_bound = lowerq - (1.5*iqr)
upper_bound = upperq + (1.5*iqr)
# Print a filtered dataframe of any outliers
outliers_df = filtered_df.loc[(filtered_df['Tumor Volume (mm3)'] > upper_bound) | (filtered_df['Tumor Volume (mm3)' ] < lower_bound), :]
outliers_df
## Did not find any outliers....
## Generate a box plot of the final tumor volume of each mouse across four regimens of interest - OUTPUT ##
Tumor_Volume = filtered_df['Tumor Volume (mm3)']
fig1, ax1 = plt.subplots()
ax1.set_title('Tumor Volume of Mice')
ax1.set_ylabel('Tumor Volume')
ax1.boxplot(Tumor_Volume)
plt.show()
###Output
_____no_output_____
###Markdown
Line and Scatter Plots
###Code
# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin
# Filter original data for just the Capomulin Drug Regime
Capomulin_df = combined_results_df.loc[(combined_results_df["Drug Regimen"] == "Capomulin"),:]
# Set variables to hold relevant data
timepoint = Capomulin_df["Timepoint"]
tumor_volume = Capomulin_df["Tumor Volume (mm3)"]
## Plot the tumor volume for various mice - OUTPUT ##
tumor_volume_line = plt.plot(timepoint, tumor_volume)
# Show the chart, add labels
plt.xlabel('Timepoint')
plt.ylabel('Tumor Volume')
plt.title('Tumor Volume over Time for Capomulin Mice')
plt.show()
# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen
# Pull values for x and y values
mouse_weight = Capomulin_df.groupby(Capomulin_df["Mouse ID"])["Weight (g)"].mean()
tumor_volume = Capomulin_df.groupby(Capomulin_df["Mouse ID"])["Tumor Volume (mm3)"].mean()
## Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen - OUTPUT ##
# Create Scatter Plot with values calculated above
plt.scatter(mouse_weight,tumor_volume)
plt.xlabel("Weight of Mouse")
plt.ylabel("Tumor Volume")
plt.show()
###Output
_____no_output_____
###Markdown
Correlation and Regression
###Code
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
# Pull values for x and y values
mouse_weight = Capomulin_df.groupby(Capomulin_df["Mouse ID"])["Weight (g)"].mean()
tumor_volume = Capomulin_df.groupby(Capomulin_df["Mouse ID"])["Tumor Volume (mm3)"].mean()
# Perform a linear regression on mouse weight versus average tumor volume
slope, intercept, r, p, std_err = st.linregress(mouse_weight, tumor_volume)
# Create the line equation to calculate the predicted tumor volume
fit = slope * mouse_weight + intercept
## Plot the linear model on top of scatter plot - OUTPUT ##
plt.scatter(mouse_weight,tumor_volume)
plt.xlabel("Weight of Mouse")
plt.ylabel("Tumor Volume")
plt.plot(mouse_weight,fit,"--")
plt.xticks(mouse_weight, rotation=90)
plt.show()
## Caculate correlation coefficient - OUTPUT ##
corr = round(st.pearsonr(mouse_weight,tumor_volume)[0],2)
print(f'The correlation between weight and tumor value is {corr}')
##
###Output
_____no_output_____ |
07_AdvancedConvolution/AdvancedConvolution.ipynb | ###Markdown
Clone the PySodium Library
###Code
!git clone https://github.com/satyajitghana/TSAI-DeepVision-EVA4.0
%cd TSAI-DeepVision-EVA4.0/07_AdvancedConvolution/PySodium/
###Output
/content/TSAI-DeepVision-EVA4.0/07_AdvancedConvolution/PySodium
###Markdown
Run the CIFAR10 Model for 50 Epochs ```Params: 344,032, Max Test Accuracy: 80.71%```
###Code
!python main.py --config=experiments/cifar10_config.yml --device=0
###Output
[ 2020-03-03 22:54:18,098 - sodium.__main__ ] INFO: Training: {'name': 'CIFAR10_MyNet', 'save_dir': 'saved/', 'seed': 1, 'target_device': 0, 'arch': {'type': 'CIFAR10Model', 'args': {}}, 'augmentation': {'type': 'CIFAR10Transforms', 'args': {}}, 'data_loader': {'type': 'CIFAR10DataLoader', 'args': {'batch_size': 64, 'data_dir': 'data/', 'nworkers': 4, 'shuffle': True}}, 'loss': 'nll_loss', 'optimizer': {'type': 'SGD', 'args': {'lr': 0.008, 'momentum': 0.95}}, 'training': {'epochs': 50}}
[ 2020-03-03 22:54:18,098 - sodium.__main__ ] INFO: Building: sodium.model.model.CIFAR10Model
[ 2020-03-03 22:54:18,194 - sodium.__main__ ] INFO: Using device 0 of available devices [0]
[ 2020-03-03 22:54:27,123 - sodium.__main__ ] INFO: Building: torch.optim.SGD
[ 2020-03-03 22:54:27,123 - sodium.__main__ ] INFO: Building: sodium.data_loader.augmentation.CIFAR10Transforms
[ 2020-03-03 22:54:27,123 - sodium.__main__ ] INFO: Building: sodium.data_loader.data_loaders.CIFAR10DataLoader
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to data/cifar-10-python.tar.gz
170500096it [00:03, 43013895.17it/s]
Extracting data/cifar-10-python.tar.gz to data/
Files already downloaded and verified
[ 2020-03-03 22:54:34,413 - sodium.__main__ ] INFO: Getting loss function handle
[ 2020-03-03 22:54:34,414 - sodium.__main__ ] INFO: Initializing trainer
[ 2020-03-03 22:54:34,414 - sodium.sodium.base.base_trainer ] INFO: Starting training ...
[ 2020-03-03 22:54:34,414 - sodium.sodium.base.base_trainer ] INFO: Training the model for 50 epochs
Training Epoch: 1
epoch=1 loss=1.3414895535 batch_id=781: 100% 782/782 [00:19<00:00, 39.28it/s]
Test set: Accuracy: 27.65
Training Epoch: 2
epoch=2 loss=1.0419018269 batch_id=781: 100% 782/782 [00:19<00:00, 39.30it/s]
Test set: Accuracy: 51.66
Training Epoch: 3
epoch=3 loss=1.1499898434 batch_id=781: 100% 782/782 [00:19<00:00, 39.43it/s]
Test set: Accuracy: 49.23
Training Epoch: 4
epoch=4 loss=0.4848939180 batch_id=781: 100% 782/782 [00:19<00:00, 40.20it/s]
Test set: Accuracy: 50.67
Training Epoch: 5
epoch=5 loss=0.4917156398 batch_id=781: 100% 782/782 [00:19<00:00, 40.66it/s]
Test set: Accuracy: 63.65
Training Epoch: 6
epoch=6 loss=1.1476051807 batch_id=781: 100% 782/782 [00:19<00:00, 40.31it/s]
Test set: Accuracy: 60.48
Training Epoch: 7
epoch=7 loss=0.6393318176 batch_id=781: 100% 782/782 [00:19<00:00, 40.29it/s]
Test set: Accuracy: 66.04
Training Epoch: 8
epoch=8 loss=0.8483913541 batch_id=781: 100% 782/782 [00:19<00:00, 42.75it/s]
Test set: Accuracy: 66.08
Training Epoch: 9
epoch=9 loss=1.2202727795 batch_id=781: 100% 782/782 [00:19<00:00, 40.29it/s]
Test set: Accuracy: 73.16
Training Epoch: 10
epoch=10 loss=0.5469111204 batch_id=781: 100% 782/782 [00:19<00:00, 43.03it/s]
Test set: Accuracy: 70.84
Training Epoch: 11
epoch=11 loss=0.3808762133 batch_id=781: 100% 782/782 [00:19<00:00, 43.52it/s]
Test set: Accuracy: 68.72
Training Epoch: 12
epoch=12 loss=0.3643867671 batch_id=781: 100% 782/782 [00:19<00:00, 42.84it/s]
Test set: Accuracy: 71.09
Training Epoch: 13
epoch=13 loss=0.3913764656 batch_id=781: 100% 782/782 [00:19<00:00, 40.18it/s]
Test set: Accuracy: 70.68
Training Epoch: 14
epoch=14 loss=0.6568995714 batch_id=781: 100% 782/782 [00:19<00:00, 40.30it/s]
Test set: Accuracy: 73.43
Training Epoch: 15
epoch=15 loss=0.9497343898 batch_id=781: 100% 782/782 [00:19<00:00, 40.21it/s]
Test set: Accuracy: 70.06
Training Epoch: 16
epoch=16 loss=0.6844101548 batch_id=781: 100% 782/782 [00:19<00:00, 40.03it/s]
Test set: Accuracy: 72.91
Training Epoch: 17
epoch=17 loss=1.0270491838 batch_id=781: 100% 782/782 [00:19<00:00, 40.25it/s]
Test set: Accuracy: 72.96
Training Epoch: 18
epoch=18 loss=0.5021054745 batch_id=781: 100% 782/782 [00:19<00:00, 40.50it/s]
Test set: Accuracy: 75.45
Training Epoch: 19
epoch=19 loss=0.4757013917 batch_id=781: 100% 782/782 [00:19<00:00, 39.97it/s]
Test set: Accuracy: 77.69
Training Epoch: 20
epoch=20 loss=1.1615256071 batch_id=781: 100% 782/782 [00:19<00:00, 42.96it/s]
Test set: Accuracy: 73.01
Training Epoch: 21
epoch=21 loss=0.5261611938 batch_id=781: 100% 782/782 [00:19<00:00, 42.95it/s]
Test set: Accuracy: 78.16
Training Epoch: 22
epoch=22 loss=0.3504853249 batch_id=781: 100% 782/782 [00:19<00:00, 40.33it/s]
Test set: Accuracy: 75.55
Training Epoch: 23
epoch=23 loss=1.3858498335 batch_id=781: 100% 782/782 [00:19<00:00, 43.55it/s]
Test set: Accuracy: 74.29
Training Epoch: 24
epoch=24 loss=0.4206817150 batch_id=781: 100% 782/782 [00:19<00:00, 43.64it/s]
Test set: Accuracy: 75.75
Training Epoch: 25
epoch=25 loss=1.0595155954 batch_id=781: 100% 782/782 [00:19<00:00, 40.20it/s]
Test set: Accuracy: 78.71
Training Epoch: 26
epoch=26 loss=0.8333079219 batch_id=781: 100% 782/782 [00:19<00:00, 40.18it/s]
Test set: Accuracy: 76.64
Training Epoch: 27
epoch=27 loss=0.7093594074 batch_id=781: 100% 782/782 [00:19<00:00, 43.66it/s]
Test set: Accuracy: 76.52
Training Epoch: 28
epoch=28 loss=0.2618134022 batch_id=781: 100% 782/782 [00:19<00:00, 40.45it/s]
Test set: Accuracy: 74.24
Training Epoch: 29
epoch=29 loss=0.6720687747 batch_id=781: 100% 782/782 [00:19<00:00, 43.60it/s]
Test set: Accuracy: 77.55
Training Epoch: 30
epoch=30 loss=0.5437931418 batch_id=781: 100% 782/782 [00:19<00:00, 40.31it/s]
Test set: Accuracy: 75.38
Training Epoch: 31
epoch=31 loss=0.8022992015 batch_id=781: 100% 782/782 [00:18<00:00, 41.30it/s]
Test set: Accuracy: 75.74
Training Epoch: 32
epoch=32 loss=0.3977631629 batch_id=781: 100% 782/782 [00:19<00:00, 40.65it/s]
Test set: Accuracy: 75.4
Training Epoch: 33
epoch=33 loss=0.4650067091 batch_id=781: 100% 782/782 [00:19<00:00, 40.41it/s]
Test set: Accuracy: 80.09
Training Epoch: 34
epoch=34 loss=1.0284581184 batch_id=781: 100% 782/782 [00:19<00:00, 40.89it/s]
Test set: Accuracy: 79.53
Training Epoch: 35
epoch=35 loss=0.4025305510 batch_id=781: 100% 782/782 [00:19<00:00, 43.57it/s]
Test set: Accuracy: 76.41
Training Epoch: 36
epoch=36 loss=0.5547651052 batch_id=781: 100% 782/782 [00:19<00:00, 40.32it/s]
Test set: Accuracy: 78.87
Training Epoch: 37
epoch=37 loss=0.8995084763 batch_id=781: 100% 782/782 [00:19<00:00, 40.71it/s]
Test set: Accuracy: 75.74
Training Epoch: 38
epoch=38 loss=0.1948822290 batch_id=781: 100% 782/782 [00:19<00:00, 41.08it/s]
Test set: Accuracy: 79.85
Training Epoch: 39
epoch=39 loss=0.3655758798 batch_id=781: 100% 782/782 [00:19<00:00, 40.77it/s]
Test set: Accuracy: 78.95
Training Epoch: 40
epoch=40 loss=0.4775102735 batch_id=781: 100% 782/782 [00:19<00:00, 40.74it/s]
Test set: Accuracy: 77.74
Training Epoch: 41
epoch=41 loss=0.1895846128 batch_id=781: 100% 782/782 [00:18<00:00, 41.16it/s]
Test set: Accuracy: 78.5
Training Epoch: 42
epoch=42 loss=0.2660197020 batch_id=781: 100% 782/782 [00:19<00:00, 40.95it/s]
Test set: Accuracy: 79.73
Training Epoch: 43
epoch=43 loss=0.2093307078 batch_id=781: 100% 782/782 [00:19<00:00, 44.06it/s]
Test set: Accuracy: 78.7
Training Epoch: 44
epoch=44 loss=1.0514261723 batch_id=781: 100% 782/782 [00:19<00:00, 40.69it/s]
Test set: Accuracy: 79.0
Training Epoch: 45
epoch=45 loss=0.9223704934 batch_id=781: 100% 782/782 [00:19<00:00, 40.09it/s]
Test set: Accuracy: 79.77
Training Epoch: 46
epoch=46 loss=0.3726564944 batch_id=781: 100% 782/782 [00:19<00:00, 40.55it/s]
Test set: Accuracy: 80.14
Training Epoch: 47
epoch=47 loss=0.0828369260 batch_id=781: 100% 782/782 [00:19<00:00, 40.51it/s]
Test set: Accuracy: 80.71
Training Epoch: 48
epoch=48 loss=0.0597159266 batch_id=781: 100% 782/782 [00:19<00:00, 40.38it/s]
Test set: Accuracy: 79.78
Training Epoch: 49
epoch=49 loss=0.2069731951 batch_id=781: 100% 782/782 [00:19<00:00, 40.20it/s]
Test set: Accuracy: 79.82
Training Epoch: 50
epoch=50 loss=0.1346652508 batch_id=781: 100% 782/782 [00:19<00:00, 40.50it/s]
Test set: Accuracy: 79.42
[ 2020-03-03 23:12:26,986 - sodium.__main__ ] INFO: Finished!
###Markdown
The Model
###Code
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
class CIFAR10Model(nn.Module):
def __init__(self, dropout_value=0.25):
self.dropout_value = dropout_value # dropout value
super(CIFAR10Model, self).__init__()
# Input Block
self.convblock1 = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=32,
kernel_size=(3, 3), padding=1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Dropout(self.dropout_value)
) # output_size = 32
# CONVOLUTION BLOCK 1
self.convblock2 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64,
kernel_size=(3, 3), padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Dropout(self.dropout_value)
) # output_size = 32
# TRANSITION BLOCK 1
self.convblock3 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=32,
kernel_size=(1, 1), padding=0, bias=False),
) # output_size = 32
self.pool1 = nn.MaxPool2d(2, 2) # output_size = 16
# CONVOLUTION BLOCK 2
# DEPTHWISE CONVOLUTION AND POINTWISE CONVOLUTION
self.depthwise1 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64,
kernel_size=(3, 3), padding=0, groups=32, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Dropout(self.dropout_value)
        ) # output_size = 14
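        # Note (added explanation): with in_channels=32 and groups=32 each group convolves a single
        # input channel, so this is a depthwise convolution (channel multiplier 2, since out_channels=64);
        # the 1x1 convolution in convblock4 below is the pointwise step of a depthwise-separable conv.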
self.convblock4 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=(1, 1), padding=0, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Dropout(self.dropout_value)
        ) # output_size = 14
# TRANSITION BLOCK 2
        self.pool2 = nn.MaxPool2d(2, 2) # output_size = 7
# CONVOLUTION BLOCK 3
self.convblock5 = nn.Sequential(
nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=(3, 3), padding=4, dilation=2, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Dropout(self.dropout_value)
) # output_size = 11
self.convblock6 = nn.Sequential(
nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=(3, 3), padding=1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Dropout(self.dropout_value)
) # output_size = 11
# TRANSITION BLOCK 3
self.pool3 = nn.MaxPool2d(2, 2) # output_size = 5
# OUTPUT BLOCK
self.gap = nn.Sequential(
nn.AvgPool2d(kernel_size=5)
) # output_size = 1
self.convblock7 = nn.Sequential(
nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=(1, 1), padding=0, bias=False),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.Dropout(self.dropout_value)
)
self.convblock8 = nn.Sequential(
nn.Conv2d(in_channels=128, out_channels=10,
kernel_size=(1, 1), padding=0, bias=False),
)
self.dropout = nn.Dropout(self.dropout_value)
def forward(self, x):
x = self.convblock1(x)
x = self.convblock2(x)
x = self.convblock3(x)
x = self.pool1(x)
x = self.depthwise1(x)
x = self.convblock4(x)
x = self.pool2(x)
x = self.convblock5(x)
x = self.convblock6(x)
x = self.pool3(x)
x = self.gap(x)
x = self.convblock7(x)
x = self.convblock8(x)
x = x.view(-1, 10)
return F.log_softmax(x, dim=-1)
!pip install torchsummary
from torchsummary import summary
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print(device)
model = CIFAR10Model().to(device)
summary(model, input_size=(3, 32, 32))
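# Added sanity check (a sketch, not from the original notebook): pass a random batch through the model
# and confirm the classifier head returns (batch, 10) log-probabilities.
with torch.no_grad():
    dummy = torch.randn(4, 3, 32, 32).to(device)
    print(model(dummy).shape)  # expected: torch.Size([4, 10])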
###Output
_____no_output_____ |
ipynb/generate_models_for_residuals.ipynb | ###Markdown
Copyright (c) 2020 Juergen Koefinger, Max Planck Institute of Biophysics, Frankfurt am Main, Germany. Released under the MIT Licence, see the file LICENSE.txt. Description: Helper tool to generate models for residuals using step functions. Models are generated with and without normally distributed noise. You can use these models to explore the statistical power using the IPython notebooks hplusminus_tests.ipynb and hplusminus_statistical_power.ipynb. Initialization
###Code
import numpy as np
import matplotlib
import matplotlib.pylab as plt
%matplotlib inline
matplotlib.rc('xtick', labelsize=16)
matplotlib.rc('ytick', labelsize=16)
matplotlib.rc('axes', labelsize=16)
###Output
_____no_output_____
###Markdown
Define Model For Residuals Using Step Functions
###Code
# Number of data points
number_data_points = 500
# Define positions of steps in fractions of the number of data points. Fractions have to be in [0,1]. First enttry has to be '0.', last entry has to be '1.'
relative_step_postitions = [0., 0.25, 0.5, 0.75, 1.]
# Define step heights. This array has to have have one fewer entries than 'relative_step_postitions'.
step_heights = np.asarray([0, 1, 0, 1])
# Define a scale factor for the step heights.
# Use the scale factor to tune the signal-to-noise ratio when normally distributed is added below.
scale_factor = 1
# Scale step heights
step_heights = step_heights*scale_factor
relative_step_postitions = np.sort(relative_step_postitions)
if np.any(relative_step_postitions<0)+np.any(relative_step_postitions>1):
print("ERROR: Entries of \"relative_step_postitions\" have to be in [0,1]")
if len(step_heights) != (len(relative_step_postitions)-1):
print("ERROR: \"step_heigths\" has to have one more entry than \"relative_step_postitions\"")
# Set is_true_model=True if you want to generate noise for the true model. That is, the model will equal to zero for all indices.
is_true_model=False
###Output
_____no_output_____
###Markdown
Model Generation
###Code
# Generate step function (i.e., the model) from the information entered above
step_positions=np.asarray(np.round(relative_step_postitions*number_data_points), dtype=np.int)
step_lengths=step_positions[1:]-step_positions[:-1]
model=[]
for i, l in enumerate(step_lengths):
model.append(np.ones(l)*step_heights[i])
model=np.concatenate(model)
model-=model.mean()
# Set is_true_model=True if you want to generate noise for the true model. That is, the model will equal to zero for all indices.
if is_true_model:
model*=0.
# Add normally distributed noise to the model
model_normalized_residuals=model+np.random.normal(0, 1, number_data_points)
###Output
_____no_output_____
###Markdown
Plotting
###Code
plt.plot(model)
plt.grid()
plt.xlabel("index")
plt.ylabel("model")
plt.plot(model_normalized_residuals)
plt.grid()
plt.xlabel("index")
plt.ylabel("model with noise")
###Output
_____no_output_____
###Markdown
Save Model with and without noise to text files
###Code
io_path="./data/"
if is_true_model:
pref="true_"
else:
pref="alternative_"
# Without noise
np.savetxt(io_path+pref+"model.txt", model)
# With noise
np.savetxt(io_path+pref+"model_normalized_residuals.txt", model_normalized_residuals)
###Output
_____no_output_____ |
notebooks/semisupervised/FMNIST/learned-metric/augmented-Y/fmnist-aug-16ex-learned-nothresh-Y-not-augmented.ipynb | ###Markdown
Choose GPU
###Code
%env CUDA_DEVICE_ORDER=PCI_BUS_ID
%env CUDA_VISIBLE_DEVICES=0
import tensorflow as tf
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
if len(gpu_devices)>0:
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
print(gpu_devices)
tf.keras.backend.clear_session()
###Output
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
###Markdown
Load packages
###Code
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tqdm.autonotebook import tqdm
from IPython import display
import pandas as pd
import umap
import copy
import os, tempfile
import tensorflow_addons as tfa
import pickle
###Output
/mnt/cube/tsainbur/conda_envs/tpy3/lib/python3.6/site-packages/tqdm/autonotebook/__init__.py:14: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)
" (e.g. in jupyter console)", TqdmExperimentalWarning)
###Markdown
parameters
###Code
dataset = "fmnist"
labels_per_class = 16 # 'full'
n_latent_dims = 1024
confidence_threshold = 0.0 # minimum confidence to include in UMAP graph for learned metric
learned_metric = True # whether to use a learned metric, or Euclidean distance between datapoints
augmented = True #
min_dist= 0.001 # min_dist parameter for UMAP
negative_sample_rate = 5 # how many negative samples per positive sample
batch_size = 128 # batch size
optimizer = tf.keras.optimizers.Adam(1e-3) # the optimizer to train
optimizer = tfa.optimizers.MovingAverage(optimizer)
label_smoothing = 0.2 # how much label smoothing to apply to categorical crossentropy
max_umap_iterations = 500 # how many times, maximum, to recompute UMAP
max_epochs_per_graph = 10 # how many epochs maximum each graph trains for (without early stopping)
graph_patience = 10 # how many times without improvement to train a new graph
min_graph_delta = 0.0025 # minimum improvement on validation acc to consider an improvement for training
from datetime import datetime
datestring = datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
datestring = (
str(dataset)
+ "_"
+ str(confidence_threshold)
+ "_"
+ str(labels_per_class)
+ "____"
+ datestring
+ '_umap_augmented'
)
print(datestring)
###Output
fmnist_0.0_16____2020_08_25_22_53_12_075808_umap_augmented
###Markdown
Load dataset
###Code
from tfumap.semisupervised_keras import load_dataset
(
X_train,
X_test,
X_labeled,
Y_labeled,
Y_masked,
X_valid,
Y_train,
Y_test,
Y_valid,
Y_valid_one_hot,
Y_labeled_one_hot,
num_classes,
dims
) = load_dataset(dataset, labels_per_class)
###Output
_____no_output_____
###Markdown
load architecture
###Code
from tfumap.semisupervised_keras import load_architecture
encoder, classifier, embedder = load_architecture(dataset, n_latent_dims)
###Output
_____no_output_____
###Markdown
load pretrained weights
###Code
from tfumap.semisupervised_keras import load_pretrained_weights
encoder, classifier = load_pretrained_weights(dataset, augmented, labels_per_class, encoder, classifier)
###Output
WARNING: Logging before flag parsing goes to stderr.
W0825 22:53:14.381406 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fcbe007a9e8> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fcbe007aa20>).
W0825 22:53:14.383554 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fcbe00870f0> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fcbe0087f98>).
W0825 22:53:14.405316 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fcbeaabecf8> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fcbeaabeeb8>).
W0825 22:53:14.409077 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fcbeaabeeb8> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fcbeadfd4a8>).
W0825 22:53:14.413197 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fcbeadb0c18> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fcbeadb0208>).
W0825 22:53:14.416175 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fcbeadb0208> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fcbeae540f0>).
W0825 22:53:14.420321 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fcbeabaeb38> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fcbeabaee10>).
W0825 22:53:14.423249 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fcbeabaee10> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fcbeabcc048>).
W0825 22:53:14.430676 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fcbeae9d278> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fcbeae9d8d0>).
W0825 22:53:14.433633 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fcbeae9d8d0> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fcbeae9dac8>).
W0825 22:53:14.437686 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fcbeaed2320> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fcbeaed2940>).
W0825 22:53:14.441426 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fcbeaed2940> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fcbeaed2b70>).
W0825 22:53:14.446441 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fcbe3a13668> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fcbe3a13978>).
W0825 22:53:14.449349 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fcbe3a13978> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fcbe3a13ba8>).
W0825 22:53:14.456667 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fcbe01731d0> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fcbe01733c8>).
W0825 22:53:14.459514 140518542591808 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fcbe01733c8> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fcbe0173668>).
###Markdown
compute pretrained accuracy
###Code
# test current acc
pretrained_predictions = classifier.predict(encoder.predict(X_test, verbose=True), verbose=True)
pretrained_predictions = np.argmax(pretrained_predictions, axis=1)
pretrained_acc = np.mean(pretrained_predictions == Y_test)
print('pretrained acc: {}'.format(pretrained_acc))
###Output
313/313 [==============================] - 2s 6ms/step
313/313 [==============================] - 0s 1ms/step
pretrained acc: 0.7962
###Markdown
get a, b parameters for embeddings
###Code
from tfumap.semisupervised_keras import find_a_b
a_param, b_param = find_a_b(min_dist=min_dist)
###Output
_____no_output_____
###Markdown
build network
###Code
from tfumap.semisupervised_keras import build_model
model = build_model(
batch_size=batch_size,
a_param=a_param,
b_param=b_param,
dims=dims,
encoder=encoder,
classifier=classifier,
negative_sample_rate=negative_sample_rate,
optimizer=optimizer,
label_smoothing=label_smoothing,
embedder = embedder,
)
###Output
_____no_output_____
###Markdown
build labeled iterator
###Code
from tfumap.semisupervised_keras import build_labeled_iterator
labeled_dataset = build_labeled_iterator(X_labeled, Y_labeled_one_hot, augmented, dims, dataset = dataset)
###Output
_____no_output_____
###Markdown
training
###Code
from livelossplot import PlotLossesKerasTF
from tfumap.semisupervised_keras import get_edge_dataset
from tfumap.semisupervised_keras import zip_datasets
###Output
_____no_output_____
###Markdown
callbacks
###Code
# plot losses callback
groups = {'acccuracy': ['classifier_accuracy', 'val_classifier_accuracy'], 'loss': ['classifier_loss', 'val_classifier_loss']}
plotlosses = PlotLossesKerasTF(groups=groups)
history_list = []
current_validation_acc = 0
batches_per_epoch = np.floor(len(X_train)/batch_size).astype(int)
epochs_since_last_improvement = 0
current_umap_iterations = 0
current_epoch = 0
from tfumap.paths import MODEL_DIR, ensure_dir
save_folder = MODEL_DIR / 'semisupervised-keras' / dataset / str(labels_per_class) / datestring
ensure_dir(save_folder / 'test_loss.npy')
for cui in tqdm(np.arange(current_epoch, max_umap_iterations)):
if len(history_list) > graph_patience+1:
previous_history = [np.mean(i.history['val_classifier_accuracy']) for i in history_list]
best_of_patience = np.max(previous_history[-graph_patience:])
best_of_previous = np.max(previous_history[:-graph_patience])
if (best_of_previous + min_graph_delta) > best_of_patience:
print('Early stopping')
break
# make dataset
edge_dataset = get_edge_dataset(
model,
augmented,
classifier,
encoder,
X_train,
Y_masked,
batch_size,
confidence_threshold,
labeled_dataset,
dims,
learned_metric = learned_metric,
dataset=dataset
)
# zip dataset
zipped_ds = zip_datasets(labeled_dataset, edge_dataset, batch_size)
# train dataset
history = model.fit(
zipped_ds,
epochs= current_epoch + max_epochs_per_graph,
initial_epoch = current_epoch,
validation_data=(
(X_valid, tf.zeros_like(X_valid), tf.zeros_like(X_valid)),
{"classifier": Y_valid_one_hot},
),
callbacks = [plotlosses],
max_queue_size = 100,
steps_per_epoch = batches_per_epoch,
#verbose=0
)
current_epoch+=len(history.history['loss'])
history_list.append(history)
# save score
class_pred = classifier.predict(encoder.predict(X_test))
class_acc = np.mean(np.argmax(class_pred, axis=1) == Y_test)
np.save(save_folder / 'test_loss.npy', (np.nan, class_acc))
# save weights
encoder.save_weights((save_folder / "encoder").as_posix())
classifier.save_weights((save_folder / "classifier").as_posix())
# save history
with open(save_folder / 'history.pickle', 'wb') as file_pi:
pickle.dump([i.history for i in history_list], file_pi)
current_umap_iterations += 1
if len(history_list) > graph_patience+1:
previous_history = [np.mean(i.history['val_classifier_accuracy']) for i in history_list]
best_of_patience = np.max(previous_history[-graph_patience:])
best_of_previous = np.max(previous_history[:-graph_patience])
if (best_of_previous + min_graph_delta) > best_of_patience:
print('Early stopping')
plt.plot(previous_history)
(best_of_previous + min_graph_delta) , best_of_patience
###Output
_____no_output_____
###Markdown
save embedding
###Code
z = encoder.predict(X_train)
reducer = umap.UMAP(verbose=True)
embedding = reducer.fit_transform(z.reshape(len(z), np.product(np.shape(z)[1:])))
plt.scatter(embedding[:, 0], embedding[:, 1], c=Y_train.flatten(), s= 1, alpha = 0.1, cmap = plt.cm.tab10)
plt.scatter(embedding[:, 0], embedding[:, 1], c=Y_train.flatten(), s= 1, alpha = 0.1, cmap = plt.cm.tab10)
np.save(save_folder / 'train_embedding.npy', embedding)
###Output
_____no_output_____ |
examples/20newgroups_in_action/data_mining_experiment.ipynb | ###Markdown
1. Loading Dataset (20 newsgroups dataset)
###Code
from sklearn.datasets import fetch_20newsgroups
train_set = fetch_20newsgroups(subset='train', shuffle=True, random_state=42)
train_set.target
###Output
_____no_output_____
###Markdown
1. Loading News Dataset
###Code
base_path = '/Github/Machine-Learning-in-Action/examples/news_data_generator/news_data_generator/spiders/'
file_name = 'timesall.jl'
data_path = base_path + file_name
news = []
with open(data_path, "r+", encoding="utf8") as f:
for item in jsonlines.Reader(f):
news.append(item)
len(news)
## 小数据集用作测试
test = news[:2000]
print(len(test))
from functools import reduce
def list_dict_duplicate_removal(data_list):
run_function = lambda x, y: x if y in x else x + [y]
return reduce(run_function, [[], ] + data_list)
news_set = list_dict_duplicate_removal(news)
len(news_set)
news[0]
remove_tags(''.join(news[0]['content']))
X = []
y = []
for i in news_set:
X.append(i['content'])
y.append(i['module'])
###Output
_____no_output_____
###Markdown
Train Test dataset split
###Code
from sklearn.model_selection import train_test_split
X, y = np.arange(10).reshape((5, 2)), range(5)  # note: this toy example from the scikit-learn docs overwrites the news X and y built above
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=42)
###Output
_____no_output_____
###Markdown
2. Data preprocessing Data Cleaning: regular expersion
###Code
import re
# note: the rules below cannot filter \\, \, Chinese parentheses (), and a few other characters
r1 = u'[a-zA-Z0-9’!"#$%&\'()*+,-./:;<=>?@,。?★、…【】《》?“”‘’![\\]^_`{|}~]+'  # users can also customize the characters to filter here
# this rule also does not filter everything
r2 = "[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()]+"
# \\\ filters single and double backslashes, / filters single and double forward slashes; the first bracket group holds English punctuation, the second holds Chinese punctuation; the | before the second bracket group must not be omitted, otherwise filtering is incomplete
r3 = "[.!//_,$&%^*()<>+\"'?@#-|:~{}]+|[——!\\\\,。=?、:“”‘’《》【】¥……()]+"
# remove brackets and everything inside them
r4 = "\\【.*?】+|\\《.*?》+|\\#.*?#+|[.!/_,$&%^*()<>+" "'?@|:~{}#]+|[——!\\\,。=?、:“”‘’¥……()《》【】]"
sentence = "hello! wo?rd!. \n"
cleanr = re.compile('<.*?>') # regex that matches HTML tags
sentence = re.sub(cleanr, ' ', sentence) # strip HTML tags
sentence = re.sub(r4, '', sentence)
print(sentence)
for i in tqdm(range(len(train_set.data))):
train_set.data[i] = re.sub(cleanr, ' ', train_set.data[i])
train_set.data[i] = re.sub(r4, '', train_set.data[i])
    train_set.data[i] = re.sub('\n\r', '', train_set.data[i]) # TODO: this does not actually remove \n
train_set.data[i] = train_set.data[i].lower()
print(train_set.data[:1])
###Output
9%|▉ | 1048/11314 [00:00<00:01, 5221.87it/s]
###Markdown
Data Cleaning: stop words
###Code
import nltk
from nltk.tokenize import word_tokenize
# nltk.download()
# nltk.download('stopwords')
"""引入停用词表"""
from nltk.corpus import stopwords
stop = set(stopwords.words('english'))
print('English Stop Words List:\n', stop)
# sentence = "this is a apple"
# filter_sentence = [
# w for w in sentence.split(' ') if w not in stopwords.words('english')
# ]
# print(filter_sentence)
"""匹配停用词"""
for i in tqdm(range(len(train_set.data))):
train_set.data[i] = " ".join([
w for w in train_set.data[i].split(' ')
if w not in stopwords.words('english')
])
print(train_set.data[:1])
###Output
0%| | 4/11314 [00:00<04:42, 39.98it/s]
###Markdown
Normalization: lemmatization
###Code
"""stemming -- 词干提取(no use)"""
from nltk.stem import SnowballStemmer
# stemmer = SnowballStemmer("english") # 选择语言
# stemmer.stem("leaves") # 词干化单词
"""lemmatization -- 词型还原(use)"""
from nltk.stem import WordNetLemmatizer
nltk.download('wordnet')
wnl = WordNetLemmatizer()
# print(wnl.lemmatize('leaves'))
for i in tqdm(range(len(train_set.data))):
train_set[i] = wnl.lemmatize(train_set.data[i])
print(train_set.data[:1])
###Output
[nltk_data] Error loading wordnet: <urlopen error [Errno 111]
[nltk_data] Connection refused>
100%|██████████| 11314/11314 [00:00<00:00, 61342.63it/s]
###Markdown
Extracting Features
###Code
from sklearn.feature_extraction.text import CountVectorizer
"""build data dict"""
count_vect = CountVectorizer() # token count vectorizer
X_train_counts = count_vect.fit_transform(train_set.data) # convert the text into count feature vectors
print(X_train_counts[:0])
"""TF-IDF: Term Frequency-Inverse Document Frequency"""
from sklearn.feature_extraction.text import TfidfTransformer
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
print(X_train_tfidf[:0])
print(tfidf_transformer)
###Output
TfidfTransformer(norm='l2', smooth_idf=True, sublinear_tf=False, use_idf=True)
###Markdown
3. Bayes Classifier 3.1 Train Bayes
###Code
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
multinomialNB_pipeline = Pipeline([('Vectorizer',
CountVectorizer(stop_words='english',
max_df=0.5)),
('TF_IDF', TfidfTransformer()),
('MultinomialNB', MultinomialNB())])
multinomialNB_pipeline.fit(train_set.data, train_set.target)
print(" Show gaussianNB_pipeline:\n", multinomialNB_pipeline)
# test the classifier on custom documents
docs_new = ['God is love', 'OpenGL on the GPU is fast'] # test documents
predicted = multinomialNB_pipeline.predict(docs_new)
print(predicted) # predicted class index for each document
for doc, category in zip(docs_new, predicted):
print('%r => %s' % (doc, train_set.target_names[category]))
###Output
[15 7]
'God is love' => soc.religion.christian
'OpenGL on the GPU is fast' => rec.autos
###Markdown
3.2 Evaluation Bayes
###Code
from sklearn.metrics import classification_report, confusion_matrix
test_set = fetch_20newsgroups(subset='test', shuffle=True, random_state=42)
docs_test = test_set.data
predicted = multinomialNB_pipeline.predict(docs_test)
print(
classification_report(test_set.target,
predicted,
target_names=test_set.target_names))
# calculate confusion_matrix and plot it
confusion_mat = confusion_matrix(test_set.target, predicted)
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(confusion_mat,
annot=True,
fmt='d',
xticklabels=test_set.target_names,
yticklabels=test_set.target_names)
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
###Output
precision recall f1-score support
alt.atheism 0.81 0.69 0.74 319
comp.graphics 0.78 0.72 0.75 389
comp.os.ms-windows.misc 0.80 0.72 0.76 394
comp.sys.ibm.pc.hardware 0.67 0.80 0.73 392
comp.sys.mac.hardware 0.87 0.81 0.84 385
comp.windows.x 0.87 0.79 0.83 395
misc.forsale 0.87 0.79 0.83 390
rec.autos 0.89 0.91 0.90 396
rec.motorcycles 0.93 0.96 0.95 398
rec.sport.baseball 0.92 0.92 0.92 397
rec.sport.hockey 0.88 0.98 0.93 399
sci.crypt 0.75 0.96 0.84 396
sci.electronics 0.84 0.65 0.73 393
sci.med 0.92 0.79 0.85 396
sci.space 0.82 0.94 0.88 394
soc.religion.christian 0.62 0.96 0.76 398
talk.politics.guns 0.66 0.94 0.78 364
talk.politics.mideast 0.94 0.94 0.94 376
talk.politics.misc 0.94 0.52 0.67 310
talk.religion.misc 0.95 0.24 0.38 251
accuracy 0.82 7532
macro avg 0.84 0.80 0.80 7532
weighted avg 0.83 0.82 0.81 7532
###Markdown
4. SVM Classifier 4.1 Train svm
###Code
from sklearn.linear_model import SGDClassifier
SGDClassifier_pipline = Pipeline([('Vectorizer',
CountVectorizer(stop_words='english',
max_df=0.5)),
('TF_IDF', TfidfTransformer()),
('SGDClassifier',
SGDClassifier(loss='hinge',
penalty='l2',
alpha=1e-3,
random_state=42))])
print(" Show SGDClassifier_pipline:\n", SGDClassifier_pipline)
###Output
Show SGDClassifier_pipline:
Pipeline(memory=None,
steps=[('Vectorizer',
CountVectorizer(analyzer='word', binary=False,
decode_error='strict',
dtype=<class 'numpy.int64'>, encoding='utf-8',
input='content', lowercase=True, max_df=0.5,
max_features=None, min_df=1,
ngram_range=(1, 1), preprocessor=None,
stop_words='english', strip_accents=None,
token_pattern='(?u)\\b\\w\\w+\\b',
tokenizer=None, voc...
('SGDClassifier',
SGDClassifier(alpha=0.001, average=False, class_weight=None,
early_stopping=False, epsilon=0.1, eta0=0.0,
fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge',
max_iter=1000, n_iter_no_change=5, n_jobs=None,
penalty='l2', power_t=0.5, random_state=42,
shuffle=True, tol=0.001, validation_fraction=0.1,
verbose=0, warm_start=False))],
verbose=False)
###Markdown
4.2 Evaluation SVM
###Code
SGDClassifier_pipline.fit(train_set.data, train_set.target)
predicted = SGDClassifier_pipline.predict(docs_test)
print(classification_report(test_set.target, predicted, target_names=test_set.target_names))
print(confusion_matrix(test_set.target, predicted))
###Output
precision recall f1-score support
alt.atheism 0.74 0.70 0.71 319
comp.graphics 0.79 0.69 0.74 389
comp.os.ms-windows.misc 0.73 0.78 0.75 394
comp.sys.ibm.pc.hardware 0.72 0.67 0.69 392
comp.sys.mac.hardware 0.81 0.82 0.82 385
comp.windows.x 0.85 0.76 0.81 395
misc.forsale 0.83 0.87 0.85 390
rec.autos 0.91 0.90 0.90 396
rec.motorcycles 0.93 0.96 0.94 398
rec.sport.baseball 0.88 0.92 0.90 397
rec.sport.hockey 0.87 0.98 0.93 399
sci.crypt 0.85 0.96 0.90 396
sci.electronics 0.81 0.62 0.70 393
sci.med 0.89 0.87 0.88 396
sci.space 0.84 0.97 0.90 394
soc.religion.christian 0.73 0.93 0.82 398
talk.politics.guns 0.70 0.93 0.80 364
talk.politics.mideast 0.92 0.93 0.92 376
talk.politics.misc 0.88 0.56 0.69 310
talk.religion.misc 0.79 0.39 0.53 251
accuracy 0.82 7532
macro avg 0.82 0.81 0.81 7532
weighted avg 0.83 0.82 0.82 7532
[[222 1 0 1 0 0 2 0 2 3 0 2 1 9 6 49 2 5
1 13]
[ 2 269 22 8 9 25 3 1 4 9 4 9 5 2 9 2 2 3
0 1]
[ 0 9 308 23 11 11 0 1 2 4 3 7 2 1 6 1 0 1
1 3]
[ 4 8 30 263 21 4 15 2 3 3 2 3 21 2 5 0 2 2
1 1]
[ 1 4 8 23 316 1 9 0 1 4 2 2 6 1 1 0 2 1
3 0]
[ 1 30 42 0 3 302 2 0 1 1 1 2 1 1 6 1 1 0
0 0]
[ 0 2 0 18 7 0 339 8 1 2 3 1 3 2 2 0 1 1
0 0]
[ 1 1 1 2 1 0 10 355 6 1 0 0 9 1 3 0 4 0
1 0]
[ 0 0 0 1 0 0 3 5 384 2 0 0 1 1 0 0 0 0
0 1]
[ 0 0 0 0 1 0 3 0 0 364 28 0 0 0 0 0 1 0
0 0]
[ 0 0 0 0 1 0 0 0 0 3 393 0 0 0 0 2 0 0
0 0]
[ 0 1 1 0 2 0 3 3 0 1 0 380 2 1 0 0 1 0
1 0]
[ 7 5 9 25 11 4 8 9 6 6 3 28 242 5 13 6 2 2
2 0]
[ 2 4 0 0 2 2 4 0 2 5 3 1 4 345 2 7 3 4
5 1]
[ 0 3 0 0 1 0 1 0 0 0 1 1 0 3 381 2 0 0
1 0]
[ 9 0 2 1 0 0 0 0 1 1 0 0 2 1 4 372 0 0
0 5]
[ 0 0 0 1 1 0 2 2 1 2 1 6 0 2 3 0 338 1
3 1]
[ 10 1 0 0 1 4 0 1 0 2 2 1 0 1 0 1 1 350
1 0]
[ 3 1 0 0 1 1 1 0 1 0 3 4 0 4 6 3 101 6
175 0]
[ 40 1 1 0 0 0 2 2 0 1 1 1 0 4 6 63 21 5
4 99]]
###Markdown
4.3 SVM
###Code
from sklearn.svm import LinearSVC
SVC_pipline = Pipeline([('Vectorizer', CountVectorizer()),
('TF_IDF', TfidfTransformer()),
('SVCClassifier', LinearSVC(random_state=42))])
print(" Show SVC_pipline:\n", SVC_pipline)
from sklearn.metrics import classification_report, confusion_matrix
test_set = fetch_20newsgroups(subset='test', shuffle=True, random_state=42)
docs_test = test_set.data
SVC_pipline.fit(train_set.data, train_set.target)
predicted = SVC_pipline.predict(docs_test)
print(classification_report(test_set.target, predicted, target_names=test_set.target_names))
print(confusion_matrix(test_set.target, predicted))
###Output
precision recall f1-score support
alt.atheism 0.82 0.80 0.81 319
comp.graphics 0.76 0.80 0.78 389
comp.os.ms-windows.misc 0.77 0.73 0.75 394
comp.sys.ibm.pc.hardware 0.71 0.76 0.74 392
comp.sys.mac.hardware 0.84 0.86 0.85 385
comp.windows.x 0.87 0.76 0.81 395
misc.forsale 0.83 0.91 0.87 390
rec.autos 0.92 0.91 0.91 396
rec.motorcycles 0.95 0.95 0.95 398
rec.sport.baseball 0.92 0.95 0.93 397
rec.sport.hockey 0.96 0.98 0.97 399
sci.crypt 0.93 0.94 0.93 396
sci.electronics 0.81 0.79 0.80 393
sci.med 0.90 0.87 0.88 396
sci.space 0.90 0.93 0.92 394
soc.religion.christian 0.84 0.93 0.88 398
talk.politics.guns 0.75 0.92 0.82 364
talk.politics.mideast 0.97 0.89 0.93 376
talk.politics.misc 0.82 0.62 0.71 310
talk.religion.misc 0.75 0.61 0.68 251
accuracy 0.85 7532
macro avg 0.85 0.85 0.85 7532
weighted avg 0.85 0.85 0.85 7532
[[254 1 0 1 0 2 1 0 2 0 0 1 1 6 7 22 0 1
1 19]
[ 1 313 12 8 5 17 3 2 1 3 1 4 9 0 4 2 0 1
0 3]
[ 0 17 288 40 7 13 4 1 0 4 0 2 4 2 5 2 0 0
1 4]
[ 0 14 20 297 21 1 13 2 1 1 0 1 19 0 1 0 0 0
0 1]
[ 0 5 4 19 330 0 9 0 0 2 1 1 9 1 0 0 2 0
1 1]
[ 1 35 38 3 3 302 3 1 2 0 0 0 1 1 4 0 1 0
0 0]
[ 0 1 1 10 7 0 353 5 1 2 1 1 5 1 0 0 0 0
1 1]
[ 0 1 0 5 1 1 10 359 6 2 0 0 6 1 0 0 2 0
2 0]
[ 0 0 0 1 0 0 4 9 380 1 0 0 1 1 0 1 0 0
0 0]
[ 0 0 1 0 0 0 4 2 0 378 10 0 1 0 0 0 0 0
1 0]
[ 0 0 0 1 1 0 1 0 0 3 392 1 0 0 0 0 0 0
0 0]
[ 1 3 1 0 2 2 4 2 1 3 0 372 2 0 0 0 2 0
1 0]
[ 0 6 5 23 9 1 5 5 3 2 1 8 310 8 2 1 2 1
0 1]
[ 3 5 3 4 2 2 3 0 1 4 1 1 9 343 1 3 1 3
5 2]
[ 0 7 0 1 1 2 1 0 0 0 1 1 4 5 368 1 1 0
1 0]
[ 4 0 2 1 0 0 0 0 0 1 0 0 3 2 3 372 0 0
0 10]
[ 1 0 0 1 1 0 2 0 1 2 0 4 0 3 2 0 334 1
8 4]
[ 11 1 0 0 1 3 0 1 0 3 0 0 0 0 1 6 2 335
12 0]
[ 3 1 0 0 1 1 1 1 0 0 0 3 0 3 5 4 88 2
192 5]
[ 30 2 1 1 0 0 3 0 0 1 0 0 0 3 4 31 12 1
8 154]]
###Markdown
GridSearch
###Code
from sklearn.model_selection import GridSearchCV
bayes_params = {
'Vectorizer__ngram_range': [(1, 1), (1, 2)],
'TF_IDF__use_idf': (True, False),
'MultinomialNB__alpha': (1e-2, 1e-3),
}
grid = GridSearchCV(multinomialNB_pipeline, bayes_params, cv=5, iid=False, n_jobs=-1)
grid.fit(train_set.data, train_set.target)
# summarize the results of the grid search
print(grid.best_score_)
print(grid.best_params_)
print(grid.best_estimator_)
svm_params = {
'Vectorizer__ngram_range': [(1, 1), (1, 2)],
'TF_IDF__use_idf': (True, False),
'SGDClassifier__alpha': (1e-2, 1e-3),
}
grid = GridSearchCV(SGDClassifier_pipline, svm_params, cv=5, iid=False, n_jobs=-1)
grid.fit(train_set.data, train_set.target)
# summarize the results of the grid search
print(grid.best_score_)
print(grid.best_params_)
print(grid.best_estimator_)
###Output
0.9024208153638664
{'SGDClassifier__alpha': 0.001, 'TF_IDF__use_idf': True, 'Vectorizer__ngram_range': (1, 2)}
Pipeline(memory=None,
steps=[('Vectorizer',
CountVectorizer(analyzer='word', binary=False,
decode_error='strict',
dtype=<class 'numpy.int64'>, encoding='utf-8',
input='content', lowercase=True, max_df=0.5,
max_features=None, min_df=1,
ngram_range=(1, 2), preprocessor=None,
stop_words='english', strip_accents=None,
token_pattern='(?u)\\b\\w\\w+\\b',
tokenizer=None, voc...
('SGDClassifier',
SGDClassifier(alpha=0.001, average=False, class_weight=None,
early_stopping=False, epsilon=0.1, eta0=0.0,
fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge',
max_iter=1000, n_iter_no_change=5, n_jobs=None,
penalty='l2', power_t=0.5, random_state=42,
shuffle=True, tol=0.001, validation_fraction=0.1,
verbose=0, warm_start=False))],
verbose=False)
|
homework_02_measures/public_homework_02_measures.ipynb | ###Markdown
Assignment 2: Computing and interpreting node centrality measures in real networks. Initialization. As always, we start by installing the `IGraph` and `CairoCffi` libraries (the latter is needed to visualize graphs).
###Code
!pip install python-igraph
!pip install cairocffi
###Output
_____no_output_____
###Markdown
Next we download a few datasets. First, the Karate Club data.
###Code
!wget "https://raw.githubusercontent.com/prbocca/na101_master/master/homework_02_measures/karate.graphml" -O "karate.graphml"
###Output
_____no_output_____
###Markdown
Then, the AIDS blogs data.
###Code
!wget "https://raw.githubusercontent.com/prbocca/na101_master/master/homework_02_measures/aidsblog.edgelist" -O "aidsblog.edgelist"
import igraph as ig
import matplotlib.pyplot as plt
import random
import statistics
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
1) Initial analysis. We will follow (in Python) Sections 4.1 and 4.2 of the book [SANDR]. We recommend reading them in parallel to give more context to this assignment. In what follows, we use the notation [SANDR4.x.y] to refer to Section 4.x.y of the book. We start by loading the graph and checking some of its properties.
###Code
g_karate = ig.load("karate.graphml")
print(g_karate.summary())
g_karate.vcount(), g_karate.ecount()
###Output
_____no_output_____
###Markdown
It is an undirected graph with weighted edges:
###Code
g_karate.is_directed()
g_karate.es[0].attributes()
###Output
_____no_output_____
###Markdown
Recall how to visualize it.
###Code
visual_style = dict()
visual_style["bbox"] = (400, 400)
# map the color attribute values to a palette
id_gen = ig.datatypes.UniqueIdGenerator()
color_indices = [id_gen.add(value) for value in g_karate.vs['color']]
palette = ig.drawing.colors.ClusterColoringPalette(len(id_gen))
colors = [palette[index] for index in color_indices]
visual_style["vertex_color"] = colors
ig.plot(g_karate, **visual_style)
###Output
_____no_output_____
###Markdown
In the next step, we ask you to find all neighbors of node `9` and the corresponding edges. We recommend using the following functions.
###Code
help(ig.Graph.neighbors)
help(ig.Graph.get_eid)
neighbors = None
edges = []
### START CODE HERE
### END CODE HERE
print(neighbors)
print(edges)
###Output
_____no_output_____
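###Markdown
A minimal sketch of one possible solution, assuming node `9` means the vertex with index 9 (an illustration, not the official answer):
###Code
neighbors = g_karate.neighbors(9)                    # neighbor vertex indices of node 9
edges = [g_karate.get_eid(9, v) for v in neighbors]  # edge ids connecting node 9 to each neighbor
print(neighbors)
print(edges)
###Output
_____no_output_____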
###Markdown
2) Degree distribution. As the first tool for analyzing the graph as a whole (rather than a particular node), we look at the degree distribution, that is, a histogram of the frequency of the degrees of all vertices in the graph [SAND4.2.1]. 2.1) Plot the histogram of the degree distribution of `g_karate`, using the `ig.Graph.degree()` function.
###Code
### START CODE HERE
### END CODE HERE
###Output
_____no_output_____
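###Markdown
One possible sketch for the plot above (not the official solution):
###Code
degrees = g_karate.degree()                      # degree of every vertex
plt.hist(degrees, bins=range(max(degrees) + 2))  # one bin per integer degree
plt.xlabel("Degree")
plt.ylabel("Frequency")
plt.title("Degree distribution of g_karate")
plt.show()
###Output
_____no_output_____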
###Markdown
2.2) Computing the graph "strength". The concept of strength is very similar to the degree distribution, with one difference: in the degree distribution, the degree is computed as the number of edges of each vertex. But what happens if the edges have weights? In that case we can use the strength, and consequently the strength distribution [SAND4.2.1]. Plot the histogram of the strength of `g_karate`, using the `ig.Graph.strength()` function.
###Code
### START CODE HERE
### END CODE HERE
###Output
_____no_output_____
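###Markdown
A possible sketch, assuming the edge weight attribute of this graph is named `weight` (not the official solution):
###Code
strengths = g_karate.strength(weights="weight")  # weighted degree (strength) of every vertex
plt.hist(strengths, bins=20)
plt.xlabel("Strength")
plt.ylabel("Frequency")
plt.title("Strength distribution of g_karate")
plt.show()
###Output
_____no_output_____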
###Markdown
2.3) Average neighbor degree as a function of a node's own degree. Another metric that helps describe the structure of a graph is understanding how popular the neighbors of a node are [SAND4.2.1]. For example, in a star graph the average neighbor degree of all nodes but one is `n-1`, while the average neighbor degree of the remaining node is `1`. For each node, compute the average of its neighbors' degrees.
###Code
degree = g_karate.degree()
#list holding the average neighbor degree of each node
avgerage_degree_neighbours = None
### START CODE HERE
### END CODE HERE
fig, ax = plt.subplots(figsize=(8, 6))
ax.scatter(degree, avgerage_degree_neighbours)
ax.set_xlabel("Degree")
ax.set_ylabel("Neighbour Avg Degree")
ax.set_title("Verage neighbor degree versus vertex degree")
plt.show()
###Output
_____no_output_____
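###Markdown
One way to fill the placeholder above (a sketch; variable names follow the cell above):
###Code
degree = g_karate.degree()
avgerage_degree_neighbours = [
    np.mean([degree[v] for v in g_karate.neighbors(u)])  # mean degree of u's neighbors
    for u in range(g_karate.vcount())
]
###Output
_____no_output_____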
###Markdown
3) Centrality measures. Having worked with distributions related to vertex degree, we move on to node centrality and how these values can be used to describe the graph [SANDR4.2.2]. We will focus on the following measures:
* Degree
* Betweenness
* Closeness
* Eigenvalue centrality
* Page Rank
* Hub / Authority score
3.1) Ranking of the most important vertices of the graph `g_karate`
###Code
degree = g_karate.degree()
betweeness = g_karate.betweenness()
closeness = g_karate.closeness()
eig_cent = g_karate.evcent(directed=False)
page_rank = g_karate.pagerank(directed=False)
hub = g_karate.hub_score()
authority = g_karate.authority_score()
df = pd.DataFrame([degree, betweeness, closeness, eig_cent, page_rank, hub, authority]).T
df.columns = ["Degree", "Betweenness", "Closeness", "Eigenvalue Centrality", "Page Rank", "Hub", "Authority"]
df.sort_values("Degree", ascending=False).head(10)
###Output
_____no_output_____
###Markdown
Obtain a dataframe with 5 rows, where each row holds the most important vertices according to each centrality measure.
###Code
### START CODE HERE
### END CODE HERE
# Which vertices appear in the top 5 of all the centrality measures
### START CODE HERE
### END CODE HERE
###Output
_____no_output_____
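###Markdown
A possible sketch for both placeholders above (not the official solution): build a 5-row table whose columns hold the top-5 vertex indices per measure, then intersect the columns.
###Code
top5 = pd.DataFrame({col: df[col].nlargest(5).index.values for col in df.columns})
display(top5)

# vertices appearing in the top 5 of every centrality measure
common = set(top5[top5.columns[0]])
for col in top5.columns[1:]:
    common &= set(top5[col])
print(common)
###Output
_____no_output_____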
###Markdown
3.2) Observing the usefulness of hub/authority in the AIDS blogs network. We start by loading the network [SANDR4.2.2].
###Code
g_aids = ig.load("aidsblog.edgelist")
ig.summary(g_aids)
###Output
_____no_output_____
###Markdown
We compute the hub and authority centralities.
###Code
#store the centrality values in hub_aids and authority_aids (these names are used by the plotting cell below)
hub_aids = authority_aids = None
### START CODE HERE
### END CODE HERE
###Output
_____no_output_____
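###Markdown
A minimal sketch for the placeholder above (variable names match the plotting cell below):
###Code
hub_aids = g_aids.hub_score()              # hub centrality of each vertex
authority_aids = g_aids.authority_score()  # authority centrality of each vertex
###Output
_____no_output_____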
###Markdown
Visualize and interpret
###Code
fig, ax = plt.subplots(1, 2, figsize=(16, 8))
layout = g_aids.layout_kamada_kawai()
visual_style = {}
visual_style["layout"] = layout
visual_style["bbox"] = (500, 500)
visual_style["margin"] = 10
#Hubs
visual_style["vertex_size"] =10 * np.sqrt(hub_aids)
ax_ = ax[0]
ig.plot(g_aids, **visual_style, target=ax_)
_ = ax_.axis("off")
ax_.set_title("Hubs")
#Authorities
visual_style["vertex_size"] =10 * np.sqrt(authority_aids)
ax_ = ax[1]
ig.plot(g_aids, **visual_style, target=ax_)
_ = ax_.axis("off")
ax_.set_title("Authorities")
plt.show()
###Output
_____no_output_____
###Markdown
4) Real social networks. To ground our analysis, and at the same time practice doing it on real data, we will focus on a dataset extracted from Twitter. Twitter allows partial access to network data using a free developer account. On 30/08/2018 at 11.30am, the 5000 most recent tweets about Uruguay were downloaded. 4.1) Load and explore the data
###Code
!wget "https://raw.githubusercontent.com/prbocca/na101_master/master/homework_02_measures/tweets_uru.csv" -O "tweets_uru.csv"
###Output
_____no_output_____
###Markdown
This time we start the analysis from the raw data (and not from the graph). We will manipulate the data to obtain the Twitter graph; this is the usual situation when working with real data. For this we will use the `pandas` library, which is ubiquitous in the Python ecosystem. We start by loading the dataset and looking at some general characteristics.
###Code
df_tweets = pd.read_csv("tweets_uru.csv")
print(df_tweets.shape)
display(df_tweets.head())
df_tweets.info()
df_tweets.nunique()
###Output
_____no_output_____
###Markdown
The dataset has 17 columns; the ones of interest for this exercise are:
* `text`: the text of the tweet
* `screenName`: the user who sent the tweet
* `isRetweet`: whether the tweet is a retweet or an original text. Note: all tweets that are retweets have in the text field: "RT @original_user: text"
* `retweetCount`: the number of times this tweet was retweeted
###Code
columns = ['text', 'screenName', 'isRetweet', 'retweetCount']
display(df_tweets[columns])
###Output
_____no_output_____
###Markdown
4.2) Most popular tweets, and removing SPAM. The tweets with the most retweets seem to be spam.
###Code
df_tweets.sort_values("retweetCount", ascending=False).head(10)
###Output
_____no_output_____
###Markdown
Let's investigate those tweets further
###Code
fig, ax = plt.subplots(figsize=(16, 4))
ax.set_yscale("log")
df_tweets["retweetCount"].hist(bins=100, ax=ax)
###Output
_____no_output_____
###Markdown
We observe a large gap in popularity between the tweets with a few hundred retweets and those with more than 15000 retweets. It seems we can set a cutoff at 15000, treating the tweets above it as spam. Note that we remove 28 (spam) tweets.
###Code
df_tweets = df_tweets[df_tweets["retweetCount"] < 15000]
print(df_tweets.shape)
###Output
_____no_output_____
###Markdown
Repeat the histogram of retweet counts
###Code
### START CODE HERE
### END CODE HERE
###Output
_____no_output_____
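###Markdown
One possible sketch, reusing the plotting code from the cell before the spam filter:
###Code
fig, ax = plt.subplots(figsize=(16, 4))
ax.set_yscale("log")
df_tweets["retweetCount"].hist(bins=100, ax=ax)
###Output
_____no_output_____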
###Markdown
Show the 5 most popular tweets (with the most retweets) that are not spam.
###Code
### TIPs: sort the data by the 'retweetCount' column
### START CODE HERE
### END CODE HERE
###Output
_____no_output_____
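###Markdown
A possible sketch (not the official solution):
###Code
df_tweets.sort_values("retweetCount", ascending=False).head(5)
###Output
_____no_output_____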
###Markdown
4.3) Building the who-retweets-whom network. We are going to build the network of who retweets whom, so tweets without retweets are of no use to us, and we proceed to remove them. We also remove tweets with only one retweet; otherwise the network would be too dense. Note that we remove about 1500 tweets (which were not forwarded, or were forwarded only once).
###Code
df_tweets = df_tweets[df_tweets["retweetCount"] >= 2]
print(df_tweets.shape)
###Output
_____no_output_____
###Markdown
Next, we propose extracting a network from these data. To do so, we will create an edge $e = (u,v)$ between two nodes $u$ and $v$ if $u$ retweeted $v$.
Using a simple heuristic, we found 2964 such edges.
tweet_edges = None #dataframe con dos columnas "source" y "retweeter", con los nombres de usuarios de quien es el original del tweet y quien lo reenvio
### TIPs: solo para los tweets que son retweets, quedarse con el usuario que origina el tweet dentro del campo text
### START CODE HERE
### END CODE HERE
tweet_edges
###Output
_____no_output_____
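###Markdown
One possible heuristic for the placeholder above (an illustration only: the exact regex is an assumption, and the resulting edge count may differ slightly from the 2964 reported above):
###Code
is_rt = df_tweets["text"].str.startswith("RT @")                        # retweets start with "RT @user: ..."
sources = df_tweets.loc[is_rt, "text"].str.extract(r'^RT @(\w+):')[0]   # user being retweeted
tweet_edges = pd.DataFrame({
    "source": sources,
    "retweeter": df_tweets.loc[is_rt, "screenName"],
}).dropna().reset_index(drop=True)
tweet_edges
###Output
_____no_output_____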
###Markdown
Once we have the edges, we build the directed graph of who retweets whom. This graph has 2368 nodes and 2964 edges.
###Code
g_tweets = ig.Graph.TupleList(tweet_edges.itertuples(index=False), directed=True)
g_tweets.summary()
###Output
_____no_output_____
###Markdown
Visualizing a graph this large with vertex names is a real challenge. Below is an acceptable visualization.
###Code
random.seed(1234)
visual_style = dict()
visual_style["layout"] = g_tweets.layout_drl(options={'simmer_attraction':0})
visual_style["bbox"] = (1200, 1200)
visual_style["vertex_size"] = 3
visual_style["vertex_color"] = 'red'
visual_style["vertex_label"] = g_tweets.vs["name"]
visual_style["vertex_label_size"] = 4
visual_style["edge_width"] = 0.3
visual_style["edge_arrow_size"] = 0.1
ig.plot(g_tweets, **visual_style)
###Output
_____no_output_____
###Markdown
4.4) Importance of users (vertex centrality). What is the name of the user with the most retweets in the network? Solution: `jgamorin`.
###Code
### START CODE HERE
### END CODE HERE
###Output
_____no_output_____
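###Markdown
A possible sketch, assuming edges point from the original author to the retweeter (as constructed above), so the most-retweeted user is the vertex with the largest out-degree:
###Code
out_deg = g_tweets.outdegree()          # number of times each user's tweets were retweeted
idx = int(np.argmax(out_deg))
print(g_tweets.vs[idx]["name"], out_deg[idx])
###Output
_____no_output_____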
###Markdown
We can compute the centrality metrics already seen and compare the most popular users according to them. Solution (ordered from highest to lowest centrality):
| betweenness | hub | authority |
|---------------|----------------|-----------------|
| jgamorin | jgamorin | Nicomatute19 |
| Rubiia215 | emekavoces | ElOjoChurrinche |
| YerbaSaraUy | PabloLarraz10 | bugabea |
| nacho_uriarte | MaurAntunez | colombopp |
| Cabrosa18 | Ciudadanos_MVD | juan37778491 |
###Code
### START CODE HERE
### END CODE HERE
###Output
_____no_output_____ |
src/MANOVA_ANCOVA.ipynb | ###Markdown
Preprocessing for (M)AN(C)OVA
###Code
fnames = [
"sham_study.pkl", "vlPFC_study.pkl", "eon_study.pkl", "eoff_study.pkl",
"esham_study.pkl", "evlPFC_study.pkl"
]
studies = []
for a in fnames:
studies.append(
Study.load_from_file(a)
)
df = pd.DataFrame(
{
"group": sum([[i]*len(a) for i,a in enumerate(studies)], []),
"AUROC": sum([a.compute_study_aucs() for a in studies], []),
"hits": sum(
[
[b[0] for b in a.compute_hits_and_FAs()]
for a in studies
], []
),
"FAs": sum(
[
[b[1] for b in a.compute_hits_and_FAs()]
for a in studies
], []
),
"mean_RT": sum(
[
[np.mean(b[1]) for b in a.get_participant_RT()]
for a in studies
], []
),
"std_RT": sum(
[
[np.std(b[1]) for b in a.get_participant_RT()]
for a in studies
], []
),
"age": sum(
[
[
ages[
ages["participant"] == int(
op.split(b)[-1].split("_")[1]
)
]["age"].values
for b in a.fns
]
for a in studies
], []
)
}
)
df["d_prime"] = df["hits"] - df["FAs"]
df["age"] = df["age"].apply(lambda x: 0 if len(x) < 1 else x[0])
df["hits_FAs_avg"] = 1/2*(df["hits"] + df["FAs"])
ru_df = df[df["group"] < 4]
en_df = df[df["group"] >= 4]
###Output
_____no_output_____
###Markdown
ANOVA I have recomputed one-way ANOVA for each accuracy measure (and reaction time).Next slide shows formulae for ANOVA, the one after that shows the results (DF for group and residual, F and PR(>F)) for all data, Russian sample and English sample.
###Code
ANOVA_formulae = [
"AUROC ~ group",
"hits ~ group",
"FAs ~ group",
"mean_RT ~ group",
"d_prime ~ group",
]
ANOVA_PRs_RU = [
anova_lm(ols(a, data=ru_df).fit()) for a in ANOVA_formulae
]
ANOVA_PRs_EN = [
anova_lm(ols(a, data=en_df).fit()) for a in ANOVA_formulae
]
ANOVA_PRs = [
anova_lm(ols(a, data=df).fit()) for a in ANOVA_formulae
]
ANOVA_output = pd.DataFrame(
{
"All group DF": [a.loc["group"]["df"] for a in ANOVA_PRs],
"All residual DF": [a.loc["Residual"]["df"] for a in ANOVA_PRs],
"All F": [a.loc["group"]["F"] for a in ANOVA_PRs],
"All PR(>F)": [a.loc["group"]["PR(>F)"] for a in ANOVA_PRs],
"Russian group DF": [a.loc["group"]["df"] for a in ANOVA_PRs_RU],
"Russian residual DF": [a.loc["Residual"]["df"] for a in ANOVA_PRs_RU],
"Russian F": [a.loc["group"]["F"] for a in ANOVA_PRs_RU],
"Russian PR(>F)": [a.loc["group"]["PR(>F)"] for a in ANOVA_PRs_RU],
"English group DF": [a.loc["group"]["df"] for a in ANOVA_PRs_EN],
"English residual DF": [a.loc["Residual"]["df"] for a in ANOVA_PRs_EN],
"English F": [a.loc["group"]["F"] for a in ANOVA_PRs_EN],
"English PR(>F)": [a.loc["group"]["PR(>F)"] for a in ANOVA_PRs_EN],
}
)
ANOVA_output.index = [a.split(" ~ ")[0] for a in ANOVA_formulae]
ANOVA_output.T
###Output
_____no_output_____
###Markdown
ANCOVA I have computed one-way ANCOVA for each accuracy measure (and reaction time).Next slide shows formulae for ANCOVA, the one after that shows the results (DF for group and residual, F and PR(>F)) for aRussian sample only since I have no age data for English sample.
###Code
ANCOVA_formulae = [
"AUROC ~ C(group) + age",
"hits ~ C(group) + age",
"FAs ~ C(group) + age",
"mean_RT ~ C(group) + age",
"d_prime ~ C(group) + age",
]
ANCOVA_PRs_RU = [
anova_lm(ols(a, data=ru_df).fit()) for a in ANCOVA_formulae
]
ANCOVA_output = pd.DataFrame(
{
"Russian group DF": [a.loc["C(group)"]["df"] for a in ANCOVA_PRs_RU],
"Russian age DF": [a.loc["age"]["df"] for a in ANCOVA_PRs_RU],
"Russian residual DF": [a.loc["Residual"]["df"] for a in ANCOVA_PRs_RU],
"Russian group F": [a.loc["C(group)"]["F"] for a in ANCOVA_PRs_RU],
"Russian age F": [a.loc["age"]["F"] for a in ANCOVA_PRs_RU],
"Russian group PR(>F)": [a.loc["C(group)"]["PR(>F)"] for a in ANCOVA_PRs_RU],
"Russian age": [a.loc["age"]["PR(>F)"] for a in ANCOVA_PRs_RU],
}
)
ANCOVA_output.index = [a.split(" ~ ")[0] for a in ANCOVA_formulae]
ANCOVA_output.T
###Output
_____no_output_____
###Markdown
MANOVA I have computed one-way MANOVA for each accuracy measure (and reaction time).Next slide shows a formula for MANOVA, three last slides show the results for different MANOVA measures (Wilks lambda, Pillai's trace, Hotelling-Lawley trace and Roy's greatest root).
###Code
MANOVA_formulae = [
"AUROC + hits + FAs + mean_RT + d_prime ~ C(group)",
]
MANOVA_PRs_RU = [
MANOVA.from_formula(a, data=ru_df).mv_test().summary() for a in MANOVA_formulae
]
MANOVA_PRs_EN = [
MANOVA.from_formula(a, data=en_df).mv_test().summary() for a in MANOVA_formulae
]
MANOVA_PRs = [
MANOVA.from_formula(a, data=df).mv_test().summary() for a in MANOVA_formulae
]
MANOVA_PRs[0]
MANOVA_PRs_RU[0]
MANOVA_PRs_EN[0]
###Output
_____no_output_____ |
synthetic_classification.ipynb | ###Markdown
Evaluate Virtual Ensemble Uncertainty using internal Catboost functions
###Code
xx, yy = get_grid(600, 200)
ext=600
inputs = np.stack((xx.ravel(), yy.ravel()), axis=1)
inputs_ext = np.array([make_new_coordinates(x,y) for x, y in np.stack((xx.ravel(), yy.ravel()), axis=1)])
preds = ens.ensemble[1].virtual_ensembles_predict(inputs_ext, prediction_type='TotalUncertainty', virtual_ensembles_count=10)
know = preds[:,1]-preds[:,0]
xi = np.linspace(-ext, ext, 1000)
yi = np.linspace(-ext, ext, 1000)
levels = 20
zi_entropy = np.clip(griddata(inputs, preds[:,0], (xi[None, :], yi[:, None]), method='cubic'), 0.0, None)
zi_mutual_information = np.clip(griddata(inputs, preds[:,1], (xi[None, :], yi[:, None]), method='cubic'), 0.0, None)
# Print All figures
# Total Uncertainty
plt.contourf(xi, yi, zi_entropy, cmap=cm.Blues, alpha=0.9, levels=levels)
plt.xlim(-ext, ext)
plt.ylim(-ext, ext)
plt.colorbar()
#plt.savefig('total_uncertainty.png', bbox_inches='tight', dpi=500)
plt.show()
plt.close()
# Data Uncertainty
plt.contourf(xi, yi, zi_mutual_information, cmap=cm.Blues, alpha=0.9, levels=levels)
plt.xlim(-ext, ext)
plt.ylim(-ext, ext)
plt.colorbar()
plt.show()
plt.close()
# Knowledge Uncertainty
zi_know = np.clip(griddata(inputs, know, (xi[None, :], yi[:, None]), method='cubic'), 0.0, None)
plt.contourf(xi, yi, zi_know, cmap=cm.Blues, alpha=0.9, levels=levels)
plt.xlim(-ext, ext)
plt.ylim(-ext, ext)
plt.colorbar()
plt.show()
plt.close()
###Output
_____no_output_____
###Markdown
Evaluate Virtual Ensemble uncertainties manually
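The cell below calls a helper `ensemble_uncertainties` that is not defined in this notebook; the sketch that follows shows what it is assumed to compute (total, data, and knowledge uncertainty from per-member class probabilities), under the assumption that the input has shape (n_members, n_samples, n_classes).
###Code
def ensemble_uncertainties(probs, eps=1e-12):
    # probs: assumed shape (n_members, n_samples, n_classes) with per-member class probabilities
    mean_probs = probs.mean(axis=0)
    entropy_of_expected = -np.sum(mean_probs * np.log(mean_probs + eps), axis=1)      # total uncertainty
    expected_entropy = -np.mean(np.sum(probs * np.log(probs + eps), axis=2), axis=0)  # data uncertainty
    return {
        "entropy_of_expected": entropy_of_expected,
        "expected_entropy": expected_entropy,
        "mutual_information": entropy_of_expected - expected_entropy,                 # knowledge uncertainty
    }
###Output
_____no_output_____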
###Code
from scipy.special import softmax
xx, yy = get_grid(600, 200)
ext=600
inputs = np.stack((xx.ravel(), yy.ravel()), axis=1)
#print(inputs)
inputs_ext = np.array([make_new_coordinates(x,y) for x, y in np.stack((xx.ravel(), yy.ravel()), axis=1)])
preds = ens.ensemble[1].virtual_ensembles_predict(inputs_ext, prediction_type='VirtEnsembles', virtual_ensembles_count=100)
probs = softmax(preds,axis=2)
unks = ensemble_uncertainties(probs.transpose([1,0,2]))
xi = np.linspace(-ext, ext, 1000)
yi = np.linspace(-ext, ext, 1000)
levels = 20
zi_entropy = np.clip(griddata(inputs, unks['entropy_of_expected'], (xi[None, :], yi[:, None]), method='cubic'), 0.0, None)
zi_mutual_information = np.clip(griddata(inputs, unks['mutual_information'], (xi[None, :], yi[:, None]), method='cubic'), 0.0, None)
zi_data_uncertainty = np.clip(griddata(inputs, unks['expected_entropy'], (xi[None, :], yi[:, None]), method='cubic'), 0.0,
None)
# Print All figures
# Total Uncertainty
plt.contourf(xi, yi, zi_entropy, cmap=cm.Blues, alpha=0.9, levels=levels)
plt.xlim(-ext, ext)
plt.ylim(-ext, ext)
plt.colorbar()
#plt.savefig('total_uncertainty.png', bbox_inches='tight', dpi=500)
plt.show()
plt.close()
# Data Uncertainty
plt.contourf(xi, yi, zi_data_uncertainty, cmap=cm.Blues, alpha=0.9, levels=levels)
plt.xlim(-ext, ext)
plt.ylim(-ext, ext)
plt.colorbar()
plt.show()
#plt.savefig('data_uncertainty.png', bbox_inches='tight', dpi=500, levels=levels)
plt.close()
# Knowledge Uncertainty
plt.contourf(xi, yi, zi_mutual_information, cmap=cm.Blues, alpha=0.9, levels=levels)
plt.xlim(-ext, ext)
plt.ylim(-ext, ext)
plt.colorbar()
plt.show()
plt.close()
###Output
_____no_output_____ |