path (stringlengths 7-265) | concatenated_notebook (stringlengths 46-17M)
---|---
tutorials/S5 Case Study II - U-Net for Land Cover Mapping.ipynb | ###Markdown
Section 5: Case Study II - U-Net for Land Cover Mapping. Finally, let's move on to a multi-class classification problem: mapping land cover from satellite images with U-Net. 1) U-Net for Land Cover Mapping
###Code
'''first, let's import libraries '''
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Conv2DTranspose, Concatenate, Dropout
###Output
_____no_output_____
###Markdown
1) U-Net for Land Cover Mapping. The input data are RGB satellite images, and the outputs are images of land cover type. There are 4 land cover types, as below: - Pixel value 0: background class - Pixel value 1: building class - Pixel value 2: vegetation/forest class - Pixel value 3: water class
###Code
'''loading data'''
# data is already randomized and split into training / test sets, so we can go ahead and use them as they are.
x_train = np.load('./data/LandCover.ai/x_train.npy').astype('float32')
y_train= np.load('./data/LandCover.ai/y_train.npy').astype('float32')
x_test = np.load('./data/LandCover.ai/x_test.npy').astype('float32')
y_test = np.load('./data/LandCover.ai/y_test.npy').astype('float32')
print("x_train shape", x_train.shape)
print("y_train shape", y_train.shape)
print("y_test shape", x_test.shape)
print("y_test shape", y_test.shape)
# Let's plot a sample input RGB image and output image with land cover
plt.imshow(x_test[12,:,:,:].astype('uint8'))
plt.show()
plt.imshow(y_test[12,:,:,0].astype('uint8'))
plt.show()
###Output
_____no_output_____
###Markdown
Since land cover data include classes, let's perform one-hot encoding first.
###Code
'''one-hot encoding'''
from tensorflow.keras.utils import to_categorical
y_train_1hot = to_categorical(y_train)
y_test_1hot = to_categorical(y_test)
###Output
_____no_output_____
###Markdown
Now, let's define, fit, predict with, and validate a U-Net model for land cover mapping. In this example, we will use the same U-Net architecture as in our last example (building mapping). The only differences here are the use of __*softmax*__ activation in the last layer and of __*categorical cross-entropy*__ loss, because this is a multi-class classification problem.
###Code
x_in = Input(shape=(128, 128, 3))
'''Encoder'''
x_temp = Conv2D(32, (3, 3), activation='relu', padding='same')(x_in)
x_temp = Dropout(0.25)(x_temp)
x_skip1 = Conv2D(32, (3, 3), activation='relu', padding='same')(x_temp)
x_temp = MaxPooling2D((2,2))(x_skip1)
x_temp = Conv2D(32, (3, 3), activation='relu', padding='same')(x_temp)
x_temp = Dropout(0.25)(x_temp)
x_skip2 = Conv2D(32, (3, 3), activation='relu', padding='same')(x_temp)
x_temp = MaxPooling2D((2,2))(x_skip2)
x_temp = Conv2D(64, (3, 3), activation='relu', padding='same')(x_temp)
x_temp = Dropout(0.25)(x_temp)
x_skip3 = Conv2D(64, (3, 3), activation='relu', padding='same')(x_temp)
x_temp = MaxPooling2D((2,2))(x_skip3)
x_temp = Conv2D(64, (3, 3), activation='relu', padding='same')(x_temp)
x_temp = Dropout(0.5)(x_temp)
x_temp = Conv2D(64, (3, 3), activation='relu', padding='same')(x_temp)
'''Decoder'''
x_temp = Conv2DTranspose(64, (3, 3), activation='relu', padding='same')(x_temp)
x_temp = Dropout(0.5)(x_temp)
x_temp = Conv2DTranspose(64, (3, 3), strides=(2, 2), activation='relu', padding='same')(x_temp)
x_temp = Concatenate()([x_temp, x_skip3])
x_temp = Conv2DTranspose(64, (3, 3), activation='relu', padding='same')(x_temp)
x_temp = Dropout(0.5)(x_temp)
x_temp = Conv2DTranspose(64, (3, 3), strides=(2, 2), activation='relu', padding='same')(x_temp)
x_temp = Concatenate()([x_temp, x_skip2])
x_temp = Conv2DTranspose(32, (3, 3), activation='relu', padding='same')(x_temp)
x_temp = Dropout(0.5)(x_temp)
x_temp = Conv2DTranspose(32, (3, 3), strides=(2, 2), activation='relu', padding='same')(x_temp)
x_temp = Concatenate()([x_temp, x_skip1])
x_temp = Conv2DTranspose(32, (3, 3), activation='relu', padding='same')(x_temp)
x_temp = Dropout(0.5)(x_temp)
x_temp = Conv2DTranspose(32, (3, 3), activation='relu', padding='same')(x_temp)
'''Use 1 by 1 Convolution to get desired output bands'''
x_temp = Conv2D(32, (1, 1), activation='relu', padding='same')(x_temp)
x_temp = Conv2D(32, (1, 1), activation='relu', padding='same')(x_temp)
x_out = Conv2D(4, (1, 1), activation='softmax', padding='same')(x_temp)
model = Model(inputs=x_in, outputs=x_out)
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.summary()
history = model.fit(x_train, y_train_1hot, validation_data=(x_test, y_test_1hot), epochs=250, batch_size=10, verbose=0)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
'''Prediction over the test dataset'''
pred_test = model.predict(x_test)
pred_test = np.argmax(pred_test, axis=-1)
print(pred_test.shape)
#let's compare sample predicted and actual land cover image with input RGB image
plt.imshow(pred_test[12, :, :])
plt.show()
plt.imshow(y_test[12, :, :, 0])
plt.show()
plt.imshow(x_test[12,:,:,:].astype('uint8'))
plt.show()
###Output
(30, 128, 128)
|
model/Unit_2_Build_Week.ipynb | ###Markdown
Import packages
###Code
from google.colab import files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
!pip install category_encoders
from category_encoders import OrdinalEncoder, OneHotEncoder
from pandas_profiling import ProfileReport
from sklearn.model_selection import train_test_split, cross_val_predict, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.inspection import permutation_importance
from joblib import dump
###Output
Collecting category_encoders
  Downloading https://files.pythonhosted.org/packages/44/57/fcef41c248701ee62e8325026b90c432adea35555cbc870aff9cfba23727/category_encoders-2.2.2-py2.py3-none-any.whl (80kB)
Requirement already satisfied: patsy>=0.5.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.5.1)
Requirement already satisfied: numpy>=1.14.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (1.18.5)
Requirement already satisfied: scipy>=1.0.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (1.4.1)
Requirement already satisfied: pandas>=0.21.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (1.1.2)
Requirement already satisfied: scikit-learn>=0.20.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.22.2.post1)
Requirement already satisfied: statsmodels>=0.9.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.10.2)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from patsy>=0.5.1->category_encoders) (1.15.0)
Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.21.1->category_encoders) (2.8.1)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.21.1->category_encoders) (2018.9)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.20.0->category_encoders) (0.16.0)
Installing collected packages: category-encoders
Successfully installed category-encoders-2.2.2
###Markdown
Load Data
###Code
# set up the Kaggle API credentials and download the MLB pitch dataset
!pip install -q kaggle
files.upload()
!mkdir ~/.kaggle
!cp kaggle.json ~/.kaggle/
!chmod 600 ~/.kaggle/kaggle.json
!kaggle datasets download -d'pschale/mlb-pitch-data-20152018'
!mkdir data
!unzip mlb-pitch-data-20152018.zip -d data
pitches = pd.read_csv('data/pitches.csv')
games = pd.read_csv('data/games.csv')
atbats = pd.read_csv('data/atbats.csv')
player_names = pd.read_csv('data/player_names.csv')
df = pd.merge(atbats, games, how= 'inner', on= 'g_id')
from datetime import datetime
date = datetime.strptime(df['date'][0], '%Y-%m-%d')
date.day
###Output
_____no_output_____
###Markdown
Wrangling
###Code
def wrangle(df):
df = df.copy()
# Sorting at bats into 'on base' or 'out'
df['event'].replace({
'Strikeout': 'Out',
'Groundout': 'Out',
'Walk' : 'On Base',
'Single' : 'On Base',
'Flyout' : 'Out',
'Lineout': 'Out',
'Pop Out' : 'Out',
'Double' : 'On Base',
'Home Run' : 'On Base',
'Forceout' : 'Out',
'Grounded Into DP' : 'Out',
'Field Error' : 'On Base',
'Hit By Pitch': 'On Base',
'Triple' : 'On Base',
'Double Play' : 'Out',
'Intent Walk' : 'On Base',
'Strikeout Double Play' : 'Out',
'Fielders Choice' : 'Out',
'Fielders Choice Out' : 'Out',
'Bunt Pop Out' : 'Out',
'Bunt Groundout' : 'Out',
'Bunt Lineout' : 'Out',
'Runner Out' : 'Out',
'Triple Play' : 'Out',
'Runner Double Play' : 'Out',
'Strikeout - DP': 'Out',
'Batter Interference': 'Out',
'Catcher Interference' : 'On Base'
# ,'Sac Fly': 'Out',
# 'Sac Bunt': 'Out',
# 'Sac Fly DP': 'Out',
# 'Sacrifice Bunt DP': 'Out'
}, inplace= True)
# Filtering out sacrifice flys and bunts
df = df[(df['event'] == 'On Base') | (df['event'] == 'Out')]
# Renaming abbreviated columns
df = df.rename(mapper = {
'ab_id': 'atbat_id',
'event': 'result',
'o': 'outs',
'g_id': 'game_id',
'p_score': 'pitcher_team_score'
}, axis= 1)
# Making atbats readable
df['atbat_id'] = df['atbat_id'].apply(int)
# Create feature for year, month, day of week
def get_year(data):
return str(data)[:4]
df['year'] = df['game_id'].apply(get_year)
#df = df.set_index('game_id')
# Creating features and reducing cardinality of wind
def wind_speed(data):
return int(data.split(',')[0].strip()[:-4])
def wind_direction(data):
return data.split(',')[1].strip()
df['wind_speed'] = df['wind'].apply(wind_speed)
df['wind_direction'] = df['wind'].apply(wind_direction)
df['wind_direction'].replace({'none': 'None'}, inplace= True)
# Creating features for weather
def temperature(data):
return int(data.split(',')[0].strip()[:3])
def weather(data):
return data.split(',')[1].strip()
df['temperature'] = df['weather'].apply(temperature)
df['weather'] = df['weather'].apply(weather)
# Duplicate, unnecessary, or potentially data leakage columns
df.drop(columns= ['outs', 'wind', 'away_final_score', 'home_final_score', 'atbat_id', 'game_id'], inplace= True)
# Drop high cardinality columns
high_card = [col for col in df.select_dtypes(include= 'object').columns if df[col].nunique() >= 100]
df.drop(columns = high_card, inplace= True)
#df.drop(columns= ['umpire_1B', 'umpire_2B', 'umpire_3B', 'umpire_HP'], inplace= True)
# Assigning names to pitcher and batter ids
player_names['name'] = player_names['first_name'] + ' ' + player_names['last_name']
player_names.drop(columns= ['first_name', 'last_name'], inplace= True)
player_names['batter_id'] = player_names['id']
player_names['batter'] = player_names['name']
player_names.drop(columns= ['id', 'name'], inplace=True)
df = pd.merge(df, player_names, how= 'inner', on= 'batter_id')
player_names['pitcher_id'] = player_names['batter_id']
player_names['pitcher'] = player_names['batter']
player_names.drop(columns= ['batter_id', 'batter'], inplace=True)
df = pd.merge(df, player_names, how= 'inner', on= 'pitcher_id')
df.drop(columns= ['batter_id', 'pitcher_id'], inplace= True)
return df
df = wrangle(df)
print(df.shape)
df.head()
df['wind_speed'].value_counts()
df.select_dtypes(exclude= 'object').nunique()
df['result'].value_counts()
df.dtypes
###Output
_____no_output_____
###Markdown
Wrangle pitches dataset
###Code
pitches = pd.read_csv('data/pitches.csv')
def wrangle_pitches(df):
df = df.copy()
# nulls = [col for col in df.columns if df[col].isnull().sum() == df.shape[0]]
# print(nulls)
# df.drop(columns= nulls, inplace= True)
#df.drop(columns= ['code', 'o','event_num'], inplace= True) # code is duplicate of type, o is duplicate of outs, do not know what event_num is
#df.drop(columns= ['spin_rate', 'spin_dir', 'type_confidence','zone'], inplace= True) # dropping placeholders. could use for advanced analysis
#df.drop(columns= 'y0', inplace= True) # same value for all pitches: 50
#advanced_stats = df.loc[:,:'pfx_z']
#df.drop(columns= advanced_stats.columns, inplace= True)
df = df.rename(mapper = {'ab_id': 'atbat_id',
'g_id': 'game_id',
'b_count':'balls',
's_count':'strikes',
'b_score':'batter_team_score',
'p_score': 'pitcher_team_score'}, axis= 1)
df['code'].replace({
'B' : 'Ball', # ball
'*B' : 'Ball', # ball in dirt
'S' : 'Strike', # swinging strike
'C' : 'Strike', # called strike
'F' : 'Foul', # foul
'T' : 'Foul', # foul tip
'L' : 'Foul', # foul bunt
'I' : 'Ball', # intentional ball
'W' : 'Strike', # swinging strike blocked
'M' : 'Strike', # missed bunt
'P': 'Ball',
'H': 'On Base', #'in play - no out', # hit by pitch
'E': 'On Base', #'in play - runs',
'D': 'On Base', #'in play - no out',
'X': 'Out', #'in play - out'
}, inplace = True)
# Selecting the walks when the ball count is at 3 and the pitch is a ball
df.loc[(df['balls'] == 3) & (df['code'] == 'Ball'), 'code'] = 'On Base'
# Selecting strikeouts when the strike count is at 2 and a strike is thrown
df.loc[(df['strikes'] == 2) & (df['code'] == 'Strike'), 'code'] = 'Out'
df = df[(df['code'] == 'Ball') | (df['code'] == 'Strike') | (df['code'] == 'Foul') | (df['code'] == 'On Base') | (df['code'] == 'Out')]
df['pitch_type'].replace({
'CH': 'Changeup',
'CU': 'Curveball',
'EP': 'Eephus',
'FC': 'Cutter',
'FF': 'Four seam fastball',
'FO': 'Pitchout',
'FS': 'Splitter',
'FT': 'Two seam fastball',
'IN': 'Intentional ball',
'KC': 'Knuckle curve',
'KN': 'Knuckleball',
'PO': 'Pitchout',
'SC': 'Screwball',
'SI': 'Sinker',
'SL': 'Slider',
'UN': 'Unknown'
}, inplace = True)
# df['top'] = df['top'].astype(int) # changing 'top' of inning from boolean to binary
df['atbat_id'] = df['atbat_id'].apply(int) # making atbats readable
# #df.dropna(inplace= True)
# Dropping outliers that have 4 balls in the count
# four_balls = df[df['b_count'] == 4]
# df.drop(four_balls.index, inplace= True)
# Removing duplicate or data leakage
df.drop(columns= ['type', 'event_num'], inplace= True)
return df
pitches = wrangle_pitches(pitches)
print(pitches.shape)
pitches.head()
pitches['code'].value_counts()
pitchers = df.groupby(['pitcher_id'])#['pitcher_id']
pitchers.head()
###Output
_____no_output_____
###Markdown
Wrangle at_bats dataset
###Code
at_bats = pd.read_csv('data/atbats.csv')
def wrangle_at_bats(df):
df = df.copy()
# df['event'].replace({
# 'Strikeout': 'Out',
# 'Groundout': 'Out',
# 'Walk' : 'On Base',
# 'Single' : 'On Base',
# 'Flyout' : 'Out',
# 'Lineout': 'Out',
# 'Pop Out' : 'Out',
# 'Double' : 'On Base',
# 'Home Run' : 'On Base',
# 'Forceout' : 'Out',
# 'Grounded Into DP' : 'Out',
# 'Field Error' : 'On Base', # counted as on base since the batter did get on base
# 'Hit By Pitch': 'On Base',
# 'Triple' : 'On Base',
# 'Double Play' : 'Out',
# 'Intent Walk' : 'On Base',
# 'Strikeout Double Play' : 'Out',
# 'Fielders Choice' : 'Out',
# 'Fielders Choice Out' : 'Out',
# 'Bunt Pop Out' : 'Out',
# 'Bunt Groundout' : 'Out',
# 'Bunt Lineout' : 'Out',
# 'Runner Out' : 'Out',
# 'Triple Play' : 'Out',
# 'Runner Double Play' : 'Out',
# 'Strikeout - DP': 'Out',
# 'Batter Interference': 'Out',
# 'Catcher Interference' : 'On Base'
# # ,'Sac Fly': 'Out',
# # 'Sac Bunt': 'Out',
# # 'Sac Fly DP': 'Out',
# # 'Sacrifice Bunt DP': 'Out'
# }, inplace= True)
# df = df[(df['event'] == 'On Base') | (df['event'] == 'Out')] # filtering out sacrifice flys and bunts
# df['event'].replace({
# 'Out': 0,
# 'On Base': 1
# }, inplace = True)
# df['top'] = df['top'].astype(int)
df = df.rename(mapper = {'ab_id': 'atbat_id',
'event': 'result',
'o': 'outs',
'g_id': 'game_id',
'p_score': 'pitcher_team_score'}, axis= 1)
df['atbat_id'] = df['atbat_id'].apply(int) # making atbats readable
# Removing data leakage
df.drop(columns= ['outs', 'result'], inplace= True)
return df
at_bats = wrangle_at_bats(at_bats)
print(at_bats.shape)
at_bats.head()
at_bats['result'].value_counts(normalize = True)*100
at_bats['outs'].value_counts()
at_bats.groupby(by= 'outs')['result'].sum() / at_bats.groupby(by= 'outs')['result'].count()
at_bats.nunique()
###Output
_____no_output_____
###Markdown
Wrangle games dataset
###Code
games = pd.read_csv('data/games.csv')
def wrangle_games(df):
df = df.copy()
df = df.rename(mapper = {'g_id': 'game_id'}, axis= 1)
# Create feature for year
def get_year(data):
return str(data)[:4]
df['year'] = df['game_id'].apply(get_year)
df = df.set_index('game_id')
# Creating features and reducing cardinality of wind
def wind_speed(data):
return int(data.split(',')[0].strip()[:-4])
def wind_direction(data):
return data.split(',')[1].strip()
df['wind_speed'] = df['wind'].apply(wind_speed)
df['wind_direction'] = df['wind'].apply(wind_direction)
df['wind_direction'].replace({'none': 'None'}, inplace= True)
# Creating features for weather
def temperature(data):
return int(data.split(',')[0].strip()[:3])
def weather(data):
return data.split(',')[1].strip()
df['temperature'] = df['weather'].apply(temperature)
df['weather'] = df['weather'].apply(weather)
# Duplicate or potentially data leakage columns
df.drop(columns= ['wind', 'away_final_score', 'home_final_score'], inplace= True)
# Drop high cardinality columns
high_card = [col for col in df.select_dtypes(include= 'object').columns if df[col].nunique() >= 100]
df.drop(columns = high_card, inplace= True)
#df.drop(columns= ['umpire_1B', 'umpire_2B', 'umpire_3B', 'umpire_HP'], inplace= True)
return df
games = wrangle_games(games)
print(games.shape)
games.head()
games.nunique()
%matplotlib inline
games['attendance'].hist();
games.info()
games['attendance'].hist()
plt.show();
###Output
_____no_output_____
###Markdown
Merge the datasets
###Code
df = pd.merge(pitches, at_bats, how= 'inner', on= 'atbat_id')
df = pd.merge(df, games, how= 'inner', on= 'game_id')
print(df.shape)
df.head()
id_cols = [col for col in df.columns if '_id' in col]
id_cols
###Output
_____no_output_____
###Markdown
Subsetting
###Code
print(df.shape)
df.head()
df_2015 = df[df['year'] == '2015']
# Getting home and away games for oakland
oakland = df[(df['away_team'] == 'oak') | (df['home_team'] == 'oak')]
oakland_2015 = oakland[oakland['year'] == '2015']
print(oakland.shape)
print(oakland_2015.shape)
###Output
(47305, 54)
###Markdown
Modeling
###Code
df = wrangle(df)
df = df_2015
df.shape
np.nan
df.describe()
df['batter'].nunique()
most_at_bats = df['batter'].value_counts().head(100)
most_used_pitcher = df['pitcher'].value_counts().head(100)
most_at_bats = df['batter'].value_counts().head(10)
most_used_pitcher = df['pitcher'].value_counts().head(10)
def top_100_batters(name):
if name in most_at_bats:
return name
else:
return np.nan
def top_100_pitchers(name):
if name in most_used_pitcher:
return name
else:
return np.nan
df['batter'] = df['batter'].apply(top_100_batters)
df['pitcher'] = df['pitcher'].apply(top_100_pitchers)
df.dropna(axis= 0, inplace= True)
df['batter'].value_counts()
df.describe()
df.columns
nans = []
for i in range(len(df.columns)):
nans.append('np.NaN')
df.shape
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 82262 entries, 0 to 686427
Data columns (total 19 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 result 82262 non-null object
1 inning 82262 non-null int64
2 pitcher_team_score 82262 non-null int64
3 p_throws 82262 non-null object
4 stand 82262 non-null object
5 top 82262 non-null bool
6 attendance 82262 non-null int64
7 away_team 82262 non-null object
8 elapsed_time 82262 non-null int64
9 home_team 82262 non-null object
10 venue_name 82262 non-null object
11 weather 82262 non-null object
12 delay 82262 non-null int64
13 year 82262 non-null object
14 wind_speed 82262 non-null int64
15 wind_direction 82262 non-null object
16 temperature 82262 non-null int64
17 batter 82262 non-null object
18 pitcher 82262 non-null object
dtypes: bool(1), int64(7), object(11)
memory usage: 12.0+ MB
###Markdown
Baseline
###Code
print('Baseline:\n', round(df['result'].value_counts(normalize = True)*100, 3),'%')
###Output
Baseline:
Out 64.591
On Base 35.409
Name: result, dtype: float64 %
###Markdown
Train - test split
###Code
target = 'result'
y = df[target]
X = df.drop(columns= target)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.2, random_state= 42)
###Output
_____no_output_____
###Markdown
First model 0.782- using at_bats- no features- random forest : no tuning- predicted probabilities
###Code
y = at_bats['result']
X = at_bats.loc[:,'inning':]
X = X.drop(columns= 'pitcher_id')
# Splitting out test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
model = make_pipeline(
OrdinalEncoder(),
RandomForestClassifier(
n_jobs= -1,
verbose= 1),
);
model.fit(X_train, y_train)
score = cross_val_score(model, X_train, y_train, n_jobs = -1, verbose = 1, scoring= 'accuracy')
print('CV accuracy score:', score.mean())
y_pred_proba = cross_val_predict(model, X_train, y_train, cv=3, n_jobs=-1,
method='predict_proba')[:,1]
y_plot = y_pred_proba.copy()
y_plot = sorted(y_plot)[:]
fig, ax = plt.subplots()
fig.set_facecolor('lightgrey')
fig.set_figheight(10)
fig.set_figwidth(20)
plt.title('Prediction Probabilities')
plt.scatter(x= np.arange(len(y_plot)), y= y_plot, marker= '.')
plt.show();
###Output
_____no_output_____
###Markdown
Second model 0.784- using at_bats- no features- XGBoost
###Code
y = at_bats['result']
X = at_bats.loc[:,'inning':]
X = X.drop(columns= 'pitcher_id')
# Splitting out test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
xgb_model = make_pipeline(
OrdinalEncoder(),
XGBClassifier()
)
xgb_model.fit(X_train, y_train)
xgb_score = cross_val_score(xgb_model, X_train, y_train, scoring= 'accuracy', verbose= 1)
print('XGB training accuracy:', xgb_score.mean())
###Output
XGB training accuracy: 0.6776470724069793
###Markdown
Third model 0.782- low-card games and at_bats- XGBoost- no tuning
###Code
y = df['result']
X = df.drop(columns= 'result')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.2, random_state= 42)
gb = make_pipeline(
OrdinalEncoder(),
XGBClassifier(n_jobs= -1)
)
gb.fit(X_train, y_train)
score = cross_val_score(gb, X_train, y_train, n_jobs = -1, verbose = 1, scoring= 'accuracy')
print('Accuracy score:', score.mean())
###Output
[Parallel(n_jobs=-1)]: Using backend LokyBackend with 2 concurrent workers.
###Markdown
Fourth model 0.584 accuracy- classifying ball, strike, on base, out, or foul using all datasets- removed data leakage columns: - out, result, home_final_score, away_final_score
###Code
y = df['code']
X = df.drop(columns= 'code')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.2, random_state= 42)
gb = make_pipeline(
OrdinalEncoder(),
XGBClassifier(n_jobs= -1)
)
score = cross_val_score(gb, X_train, y_train, n_jobs = -1, verbose = 1, scoring= 'accuracy')
print('Accuracy score:', score.mean())
gb.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Fifth model 0.609 : 13.4 minutes to run cross-val scores- using at_bats and games- random forest- more features and wrangling- removed data leakage columns: outs, final score,
###Code
rf_model = make_pipeline(
OrdinalEncoder(),
RandomForestClassifier(
n_jobs = -1,
verbose = 1
),
)
rf_model.fit(X_train, y_train)
score = cross_val_score(rf_model, X_train, y_train, n_jobs = -1, verbose = 1, scoring= 'accuracy')
print('Accuracy score:', score.mean())
###Output
[Parallel(n_jobs=-1)]: Using backend LokyBackend with 2 concurrent workers.
###Markdown
Permutation importances
###Code
perm_imp = permutation_importance(rf_model,
X_test, y_test)
#n_jobs= -1)
#random_state= 42)
%matplotlib inline
data = {
'importances_mean' : perm_imp['importances_mean'],
'importances_std' : perm_imp['importances_std']
}
perm_imp_df = pd.DataFrame(data, index= X_test.columns)
perm_imp_df.sort_values(by = 'importances_mean', ascending= True, inplace= True)
fig, ax = plt.subplots()
fig.set_facecolor('lightgrey')
perm_imp_df['importances_mean'].plot(kind= 'barh')
#plt.xlabel('change in model accuracy')
plt.title('Permutation Importance')
plt.savefig('perm_imp_model_5.png')
plt.show();
###Output
_____no_output_____
###Markdown
Sixth model 0.66965 : 34 seconds to run cross val score- logistic regression- same data as fifth model
###Code
lr = make_pipeline(
OrdinalEncoder(),
LogisticRegression(n_jobs = -1,
verbose = 1),
)
lr.fit(X_train, y_train)
score = cross_val_score(lr, X_train, y_train, n_jobs = -1, verbose = 1, scoring= 'accuracy')
print('Accuracy score:', score.mean())
perm_imp = permutation_importance(lr,
X_test, y_test,
#n_jobs= -1,
random_state= 42)
data = {
'importances_mean' : perm_imp['importances_mean'],
'importances_std' : perm_imp['importances_std']
}
perm_imp_df = pd.DataFrame(data, index= X_test.columns)
perm_imp_df.sort_values(by = 'importances_mean', ascending= True, inplace= True)
fig, ax = plt.subplots()
fig.set_facecolor('lightgrey')
fig.set_figheight(6)
fig.set_figwidth(10)
perm_imp_df['importances_mean'].plot(kind= 'barh')
plt.xlabel('Change in Model Accuracy')
plt.title('Permutation Importance for Logistic Regression')
plt.rcParams['figure.dpi'] = 150
#plt.savefig('perm_imp_log_reg.png')
plt.show();
df['pitcher'].value_counts()
batters = df['pitcher'].value_counts()
batters.head(100).index
wind_dir = df['wind_direction'].value_counts()
wind_dir.index
df.columns
df.iloc[0]
df['batter'].nunique()
df.to_csv('top_100_batters.csv')
###Output
_____no_output_____
###Markdown
Permutation importances
###Code
xgb_model.fit(X_train, y_train)
# took over ten minutes to run
perm_imp = permutation_importance(xgb_model,
X_test, y_test,
#n_jobs= -1,
random_state= 42)
%matplotlib inline
data = {
'importances_mean' : perm_imp['importances_mean'],
'importances_std' : perm_imp['importances_std']
}
perm_imp_df = pd.DataFrame(data, index= X_test.columns)
perm_imp_df.sort_values(by = 'importances_mean', ascending= True, inplace= True)
fig, ax = plt.subplots()
fig.set_facecolor('lightgrey')
perm_imp_df['importances_mean'].plot(kind= 'barh')
#plt.xlabel('change in model accuracy')
plt.title('Permutation Importance')
#plt.savefig('perm_imp_model_6.png')
plt.show();
df.nunique()
df.shape
df['batter_id'].value_counts()
data
from joblib import dump
import pickle
lr_pickle = pickle.dumps(lr)
dump(lr, 'logreg_pickle.joblib')
###Output
_____no_output_____
###Markdown
Seventh model 0.6718 : 42 seconds to run cross val scores- baseline: 67.279- using just 2015 data of fifth and sixth models- xgboost
###Code
xgb = make_pipeline(
OrdinalEncoder(),
XGBClassifier()
)
xgb.fit(X_train, y_train)
score = cross_val_score(xgb, X_train, y_train, n_jobs = -1, verbose = 1, scoring= 'accuracy')
print('Accuracy score:', score.mean())
###Output
[Parallel(n_jobs=-1)]: Using backend LokyBackend with 2 concurrent workers.
###Markdown
Permutation importances
###Code
# compute permutation importances for the seventh model (this can take a while to run)
perm_imp = permutation_importance(xgb,
X_test, y_test,
#n_jobs= -1,
random_state= 42)
perm_imp
%matplotlib inline
data = {
'importances_mean' : perm_imp['importances_mean'],
'importances_std' : perm_imp['importances_std']
}
perm_imp_df = pd.DataFrame(data, index= X_test.columns)
perm_imp_df.sort_values(by = 'importances_mean', ascending= True, inplace= True)
fig, ax = plt.subplots()
fig.set_facecolor('lightgrey')
perm_imp_df['importances_mean'].plot(kind= 'barh')
#plt.xlabel('change in model accuracy')
plt.title('Permutation Importance')
plt.savefig('perm_imp_model_6.png')
plt.show();
from joblib import dump
dump(lr, 'logreg_pickle.joblib')
df.to_csv('2015_data.csv')
###Output
_____no_output_____
###Markdown
PDP One feature
###Code
!pip install pdpbox
!pip install shap
from pdpbox.pdp import pdp_isolate, pdp_plot, pdp_interact, pdp_interact_plot
feature = 'temperature'
isolated = pdp_isolate(
model = gb,
dataset = X_test,
model_features = X_test.columns,
feature= feature,
num_grid_points = 70
)
pdp_plot(isolated, feature_name= feature);
plt.xlim(0, 100)
###Output
_____no_output_____
###Markdown
Two features
###Code
features = ['outs', 'inning']
interaction = pdp_interact(
model= gb,
features = features,
dataset= X_test,
model_features= X_test.columns
)
pdp_interact_plot(interaction, plot_type= 'grid', feature_names= features);
###Output
_____no_output_____
###Markdown
Shapley
###Code
import shap
transformer = make_pipeline(
OrdinalEncoder()
)
X_train_transformed = transformer.fit_transform(X_train)
X_test_transformed = transformer.transform(X_test)
data = [(X_train_transformed, y_train),
(X_test_transformed, y_test)]
model = XGBClassifier(n_jobs= -1)
model.fit(X_train_transformed, y_train, eval_set = data)#, eval_metric= 'accuracy')
row = X_test.iloc[[1]]
explainer = shap.TreeExplainer(model)
row_processed = transformer.transform(row)
shap_values = explainer.shap_values(row_processed)
shap.initjs()
shap.force_plot(
base_value= explainer.expected_value,
shap_values= shap_values,
features= row,
link= 'logit',
matplotlib = True,
show= False
)
plt.savefig('shapley.png', bbox_inches = 'tight')
###Output
_____no_output_____
###Markdown
ROC Curve
###Code
fpr, tpr, thresholds = roc_curve(y_test, y_pred_proba)
roc_curve_df = pd.DataFrame({
'False Positive Rate': fpr,
'True Positive Rate' : tpr,
'Thresholds': thresholds
})
roc_curve_df
###Output
_____no_output_____
###Markdown
Feature Importances
###Code
# Gini-based importances come from the random forest pipeline (LogisticRegression has no feature_importances_)
importances = rf_model.named_steps['randomforestclassifier'].feature_importances_
feat_imp = pd.Series(importances, index= X_train.columns).sort_values()
print(X_train.columns)
print(feat_imp)
fig, ax = plt.subplots()
fig.set_facecolor('lightgrey')
plt.title('Feature Importance for Random Forest Using Gini')
feat_imp.plot(kind= 'barh', xlabel = 'feature', ylabel= 'importance');
###Output
y0 0.000000
p_throws 0.000000
pitcher_team_score 0.000000
venue_name 0.000000
top 0.000000
year 0.000000
weather 0.001143
break_y 0.001258
wind_direction 0.001487
away_team 0.002426
wind_speed 0.002501
ay 0.002566
type_confidence 0.003086
home_team 0.003342
start_speed 0.003468
x0 0.003687
pitcher_id 0.003745
temperature 0.003833
spin_rate 0.003904
z0 0.004414
inning 0.004490
batter_id 0.004973
batter_team_score 0.005516
on_2b 0.005961
on_1b 0.006357
end_speed 0.006520
vx0 0.007436
pitch_type 0.008306
sz_top 0.008350
ax 0.008528
on_3b 0.009331
vy0 0.009546
pfx_z 0.010030
sz_bot 0.010173
az 0.011328
outs 0.013267
break_length 0.013869
stand 0.013951
spin_dir 0.014379
y 0.016434
pfx_x 0.016458
break_angle 0.019960
vz0 0.023830
pitch_num 0.028615
nasty 0.036417
x 0.036489
pz 0.038146
px 0.051770
balls 0.072911
strikes 0.134134
zone 0.311666
dtype: float32
###Markdown
Permutation importance
###Code
# perm_imp = permutation_importance(model,
# X_val, y_val,
# n_jobs= -1,
# random_state= 42)
perm_imp = permutation_importance(gb,
X_test, y_test,
n_jobs= -1,
random_state= 42)
perm_imp.keys()
%matplotlib inline
data = {
'importances_mean' : perm_imp['importances_mean'],
'importances_std' : perm_imp['importances_std']
}
perm_imp_df = pd.DataFrame(data, index= X_test.columns)
perm_imp_df.sort_values(by = 'importances_mean', ascending= True, inplace= True)
fig, ax = plt.subplots()
fig.set_facecolor('lightgrey')
perm_imp_df['importances_mean'].tail(10).plot(kind= 'barh')
#plt.xlabel('change in model accuracy')
plt.title('Permutation Importance')
plt.show();
###Output
_____no_output_____ |
src/archive/decision_tree.ipynb | ###Markdown
Decision Tree. Build a decision tree to predict the quality of GitHub profiles.
###Code
#imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
#Read dataset
data = pd.read_csv('../data/gitrater.csv',index_col=0)
print(len(data))
data.head()
X = data[['repos','stars','followers','following','foll_ratio','n_lang','org_flag','n_cont','last_cont','stab_cont','cont_repo_ratio']]
y = data['y']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=30,random_state = 100)
# Fitting the model
dtree = DecisionTreeClassifier()
dtree.fit(X_train,y_train)
# Prediction and evaluation
predictions = dtree.predict(X_test)
print(classification_report(y_test,predictions))
#Create confusion matrix
labels = [0, 1, 2, 3, 4, 5]
conf_matrix = confusion_matrix(y_test,predictions)
conf_matrix_df = pd.DataFrame(conf_matrix,columns=labels,index=labels)
#Plot confusion matrix heatmap
plt.figure(figsize=(10, 10), facecolor='w', edgecolor='k')
sns.set(font_scale=1.5)
sns.heatmap(conf_matrix_df,cmap='coolwarm',annot=True,fmt='.5g',cbar=False)
plt.xlabel('Predicted',fontsize=22)
plt.ylabel('Actual',fontsize=22)
plt.savefig('../figures/model_eval.png',format='png',dpi=150)
###Output
precision recall f1-score support
0.0 1.00 0.60 0.75 5
1.0 0.56 0.71 0.63 7
2.0 0.50 0.44 0.47 9
3.0 0.25 0.20 0.22 5
4.0 0.50 1.00 0.67 2
5.0 1.00 1.00 1.00 2
avg / total 0.59 0.57 0.56 30
|
Python_notebooks/Glass_Identificaiton_Project/Glass_ML_Project.ipynb | ###Markdown
Glass Data Project: The goal of this project is to use machine learning techniques on the metal composition of different types of glass to predict the glass type based on the metals it contains.
###Code
# import the required packages
from sklearn import datasets
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
#from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import pandas as pd
import seaborn as sns
from scipy import stats
import matplotlib.pyplot as plt
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.data',
names = ['ID','RI','Na','Mg','Al','Si','K','Ca','Ba','Fe','Type'])
df.head()
# refractive index for each type
df.groupby('Type').mean()['RI'].plot.bar()
###Output
_____no_output_____
###Markdown
The Refractive Index is very similar for all the types of glass.
###Code
# types of metal in each type of glass
df.iloc[:,2:11].groupby('Type').mean().plot.bar()
###Output
_____no_output_____
###Markdown
Some metals vary with the type of glass and some are the same. From the graphs, it seems that silicon, sodium, and calcium are very similar in all types of glass. Magnesium, potassium, and aluminum seem to vary for each type of glass. Iron and barium are only present in a few types of glass.
###Code
# histograms of each metal
fig = plt.figure(figsize=(15.0, 15.0))
fig.subplots_adjust(hspace=0.75, wspace=0.5)
for x,i in enumerate(range(2,10)):
index = x+1
ax = plt.subplot(4,2,(index))
plt.title(df.columns[i], fontsize = 20)
sns.distplot(df.iloc[:,i])
ax.set_ylabel('')
ax.set_xlabel('')
plt.show()
###Output
_____no_output_____
###Markdown
As mentioned before, Ba and Fe have numerous 0 values. Mg seems to have some 0 values as well. The rest of the metals seem to be normally distributed.
###Code
# correlation plot
df_corr = df.iloc[:,1:10].corr()
df_corr
sns.set(rc={'figure.figsize':(8,8)})
sns.heatmap(df_corr,cmap="Blues",annot=True,linewidths=.75)
###Output
_____no_output_____
###Markdown
There is nothing that is strongly correlated except for calcium and the refractive index. The correlation plot shows that these two variables are 81% positively correlated.
###Code
### Split out the validation dataset
X = df.iloc[:,1:10]
Y = df.Type
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size=0.3, random_state = 43)
### Data Transformation
# Standardize the data before modeling
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
plt.plot(X_train_std)
# Model using Standardized data and Minkowski
y_value = []
x_value = []
for i in range(1,20) :
knn = KNeighborsClassifier(n_neighbors=i,metric='minkowski')
knn.fit(X_train_std, Y_train)
y_pred = knn.predict(X_test_std)
x_value.append(i)
y_value.append(accuracy_score(Y_test, y_pred))
test_df = pd.DataFrame(
{
'K' : x_value,
'Accuracy' : y_value
})
fig = plt.figure(figsize=(8, 8))
plt.title('Test Results', fontsize = 20)
plt.plot(test_df.K,test_df.Accuracy,
marker='o',
markerfacecolor='blue',
markersize=8,
color = 'black')
plt.ylabel('Accuracy')
plt.xlabel('K Value')
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import normalize
####Estimate #1 - Default
# creating odd list of K for KNN
k_list= list(range(1,50))
# subsetting just the odd ones
neighbors = list(range(1,50,2))
# empty list that will hold cv scores
cv_scores_1 = []
k_values_1 = []
# perform 10-fold cross validation
for k in neighbors:
knn = KNeighborsClassifier(n_neighbors=k)
scores = cross_val_score(knn, X_train_std, Y_train, cv=10, scoring='accuracy')
cv_scores_1.append(scores.mean())
k_values_1.append(k)
#####Estimate #2 - Minkowski
# empty list that will hold cv scores
cv_scores_2 = []
k_values_2 = []
# perform 10-fold cross validation
for k in neighbors:
knn = KNeighborsClassifier(n_neighbors=k,metric='minkowski',p = 5 )
scores = cross_val_score(knn, X_train_std, Y_train, cv=10, scoring='accuracy')
cv_scores_2.append(scores.mean())
k_values_2.append(k)
#####Estimate #3 - Euclidean
# empty list that will hold cv scores
cv_scores_3 = []
k_values_3 = []
# perform 10-fold cross validation
for k in neighbors:
knn = KNeighborsClassifier(n_neighbors=k,metric='euclidean')
scores = cross_val_score(knn, X_train_std, Y_train, cv=10, scoring='accuracy')
cv_scores_3.append(scores.mean())
k_values_3.append(k)
#####Estimate #4 - Chebyshev
# empty list that will hold cv scores
cv_scores_4 = []
k_values_4 = []
# perform 10-fold cross validation
for k in neighbors:
knn = KNeighborsClassifier(n_neighbors=k,metric='chebyshev')
scores = cross_val_score(knn, X_train_std, Y_train, cv=10, scoring='accuracy')
cv_scores_4.append(scores.mean())
k_values_4.append(k)
#####Estimate #5 -BrayCurtisDistance
# empty list that will hold cv scores
cv_scores_5 = []
k_values_5 = []
# perform 10-fold cross validation
for k in neighbors:
knn = KNeighborsClassifier(n_neighbors=k,metric='braycurtis')
scores = cross_val_score(knn, X_train_std, Y_train, cv=10, scoring='accuracy')
cv_scores_5.append(scores.mean())
k_values_5.append(k)
####Estimate #6 - Using Normalization
# empty list that will hold cv scores
cv_scores_6 = []
k_values_6 = []
#normalize data
X_train_N = normalize(X_train_std)
# perform 10-fold cross validation
for k in neighbors:
knn = KNeighborsClassifier(n_neighbors=k,metric='minkowski')
scores = cross_val_score(knn, X_train_N, Y_train, cv=10, scoring='accuracy')
cv_scores_6.append(scores.mean())
k_values_6.append(k)
# Ploting Graphs
# changing to misclassification error
MSE_1 = [1 - x for x in cv_scores_1]
MSE_2 = [1 - x for x in cv_scores_2]
MSE_3 = [1 - x for x in cv_scores_3]
MSE_4 = [1 - x for x in cv_scores_4]
MSE_5 = [1 - x for x in cv_scores_5]
MSE_6 = [1 - x for x in cv_scores_6]
# determining best k
optimal_k_1 = neighbors[MSE_1.index(min(MSE_1))]
E_1 = optimal_k_1
optimal_k_2 = neighbors[MSE_2.index(min(MSE_2))]
E_2 = optimal_k_2
optimal_k_3 = neighbors[MSE_3.index(min(MSE_3))]
E_3 = optimal_k_3
optimal_k_4 = neighbors[MSE_4.index(min(MSE_4))]
E_4 = optimal_k_4
optimal_k_5 = neighbors[MSE_5.index(min(MSE_5))]
E_5 = optimal_k_5
optimal_k_6 = neighbors[MSE_6.index(min(MSE_6))]
E_6 = optimal_k_6
print (f"Estimate 1 : Optimal number of neighbors is {E_1} and accuracy is {round(cv_scores_1[E_1],2)}")
print (f"Estimate 2 : Optimal number of neighbors is {E_2} and accuracy is {round(cv_scores_2[E_2],2)}")
print (f"Estimate 3 : Optimal number of neighbors is {E_3} and accuracy is {round(cv_scores_3[E_3],2)}")
print (f"Estimate 4 : Optimal number of neighbors is {E_4} and accuracy is {round(cv_scores_4[E_4],2)}")
print (f"Estimate 5 : Optimal number of neighbors is {E_5} and accuracy is {round(cv_scores_5[E_5],2)}")
print (f"Estimate 6 : Optimal number of neighbors is {E_6} and accuracy is {round(cv_scores_6[E_6],2)}")
# plot misclassification error vs k
fig = plt.figure()
fig.subplots_adjust(hspace=0.75, wspace=0.5)
plt.subplot(3,2,1)
plt.title('Estimate 1')
plt.plot(neighbors, MSE_1)
plt.xlabel('Number of Neighbors K')
plt.ylabel('Misclassification Error')
plt.subplot(3,2,2)
plt.title('Estimate 2')
plt.plot(neighbors, MSE_2)
plt.xlabel('Number of Neighbors K')
plt.ylabel('Misclassification Error')
plt.subplot(3,2,3)
plt.title('Estimate 3')
plt.plot(neighbors, MSE_3)
plt.xlabel('Number of Neighbors K')
plt.ylabel('Misclassification Error')
plt.subplot(3,2,4)
plt.title('Estimate 4')
plt.plot(neighbors, MSE_4)
plt.xlabel('Number of Neighbors K')
plt.ylabel('Misclassification Error')
plt.subplot(3,2,5)
plt.title('Estimate 5')
plt.plot(neighbors, MSE_5)
plt.xlabel('Number of Neighbors K')
plt.ylabel('Misclassification Error')
plt.subplot(3,2,6)
plt.title('Estimate 6')
plt.plot(neighbors, MSE_6)
plt.xlabel('Number of Neighbors K')
plt.ylabel('Misclassification Error')
plt.show()
from sklearn.decomposition import PCA
# 95% of the varriance - 6 dimensons
pca1 = PCA(0.95)
X_train_pca1 = pca1.fit_transform(X_train_std)
X_train_pca1 = pd.DataFrame(X_train_pca1, columns=['PC1','PC2','PC3','PC4','PC5','PC6'])
X_test_pca1 = pca1.transform(X_test_std)
X_test_pca1 = pd.DataFrame(X_test_pca1, columns=['PC1','PC2','PC3','PC4','PC5','PC6'])
X_train_pca1.head()
len(X_train_pca1)
####Estimate #7 - PCA
# creating odd list of K for KNN
k_list= list(range(1,50))
# subsetting just the odd ones
neighbors = list(range(1,50,2))
# empty list that will hold cv scores
cv_scores_7 = []
k_values_7 = []
#normailze the data
X_train_N_pca = normalize(X_train_pca1)
# perform 10-fold cross validation
for k in neighbors:
knn = KNeighborsClassifier(n_neighbors=k,metric='minkowski')
scores = cross_val_score(knn, X_train_N_pca, Y_train, cv=10, scoring='accuracy')
cv_scores_7.append(scores.mean())
k_values_7.append(k)
# changing to misclassification error
MSE_7 = [1 - x for x in cv_scores_7]
# determining best k
optimal_k_7 = neighbors[MSE_7.index(min(MSE_7))]
E_7 = optimal_k_7
print (f"Estimate 7 : Optimal number of neighbors is {E_7} and accuracy is {round(cv_scores_7[E_7],2)}")
# plot misclassification error vs k
fig = plt.figure()
plt.title('Estimate 7')
plt.plot(neighbors, MSE_7)
plt.xlabel('Number of Neighbors K')
plt.ylabel('Misclassification Error')
###Output
Estimate 7 : Optimal number of neighbors is 1 and accuracy is 0.71
|
4.databases/exercises/3.conll-to-xml.ipynb | ###Markdown
From one format to another. You have a file [*ftb-dependencies.conll*](../data/ftb-dependencies.conll) in the [CoNLL-U](https://universaldependencies.org/format.html) format, taken from the [*French Treebank*](http://ftb.linguist.univ-paris-diderot.fr/). Your goal is to convert this file to XML, keeping only certain pieces of information: - the word (2nd column); - its lemma (3rd column); - its grammatical category (4th column). In the end, you should obtain the structure below: ```xml Le petit chat est mort . …``` As always, there are several ways to proceed. We recommend first reading the CoNLL file as if it were plain text and parsing it to collect a list of sentences, each of which is a list of tuples containing the word, its lemma, and its grammatical category: ```pysentences = [ [ ('Le', 'le', 'DET'), ('petit', 'petit', 'ADJ'), ('chat', 'chat', 'NC'), ('est', 'être', 'V'), ('mort', 'mort', 'ADJ'), ('.', '.', 'PONCT') ] …]``` To obtain the tuples, you just need to find the separator character between fields and then extract the required information. To delimit sentences, look for the symbol that marks a blank line: it should be the signal to finish the sentence under construction and start a new one.
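Below is a minimal sketch of the parsing step, only as an illustration and not the official solution: it assumes tab-separated fields and blank lines between sentences, as in standard CoNLL-U. The exercise cell that follows is still left for your own attempt.
```python
sentences = []
current = []

# Read the CoNLL file as plain text (path taken from the exercise statement)
with open('../data/ftb-dependencies.conll', encoding='utf-8') as conll:
    for line in conll:
        line = line.rstrip('\n')
        if not line:
            # A blank line closes the sentence currently under construction
            if current:
                sentences.append(current)
                current = []
        elif not line.startswith('#'):
            fields = line.split('\t')
            # Keep the word (2nd column), its lemma (3rd) and its category (4th)
            current.append((fields[1], fields[2], fields[3]))
    if current:
        sentences.append(current)
```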
###Code
# Your code here
###Output
_____no_output_____
###Markdown
Then, as a second step, build the XML file!
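One possible sketch for this step uses the standard library's `xml.etree.ElementTree`; the element and attribute names used here (`text`, `sentence`, `word`, `lemma`, `pos`) are illustrative assumptions and should be adapted to the exact target structure shown earlier.
```python
import xml.etree.ElementTree as ET

# Hypothetical element/attribute names -- adjust to the expected output structure
root = ET.Element('text')
for sentence in sentences:
    sent_el = ET.SubElement(root, 'sentence')
    for word, lemma, pos in sentence:
        word_el = ET.SubElement(sent_el, 'word', attrib={'lemma': lemma, 'pos': pos})
        word_el.text = word

# Write the result to disk with an XML declaration
ET.ElementTree(root).write('ftb-dependencies.xml', encoding='utf-8', xml_declaration=True)
```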
###Code
# Your code here
###Output
_____no_output_____ |
example/example_case.ipynb | ###Markdown
Applying ridge estimation to Chicago crime data. The ridge estimation approach of DREDGE is applied to Part I crimes as defined by the [Uniform Crime Reporting](https://www.ucrdatatool.gov/offenses.cfm) framework. In the application example below, the latitude-longitude data is drawn from the [Chicago Data Portal](https://data.cityofchicago.org/Public-Safety/Crimes-2001-to-present/ijzp-q8t2), limiting the dataset to Part I crimes from 2018. First, we import the libraries necessary for running this notebook.
###Code
# Import the necessary libraries
import numpy as np
import pandas as pd
import pickle
import timeit
###Output
_____no_output_____
###Markdown
Preparing the data. Next, we load the full data downloaded from the [Chicago Data Portal](https://data.cityofchicago.org/Public-Safety/Crimes-2001-to-present/ijzp-q8t2) and retain only four entries: the primary crime type, the year of the incident report, and the latitude and longitude coordinates per report. We then save the reduced data as a pickled (PKL) file.
###Code
# Specify the file path to the raw data
file_path = 'crimes_chicago.csv'
# Specify the file name for clean data
clean_name = 'cleaned_data'
# Load the data into a pandas DataFrame
raw_data = pd.read_csv(file_path)
# Extract the desired feature columns
cut_data = raw_data[['Primary Type', 'Year', 'Latitude', 'Longitude']]
# Delete data points with NaN entries
clean_data = cut_data.dropna()
# Pickle the clean data for storage
clean_data.to_pickle(clean_name + '.pkl')
###Output
_____no_output_____
###Markdown
As we don't want to load the whole dataset again in case we decide to restart the kernel of this notebook, we can now simply load the saved file.
###Code
# Specify the file path to the data
file_path = 'cleaned_data.pkl'
# Load the data into a pandas DataFrame
data = pd.read_pickle(file_path)
###Output
_____no_output_____
###Markdown
This example case focuses on Part I crimes, which are eight crime types split into four property offenses and four violent offenses. These two sets correspond to the first and remaining four crime types listed below, respectively. We then retain only entries from the year 2018 that fall under these offenses and sample a subset of 5000 crime incident reports to extract a representative sample from the remaining dataset.
###Code
# Specify the crime type(s) of interest
crimes = ['ROBBERY',
'THEFT',
'BURGLARY',
'MOTOR VEHICLE THEFT',
'ASSAULT',
'CRIM SEXUAL ASSAULT',
'ARSON',
'HOMICIDE'
]
# Specify the allowable year interval
years = [2018, 2018]
# Extract the subset of data based on the crime type(s)
data = data.loc[data['Primary Type'].isin(crimes)]
# Extract the subset of data based on the allowable years
data = data.loc[data['Year'].between(years[0], years[1])]
# Use unfiformly-random sampling for a subset
data = data.sample(n = 5000, random_state = 42)
###Output
_____no_output_____
###Markdown
In the next step, we pull the latitude and longitude arrays from the dataset and combine them into a two-column matrix in which each row represents one location associated with a crime incident report.
###Code
# Extract latitudes from the dataset
data_lat = np.asarray(data['Latitude'])
# Extract longitudes from the dataset
data_long = np.asarray(data['Longitude'])
# Merge the two arrays to a coordinatex matrix
coordinates = np.vstack((data_lat, data_long)).T
###Output
_____no_output_____
###Markdown
Running DREDGE for full ridges. DREDGE is built to be easy to import and use. For this reason, we can simply import it and call its primary function, `filaments()`. While optional parameters are available, we will only use the mandatory input, meaning the set of coordinates, to show the functionality of the provided package.
###Code
# Import the main DREDGE function locally
from dredge import filaments
# Start a timer
start = timeit.default_timer()
# Compute the density ridge estimates
ridges = filaments(coordinates)
# Stop the timing and print the time
stop = timeit.default_timer()
print('Time: ', stop - start)
###Output
Input parameters valid!
Preparing for iterations ...
Automatically computed bandwidth: 0.003307
Iteration 1 ...
Iteration 2 ...
Iteration 3 ...
Iteration 4 ...
Iteration 5 ...
Iteration 6 ...
Iteration 7 ...
Iteration 8 ...
Iteration 9 ...
Iteration 10 ...
Iteration 11 ...
Iteration 12 ...
Iteration 13 ...
Iteration 14 ...
Iteration 15 ...
Iteration 16 ...
Iteration 17 ...
Iteration 18 ...
Iteration 19 ...
Iteration 20 ...
Iteration 21 ...
Iteration 22 ...
Iteration 23 ...
Iteration 24 ...
Iteration 25 ...
Iteration 26 ...
Iteration 27 ...
Iteration 28 ...
Iteration 29 ...
Iteration 30 ...
Iteration 31 ...
Iteration 32 ...
Iteration 33 ...
Iteration 34 ...
Iteration 35 ...
Iteration 36 ...
Done!
Time: 385.81796533300076
###Markdown
Plot the coordinates and full ridges. At this point, we have the set of coordinates that indicate the filament lines. The code below plots both the previously sampled crime incident reports in cyan and the density ridges in red, and also surrounds each crime incident report with a larger circle in light cyan to show the general shape of Chicago as reflected through crime reports. On the right side and the top of the plot, 1D histograms of the crime distribution are provided.
###Code
# Import the necessary libraries
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from matplotlib import rcParams
import seaborn as sns
import warnings
# Deactivate deprecated version warning
warnings.filterwarnings('ignore')
# Convert the datasets to pandas dataframes
coordinates_df = pd.DataFrame(dict(longitude = coordinates[:, 1],
latitude = coordinates[:, 0]))
ridges_df = pd.DataFrame(dict(longitude = ridges[:, 1],
latitude = ridges[:, 0]))
# Prepare the figure size for plotting
latitude_diff = np.abs(np.max(coordinates[:, 0])
- np.min(coordinates[:, 0]))
longitude_diff = np.abs(np.max(coordinates[:, 1])
- np.min(coordinates[:, 1]))
scaling_ratio = latitude_diff / longitude_diff
x_axis = 10
y_axis = x_axis * scaling_ratio
# Plot the figure
g = sns.jointplot("latitude",
"longitude",
data = coordinates_df,
color = 'c',
kind = 'scatter',
joint_kws={"s": 5},
alpha = 0.75,
ratio = 5).plot_joint(plt.scatter,
zorder = 0,
color = 'lightblue',
s=500)
g.fig.set_figwidth(x_axis)
g.fig.set_figheight(y_axis)
#g.ax_joint.legend_.remove()
g = sns.regplot("latitude",
"longitude",
data = ridges_df,
color = 'orangered',
fit_reg = False,
marker = 'o',
scatter_kws = {'s':5.0})
g.set_xlabel('Longitude',fontsize = 20)
g.set_ylabel('Latitude', fontsize = 20)
g.tick_params(labelsize = 15)
# Show the created plot
plt.show()
###Output
_____no_output_____
###Markdown
Save the coordinates and ridge estimates. The crime incident report sample used so far can be saved, as can the ridges plotted above.
###Code
# Import the necessary libraries
import numpy as np
# Save the latitude-longitude samples
np.savetxt("coordinate_samples.csv", coordinates, delimiter = ',')
# Save the density ridge estimates
np.savetxt("ridges_full.csv", ridges, delimiter = ',')
###Output
_____no_output_____
###Markdown
Run DREDGE for cut-off ridges. We now repeat the DREDGE run with the same data as before, but we make use of one of DREDGE's optional parameters, namely the ability to only retain ridges for top percentages. Visually, this process can be imagined as horizontally cutting the density landscape at a given percentile, for example the 95$^{\mathrm{th}}$ percentile for the input `percentage = 5`, and extracting the ridges that are located on these mountaintops.
###Code
# Import the necessary libraries
import timeit
# Import the main DREDGE function locally
from dredge import filaments
# Start a timer
start = timeit.default_timer()
# Compute the density ridge estimates
ridges = filaments(coordinates, percentage = 5)
# Stop the timing and print the time
stop = timeit.default_timer()
print('Time: ', stop - start)
###Output
Input parameters valid!
Preparing for iterations ...
Automatically computed bandwidth: 0.003307
Iteration 1 ...
Iteration 2 ...
Iteration 3 ...
Iteration 4 ...
Iteration 5 ...
Iteration 6 ...
Iteration 7 ...
Iteration 8 ...
Iteration 9 ...
Iteration 10 ...
Iteration 11 ...
Iteration 12 ...
Iteration 13 ...
Iteration 14 ...
Iteration 15 ...
Iteration 16 ...
Iteration 17 ...
Iteration 18 ...
Iteration 19 ...
Iteration 20 ...
Iteration 21 ...
Iteration 22 ...
Iteration 23 ...
Iteration 24 ...
Iteration 25 ...
Iteration 26 ...
Iteration 27 ...
Iteration 28 ...
Iteration 29 ...
Iteration 30 ...
Iteration 31 ...
Iteration 32 ...
Iteration 33 ...
Iteration 34 ...
Iteration 35 ...
Iteration 36 ...
Iteration 37 ...
Iteration 38 ...
Done!
Time: 421.03087381797377
###Markdown
Plot the coordinates and cut-off ridges. We then use the same code as before, with the same color coding and plotted coordinates.
###Code
# Import the necessary libraries
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from matplotlib import rcParams
import seaborn as sns
import warnings
# Deactivate deprecated version warning
warnings.filterwarnings('ignore')
# Convert the datasets to pandas dataframes
coordinates_df = pd.DataFrame(dict(longitude = coordinates[:, 1],
latitude = coordinates[:, 0]))
ridges_df = pd.DataFrame(dict(longitude = ridges[:, 1],
latitude = ridges[:, 0]))
# Prepare the figure size for plotting
latitude_diff = np.abs(np.max(coordinates[:, 0])
- np.min(coordinates[:, 0]))
longitude_diff = np.abs(np.max(coordinates[:, 1])
- np.min(coordinates[:, 1]))
scaling_ratio = latitude_diff / longitude_diff
x_axis = 10
y_axis = x_axis * scaling_ratio
# Plot the figure
g = sns.jointplot("latitude",
"longitude",
data = coordinates_df,
color = 'c',
kind = 'scatter',
joint_kws={"s": 5},
alpha = 0.75,
ratio = 5).plot_joint(plt.scatter,
zorder = 0,
color = 'lightblue',
s=500)
g.fig.set_figwidth(x_axis)
g.fig.set_figheight(y_axis)
#g.ax_joint.legend_.remove()
g = sns.regplot("latitude",
"longitude",
data = ridges_df,
color = 'orangered',
fit_reg = False,
marker = 'o',
scatter_kws = {'s':5.0})
g.set_xlabel('Longitude',fontsize = 20)
g.set_ylabel('Latitude', fontsize = 20)
g.tick_params(labelsize = 15)
# Show the created plot
plt.show()
###Output
_____no_output_____
###Markdown
Save the ridge estimates. As before, these ridges can be saved. We don't need to save the coordinates again, as they're the same as above.
###Code
# Import the necessary libraries
import numpy as np
# Save the density ridge estimates
np.savetxt("ridges_cut.csv", ridges, delimiter = ',')
###Output
_____no_output_____ |
notebooks/ex_gaussian_mixture/ex_hyperparam/analyze_07_sim.ipynb | ###Markdown
Plotting results
###Code
ax = sns.pointplot(x="lamPT", y="disentanglement_metric", hue=None, data=results[0])
ax = sns.pointplot(x="lamPT", y="disentanglement_metric", hue="beta", data=results[0])
ax = sns.pointplot(x="lamPT", y="reconstruction_loss", hue="beta", data=results[0])
ax = sns.pointplot(x="lamPT", y="kl_normal_loss", hue="beta", data=results[0])
ax = sns.pointplot(x="lamPT", y="total_correlation", hue="beta", data=results[0])
ax = sns.pointplot(x="lamPT", y="pt_local_independence_loss", hue="beta", data=results[0])
ax = sns.pointplot(x="lamPT", y="kl_normal_loss", hue="beta", data=results[0])
###Output
_____no_output_____
###Markdown
Visualization
###Code
# GET MODEL AND RESULT
idx = np.argmin(np.array(results[0]['disentanglement_metric']))
model = models[0][idx]
result = results[0].loc[idx]
print('disentanglement metric: {:.3f}'.format(result['disentanglement_metric']))
params_to_vary = ['num_epochs', 'seed', 'hidden_dim', 'beta', 'mu', 'lamPT', 'lamCI', 'dirname', 'latent_means', 'latent_vars']
for name in params_to_vary:
setattr(p, name, result[name])
# SEED
random.seed(p.seed)
np.random.seed(p.seed)
torch.manual_seed(p.seed)
# GET DATALOADERS
(_, _), (test_loader, test_latents) = define_dataloaders(p)
# EVALUATE TEST DATA
data = test_loader.dataset.data.to(device)
recon_data, latent_dist, latent_sample = model(data)
plot_2d_latent_samples(latent_sample)
plt.title("Plot of test latent samples")
plt.show()
plot_traversals(model, data, lb=1000, ub=2000, num=100, max_traversal=.5)
plt.title("Plot of traversals")
plt.show()
plot_traversals(model, data, lb=0, ub=3000, num=20,
draw_data=True, draw_recon=True, max_traversal=.1)
###Output
_____no_output_____ |
basic projects/Part 4 - Fashion-MNIST (Exercises).ipynb | ###Markdown
Classifying Fashion-MNISTNow it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial with neural networks where you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world.In this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebooks though as you work through this.First off, let's load the dataset through torchvision.
###Code
import torch
from torchvision import datasets, transforms
import helper
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
# Download and load the training data
trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
Here we can see one of the images.
###Code
image, label = next(iter(trainloader))
helper.imshow(image[0,:]);
###Output
_____no_output_____
###Markdown
Building the networkHere you should define your network. As with MNIST, each image is 28x28 which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. We suggest you use ReLU activations for the layers and to return the logits or log-softmax from the forward pass. It's up to you how many layers you add and the size of those layers.
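A possible starting point, as a hedged sketch only (the layer sizes and the use of log-softmax are illustrative assumptions, not part of the exercise statement):
```
# Minimal sketch of one possible network; sizes are illustrative assumptions
from torch import nn

model = nn.Sequential(nn.Linear(784, 256),
                      nn.ReLU(),
                      nn.Linear(256, 128),
                      nn.ReLU(),
                      nn.Linear(128, 10),
                      nn.LogSoftmax(dim=1))   # returns log-probabilities
```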
###Code
# TODO: Define your network architecture here
###Output
_____no_output_____
###Markdown
Train the networkNow you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.htmlloss-functions) ( something like `nn.CrossEntropyLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`).Then write the training code. Remember the training pass is a fairly straightforward process:* Make a forward pass through the network to get the logits * Use the logits to calculate the loss* Perform a backward pass through the network with `loss.backward()` to calculate the gradients* Take a step with the optimizer to update the weightsBy adjusting the hyperparameters (hidden units, learning rate, etc), you should be able to get the training loss below 0.4.
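A hedged sketch of how the training pass described above could look (the criterion, optimizer, learning rate and number of epochs are assumptions; `model` and `trainloader` come from the cells above):
```
# Sketch only: forward pass -> loss -> backward pass -> optimizer step
from torch import nn, optim

criterion = nn.NLLLoss()                               # pairs with a LogSoftmax output
optimizer = optim.Adam(model.parameters(), lr=0.003)   # lr is an assumption

epochs = 5
for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        images = images.view(images.shape[0], -1)      # flatten 28x28 -> 784
        optimizer.zero_grad()                          # reset accumulated gradients
        output = model(images)                         # forward pass (logits/log-probs)
        loss = criterion(output, labels)               # compute loss
        loss.backward()                                # backward pass
        optimizer.step()                               # update weights
        running_loss += loss.item()
    print(f"Training loss: {running_loss/len(trainloader):.3f}")
```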
###Code
# TODO: Create the network, define the criterion and optimizer
# TODO: Train the network here
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
# Test out your network!
dataiter = iter(testloader)
images, labels = dataiter.next()
img = images[0]
# Convert 2D image to 1D vector
img = img.resize_(1, 784)
# TODO: Calculate the class probabilities (softmax) for img
ps = torch.exp(model(img))  # one possible completion; assumes the model defined above returns log-probabilities
# Plot the image and probabilities
helper.view_classify(img.resize_(1, 28, 28), ps, version='Fashion')
###Output
_____no_output_____ |
Nancy/Ariane/Summarize Particles.ipynb | ###Markdown
A notebook to summarize a large particle release experiment. Three particle-release experiments were undertaken: 1. 1000 particles initialized in SoG surface waters (0-50m) 2. 1000 particles initialized in SoG intermediate waters (50-200m) 3. 1000 particles initialized in SJdF deep waters (100-200m) All particle trajectories were initialized on June 1, 2015 and integrated until Jul 31, 2015. Velocity fields are from hourly nowcasts.
###Code
import netCDF4 as nc
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import animation
import pandas as pd
import datetime
import os
from salishsea_tools import viz_tools
%matplotlib inline
sns.set_color_codes()
grid_B=nc.Dataset('/data/nsoontie/MEOPAR/NEMO-forcing/grid/bathy_meter_SalishSea2.nc')
data=nc.Dataset('/data/dlatorne/MEOPAR/SalishSea/nowcast/01jul15/SalishSea_1d_20150701_20150701_grid_T.nc')
depth = data.variables['deptht'][:]
runs = ['SoG_surface', 'SoG_intermediate', 'JDF']
path = '/ocean/nsoontie/MEOPAR/Ariane/results/2month-summer/'
init_x={}; init_y={}; init_z={}
final_x={}; final_y={}; final_z={}; final_t={}; final_age={}; z={}
t_lon={}; t_lat={}; t_depth={}; t_time={};
for run in runs:
f = nc.Dataset(os.path.join(path,run,'ariane_trajectories_qualitative.nc'))
init_x[run] = f.variables['init_x']
init_y[run] = f.variables['init_y']
init_z[run]= f.variables['init_z']
final_x[run] = f.variables['final_x']
final_y[run] = f.variables['final_y']
final_z[run] = f.variables['final_z']
final_age[run] = f.variables['final_age']
final_t[run] = f.variables['final_t']
z[run]=np.array(final_z[run][:],dtype=int)
t_lat[run] = f.variables['traj_lat']
t_lon[run] = f.variables['traj_lon']
t_depth[run] = f.variables['traj_depth']
t_time[run] = f.variables['traj_time']
###Output
_____no_output_____
###Markdown
Initial Positions
###Code
fig,axs = plt.subplots(1,3,figsize=(15,6))
for run,ax in zip(runs, axs):
ax.plot(init_x[run],init_y[run],'bo')
viz_tools.plot_coastline(ax,grid_B)
ax.set_title(run)
###Output
_____no_output_____
###Markdown
Final Positions
###Code
fig,axs = plt.subplots(1,3,figsize=(15,6))
cmap = sns.cubehelix_palette( as_cmap=True)
for run,ax in zip(runs, axs):
mesh=ax.scatter(final_x[run],final_y[run],c=depth[z[run]],vmin=0,vmax=300,cmap=cmap)
cbar=plt.colorbar(mesh,ax=ax)
cbar.set_label('Depth [m]')
viz_tools.plot_coastline(ax,grid_B)
ax.set_title(run)
###Output
_____no_output_____
###Markdown
Distribution
###Code
fig,axs = plt.subplots(1,3,figsize=(15,6))
cmap = sns.cubehelix_palette( as_cmap=True)
for run,ax in zip(runs, axs):
hist,xedges,yedges = np.histogram2d(t_lon[run][:].flatten(),t_lat[run][:].flatten()
,bins=100,range=[[-126,-122],[47,50.5]])
hist=np.ma.masked_values(hist,0)
mesh = ax.pcolormesh(xedges,yedges,hist.T,shading='flat',cmap=cmap,vmin=0,vmax=8000)
cbar=plt.colorbar(mesh,ax=ax)
cbar.set_label('Number of particles in bin')
viz_tools.plot_coastline(ax,grid_B,coords='map')
ax.set_title(run)
###Output
_____no_output_____ |
CAPS_analysis/Permanova/16S/sckitbio_permanova.ipynb | ###Markdown
Time 5 16s
###Code
df5 = pd.read_csv('otu_table_time5.tsv', sep='\t',index_col=0)
ids5 = list(df5.columns.values)
df5 = df5.T
data5 = df5.values.tolist()
df5
bc_dm5 = beta_diversity("braycurtis", data5, ids5)
bc_dm5
bc_pcoa5 = pcoa(bc_dm5)
ax5 = bc_pcoa5.plot(mapping_time5, 'Category', axis_labels=('PC 1', 'PC 2', 'PC 3'), title='T5', cmap='Accent', s=50)
ax5.set_size_inches(12, 8)
results5_an = anosim(bc_dm5, mapping_time5, column='Category', permutations=999)
results5_an
results5_p = permanova(bc_dm5, mapping_time5, column='Category', permutations=999)
results5_p
###Output
_____no_output_____
###Markdown
Time 2 16s
###Code
df5 = pd.read_csv('otu_table_time2.tsv', sep='\t',index_col=0)
ids5 = list(df5.columns.values)
df5 = df5.T
data5 = df5.values.tolist()
df5
bc_dm5 = beta_diversity("braycurtis", data5, ids5)
bc_dm5
bc_pcoa5 = pcoa(bc_dm5)
ax5 = bc_pcoa5.plot(mapping_time2, 'Category', axis_labels=('PC 1', 'PC 2', 'PC 3'), title='T2', cmap='Accent', s=50)
ax5.set_size_inches(12, 8)
results5_an = anosim(bc_dm5, mapping_time2, column='Category', permutations=999)
results5_an
results5_p = permanova(bc_dm5, mapping_time2, column='Category', permutations=999)
results5_p
###Output
_____no_output_____ |
notebooks/.ipynb_checkpoints/10_SIR_Dashboard_Large_V1.0-checkpoint.ipynb | ###Markdown
9.1 Simulative approach to calculate SIR curves - The SIR model is a simple model, due to Kermack and McKendrick, of an epidemic of an infectious disease in a large population. We assume the population consists of three types of individuals, whose numbers are denoted by the letters S (susceptible), I (infected) and R (recovered). - To check the SIR model, click on the link: SIR Model for Spread of Disease.
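In equation form, the system integrated below (see `SIR_model`) is
$$\frac{dS}{dt} = -\beta\,\frac{S\,I}{N_0},\qquad \frac{dI}{dt} = \beta\,\frac{S\,I}{N_0} - \gamma I,\qquad \frac{dR}{dt} = \gamma I,$$
with infection rate $\beta$, recovery rate $\gamma$ and total population $N_0$, so that $S+I+R=N_0$ stays constant.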
###Code
# basic parameters set
# beta/gamma is denoted as 'basic reproduction number'
N0=1000000 # max susceptible population
beta=0.4 # infection spread dynamics
gamma=0.1 # recovery rate
# condition I0+S0+R0=N0
I0=df_analyse.Germany[35]
S0=N0-I0
R0=0
def SIR_model(SIR,beta,gamma):
''' Simple SIR model
S: susceptible population
I: infected people
R: recovered people
beta:
    overall condition is that the sum of changes (differences) sums up to 0
dS+dI+dR=0
S+I+R= N (constant size of population)
'''
S,I,R=SIR
    dS_dt=-beta*S*I/N0  # beta*S*I/N0 is the rate of new infections (contacts between S and I)
dI_dt=beta*S*I/N0-gamma*I
dR_dt=gamma*I
return([dS_dt,dI_dt,dR_dt])
SIR=np.array([S0,I0,R0])
propagation_rates=pd.DataFrame(columns={'susceptible':S0,
                                        'infected':I0,
                                        'recovered':R0})
for each_t in np.arange(100):
new_delta_vec=SIR_model(SIR,beta,gamma)
SIR=SIR+new_delta_vec
propagation_rates=propagation_rates.append({'susceptible':SIR[0],
'infected':SIR[1],
'recovered':SIR[2]}, ignore_index=True)
fig, ax1 = plt.subplots(1, 1)
ax1.plot(propagation_rates.index,propagation_rates.infected,label='infected',color='k')
ax1.plot(propagation_rates.index,propagation_rates.recovered,label='recovered')
ax1.plot(propagation_rates.index,propagation_rates.susceptible,label='susceptible')
ax1.set_ylim(10, 1000000)
ax1.set_yscale('linear')
ax1.set_title('Scenario SIR simulations (demonstration purposes only)',size=16)
ax1.set_xlabel('time in days',size=16)
ax1.legend(loc='best',
prop={'size': 16});
###Output
_____no_output_____
###Markdown
Fitting the parameters of SIR model
###Code
ydata = np.array(df_analyse.Germany[35:])
t=np.arange(len(ydata))
# ensure re-initialization
I0=ydata[0]
S0=N0-I0
R0=0
beta
def SIR_model_t(SIR,t,beta,gamma):
''' Simple SIR model
S: susceptible population
t: time step, mandatory for integral.odeint
I: infected people
R: recovered people
beta:
    overall condition is that the sum of changes (differences) sums up to 0
dS+dI+dR=0
S+I+R= N (constant size of population)
'''
S,I,R=SIR
    dS_dt=-beta*S*I/N0  # beta*S*I/N0 is the rate of new infections (contacts between S and I)
dI_dt=beta*S*I/N0-gamma*I
dR_dt=gamma*I
return dS_dt,dI_dt,dR_dt
def fit_odeint(x, beta, gamma):
'''
helper function for the integration
'''
return integrate.odeint(SIR_model_t, (S0, I0, R0), t, args=(beta, gamma))[:,1] # we only would like to get dI
# example curve of our differential equation
popt=[0.4,0.1]
fit_odeint(t, *popt)
# the resulting curve has to be fitted
# free parameters are here beta and gamma
popt, pcov = optimize.curve_fit(fit_odeint, t, ydata)
perr = np.sqrt(np.diag(pcov))
print('standard deviation errors : ',str(perr), ' start infect:',ydata[0])
print("Optimal parameters: beta =", popt[0], " and gamma = ", popt[1])
# get the final fitted curve
fitted=fit_odeint(t, *popt)
plt.semilogy(t, ydata, 'o')
plt.semilogy(t, fitted)
plt.title("Fit of SIR model for Germany cases")
plt.ylabel("Population infected")
plt.xlabel("Days")
plt.show()
print("Optimal parameters: beta =", popt[0], " and gamma = ", popt[1])
print("Basic Reproduction Number R0 " , popt[0]/ popt[1])
print("This ratio is derived as the expected number of new infections (these new infections are sometimes called secondary infections from a single infection in a population where all subjects are susceptible. @wiki")
###Output
_____no_output_____
###Markdown
Dynamic beta in SIR (infection rate)
###Code
t_initial=28         # days before measures (Germany), while infected cases were rising unchecked
t_intro_measures=14  # days over which the lockdown measures were introduced
t_hold=21            # days the lockdown was held
t_relax=21           # days over which the lockdown was eased
beta_max=0.4         # maximum infection rate (no measures)
beta_min=0.11        # minimum infection rate (full measures)
gamma=0.1            # recovery rate
# concatenate all parameter with their relatable time period
pd_beta=np.concatenate((np.array(t_initial*[beta_max]),
np.linspace(beta_max,beta_min,t_intro_measures),
np.array(t_hold*[beta_min]),
np.linspace(beta_min,beta_max,t_relax),
))
pd_beta
SIR=np.array([S0,I0,R0])
propagation_rates=pd.DataFrame(columns={'susceptible':S0,
                                        'infected':I0,
                                        'recovered':R0})
for each_beta in pd_beta:
new_delta_vec=SIR_model(SIR,each_beta,gamma)
SIR=SIR+new_delta_vec
propagation_rates=propagation_rates.append({'susceptible':SIR[0],
'infected':SIR[1],
'recovered':SIR[2]}, ignore_index=True)
fig, ax1 = plt.subplots(1, 1)
ax1.plot(propagation_rates.index,propagation_rates.infected,label='infected',linewidth=3)
t_phases=np.array([t_initial,t_intro_measures,t_hold,t_relax]).cumsum()
ax1.bar(np.arange(len(ydata)),ydata, width=0.8,label=' current infected Germany',color='r')
ax1.axvspan(0,t_phases[0], facecolor='b', alpha=0.2,label='no measures')
ax1.axvspan(t_phases[0],t_phases[1], facecolor='b', alpha=0.3,label='hard measures introduced')
ax1.axvspan(t_phases[1],t_phases[2], facecolor='b', alpha=0.4,label='hold measures')
ax1.axvspan(t_phases[2],t_phases[3], facecolor='b', alpha=0.5,label='relax measures')
ax1.axvspan(t_phases[3],len(propagation_rates.infected), facecolor='b', alpha=0.6,label='repeat hard measures')
ax1.set_ylim(10, 1.5*max(propagation_rates.infected))
ax1.set_yscale('log')
ax1.set_title('Scenario SIR simulations (demonstration purposes only)',size=16)
ax1.set_xlabel('time in days',size=16)
ax1.legend(loc='best',
prop={'size': 16});
###Output
_____no_output_____
###Markdown
Dash App for SIR model - we will use the large dataset file containing multiple countries (more than 100)
###Code
#import data frame
data_raw = pd.read_csv('../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
country_list = data_raw['Country/Region'].unique() #making country_list
date = data_raw.columns[4:]
df_ssd = pd.DataFrame({'Date': date})
#convert data_raw DataFrame into format that can be used for SIR algorithm
for each in country_list:
df_ssd[each] = np.array(data_raw[data_raw['Country/Region'] == each].iloc[:,4::].sum(axis=0)).T
df_ssd.to_csv("../data/raw/COVID-19/csse_covid_19_data/SIR.csv", sep = ';', index=False)
df_analyse=pd.read_csv('../data/raw/COVID-19/csse_covid_19_data/SIR.csv',sep=';')
df_analyse.sort_values('Date',ascending=True).head()
###Output
_____no_output_____
###Markdown
SIR model and fitted curve parameter for all countries
###Code
# Initialize parameters
N0 = 1000000
beta = 0.4
gamma = 0.1
I0=df_analyse.Germany[35]
S0=N0-I0
R0=0
df_data = df_analyse[35:] # need to be careful here because the starting offset differs for each country
t = np.arange(df_data.shape[0])
#calculate optimize parameters for every country
for country in df_data.columns[1:]:
ydata = np.array(df_data[df_data[country]>0][country]) ## consider only value, which greater than zero to solve above mentioned problem
t = np.arange(len(ydata))
I0=ydata[0]
S0=N0-I0
R0=0
popt=[0.4,0.1]
fit_odeint(t, *popt)
popt, pcov = optimize.curve_fit(fit_odeint, t, ydata, maxfev=5000)
perr = np.sqrt(np.diag(pcov))
fitted=fit_odeint(t, *popt)
fitted_pad = np.concatenate((np.zeros(df_data.shape[0]-len(fitted)) ,fitted))
df_data[country + '_fitted'] = fitted_pad
df_data = df_data.reset_index(drop=True)
df_data.to_csv('../data/processed/SIR_fitted.csv', sep = ';')
df_data.head()
#create plot for germany to see if the calculation works
fig = go.Figure()
fig.add_trace(go.Scatter(x = df_data['Date'],y = df_data['Germany_fitted'],name= 'fitted_germany',
mode='markers+lines',line_width = 1,marker_size = 3),
)
fig.add_trace(go.Scatter(x = df_data['Date'],y = df_data['Germany'],name= 'source_germany',
mode='markers+lines',line_width = 1,marker_size = 3),
)
fig.update_layout(title={'text': 'SIR fitted curve with confirmed cases [Only for Germany]','y':0.9,'x':0.5,'xanchor': 'center','yanchor': 'top'},
xaxis_title='Timeline in Days', yaxis_title='Total cases of infected people',width=800, height=600)
fig.update_yaxes(type = 'log')
fig.update_layout(xaxis_rangeslider_visible=True)
# display a different colour for each country's pair of curves; colours are re-randomized whenever the colour list is regenerated
color_list = []
for i in range(200):
var = '#%02x%02x%02x'%(random.randint(0,255),random.randint(0,255),random.randint(0,255))
color_list.append(var)
# create dashboard app containing plots of source and fitted SIR curve data for the complete dataset
fig = go.Figure()
app = dash.Dash()
app.layout = html.Div([
dcc.Markdown('''
# Data Science Project @ TU_KL on COVID-19 Dataset-Part 2
## Real and simulated number of infected people
* The default layout contains the confirmed infected cases in the log-scale format on the Y-axis
and Timeline in Days on the X-axis.
### The dropdown menu enables selection of one or multiple countries for visualization.
* This dashboard plots two curves for each country:
1. The first curve represents the confirmed infected cases along the timeline.
2. The second curve represents the simulated infected cases after applying the SIR model along the timeline.
'''),
dcc.Markdown('''
## Multi-Select Country for visualization
'''),
dcc.Dropdown(
id='country_drop_down',
options=[ {'label': each,'value':each} for each in df_data.columns[1:200]],
value=['Germany','India'], # which are pre-selected
multi=True),dcc.Graph(figure=fig, id='main_window_slope')])
@app.callback(
Output('main_window_slope', 'figure'),
[Input('country_drop_down', 'value')])
def update_figure(country_list):
v = 0
my_yaxis={'type':"log",'title':'Confirmed infected people (From johns hopkins csse, log-scale)'}
traces = []
for each in country_list:
traces.append(dict(x=df_data['Date'],y=df_data[each],
mode='line', line = dict(color = color_list[v]), opacity=1.0,name=each))
traces.append(dict(x=df_data['Date'],
y=df_data[each+'_fitted'],
mode='markers+lines',line = dict(color=color_list[v]), opacity=1.0,name=each+'_simulated'))
v = v+1
return {
'data': traces,
'layout': dict (
width=1280,height=720,
xaxis={'title':'Timeline','tickangle':-45,'nticks':20,
'tickfont':dict(size=14,color="#0c6887"),},yaxis=my_yaxis)}
if __name__ == '__main__':
app.run_server(debug=True, use_reloader=False)
###Output
_____no_output_____ |
03_multi_target_opt.ipynb | ###Markdown
Almost the entire process is the same as before; we only need to add one more optimization target.
###Code
target_env = pd.read_csv('./data/pap_env_RDA_open.csv', skiprows=1, header=None)
target_env.loc[:, 0] = pd.DatetimeIndex(target_env.loc[:, 0])
template = pd.read_excel('./data/data_template.xls', header=None)
target_env = pd.concat([template.loc[:11], target_env], axis=0)
target_env.to_excel('./data/pap_env_processed.xls', index=False, header=False)
###Output
/tmp/ipykernel_438153/2866564418.py:1: FutureWarning: As the xlwt package is no longer maintained, the xlwt engine will be removed in a future version of pandas. This is the only engine in pandas that supports writing in the xls format. Install openpyxl and write to an xlsx file instead. You can set the option io.excel.xls.writer to 'xlwt' to silence this warning. While this option is deprecated and will also raise a warning, it can be globally set and the warning suppressed.
target_env.to_excel('./data/pap_env_processed.xls', index=False, header=False)
###Markdown
Loading growth data
###Code
target_output = pd.read_csv(f'./data/pap_grw_RDA_open.csv', index_col='date')
target_output['LAI'] = (target_output['lefCunt']*target_output['lefLt']*target_output['lefBt'])/1000
target_output.index = pd.DatetimeIndex(target_output.index)
target_output.loc[target_output['LAI'] == 0, 'LAI'] = np.nan
target_output.loc[:, 'LAI'] = target_output.loc[:, 'LAI'].ffill()
target_output = target_output.loc[~target_output.loc[:, 'LAI'].isna()]
target_output = target_output.loc[:, ['LAI', 'cum_harvest']]
target_output.columns = ['LAI', 'TWSO']
target_output.TWSO.plot()
###Output
_____no_output_____
###Markdown
We select TWSO as the yield indicator and use it as an optimization target. Note that the values used in the simulation are dry weight, not fresh weight.
###Code
minmax = pd.concat([target_output.min(), target_output.max()], axis=1)
minmax.columns = ['min', 'max']
###Output
_____no_output_____
###Markdown
Since the two indicators have different ranges, they need to be rescaled so that they contribute equally to the loss. When `minmax` is provided, `ObjectiveFunctionCalculator` normalizes each target indicator's range to 0-1.
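As a rough sketch of the 0-1 scaling this implies (the actual logic lives inside `ObjectiveFunctionCalculator`, which is defined outside this notebook):
```
# Illustrative only: min-max scale each target column to [0, 1] using `minmax` from above
scaled_output = (target_output - minmax['min']) / (minmax['max'] - minmax['min'])
```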
###Code
CROP_NAME = 'soybean'
VARIETY_NAME = 'Soybean_906'
START_DATE = str(pd.Timestamp('2018-03-23').date()) # starting date was deleted because of dropna
END_DATE = str(target_output.index[-1].date())
CULT_PERIOD = (pd.Timestamp(END_DATE)- pd.Timestamp(START_DATE)).days
print(f'from {START_DATE} to {END_DATE}: {CULT_PERIOD} days')
###Output
from 2018-03-23 to 2018-09-21: 182 days
###Markdown
Input data for WOFOST
###Code
cropd = YAMLCropDataProvider()
soild = DummySoilDataProvider()
sited = WOFOST72SiteDataProvider(WAV=50, CO2=360.)
params = ParameterProvider(cropdata=cropd, sitedata=sited, soildata=soild)
# Agromanagement for the custom data
agro_yaml = f"""
- {START_DATE}:
CropCalendar:
crop_name: {CROP_NAME}
variety_name: {VARIETY_NAME}
crop_start_date: {START_DATE}
crop_start_type: emergence
crop_end_date: {END_DATE}
max_duration: {CULT_PERIOD}
TimedEvents: null
StateEvents: null
"""
agro = yaml.load(agro_yaml, Loader=yaml.FullLoader)
wdp = ExcelWeatherDataProvider('./data/pap_env_processed.xls')
###Output
_____no_output_____
###Markdown
WOFOST Initialization
###Code
wofost = Wofost72_PP(params, wdp, agro)
wofost.run_till_terminate()
###Output
_____no_output_____
###Markdown
Parameter selection
###Code
target_params = {'TDWI':[0, 2000], 'TBASE':[0, 30], 'TSUM1':[0, 1000],'TSUM2':[1000, 4000],
'RGRLAI':[0, 0.01], 'SPAN':[0, 500], 'PERDL':[0, 1],
'SLATB':[0.001, 0.005, 3, [0, 2]], 'FRTB':[0, 1, 3, [0, 2]], 'AMAXTB':[0, 50, 3, [0, 2]]}
target_obj = target_output.columns
origin_value = {}
for k in target_params:
v = params._cropdata[k]
origin_value[k] = v
###Output
_____no_output_____
###Markdown
HyperOpt preparation
###Code
space = {}
for k in [_ for _ in target_params if not _.endswith('TB')]:
space[k] = hp.uniform(k, target_params[k][0], target_params[k][1])
for k in [_ for _ in target_params if _.endswith('TB')]:
for i in range(target_params[k][2]):
space[f'{k}_{i}'] = hp.uniform(f'{k}_{i}', target_params[k][0], target_params[k][1])
###Output
_____no_output_____
###Markdown
Optimization process
###Code
objfunc_calculator = ObjectiveFunctionCalculator(target_params, target_obj, params, wdp, agro, target_output, minmax)
best = fmin(fn=objfunc_calculator, space=space, algo=tpe.suggest, max_evals=200)
###Output
100%|██████| 200/200 [00:51<00:00, 3.91trial/s, best loss: 0.01819797752283385]
###Markdown
Result
###Code
print('<Calibration result>')
for k in target_params:
if not k.endswith('TB'):
print(f'{k}: {origin_value[k]} -> {best[k]:.2f}')
else:
temp_list = []
for v1, v2 in zip(np.linspace(target_params[k][-1][0], target_params[k][-1][1], target_params[k][2]),
[_ for _ in best if _.startswith(k)]):
temp_list.append(v1)
temp_list.append(best[v2])
print(f'{k}: {origin_value[k]} -> {[np.round(_, 4) for _ in temp_list]}')
fig = plt.figure(figsize=((8/2.54*2), (6/2.54*1.8)))
ax = plt.subplot()
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 5))
ax.plot(target_output.index, target_output.LAI, 'o', ms=5, mew=0.5, mec='k', c='k', label="Observation")
_ = objfunc_calculator(best)
ax.plot(objfunc_calculator.sim.index, objfunc_calculator.sim.LAI, c=cmap[0], label="Optimized")
_ = objfunc_calculator(origin_value, is_train=False)
ax.plot(objfunc_calculator.sim.index, objfunc_calculator.sim.LAI, "--", c='gray', label="Original")
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b'))
ax.xaxis.set_major_locator(LinearLocator(12))
ax.yaxis.set_major_locator(LinearLocator(6))
ax.yaxis.set_minor_locator(LinearLocator(11))
ax.set_xlabel('Month')
ax.set_ylabel('LAI')
ax.legend(frameon=False, loc=0)
fig.tight_layout()
plt.show()
compare_df = pd.read_csv(f'./data/pap_grw_RDA_open.csv', index_col='date')
compare_df.index = pd.DatetimeIndex(compare_df.index)
# rerun with the best parameters found
fig = plt.figure(figsize=((8/2.54*2), (6/2.54*1.8)))
ax = plt.subplot()
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 5))
ax.plot(compare_df.index, compare_df.cum_harvest, 'o', ms=5, mew=0.5, mec='k', c='k', label="Observation")
_ = objfunc_calculator(best)
ax.plot(objfunc_calculator.sim.index, objfunc_calculator.sim.TWSO, c=cmap[0], label="Optimized")
_ = objfunc_calculator(origin_value, is_train=False)
ax.plot(objfunc_calculator.sim.index, objfunc_calculator.sim.TWSO, "--", c='gray', label="Original")
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b'))
ax.xaxis.set_major_locator(LinearLocator(12))
ax.yaxis.set_major_locator(LinearLocator(6))
ax.yaxis.set_minor_locator(LinearLocator(11))
ax.set_xlabel('Month')
ax.set_ylabel('TWSO')
ax.legend(frameon=False, loc=0)
fig.tight_layout()
plt.show()
###Output
_____no_output_____ |
NLP_libraries/NLTK Introduction (with output).ipynb | ###Markdown
Natural Language Toolkit (NLTK)**NLTK** is a leading platform for building Python programs to work with human language data. It provides easy-to-use interfaces to [over 50 corpora and lexical resources](http://www.nltk.org/nltk_data/) such as WordNet, along with a suite of text processing libraries for classification, tokenization, stemming, tagging, parsing, and semantic reasoning, wrappers for industrial-strength NLP libraries, and an active discussion forum.http://www.nltk.org/NLTK library documentation (reference) = *Use it to look up how to use a particular NLTK library function** https://www.nltk.org/api/nltk.html---NLTK wiki (collaboratively edited documentation):* https://github.com/nltk/nltk/wiki Book: Natural Language Processing with Python NLTK book provides a practical introduction to programming for language processing.Written by the creators of NLTK, it guides the reader through the fundamentals of writing Python programs, working with corpora, categorizing text, analyzing linguistic structure, and more.Online: http://www.nltk.org/book/* we will start with Chapter 1: ["Language Processing and Python"](http://www.nltk.org/book/ch01.html)---
###Code
# configuration for the notebook
%matplotlib notebook
###Output
_____no_output_____
###Markdown
1) Getting started NLTK book: http://www.nltk.org/book/ch01.html#getting-started-with-nltk * Loading NLTK (Python module) * Downloading NLTK language resources (corpora, ...)
###Code
# In order to use a Python library, we need to import (load) it
import nltk
# Let's check what NLTK version we have (for easier troubleshooting and reproducibility)
nltk.__version__
# If your NLTK version is lower than 3.4.3 please update if possible.
# Updating in Anaconde can be done using this command:
# conda update nltk
###Output
_____no_output_____
###Markdown
nltk.Text**`nltk.Text` is a simple NLTK helper for loading and exploring textual content (a sequence of words / string tokens):**... intended to support initial exploration of texts (via the interactive console). It can perform a variety of analyses on the text's contexts (e.g., counting, concordancing, collocation discovery), and display the results. Documentation: [nltk.Text](https://www.nltk.org/api/nltk.html#nltk.text.Text) * lists what we can do with text once it is loaded into nltk.Text(...)
###Code
# Now we can try a simple example:
my_word_list = ["This", "is", "just", "an", "example", "Another", "example", "here"]
my_text = nltk.Text(my_word_list)
my_text
type(my_text)
# How many times does the word "example" appear?
my_text.count("example")
# Notes:
# - my_text = our text, processed (loaded) by NLTK
# - technically: a Python object
# - my_text.count(...) = requesting the object to perform a .count(...) function and return the result
# - technically: calling a .count() method
# count works on tokens (full words in this case)
my_text.count('exam')
'exam' in my_text
'example' in my_text
###Output
_____no_output_____
###Markdown
TokenizingLet's convert a text string into nltk.Text.First, we need to split it into tokens (to *tokenize* it).
###Code
# We need to download a package containing punctuation before we can tokenize
import nltk
nltk.download('punkt')
# Splitting text into tokens (words, ...) = tokenizing
from nltk.tokenize import word_tokenize
excerpt = "NLTK has been called “a wonderful tool for teaching, and working in, computational linguistics using Python,” and “an amazing library to play with natural language.”"
tokens = word_tokenize(excerpt)
tokens[:6]
my_text2 = nltk.Text(tokens)
print(my_text2.count("NLTK"))
###Output
1
###Markdown
Downloading NLTK language resourcesNLTK also contains many language resources (corpora, ...) but you have select and download them separately (in order to save disk space and only download what is needed).Let's download text collections used in the NLTK book: * `nltk.download("book")`Note: you can also download resources interactively:* `nltk.download()`
###Code
# this is a big download of all book packages
nltk.download("book")
# After downloading the resources we still need to import them
# Let's import all NLTK book resource (*)
from nltk.book import *
###Output
*** Introductory Examples for the NLTK Book ***
Loading text1, ..., text9 and sent1, ..., sent9
Type the name of the text or sentence to view it.
Type: 'texts()' or 'sents()' to list the materials.
text1: Moby Dick by Herman Melville 1851
text2: Sense and Sensibility by Jane Austen 1811
text3: The Book of Genesis
text4: Inaugural Address Corpus
text5: Chat Corpus
text6: Monty Python and the Holy Grail
text7: Wall Street Journal
text8: Personals Corpus
text9: The Man Who Was Thursday by G . K . Chesterton 1908
###Markdown
2) Exploring textual content
###Code
# text1, ... resources are of type nltk.Text (same as in the earlier example):
type(text1)
# We can run all methods that nltk.Text has.
# Count words:
print(text1.count("whale"))
# https://www.nltk.org/api/nltk.html#nltk.text.Text.concordance
# Print concordance view (occurences of a word, in context):
text1.concordance("discover")
text4.concordance("nation")
# https://www.nltk.org/api/nltk.html#nltk.text.Text.similar
# Print words that appear in similar context as "nation".
text4.similar("nation")
# https://www.nltk.org/api/nltk.html#nltk.text.Text.common_contexts
# Find contexts common to all given words
text1.common_contexts(["day", "night"])
###Output
that_, a_, every_, by_or that_; of_; the_previous by_, -_, of_. the_,
one_, all_. the_. this_in all_in the_before after_, the_wore
through_into
###Markdown
Side note: Python listsA *list* contains multiple values in an ordered sequence.More about Python lists:* https://automatetheboringstuff.com/chapter4/
###Code
# nltk.Text is also a list - can do everything we can do with lists (access parts of it, ...)
# What's the 1st occurence of "He" in the text?
# - note: Python is case sensitive (unless you take care of it - e.g. convert all text to lowercase)
print(text1.index("He"))
# The word at position #42
# - note: list indexes start from 0
print(text1[42])
print(text1[42:52])
###Output
['He', 'was', 'ever', 'dusting', 'his', 'old', 'lexicons', 'and', 'grammars', ',']
###Markdown
Further exploration* Dispersion plots (distribution of words throughout the text)* Generating text (based on example) Visualizing the corpus
###Code
# Dispersion plot
# source: Inaugural Address Corpus
text4.dispersion_plot(["citizens", "democracy", "duty", "freedom", "America"])
help(text4.dispersion_plot)
###Output
Help on method dispersion_plot in module nltk.text:
dispersion_plot(words) method of nltk.text.Text instance
Produce a plot showing the distribution of the words through the text.
Requires pylab to be installed.
:param words: The words to be plotted
:type words: list(str)
:seealso: nltk.draw.dispersion_plot()
###Markdown
Generating textNote: depending on your version of NLTK `generate()` functionality may or may not work (NLTK version 3.7.4 or newer is required).* In case it does not work, please see subsection "Saved version of generate() results".
###Code
# Generate text (based on example)
# https://www.nltk.org/api/nltk.html#nltk.text.Text.generate
# we need to supply seed words
text1.generate(text_seed = ["Why", "is", "it"])
###Output
Building ngram index...
###Markdown
---**NLTK `generate()` builds a [trigram] language model from the supplied text** (words are generated based on the previous two words). For more information see nltk.lm: https://www.nltk.org/api/nltk.lm.html **Saved version of `generate()` results:** `text1.generate(text_seed = ["Why", "is", "it"])`*Building ngram index...*
```
Why is it stripped off from some mountain torrent we had flip ? , so as to preserve all his might had in former years abounding with them , they toil with their lances , strange tales of Southern whaling . conceivable that this fine old Dutch Fishery , a most wealthy example of the sea - captain orders me to admire the magnanimity of the whole , and many whalemen , but dumplings ; good white cedar of the ship casts off her cables ; and chewed it noiselessly ; and though there are birds called grey albatrosses ; and yet faster
```
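For the curious, a rough sketch of building a similar trigram model directly with the `nltk.lm` API (illustrative only; this is not how `Text.generate()` is wired internally, and the toy sentences are assumptions):
```
# Sketch: train an order-3 (trigram) maximum-likelihood model on a tiny tokenized corpus
from nltk.lm import MLE
from nltk.lm.preprocessing import padded_everygram_pipeline

sentences = [['why', 'is', 'it', 'so'], ['it', 'is', 'so']]   # toy data
train_data, vocab = padded_everygram_pipeline(3, sentences)

lm = MLE(3)
lm.fit(train_data, vocab)
print(lm.generate(5, text_seed=['why', 'is'], random_seed=42))
```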
###Code
help(text1.generate)
###Output
Help on method generate in module nltk.text:
generate(length=100, text_seed=None, random_seed=42) method of nltk.text.Text instance
Print random text, generated using a trigram language model.
See also `help(nltk.lm)`.
:param length: The length of text to generate (default=100)
:type length: int
:param text_seed: Generation can be conditioned on preceding context.
:type text_seed: list(str)
:param random_seed: A random seed or an instance of `random.Random`. If provided,
makes the random sampling part of generation reproducible. (default=42)
:type random_seed: int
|
Clustering Models/DBSCAN/amazon-dbscan.ipynb | ###Markdown
Objective: Our main objective for this analysis is to train a model which can separate the positive and negative reviews. In this problem we will apply a clustering technique called DBSCAN (Density-Based Spatial Clustering of Applications with Noise) to get an idea of whether the data can be clustered based on the reviews the model finds to be similar. Importing Libraries & getting Data
###Code
import numpy as np
import pandas as pd
import sqlite3
import matplotlib.pyplot as plt
import seaborn as sns
import re
import math
import os
import string
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')
# Metrics libraries
from sklearn.metrics import accuracy_score ,confusion_matrix, roc_curve, auc ,roc_auc_score
# NLP libraries
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
import tensorflow as tf
from tensorflow.python.client import device_lib
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print("Num GPUs Available: ", len(
tf.config.experimental.list_physical_devices('GPU')))
print(device_lib.list_local_devices())
connection = sqlite3.connect(
'G:/My Drive/datasets/amazon food review/dataset/database.sqlite')
# ignoring datapoints where score = 3 (neutral review)
filtered_data = pd.read_sql_query("""
SELECT *
FROM Reviews
WHERE Score != 3
LIMIT 50000
""", connection)
# label encoding score into positive (x>3) and negative (x<3)
def partition(x):
if x < 3:
return 0
else:
return 1
score_with_neutral = filtered_data['Score']
score_wo_neutral = score_with_neutral.map(partition)
filtered_data['Score'] = score_wo_neutral
filtered_data.head()
filtered_data.info()
filtered_data.shape
display = pd.read_sql_query("""
SELECT UserId, ProductId ,ProfileName ,Time, Score ,Text ,COUNT(*)
FROM Reviews
GROUP BY UserId
HAVING COUNT(*) > 1
""", connection)
display.head()
display.shape
display['COUNT(*)'].sum()
###Output
_____no_output_____
###Markdown
Data Cleaning (Removing Duplicates)
###Code
display = pd.read_sql_query("""
SELECT *
FROM Reviews
WHERE Score != 3 AND UserId='#oc-R11D9D7SHXIJB9'
ORDER BY ProductId
""", connection)
display.head()
###Output
_____no_output_____
###Markdown
NOTE: - It can be seen that the same user has multiple reviews with the same values for all the parameters. - So, in order to get unbiased results, we need to remove the rows having the same parameters. - Steps to do so: - First sort the data according to ProductId in ascending order. - Then keep only the first of the duplicate reviews and delete the others.
###Code
sorted_data = filtered_data.sort_values(
'ProductId', axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last')
clean_data = sorted_data.drop_duplicates(
subset={"UserId", "ProfileName", "Time", "Text"}, keep='first', inplace=False)
clean_data.shape
###Output
_____no_output_____
###Markdown
Data Preprocessing Steps: 1. Begin by removing the HTML tags 2. Remove any punctuations or limited set of special characters like , or . etc 3. Check if the word is made up of english letters and is not alpha-numeric 4. Check to see if the length of the word is greater than 2 (as it was researched that there is no adjective in 2-letters) 5. Convert the words to lowercase 6. Remove Stopwords 7. Stemming the word
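Note that step 7 (stemming) is not applied in the combined loop further below; if it were wanted, a minimal sketch using `PorterStemmer` (imported at the top of this notebook) could look like this, with a made-up example sentence:
```
# Illustrative sketch of step 7 (stemming); not part of the original pipeline
from nltk.stem import PorterStemmer

stemmer = PorterStemmer()
text = "dogs loves these flavored chips"
stemmed_text = ' '.join(stemmer.stem(word) for word in text.split())
print(stemmed_text)
```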
###Code
# sample reviews
review_1 = clean_data['Text'].values[0]
print(review_1)
print('---------------------------------------')
review_2 = clean_data['Text'].values[1]
print(review_2)
print('---------------------------------------')
review_3 = clean_data['Text'].values[2]
print(review_3)
###Output
My dogs loves this chicken but its a product from China, so we wont be buying it anymore. Its very hard to find any chicken products made in the USA but they are out there, but this one isnt. Its too bad too because its a good product but I wont take any chances till they know what is going on with the china imports.
---------------------------------------
Our dogs just love them. I saw them in a pet store and a tag was attached regarding them being made in China and it satisfied me that they were safe.
---------------------------------------
Why is this $[...] when the same product is available for $[...] here?<br />http://www.amazon.com/VICTOR-FLY-MAGNET-BAIT-REFILL/dp/B00004RBDY<br /><br />The Victor M380 and M502 traps are unreal, of course -- total fly genocide. Pretty stinky, but only right nearby.
###Markdown
1. Removing URLs
###Code
review_1 = re.sub('http\S+', "", review_1)
review_2 = re.sub('http\S+', "", review_2)
review_3 = re.sub('http\S+', "", review_3)
###Output
_____no_output_____
###Markdown
2. Removing HTML tags
###Code
from bs4 import BeautifulSoup
soup = BeautifulSoup(review_1, 'lxml')
review_1 = soup.get_text()
soup = BeautifulSoup(review_2, 'lxml')
review_2 = soup.get_text()
soup = BeautifulSoup(review_3, 'lxml')
review_3 = soup.get_text()
###Output
_____no_output_____
###Markdown
3. Removing Apostrophes
###Code
def removing_apostrophes(text):
    # specific contractions first, so the general "n't" rule does not turn "won't" into "wo not"
    text = re.sub("won't", "will not", text)
    text = re.sub("can\'t", "can not", text)
    # general
    text = re.sub("n\'t", " not", text)
    text = re.sub("\'re", " are", text)
    text = re.sub("\'s", " is", text)
    text = re.sub("\'d", " would", text)
    text = re.sub("\'ll", " will", text)
    text = re.sub("\'t", " not", text)
    text = re.sub("\'ve", " have", text)
    text = re.sub("\'m", " am", text)
    return text
# isn't gets converted to 'is not' by the general n't rule
review_3 = removing_apostrophes(review_3)
print(review_3)
###Output
Why is this $[...] when the same product is available for $[...] here? />The Victor M380 and M502 traps are unreal, of course -- total fly genocide. Pretty stinky, but only right nearby.
###Markdown
4. Removing Numbers / Punctuations /Special Characters
###Code
# removing numbers
review_1 = re.sub('\S*\d\S*', "", review_1).strip()
print(review_1)
print('-----------------------------------------')
# removing special characters
review_3 = re.sub('[^A-Za-z0-9]+', ' ', review_3)
print(review_3)
###Output
My dogs loves this chicken but its a product from China, so we wont be buying it anymore. Its very hard to find any chicken products made in the USA but they are out there, but this one isnt. Its too bad too because its a good product but I wont take any chances till they know what is going on with the china imports.
-----------------------------------------
Why is this when the same product is available for here The Victor M380 and M502 traps are unreal of course total fly genocide Pretty stinky but only right nearby
###Markdown
5. Removing Stopwords
###Code
# removing the following words from the stop words list: 'no', 'nor', 'not'
stopwords = set(['br', 'the', 'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've",
"you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself',
'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their',
'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those',
'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does',
'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of',
'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after',
'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further',
'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',
'most', 'other', 'some', 'such', 'only', 'own', 'same', 'so', 'than', 'too', 'very',
's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're',
've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn',
"hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn',
"mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't",
'won', "won't", 'wouldn', "wouldn't"])
###Output
_____no_output_____
###Markdown
Combining all Steps
###Code
from tqdm import tqdm
preprocessed_reviews = []
for text in tqdm(clean_data['Text'].values):
text = re.sub('http\S+', "", text)
text = BeautifulSoup(text, 'lxml').get_text()
text = removing_apostrophes(text)
text = re.sub('\S*\d\S*', "", text).strip()
text = re.sub('[^A-Za-z0-9]+', ' ', text)
text = ' '.join(i.lower()
for i in text.split() if i.lower() not in stopwords)
preprocessed_reviews.append(text.strip())
###Output
100%|██████████| 46072/46072 [00:24<00:00, 1863.07it/s]
###Markdown
Splitting into train and test sets
###Code
preprocessed_reviews = preprocessed_reviews[:10000]
X = preprocessed_reviews
y = np.array(clean_data['Score'])
###Output
_____no_output_____
###Markdown
WordCloud
###Code
def wordcloud(model, optimal_K, X):
labels = model.labels_
cluster_dict = {i: np.where(labels == i)[0] for i in range(optimal_K)}
for cluster_number in range(optimal_K):
cluster = [cluster_dict[cluster_number][i]
for i in range(cluster_dict[cluster_number].size)]
reviews_cluster = []
for i in cluster:
reviews_cluster.append(X[i])
review_corpus = ''
for review in reviews_cluster:
review_corpus = review_corpus + ' ' + review
# wordcloud
wordclouds = WordCloud(width=800, height=400, margin=2, scale=1, max_words=75, min_font_size=5, random_state=42,
background_color='black', contour_color='black', repeat=False).generate(str(review_corpus))
plt.figure(figsize=(16, 8))
plt.title("WordCloud for Cluster {}".format(cluster_number))
plt.imshow(wordclouds, interpolation='bilinear')
plt.axis('off')
plt.show()
###Output
_____no_output_____
###Markdown
Models
###Code
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors
from wordcloud import WordCloud
###Output
_____no_output_____
###Markdown
1. AVG-W2V
###Code
#word2vec for train data
sent_list_train = []
sent_vectors_train = []
for sentence in X:
sent_list_train.append(sentence.split())
w2v_model = Word2Vec(sent_list_train, min_count=5, vector_size=50, workers=4)
w2v_words = list(w2v_model.wv.index_to_key)
for sent in tqdm(sent_list_train):
sent_vectors = np.zeros(50)
count_words = 0
for word in sent:
if word in w2v_words:
vectors = w2v_model.wv[word]
sent_vectors += vectors
count_words += 1
if count_words != 0:
sent_vectors /= count_words
sent_vectors_train.append(sent_vectors)
print(len(sent_vectors_train))
print(len(sent_vectors_train[0]))
scaler = StandardScaler()
data_avg = scaler.fit_transform(sent_vectors_train)
min_pts = 100
###Output
_____no_output_____
###Markdown
1.1 Finding value of epsilon (AVG-W2V)
###Code
# calculating the distance to the nearest n-points for each point
distance = []
for i in data_avg:
value = np.sort(np.sum((data_avg-i)**2 ,axis=-1))
distance.append(value[min_pts])
final_epsilon = np.sqrt(np.array(distance))
# sorting
sorted_distance = np.sort(final_epsilon)
points = [point for point in range(len(sent_vectors_train))]
plt.figure(figsize=(10,5))
plt.plot(points, sorted_distance)
plt.title('K-Distance Graph AVG-W2V')
plt.xlabel('Data-Points sorted by Distance')
plt.ylabel('eps')
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
The point of maximum curvature in the above given K-distance graph is : 8
###Code
optimal_eps_avg_w2v = 8
###Output
_____no_output_____
###Markdown
1.2 Model-Building (AVG-W2V)
###Code
model_avg_w2v = DBSCAN(eps=optimal_eps_avg_w2v)
model_avg_w2v.fit(data_avg)
y_pred_avg_w2v = model_avg_w2v.fit_predict(data_avg)
###Output
_____no_output_____
###Markdown
1.3 Cluster Analysis (AVG-W2V)
###Code
cluster_num = pd.DataFrame(y_pred_avg_w2v, columns=['Cluster_number'])
cluster_num
new_data = pd.concat([clean_data, cluster_num], axis=1)
new_data.head()
new_data.groupby(['Cluster_number'])['Text'].count()
plt.bar([x for x in range(3)], new_data.groupby(['Cluster_number'])['Text'].count(), alpha=0.5)
plt.title('Number of Reviews per Cluster')
plt.grid()
plt.xlabel('Cluster Number')
plt.ylabel('Number of Reviews')
plt.show()
###Output
_____no_output_____
###Markdown
1.4 WordCloud (AVG-W2V)
###Code
wordcloud(model_avg_w2v ,1 ,X)
###Output
_____no_output_____
###Markdown
2. TFIDF-W2V
###Code
sent_list_train = []
for sentence in X:
sent_list_train.append(sentence.split())
w2v_model = Word2Vec(sent_list_train ,min_count=5 ,vector_size=50 ,workers=4)
w2v_words = list(w2v_model.wv.index_to_key)
tfidf_vectors = TfidfVectorizer(ngram_range=(1,2) ,min_df=10 ,max_features=500)
tfidf_matrix = tfidf_vectors.fit_transform(X)
tfidf_features = tfidf_vectors.get_feature_names()
tfidf_dictionary = dict(zip(tfidf_vectors.get_feature_names() ,list(tfidf_vectors.idf_)))
#word2vec for train data
tfidf_sent_vectors_train = []
row = 0
for sent in tqdm(sent_list_train):
sent_vectors = np.zeros(50)
weight_sum = 0
for word in sent:
if word in w2v_words and word in tfidf_features:
vectors = w2v_model.wv[word]
tfidf = tfidf_dictionary[word]*(sent.count(word) / len(sent))
sent_vectors += (vectors * tfidf)
weight_sum += tfidf
if weight_sum != 0:
sent_vectors /= weight_sum
tfidf_sent_vectors_train.append(sent_vectors)
row +=1
scaler = StandardScaler()
data_tfidf = scaler.fit_transform(tfidf_sent_vectors_train)
min_pts = 100
###Output
_____no_output_____
###Markdown
2.1 Finding value of epsilon (TFIDF-W2V)
###Code
# calculating the distance to the nearest n points for each point
distance = []
for i in data_tfidf:
value = np.sort(np.sum((data_tfidf-i)**2, axis=-1))
distance.append(value[min_pts])
final_epsilon = np.sqrt(np.array(distance))
# sorting
sorted_distance = np.sort(final_epsilon)
points = [point for point in range(len(tfidf_sent_vectors_train))]
plt.figure(figsize=(10, 5))
plt.plot(points, sorted_distance)
plt.title('K-Distance Graph TFIDF-W2V')
plt.xlabel('Data Points sorted by Distance')
plt.ylabel('eps')
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
The point of maximum curvature in the above given K-distance graph is : 7
###Code
optimal_eps_tfidf_w2v = 7
###Output
_____no_output_____
###Markdown
2.2 Model-Building (TFIDF-W2V)
###Code
model_tfidf_w2v = DBSCAN(eps=optimal_eps_tfidf_w2v)
model_tfidf_w2v.fit(data_tfidf)
y_pred_tfidf_w2v = model_tfidf_w2v.fit_predict(data_tfidf)
###Output
_____no_output_____
###Markdown
2.3 Cluster Analysis (TFIDF-W2V)
###Code
cluster_num = pd.DataFrame(y_pred_tfidf_w2v, columns=['Cluster_number'])
cluster_num
new_data = pd.concat([clean_data, cluster_num], axis=1)
new_data.head()
new_data.groupby(['Cluster_number'])['Text'].count()
plt.bar([x for x in range(3)], new_data.groupby(['Cluster_number'])['Text'].count(), alpha=0.5)
plt.title('Number of Reviews per Cluster')
plt.grid()
plt.xlabel('Cluster Number')
plt.ylabel('Number of Reviews')
plt.show()
###Output
_____no_output_____
###Markdown
2.4 WordCloud (TFIDF-W2V)
###Code
wordcloud(model_tfidf_w2v ,1 ,X)
###Output
_____no_output_____
###Markdown
Model Comparison 1. Silhouette Score
###Code
from sklearn.metrics import silhouette_score
avg_w2v_score = silhouette_score(data_avg, y_pred_avg_w2v)
print('Silhoutte Score for AVG-W2V is : {}'.format(avg_w2v_score))
tfidf_w2v_score = silhouette_score(data_tfidf, y_pred_tfidf_w2v)
print('Silhoutte Score for TFIDF-W2V is : {}'.format(tfidf_w2v_score))
###Output
Silhoutte Score for AVG-W2V is : 0.4228260860985999
Silhoutte Score for TFIDF-W2V is : 0.28778599498728535
###Markdown
2. Creating a prettytable to display the comparison
###Code
from prettytable import PrettyTable
x = PrettyTable()
x.title = 'DBSCAN'
x.field_names = ['Vectorizer', 'Optimal-Clusters', 'Silhoutte-Score']
x.add_row(['AVG-W2V', 4, 0.4228])
x.add_row(['TFIDF-W2V', 3, 0.2877])
x.sortby = 'Silhoutte-Score'
x.reversesort = True
print(x)
###Output
+-------------------------------------------------+
| DBSCAN |
+------------+------------------+-----------------+
| Vectorizer | Optimal-Clusters | Silhoutte-Score |
+------------+------------------+-----------------+
| AVG-W2V | 4 | 0.4228 |
| TFIDF-W2V | 3 | 0.2877 |
+------------+------------------+-----------------+
|
others/fitting.ipynb | ###Markdown
Regression problem Given dataset$$\{(x_i, y_i)\}_{i=0}^{N-1}$$where$$y_i = f(x_i)$$where $f(x)$ is the underlying function (ground truth / target function). Regression problem is to find parameters $\theta$ from a model function $f_{\theta}(x)$ that minimize the following least square error:$$\min_{\theta} L(\theta) = \min_\theta \frac{1}{2N}\sum_{i=0}^{N-1} \left(f_{\theta}(x_i) - y_i \right)^2$$ $$\theta^{k+1} = \theta^k - t \nabla_{\theta}L(\theta^k)$$$$\nabla_{\theta}L(\theta) = \frac{1}{N} \sum_{i=0}^{N-1} \left(f_{\theta}(x_i) - y_i \right) \nabla_{\theta}f_\theta(x_i)$$ Linear RegressionModel function:$$f_{\theta}(x) = wx + b$$$$\theta = [w, b]$$
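The code below also implements a `newton_step`; for least squares, the (Gauss-Newton) direction replaces the plain gradient step with
$$H \;=\; \frac{1}{N}\sum_{i=0}^{N-1} \nabla_{\theta} f_{\theta}(x_i)\,\nabla_{\theta} f_{\theta}(x_i)^{\top},\qquad \theta^{k+1} \;=\; \theta^{k} - t\, H^{-1}\,\nabla_{\theta}L(\theta^{k}),$$
which is exact Newton here because $f_{\theta}$ is linear in $\theta$ (its second derivatives with respect to $\theta$ vanish).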
###Code
def linear(w: float, b: float, x: np.ndarray):
return w * x + b
def grad_w(x):
return x
def grad_b(x):
return 1.0
def gd_step(params: tuple[float] | list[float], t: float, x: np.ndarray, y: np.ndarray):
wk, bk = params
w = wk - t * np.mean((linear(wk, bk, x) - y) * grad_w(x))
b = bk - t * np.mean((linear(wk, bk, x) - y) * grad_b(x))
return w, b
def newton_step(params, t, x, y):
wk, bk = params
gw = np.mean((linear(wk, bk, x) - y) * grad_w(x))
gb = np.mean((linear(wk, bk, x) - y) * grad_b(x))
H = np.zeros((2, 2))
H[0, 0] = np.mean(grad_w(x) * grad_w(x))
H[0, 1] = np.mean(grad_w(x) * grad_b(x))
H[1, 0] = np.mean(grad_b(x) * grad_w(x))
H[1, 1] = np.mean(grad_b(x) * grad_b(x))
# [gw, gb] shape: (2,)
# np.dot(np.stack([gw, gb]), np.linalg.inv(H))
dwb = np.stack([gw, gb]).dot(np.linalg.inv(H))
return wk - t * dwb[0], bk - t * dwb[1]
###Output
_____no_output_____
###Markdown
Runge function Consider the function$$R(x) = \frac{1}{1+x^2}, \quad\quad x\in[-5,5]$$Let's plot the function first:
###Code
x = np.linspace(-5, 5, 100)
def R(x):
return 1.0 / (1.0 + x**2)
fig, ax = plt.subplots()
ax.set_title("Runge function", fontsize=18)
ax.set_xlabel(r"$x$", fontsize=14)
ax.set_ylabel(r"$f(x)$", fontsize=14)
ax.plot(x, R(x))
def fit(x, y, init_params, init_t, eps=1e-4, max_steps=10000):
params, t = init_params, init_t
for step in range(max_steps):
new_params = newton_step(params, t, x, y)
diff = np.asarray(new_params) - np.asarray(params)
if np.sqrt(np.sum(diff**2)) < eps:
return new_params, step
else:
params = new_params
return new_params, step
x = np.linspace(-1, 1, 100)
y = R(x)
plt.plot(x, y)
(w_m, b_m), num_steps = fit(x, y, [1.0, 1.0], 0.2)
num_steps
w_m, b_m
plt.plot(x, linear(w_m, b_m, x))
###Output
_____no_output_____
###Markdown
General Polynomial fitting with GD
###Code
def linear(params: np.ndarray, x: np.ndarray):
return params[0] * x + params[1]
def quadratic(params: np.ndarray, x: np.ndarray):
return params[0] * x**2 + params[1] * x + params[2]
# shape x: (N,), grad(x): (N, 2)
def grad_linear(x):
grad = np.stack([x, np.ones_like(x)], axis=-1)
return grad
# grad(x): (N, 3)
def grad_quad(x):
grad = np.stack([x**2, x, np.ones_like(x)], axis=-1)
return grad
def gd_step(params: np.ndarray, t: float, x: np.ndarray, y: np.ndarray):
params -= t * np.mean((quadratic(params, x) - y)[..., None] * grad_quad(x), axis=0)
return params
###Output
_____no_output_____
###Markdown
Create general polynomial model and their grads
###Code
for i in reversed(range(1, 3)):
print(i)
# Create a n-th order polynomial function
def create_poly_fn(n):
def poly_fn(params, x):
assert params.shape[0] == n + 1, f"Number of params should equal {n + 1}"
value, power = 0.0, 1.0
for i in range(n + 1):
value += params[n-i] * power
power *= x
return value
return poly_fn
def create_grad_poly_fn(n):
def grad_poly_fn(x):
stack_list = [x**(n-i) for i in range(n)] + [np.ones_like(x)]
grad = np.stack(stack_list, axis=-1)
return grad
return grad_poly_fn
# For example, create a 3rd-order polynomial function and its gradient function
n = 3
poly_fn = create_poly_fn(n)
grad_poly_fn = create_grad_poly_fn(n)
def gd_step(params: np.ndarray, t: float, x: np.ndarray, y: np.ndarray):
params -= t * np.mean((poly_fn(params, x) - y)[..., None] * grad_poly_fn(x), axis=0)
return params
def fit(x, y, init_params, init_t, eps=1e-4, max_steps=100000):
params, t = init_params, init_t
for step in range(max_steps):
old_params = params.copy()
params = gd_step(params, t, x, y)
if np.sqrt(np.sum((params - old_params)**2)) < eps:
return params, step
return params, step
x = np.linspace(0, 1, 100)
y = R(x)
plt.plot(x, y)
params_min, num_steps = fit(x, y, np.array([0.1, 2.0, 1.0, 0.0]), 0.1)
params_min, num_steps
plt.plot(x, poly_fn(params_min, x))
###Output
_____no_output_____ |
factor_analysis_using_pca.ipynb | ###Markdown
Example 9.3 (JW): Factor analysis of consumer preference data using the PCA method, starting from a given variance-covariance (varcovar) matrix, to estimate the factor model
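In symbols, the principal component solution computed below takes the first $m$ eigenpairs $(\lambda_i, e_i)$ of $\Sigma$ and sets
$$\tilde{L} = \big[\sqrt{\lambda_1}\,e_1 \;\; \sqrt{\lambda_2}\,e_2\big],\qquad h_i^2 = \sum_{j=1}^{m}\tilde{\ell}_{ij}^{\,2},\qquad \tilde{\psi}_i = 1 - h_i^2,$$
so that $\Sigma \approx \tilde{L}\tilde{L}' + \tilde{\Psi}$ (here with $m=2$).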
###Code
Sigma <- matrix(c(1,.02,.96,.42,.01,.02,1,.13,.71,.85,.96,.13,1,.5,.11,.42,.71,.5,1,.79,.01,.85,.11,.79,1), nrow=5, byrow=T)
Sigma
egs = eigen(Sigma)
V = egs$vectors
Lambdas = egs$values
D = diag(egs$values)
print('vectors')
print(V)
print('lambdas')
print(Lambdas)
print('D')
print(D)
# 5 eigen vectors
e1 <- V[,1]
e2 <- V[,2]
e3 <- V[,3]
e4 <- V[,4]
e5 <- V[,5]
# first two eigen values, lambda1 and lambda2
print(Lambdas)
l1 = Lambdas[1]
l2 = Lambdas[2]
# sum of lambdas is almost equal to p = 5
p = dim(Sigma)[1]
sum(Lambdas)
all.equal(p, sum(Lambdas))
# for m = 2 common factors, the cumulative proportion of variance accounted for is as follows
m = 2
sum(Lambdas[1:m]) / p
Lambdas[1:2]
# estimating factors
Fl1 = round(sqrt(Lambdas[1]) * e1, 2)
Fl2 = round(sqrt(Lambdas[2]) * e2, 2)
print(Fl1)
print(Fl2)
# calculate communalities
# h1 = Fl1^2 + Fl2^2 + ... + Flp^2
h2 = round((Fl1**2) + (Fl2**2),2)
h2
# specific variance, upper case epsilon
# Epsilon_i (i = 1 .. p) = 1 - hi ^ 2
Eps = round(1- h2,3)
Eps
###Output
_____no_output_____
###Markdown
now put it all together
###Code
# factor loading matrix with m = 2
L = matrix(cbind(Fl1, Fl2), ncol=2)
Epsilon_mat = diag(Eps)
Epsilon_mat
# Estimated Sigma
Est_Sigma = round(L %*% t(L) + Epsilon_mat,2)
print(Sigma)
print(Est_Sigma)
###Output
[,1] [,2] [,3] [,4] [,5]
[1,] 1.00 0.02 0.96 0.42 0.01
[2,] 0.02 1.00 0.13 0.71 0.85
[3,] 0.96 0.13 1.00 0.50 0.11
[4,] 0.42 0.71 0.50 1.00 0.79
[5,] 0.01 0.85 0.11 0.79 1.00
[,1] [,2] [,3] [,4] [,5]
[1,] 1.00 0.01 0.98 0.44 0.01
[2,] 0.01 1.00 0.12 0.79 0.90
[3,] 0.98 0.12 1.00 0.54 0.12
[4,] 0.44 0.79 0.54 1.00 0.81
[5,] 0.01 0.90 0.12 0.81 1.00
###Markdown
JW (9-19), p. 490: the sum of squared entries of the residual matrix is less than or equal to the sum of the left-out eigenvalues
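In symbols, with $m$ factors retained the residual matrix is $\Sigma - (\hat{L}\hat{L}' + \hat{\Psi})$, and the bound is usually stated as $$\sum_{i,j}\big(\sigma_{ij} - (\hat{L}\hat{L}' + \hat{\Psi})_{ij}\big)^2 \le \hat{\lambda}_{m+1}^2 + \cdots + \hat{\lambda}_p^2,$$ i.e. with the squares of the omitted eigenvalues; the quick check in the next cell compares against the plain sum $\hat{\lambda}_{m+1} + \cdots + \hat{\lambda}_p$, which is a looser threshold whenever those eigenvalues are below one.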
###Code
sum((Sigma - Est_Sigma)**2)
sum(Lambdas[(m+1):5])
sum((Sigma - Est_Sigma)**2) <= sum(Lambdas[(m+1):5])
pp <- list()
for (i in 1:p){
pp[[i]] <- (1 - (sum(Lambdas[1:i]) / sum(Lambdas)))
}
plot(matrix(pp), type='b')
p
###Output
_____no_output_____ |
00_introduction/HelloWorld.ipynb | ###Markdown
Hello World of TensorFlow**Note: These notebooks are using tensorflow 2.1**Read more about `eager execution` in TensorFlow; basically, it lets you run operations and get their results immediately, without building a graph and running it in a session.https://ai.googleblog.com/2017/10/eager-execution-imperative-define-by.html
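For contrast, a minimal eager-mode sketch (the TF 2.x default) needs no session; tensors evaluate immediately:
###Code
import tensorflow as tf

# Eager execution (default in TF 2.x): the constant can be inspected directly.
hello_eager = tf.constant('Hello World of TensorFlow')
print(hello_eager.numpy())
###Output
_____no_output_____
###Markdown
The cells below instead disable eager execution and run the graph explicitly through a `tf.compat.v1.Session`: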
###Code
'''
For notebooks, to install a specific version of TensorFlow
use !pip install tensorflow==x.x --user
or
%tensorflow_version x.x for execution in Google Colab
'''
import tensorflow as tf
# Check this issue : https://github.com/OlafenwaMoses/ImageAI/issues/400
tf.compat.v1.disable_eager_execution()
print("COMPILER_VERSION = %s\n\
GIT_VERSION = %s\n\
GRAPH_DEF_VERSION = %s\n\
GRAPH_DEF_VERSION_MIN_CONSUMER = %s\n\
GRAPH_DEF_VERSION_MIN_PRODUCER = %s\n\
VERSION = %s " % (tf.version.COMPILER_VERSION, tf.version.GIT_VERSION , tf.version.GRAPH_DEF_VERSION , tf.version.GRAPH_DEF_VERSION_MIN_CONSUMER , tf.version.GRAPH_DEF_VERSION_MIN_PRODUCER , tf.version.VERSION) )
# Get the available devices: 'CPU' or 'GPU'
gpu_devices = tf.config.list_physical_devices('GPU')
print("Number of GPUs: ", len(gpu_devices))
cpu_devices = tf.config.list_physical_devices('CPU')
print("Number of CPUs: ", len(cpu_devices))
# Simple 'Hello World' using tensorflow
hello = tf.constant('Hello World of TensorFlow')
with tf.compat.v1.Session() as sess:
print(sess.run(hello))
###Output
COMPILER_VERSION = 7.3.1 20180303
GIT_VERSION = v2.1.0-rc2-17-ge5bf8de
GRAPH_DEF_VERSION = 175
GRAPH_DEF_VERSION_MIN_CONSUMER = 0
GRAPH_DEF_VERSION_MIN_PRODUCER = 0
VERSION = 2.1.0
Number of GPUs:  0
Number of CPUs:  1
b'Hello World of TensorFlow'
|
notebooks/Bubble_plot_pathways.ipynb | ###Markdown
Bubble plot, using scatter-plot panels to visualize results from pathway analysisOne can include multiple panels and customize the size and colors of the scatter points.
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# these use mummichog output pathway .tsv files
# more files can be used
INDIR = "../inputdata/"
input_files = ['mcg_pathwayanalysis_A.tsv', 'mcg_pathwayanalysis_B.tsv' ]
# read pathway analysis result from mummichog
def get_data(f):
'''
Input data: pathway overlap_size pathway_size p-value
'''
mydict = {}
w = open(f).read().splitlines()
# first 50 lines are usually enough; extra lines may exist in the pathway export table
for line in w[1: 50]:
a = line.split('\t')
mydict[a[0]] = -np.log10(float(a[3])) # a[4] if mummichog v1 result
return mydict
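# Note: get_data stores -log10(p-value), so larger values correspond to more significant pathways.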
# read data
mydata = []
for ff in input_files: mydata.append( get_data(INDIR + ff) )
# manually input the pathway list.
#Edit below for different set/order of pathways
selected_pathways = '''Tryptophan metabolism
Glycosphingolipid metabolism
Methionine and cysteine metabolism
Ascorbate (Vitamin C) and Aldarate Metabolism
Glycerophospholipid metabolism
Galactose metabolism
Sialic acid metabolism
Alanine and Aspartate Metabolism
Carnitine shuttle
Purine metabolism'''
selected_pathways = [x.strip() for x in selected_pathways.splitlines()]
selected_pathways.reverse()
# plot
fig, myaxes = plt.subplots(figsize=(8, 6), nrows=1, ncols=2)
Y = range( len(selected_pathways) )
#
# plt.yticks(range(len(selected_pathways)), selected_pathways)
for ii in range(2):
X = [mydata[ii].get(path, 0) for path in selected_pathways]
print(X)
myaxes[ii].set_xlim( 1, 4.5 )
myaxes[ii].set_xticks(range(1, 5))
myaxes[ii].set_yticks(range(len(selected_pathways)))
myaxes[ii].set_yticklabels([]*len(selected_pathways))
#myaxes[ii].set_ylim( 0, 0.5+len(selected_pathways))
myaxes[ii].grid(True, color='w', linestyle='-', linewidth=2, alpha=0.3, zorder=0)
    myaxes[ii].set_facecolor('0.9')  # set_axis_bgcolor was removed in newer Matplotlib
# change styles below. s stands for size of dots
myaxes[ii].scatter( X, Y, s=[30*x**2 for x in X], c='blue', linewidths =0, alpha=1, zorder=3)
myaxes[0].set_yticklabels(selected_pathways)
plt.tight_layout()
plt.show()
# comment out the last line and use this line to save file
# plt.savefig('good-bubbles-.pdf')
###Output
[0.92790713095172228, 1.0413561944231713, 1.0445791738300669, 1.0445791738300669, 1.0445791738300669, 1.1311007830435784, 1.1757629527667233, 1.289542244951507, 1.3047314435519113, 1.3748797380483697]
[3.0975059479977372, 2.1869221771157168, 1.0590604583847831, 2.9939513376770353, 0, 1.4655471617108637, 2.7852157391243377, 1.6082068795479776, 2.9235601862855152, 3.1669594021441387]
|
03 - Working with NumPy/notebooks/02-Array-Creation-Function.ipynb | ###Markdown
Array Creation Function
###Code
# import numpy
import numpy as np
###Output
_____no_output_____
###Markdown
Generate arrays using `zeros()`- Returns an array of given shape and type filled with zeros - **Syntax:** `np.zeros(shape, dtype)` - shape - integer or sequence of integers - dtype - data type(default: float)
###Code
# 1D array of length 10 with all values 0
Z1 = np.zeros(10)
print(Z1)
# 2D array of 3x4 with all values 0
Z2 = np.zeros((3,4))
print(Z2)
Z2.shape
###Output
_____no_output_____
###Markdown
Generate arrays using `ones()`- Returns an array of given shape and type filled with ones - **Syntax:** `np.ones(shape, dtype)` - shape - integer or sequence of integers - dtype - data type(default: float)
###Code
# 1D array of length 10 with all values 1
A1 = np.ones(10)
print(A1)
###Output
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
###Markdown
__Note__- Rows = 3 - Columns = 4
###Code
# 2D array of 3x4 with all values 1
A2 = np.ones((3,4))
A2
print(A2)
###Output
[[1. 1. 1. 1.]
[1. 1. 1. 1.]
[1. 1. 1. 1.]]
###Markdown
Generate arrays using `arange()`- Returns equally spaced numbers with in the given range based on step size. - **Syntax:** `np.arange(start, stop, step)` - start- starts of interval range - stop - end of interval range ' - step - step size of interval
###Code
list(range(10))
list(range(11))
list(range(1, 10))
list(range(1, 11))
list(range(1, 20, 2))
# not specify start and step
A1 = np.arange(10)
print(A1)
A = np.arange(1, 10, 2)
A
# specifying start and step
A2 = np.arange(start=1, stop=10, step=2)
print(A2)
# another way
A3 = np.arange(10, 25, 2)
print(A3)
###Output
[10 12 14 16 18 20 22 24]
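###Markdown
Note: with a float step, `arange` can behave unexpectedly at the endpoint because of rounding, so when the exact number of samples matters, `linspace` (next section) is usually the safer choice. A quick illustration:
###Code
# float step: whether the endpoint appears depends on floating-point rounding
print(np.arange(0, 1, 0.1))
# fixed number of samples instead
print(np.linspace(0, 1, 11))
###Output
_____no_output_____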
###Markdown
Generate arrays using `linspace()`- Returns equally spaced numbers within the given range based on the sample number. - **Syntax:** `np.linspace(start, stop, num, dtype, retstep)` - start-start of interval range - stop-end of the interval range - num- number of samples to be generated - dtype-type of output array - retstep-return the samples, step values
###Code
# array of 50 evenly spaced values from 0 to 2
L1 = np.linspace(0,2,50)
print(L1)
# Array of 6 evenly divided values from 0 to 100
L2 = np.linspace(0, 100, 6)
print(L2)
# Array of 1 to 5
L3 = np.linspace(start=1, stop=5, endpoint=True, retstep=False)
print(L3)
# Array of 1 to 5
L4 = np.linspace(start=1, stop=5, endpoint=True, retstep=True)
print(L4)
###Output
(array([1. , 1.08163265, 1.16326531, 1.24489796, 1.32653061,
1.40816327, 1.48979592, 1.57142857, 1.65306122, 1.73469388,
1.81632653, 1.89795918, 1.97959184, 2.06122449, 2.14285714,
2.2244898 , 2.30612245, 2.3877551 , 2.46938776, 2.55102041,
2.63265306, 2.71428571, 2.79591837, 2.87755102, 2.95918367,
3.04081633, 3.12244898, 3.20408163, 3.28571429, 3.36734694,
3.44897959, 3.53061224, 3.6122449 , 3.69387755, 3.7755102 ,
3.85714286, 3.93877551, 4.02040816, 4.10204082, 4.18367347,
4.26530612, 4.34693878, 4.42857143, 4.51020408, 4.59183673,
4.67346939, 4.75510204, 4.83673469, 4.91836735, 5. ]), 0.08163265306122448)
###Markdown
__Specifying Endpoint__- `endpoint=True`, include 5 - `endpoint=False`, exclude 5 __Specifying Retstep__- `retstep=False`, doesn't return the step value- `retstep=True`, returns the samples as well as the step value Generate arrays using `logspace()`- Returns equally spaced numbers within the given range based on the log scale. - **Syntax:** `np.logspace(start, stop, num, endpoint, base, dtype, retstep)` - start- start of the sequence - stop- end of the sequence - num- number of samples to be generated(default: 50) - dtype- type of output array - retstep- return the samples, step values - endpoint - if true, stop is the last sample - base - base of the log space(default: 10.0)
###Code
# generate an array with 5 samples with base 10.0
np.logspace(1, 10, num=5, endpoint=True)
# generate an array with 5 samples with base 2.0
np.logspace(1, 10, num=5, endpoint=True, base=2.0)
###Output
_____no_output_____
###Markdown
Generate constant arrays using `full()` - Return a new array of given shape and type, filled with `fill_value`. - **Syntax:** `np.full(shape,fill_value, dtype)` - shape - Shape of the new array, e.g., ``(2, 3)`` or ``2``. - fill_value - Fill value(scaler). - dtype - The desired data-type for the array
###Code
# generate 2x2 constant array, constant = 7
C = np.full((2, 2), 7)
print(C)
###Output
[[7 7]
[7 7]]
###Markdown
Creating identity matrix using `eye()`- An array where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one- **Syntax:** `np.eye(N, M, k, dtype)` - N : Number of rows(int) in the output - M : Number of columns in the output. If None, defaults to `N`. - k : Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal - dtype: Data-type of the returned array.
###Code
# generate 2x2 identity matrix
I = np.eye(2)
print(I)
###Output
[[1. 0.]
[0. 1.]]
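###Markdown
The `k` offset described above shifts the diagonal of ones; for example, `k=1` places them on the first superdiagonal:
###Code
# 3x3 matrix with ones on the first superdiagonal (k=1)
I_upper = np.eye(3, k=1)
print(I_upper)
###Output
_____no_output_____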
###Markdown
Generate arrays using random.rand() - Returns an array of given shape filled with random values. - **Syntax:** `np.random.rand(shape)` - shape - integer or sequence of integer
###Code
# create an array with randomly generated 5 values
R = np.random.rand(5)
print(R)
# generate 2x2 array of random values
R1 = np.random.random((2, 2))
print(R1)
# generate 4x5 array of random floats between 0-1
R2 = np.random.rand(4,5)
print(R2)
# generate 6x7 array of random floats between 0-100
R3 = np.random.rand(6,7)*100
print(R3)
# generate 2x3 array of random ints between 0-4
R4 = np.random.randint(5, size=(2,3))
print(R4)
###Output
[[3 3 1]
[4 3 0]]
###Markdown
Generate empty arrays using `empty()`- Return a new array of given shape and type, without initializing entries.- **Syntax:** `np.empty(shape, dtype)` - shape - integer or tuple of integer - dtype - data-type
###Code
# generate an empty array
E1 = np.empty(2)
print(E1)
# 2x2 empty array
E2 = np.empty((2, 2))
print(E2)
###Output
_____no_output_____
###Markdown
Arrays using specific data type - float16- float32 - int8__SEE MORE__- https://numpy.org/devdocs/user/basics.types.html
###Code
# generate an array of floats
D = np.ones((2, 3, 4), dtype=np.float16)
D
###Output
_____no_output_____ |
KRX_stock.ipynb | ###Markdown
Fetching KRX (한국거래소, Korea Exchange) informationStock data is another useful source when gathering information, so here we briefly cover how to download the stock listing from KRX as a CSV and then extract and save only the columns we need. 1. Access KRX - [KRX (Korea Exchange)](http://www.krx.co.kr/main/main.jsp) 2. Download the stock information 3. Load the data
###Code
import pandas as pd
stock = pd.read_csv("data_2702_20211201.csv", encoding = "cp949")
stock
###Output
_____no_output_____
###Markdown
4. Extract the required columns
###Code
stock = stock[["종목코드","종목명","시장구분"]]
stock
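# Column meanings: 종목코드 = ticker code, 종목명 = company name, 시장구분 = market segment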
# stock.to_csv("stock_list.csv", index = False)
###Output
_____no_output_____
###Markdown
5. Extract market data- KOSDAQ: 1531 companies- KOSPI: 941 companies- KONEX: 132 companies
###Code
stock.groupby(["시장구분"]).count()
###Output
_____no_output_____
###Markdown
6. Save to CSV
###Code
stock.to_csv("stock_list.csv", index = False)
###Output
_____no_output_____ |
notebooks/scrapers/earth911/Indeed_Job_Scraper.ipynb | ###Markdown
Indeed Job ScraperThe tobias-fyi fork of the repository can be found here: [tobias-fyi/Indeed-Job-Scraper](https://github.com/tobias-fyi/Indeed-Job-Scraper/)------
###Code
# ====== Imports ====== #
from bs4 import BeautifulSoup
import requests, json
import pandas as pd
from multiprocessing import Pool
from functools import partial
# ====== Scraping parameters ====== #
parameters = {
    "search_query": "data science",
    "location": "Denver, CO",
    "miles": 50,
    "ordered_keywords": ["data"],  # list of keywords, ranked most- to least-important
    "exclude_keywords": [],
    "title_keywords": [],
    "pages": 1,
}
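# Parameter notes (added): "ordered_keywords" should be a list ranked by importance, since
# earlier keywords contribute more to a job's rating; "pages" is the number of Indeed result
# pages to scrape (the scraper advances the "start" query parameter by 10 per page).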
# ====== Class for scraping Indeed ====== #
# ====== With multiprocessing ====== #
class Scrape:
def __init__(self, parameters):
self.output_frame = None
self.loading = False
# Retain parameter dict
self.parameters = parameters
# Create base Indeed URL for all further scraping
self.what = parameters["search_query"]
self.where = parameters["location"]
self.miles = parameters["miles"]
self.base_url = f"https://www.indeed.com/jobs?q={self.what}&l={self.where}"
# Set other attributes
self.keywords = parameters["ordered_keywords"]
self.title_keywords = parameters["title_keywords"]
self.exclude_keywords = parameters["exclude_keywords"]
self.pages = parameters["pages"]
self.total_keywords = len(self.keywords) + len(self.title_keywords)
def rate_job(self, j_title, j_soup):
"""Rate job based on input parameters."""
description = j_soup.find(id="jobDescriptionText").get_text()
# Reset the params
keywords_present = []
title_keywords_present = []
rating = 0
# Check for keyword, add value to rating depending on ranking
for index, keyword in enumerate(self.keywords):
if keyword in description:
                rating += len(self.keywords) - index
keywords_present.append(keyword)
# Check for title keywords
for index, keyword in enumerate(self.title_keywords):
if keyword in j_title:
                rating += self.total_keywords - index
title_keywords_present.append(keyword)
# Normalise rating
rating = rating / sum(range(1, self.total_keywords + 1))
# Check for excluded keywords
for keyword in self.exclude_keywords:
if keyword in j_title:
rating = 0
break
return description, rating, keywords_present, title_keywords_present
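    # Worked example (added, illustrative): with ordered_keywords ["python", "sql", "aws"]
    # and no title keywords, a description containing "python" and "aws" scores
    # (3 - 0) + (3 - 2) = 4, which is then normalised by sum(range(1, 4)) = 6, giving ~0.67.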
def get_job_details(self, job):
"""Obtain details of the job (company, title, description etc.)"""
# Get link and title
job_url = job.find(class_="title").a["href"]
job_url = ( # Correct for truncated URLs
"https://www.indeed.com" + job_url if (job_url.startswith("/")) else job_url
)
job_page = requests.get(job_url)
job_soup = BeautifulSoup(job_page.content, "html.parser")
# Give URL after redirect (ads/analytics etc.)
job_url = job_page.url
# Get job title and company name
title = job.find(class_="title").a["title"]
company = job_soup.find(class_="icl-u-lg-mr--sm").get_text()
# Get description, rating and present keywords
description, rating, keywords_present, title_keywords_present = self.rate_job(
title, job_soup
)
return (
title,
company,
job_url,
description,
rating,
keywords_present,
title_keywords_present,
)
def parallel_scrape(self, parameters, url, page_num):
"""Parallel scraping routine. Run through MapPool using Multiprocessing library"""
# Get page
current_page = requests.get(url, timeout=5)
page_soup = BeautifulSoup(current_page.content, "html.parser")
page_output = []
# Parse every job in page
for job in page_soup.select(".jobsearch-SerpJobCard"):
(
title,
company,
url,
description,
rating,
keywords_present,
title_keywords_present,
) = self.get_job_details(job)
page_output.append(
[
rating,
title,
company,
description,
url,
str(keywords_present),
str(title_keywords_present),
page_num,
]
)
return page_output
def get_scrape(self):
"""Primary method for obtaining scraped jobs."""
# Reset output and progress
self.loading = True
# Output list and frame
output = []
# Create pool of workers
pool = Pool(min(self.pages, 5))
# Dirty list comprehension to create argument list for pool workers
pool_args = [
(self.base_url + "&start=" + str(x * 10), x + 1)
if (x != 0)
else (self.base_url, x + 1)
for x in range(0, self.pages)
]
# Get output of pool workers
output = pool.starmap(partial(self.parallel_scrape, self.parameters), pool_args)
output = [x for sublist in output for x in sublist]
# Create dataframe from list of jobs
df_output_frame = (
pd.DataFrame(
output,
columns=[
"Rating",
"Job Title",
"Company",
"Description",
"Job URL",
"Keywords Present",
"Title Keywords",
"Page Found",
],
)
.sort_values(by="Rating", ascending=False)
.reset_index(drop=True)
)
# Sort df by rating
df_output_frame["Rating"] = df_output_frame["Rating"].round(decimals=3)
df_output_frame = df_output_frame.drop_duplicates(
subset=["Rating", "Job Title", "Company"]
)
self.loading = False
return df_output_frame
# For outputting to csv locally
def output_csv(self, df):
df.to_csv("indeed_job_scraper.csv", index=False)
# ====== Class for scraping Indeed ====== #
# ====== Without multiprocessing ====== #
class Scrape:
def __init__(self, parameters):
self.output_frame = None
self.loading = False
# Retain parameter dict
self.parameters = parameters
# Create base Indeed URL for all further scraping
self.what = parameters["search_query"]
self.where = parameters["location"]
self.miles = parameters["miles"]
self.base_url = f"https://www.indeed.com/jobs?q={self.what}&l={self.where}"
# Set other attributes
self.keywords = parameters["ordered_keywords"]
self.title_keywords = parameters["title_keywords"]
self.exclude_keywords = parameters["exclude_keywords"]
self.pages = parameters["pages"]
self.total_keywords = len(self.keywords) + len(self.title_keywords)
def rate_job(self, j_title, j_soup):
"""Rate job based on input parameters."""
description = j_soup.find(id="jobDescriptionText").get_text()
# Reset the params
keywords_present = []
title_keywords_present = []
rating = 0
# Check for keyword, add value to rating depending on ranking
for index, keyword in enumerate(self.keywords):
if keyword in description:
rating += len(self.keywords) - index
keywords_present.append(keyword)
# Check for title keywords
for index, keyword in enumerate(self.title_keywords):
if keyword in j_title:
rating += self.total_keywords - index
title_keywords_present.append(keyword)
# Normalise rating
rating = rating / sum(range(1, self.total_keywords + 1))
# Check for excluded keywords
for keyword in self.exclude_keywords:
if keyword in j_title:
rating = 0
break
return description, rating, keywords_present, title_keywords_present
def get_job_details(self, job):
"""Obtain details of the job (company, title, description etc.)"""
# Get link and title
job_url = job.find(class_="title").a["href"]
job_url = ( # Correct for truncated URLs
"https://www.indeed.com" + job_url if (job_url.startswith("/")) else job_url
)
job_page = requests.get(job_url)
job_soup = BeautifulSoup(job_page.content, "html.parser")
# Give URL after redirect (ads/analytics etc.)
job_url = job_page.url
# Get job title and company name
title = job.find(class_="title").a["title"]
company = job_soup.find(class_="icl-u-lg-mr--sm").get_text()
# Get description, rating and present keywords
description, rating, keywords_present, title_keywords_present = self.rate_job(
title, job_soup
)
return (
title,
company,
job_url,
description,
rating,
keywords_present,
title_keywords_present,
)
def get_scrape(self):
"""Primary method for obtaining scraped jobs."""
# Reset output and progress
self.loading = True
# Output list and frame
output = []
for x in range(0, self.pages):
if x == 0:
page_append = ""
else:
page_append = "&start=" + str(x * 10)
# get page
current_page = requests.get(self.base_url + page_append, timeout=5)
page_soup = BeautifulSoup(current_page.content, "html.parser")
for job in page_soup.select(".jobsearch-SerpJobCard"):
(
title,
company,
url,
description,
rating,
keywords_present,
title_keywords_present,
) = self.get_job_details(job)
output.append(
[
rating,
title,
company,
description,
url,
keywords_present,
title_keywords_present,
x + 1,
]
)
print(f"Page {x+1} completed", end="\r")
# Create dataframe from list of jobs
df_output_frame = (
pd.DataFrame(
output,
columns=[
"Rating",
"Job Title",
"Company",
"Description",
"Job URL",
"Keywords Present",
"Title Keywords",
"Page Found",
],
)
.sort_values(by="Rating", ascending=False)
.reset_index(drop=True)
)
# Sort df by rating
df_output_frame["Rating"] = df_output_frame["Rating"].round(decimals=3)
df_output_frame = df_output_frame.drop_duplicates(
subset=["Rating", "Job Title", "Company"]
)
self.loading = False
return df_output_frame
# For outputting to csv locally
def output_csv(self, df):
df.to_csv("indeed_job_scraper.csv", index=False)
###Output
_____no_output_____
###Markdown
Time to let 'er rip!
###Code
# First, instantiate a scraper object with the parameters
scraper = Scrape(parameters)
# Then, use the `.get_scrape()` method to use the instance of the scraper
df1 = scraper.get_scrape()
print(df1.shape)
df1.head()
# Save to csv
df1.to_csv("indeed_1pg_test.csv", index=False)
###Output
_____no_output_____
###Markdown
--- The Full Monty (Python)Now I'm going to really let 'er rip to get the full dataset. A little edit to show me what page is being processed.
###Code
# ====== Class for scraping Indeed ====== #
# ====== Without multiprocessing ====== #
class Scrape:
def __init__(self, parameters):
self.output_frame = None
self.loading = False
# Retain parameter dict
self.parameters = parameters
# Create base Indeed URL for all further scraping
self.what = parameters["search_query"]
self.where = parameters["location"]
self.miles = parameters["miles"]
self.base_url = f"https://www.indeed.com/jobs?q={self.what}&l={self.where}"
# Set other attributes
self.keywords = parameters["ordered_keywords"]
self.title_keywords = parameters["title_keywords"]
self.exclude_keywords = parameters["exclude_keywords"]
self.pages = parameters["pages"]
self.total_keywords = len(self.keywords) + len(self.title_keywords)
def rate_job(self, j_title, j_soup):
"""Rate job based on input parameters."""
description = j_soup.find(id="jobDescriptionText").get_text()
# Reset the params
keywords_present = []
title_keywords_present = []
rating = 0
# Check for keyword, add value to rating depending on ranking
for index, keyword in enumerate(self.keywords):
if keyword in description:
rating += len(self.keywords) - index
keywords_present.append(keyword)
# Check for title keywords
for index, keyword in enumerate(self.title_keywords):
if keyword in j_title:
rating += self.total_keywords - index
title_keywords_present.append(keyword)
# Normalise rating
rating = rating / sum(range(1, self.total_keywords + 1))
# Check for excluded keywords
for keyword in self.exclude_keywords:
if keyword in j_title:
rating = 0
break
return description, rating, keywords_present, title_keywords_present
def get_job_details(self, job):
"""Obtain details of the job (company, title, description etc.)"""
# Get link and title
job_url = job.find(class_="title").a["href"]
job_url = ( # Correct for truncated URLs
"https://www.indeed.com" + job_url if (job_url.startswith("/")) else job_url
)
job_page = requests.get(job_url)
job_soup = BeautifulSoup(job_page.content, "html.parser")
# Give URL after redirect (ads/analytics etc.)
job_url = job_page.url
# Get job title and company name
try:
title = job.find(class_="title").a["title"]
except:
title = "No title found"
try:
company = job_soup.find(class_="icl-u-lg-mr--sm").get_text()
except:
company = "No company found."
# Get description, rating and present keywords
description, rating, keywords_present, title_keywords_present = self.rate_job(
title, job_soup
)
return (
title,
company,
job_url,
description,
rating,
keywords_present,
title_keywords_present,
)
def get_scrape(self):
"""Primary method for obtaining scraped jobs."""
# Reset output and progress
self.loading = True
# Output list and frame
output = []
for x in range(0, self.pages):
print(f"Processing page {x + 1}...")
if x == 0:
page_append = ""
else:
page_append = "&start=" + str(x * 10)
# get page
current_page = requests.get(self.base_url + page_append, timeout=5)
page_soup = BeautifulSoup(current_page.content, "html.parser")
for job in page_soup.select(".jobsearch-SerpJobCard"):
(
title,
company,
url,
description,
rating,
keywords_present,
title_keywords_present,
) = self.get_job_details(job)
output.append(
[
rating,
title,
company,
description,
url,
keywords_present,
title_keywords_present,
x + 1,
]
)
print(f"Page {x+1} completed", end="\r")
# Create dataframe from list of jobs
df_output_frame = (
pd.DataFrame(
output,
columns=[
"Rating",
"Job Title",
"Company",
"Description",
"Job URL",
"Keywords Present",
"Title Keywords",
"Page Found",
],
)
.sort_values(by="Rating", ascending=False)
.reset_index(drop=True)
)
# Sort df by rating
df_output_frame["Rating"] = df_output_frame["Rating"].round(decimals=3)
df_output_frame = df_output_frame.drop_duplicates(
subset=["Rating", "Job Title", "Company"]
)
self.loading = False
return df_output_frame
# For outputting to csv locally
def output_csv(self, df):
df.to_csv("indeed_job_scraper.csv", index=False)
# ====== Scraping parameters ====== #
parameters2 = {
"search_query": "data science",
"location": "Denver, CO",
"miles": 50,
"ordered_keywords": ["data", "science"],
"exclude_keywords": "",
"title_keywords": "",
"pages": 15,
}
# First, instantiate a scraper object with the parameters
scraper2 = Scrape(parameters2)
# Then, use the `.get_scrape()` method to use the instance of the scraper
df2 = scraper2.get_scrape()
print(df2.shape)
df2.head()
# Save to csv
df2.to_csv("19-12-03-indeed_jobs_pg_1-15.csv", index=False)
###Output
_____no_output_____
###Markdown
--- The Full Monty (Python), Part 2That gave me 118 records. I didn't want to accidentally overshoot the results and have to start back from page 1 after scraping all of that. So I'm going to set the next one to start scraping near where that one left off.
###Code
# ====== Class for scraping Indeed ====== #
# ====== Without multiprocessing ====== #
# The update to this version of the class is to start it at page 16
class Scrape:
def __init__(self, parameters):
self.output_frame = None
self.loading = False
# Retain parameter dict
self.parameters = parameters
# Create base Indeed URL for all further scraping
self.what = parameters["search_query"]
self.where = parameters["location"]
self.miles = parameters["miles"]
self.base_url = f"https://www.indeed.com/jobs?q={self.what}&l={self.where}"
# Set other attributes
self.keywords = parameters["ordered_keywords"]
self.title_keywords = parameters["title_keywords"]
self.exclude_keywords = parameters["exclude_keywords"]
self.pages = parameters["pages"]
self.total_keywords = len(self.keywords) + len(self.title_keywords)
def rate_job(self, j_title, j_soup):
"""Rate job based on input parameters."""
description = j_soup.find(id="jobDescriptionText").get_text()
# Reset the params
keywords_present = []
title_keywords_present = []
rating = 0
# Check for keyword, add value to rating depending on ranking
for index, keyword in enumerate(self.keywords):
if keyword in description:
rating += len(self.keywords) - index
keywords_present.append(keyword)
# Check for title keywords
for index, keyword in enumerate(self.title_keywords):
if keyword in j_title:
rating += self.total_keywords - index
title_keywords_present.append(keyword)
# Normalise rating
rating = rating / sum(range(1, self.total_keywords + 1))
# Check for excluded keywords
for keyword in self.exclude_keywords:
if keyword in j_title:
rating = 0
break
return description, rating, keywords_present, title_keywords_present
def get_job_details(self, job):
"""Obtain details of the job (company, title, description etc.)"""
# Get link and title
job_url = job.find(class_="title").a["href"]
job_url = ( # Correct for truncated URLs
"https://www.indeed.com" + job_url if (job_url.startswith("/")) else job_url
)
job_page = requests.get(job_url)
job_soup = BeautifulSoup(job_page.content, "html.parser")
# Give URL after redirect (ads/analytics etc.)
job_url = job_page.url
# Get job title and company name
try:
title = job.find(class_="title").a["title"]
except:
title = "No title found"
try:
company = job_soup.find(class_="icl-u-lg-mr--sm").get_text()
except:
company = "No company found."
# Get description, rating and present keywords
description, rating, keywords_present, title_keywords_present = self.rate_job(
title, job_soup
)
return (
title,
company,
job_url,
description,
rating,
keywords_present,
title_keywords_present,
)
def get_scrape(self):
"""Primary method for obtaining scraped jobs."""
# Reset output and progress
self.loading = True
# Output list and frame
output = []
# This time, start at page 15
for x in range(15, self.pages + 15):
print(f"Processing page {x + 1}...")
if x == 0:
page_append = ""
else:
page_append = "&start=" + str(x * 10)
# get page
current_page = requests.get(self.base_url + page_append, timeout=5)
page_soup = BeautifulSoup(current_page.content, "html.parser")
for job in page_soup.select(".jobsearch-SerpJobCard"):
(
title,
company,
url,
description,
rating,
keywords_present,
title_keywords_present,
) = self.get_job_details(job)
output.append(
[
rating,
title,
company,
description,
url,
keywords_present,
title_keywords_present,
x + 1,
]
)
print(f"Page {x+1} completed", end="\r")
# Create dataframe from list of jobs
df_output_frame = (
pd.DataFrame(
output,
columns=[
"Rating",
"Job Title",
"Company",
"Description",
"Job URL",
"Keywords Present",
"Title Keywords",
"Page Found",
],
)
.sort_values(by="Rating", ascending=False)
.reset_index(drop=True)
)
# Sort df by rating
df_output_frame["Rating"] = df_output_frame["Rating"].round(decimals=3)
df_output_frame = df_output_frame.drop_duplicates(
subset=["Rating", "Job Title", "Company"]
)
self.loading = False
return df_output_frame
# For outputting to csv locally
def output_csv(self, df):
df.to_csv("indeed_job_scraper.csv", index=False)
# ====== Scraping parameters ====== #
parameters3 = {
"search_query": "data science",
"location": "Denver, CO",
"miles": 50,
"ordered_keywords": ["data", "science"],
"exclude_keywords": "",
"title_keywords": "",
"pages": 10,
}
# First, instantiate a scraper object with the parameters
scraper3 = Scrape(parameters3)
# Then, use the `.get_scrape()` method to use the instance of the scraper
df3 = scraper3.get_scrape()
print(df3.shape)
df3.head()
# For comparison
df2.head()
# Save to csv
df3.to_csv("19-12-03-indeed_jobs_pg_16-25.csv", index=False)
###Output
_____no_output_____
###Markdown
--- ConcatenationIt looks like there were some repeats. We shall see how many once I concatenate the dataframes and remove the duplicates.
###Code
# Might as well concatenate all three, in case they all have some uniques
# Put dfs into list and run concat on the list
frames = [df1, df2, df3]
df4 = pd.concat(frames)
# Look at the results
print(df4.shape)
df4.head()
# Clean it up using pyjanitor
import janitor
df5 = (df4
.clean_names() # Fixes capitalization, whitespace, extra characters
.remove_columns(column_names=["rating", "title_keywords"]) # Remove useless columns
# I'll leave "keywords_present in for now"
)
df5.head()
# Now let's find out how many duplicates there are
# If they are duplicates, the Description should be exactly the same
df6 = df5.drop_duplicates(subset=["description"])
df6.shape
# Looks like there were 45 duplicates
df5.shape[0] - df6.shape[0]
# Save it again
df6.to_csv("19-12-03-indeed_jobs_pg_1-25.csv", index=False)
###Output
_____no_output_____
###Markdown
--- "Seen by Indeed"I noticed there are some where the company name didn't get scraped correctly, and the record says "Seen by Indeed".Before I do anything about those, I just want to see how many of these records exist.
###Code
df6.select_dtypes(exclude="number").describe().T.sort_values(by="unique")
df6["company"].value_counts()
# Actually only 1 of them!
df6["company"].value_counts()["Seen by Indeed"]
###Output
_____no_output_____
###Markdown
> That is actually much better than I thought.There are only 9 companies for which the name wasn't scraped correctly, and it looks like 1 that is "Seen by Indeed".The incorrectly scraped names can be dropped, then the dataset will be ready to go for NLP!
###Code
df7 = df6[df6["company"] != "Seen by Indeed"]
df7.shape
df7.dtypes
# Strip whitespace from all of the columns data
for col in df7.columns:
if df7[col].dtype == "object":
df7[col] = df7[col].str.strip()
pd.options.display.max_colwidth = 200
df6[df6["company"] == "No company found."]
df7.iloc[28]["company"]
# Replace the missing values by hand
df7.at[28, "company"] = "GutCheck"
df7.iloc[28]["company"]
def replace_value(df, index: int, column: str, value: str):
"""Replaces the value at index, column with value."""
print(df.iloc[index][column]) # Before
df.at[index, column] = value
print(df.iloc[index][column]) # After
# The index numbers are off, so resetting them
df8 = df7.reset_index()
df8.head()
# Look again for the correct indices
df8[df8["company"] == "No company found."]
# Datalere
df8.iloc[85]["company"]
replacements = {
54: "Datalere",
64: "National Renewable Energy Laboratory",
68: "Vail Resorts",
77: "Horizontal",
85: "Horizontal",
135: "Fanatics Inc",
139: "Oracle",
155: "Logical Systems Incorporated",
}
for co in replacements:
replace_value(df8, co, "company", replacements[co])
# Confirm it worked as expected
df8[df8["company"] == "No company found."]
pd.options.display.max_rows = 200
df8["company"].value_counts()
df8.head()
df8 = df8.drop(columns=["index"])
df8.head()
df8.shape
###Output
_____no_output_____
###Markdown
--- The Final ExportNow the dataset is all ready to go. Last export for this notebook.The final shape is `(176, 6)`.Not too shabby!
###Code
# Final csv before NLP
df8.to_csv("19-12-03-indeed_jobs.csv", index=False)
###Output
_____no_output_____ |
archives/ru/Policy_assembly.ipynb | ###Markdown
This notebook was appearing in a different folder; the code below should correct issues with files not being found.
###Code
# import os
# os.chdir("../../solar-697/ru")
# cwd = os.getcwd() # Get the current working directory (cwd)
# files = os.listdir(cwd) # Get all the files in that directory
# print("Files in %r: %s" % (cwd, files))
program_map_df['start_month'] = program_map_df['start_month'].dt.strftime('%Y-%m-%d')
conn_proj = sqlite3.connect(cfg["db_dir"] + cfg["db_dsire"])
cursor_proj = conn_proj.cursor()
program_map_df.to_sql("programs_map_2", conn_proj, if_exists="replace", index=False)
conn_proj.commit()
program_map_df.head()
###Output
_____no_output_____ |
notebooks/algorithms/data_science/01_Category_Recommender-Copy1.ipynb | ###Markdown
Introduction **Category Recommender example:** ___We want to answer the critical question of:**Am I making the best recommendations for the customer?**In this example we will show how to:- Setup the required environment for accessing the ecosystem prediction server.- Upload data to ecosystem prediction server.- Load data into feature store and parse to frame.- Enrich feature store.- Create category recommender model.- View model performance.- Write all predictions back to a dataset. Setup **Setting up import path:** ___Add the path of the ecosystem notebook wrappers. It needs to point to the ecosystem notebook wrapper to allow access to the packages required for running the prediction server via Python.- **notebook_path:** Path to notebook repository.
###Code
notebook_path = "/path of to ecosystem notebook repository"
# ---- Uneditible ----
import sys
sys.path.append(notebook_path)
# ---- Uneditible ----
###Output
_____no_output_____
###Markdown
**Import required packages:** ___Import and load all packages required for the following use case.
###Code
# ---- Uneditible ----
import pymongo
from bson.son import SON
import pprint
import pandas as pd
import json
import numpy
import operator
import datetime
import time
import os
import matplotlib.pyplot as plt
from prediction import jwt_access
from prediction import notebook_functions
from prediction.apis import functions
from prediction.apis import data_munging_engine
from prediction.apis import data_management_engine
from prediction.apis import worker_h2o
from prediction.apis import prediction_engine
from prediction.apis import worker_file_service
%matplotlib inline
# ---- Uneditible ----
###Output
_____no_output_____
###Markdown
**Setup prediction server access:** ___Create access token for prediction server.- **url:** Url for the prediction server to access.- **username:** Username for prediction server.- **password:** Password for prediction server.
###Code
url = "http://demo.ecosystem.ai:3001/api"
username = "[email protected]"
password = "cd486be3-9955-4364-8ccc-a9ab3ffbc168"
# ---- Uneditible ----
auth = jwt_access.Authenticate(url, username, password)
# ---- Uneditible ----
###Output
Login Successful.
###Markdown
Upload Data **List uploaded files:** ___List all files already uploaded.
###Code
# ---- Uneditible ----
files = worker_file_service.get_files(auth, path="./", user=username)
files = files["item"]
for file in files:
file_name = file["name"]
fn_parts = file_name.split(".")
if len(fn_parts) > 1 and fn_parts[-1] != "log":
print(file_name)
# ---- Uneditible ----
###Output
get /getFiles?path=./&[email protected]&
1718Pcard.csv
DR_Demo_LendingClub_Guardrails.csv
PCard Transactions 15-16.csv
amcd_test_data2.csv
amcs_test_data.csv
bank-full-1.csv
bank_customer.csv
bank_transactions-TIME-SERIES-7-DAY.csv
bank_transactions-TIMESERIES-590.csv
bank_transactions.csv
bank_transactions_SAMPLE.csv
bank_transactions_fs.csv
budget_messaging_featurestore.csv
combined-prediction-bank_full_1.hex
ecosystem.Ai Modules - 2021-04-07
fnbPrismFeatureStore20201.csv
graphing_states.csv
loan.csv
models.json
module_Corpora.jpeg
module_Corpus.001.png
module_churnIntervention.jpeg
module_nextPurchase.jpeg
module_offerRecommender.jpeg
occupation_variables_and_shock.csv
output.csv
patient_network_small.csv
prism_data.csv
profilesMaster.zip
properties.csv
query-impala-376379-non-null-0331.csv
query-impala-376379-non-null-0331_duration_format.csv
query-impala-376379-non-null-0331_sampler_actions_1-2.csv
query-impala-376379-non-null-0331_sampler_actions_1-2_duration.csv
query-impala-376379-non-null-0331_sampler_actions_3-4.csv
query-impala-376379-non-null-0331_sampler_actions_3-4_duration.csv
query-impala-376379-non-null-0331_sampler_actions_5-8.csv
query-impala-376379-non-null-0331_sampler_actions_5-8_duration.csv
query-impala-376379-non-null-0331_sampler_actions_9-20.csv
query-impala-376379-non-null-0331_sampler_actions_9-20_duration.csv
query-impala-376379-non-null-0331_sampler_duration_140-499.csv
query-impala-376379-non-null-0331_sampler_duration_15-69.csv
query-impala-376379-non-null-0331_sampler_duration_500-40000.csv
query-impala-376379-non-null-0331_sampler_duration_70-139.csv
query-impala-376379-non-null-0331_sampler_successful.csv
query-impala-394794-nlp_tobi_dataset_0521_hashed.csv
query-impala-398667-nlp_cc.csv
sh_behavioural_campaign_full_hashed_v2_ussd.csv
transactions.csv
ussd_durations4.csv
ussd_reduced_revisited_sample copy.csv
ussd_reduced_revisited_sample2.csv
wellnessFeatureStore.csv
wellnessInterventionThompson.json
wellnessScoreMethods.json
word-meaning-examples.csv
word_meaning_tokens.csv
###Markdown
**List uploadable files:** ___List all files in path ready for upload to prediction server.
###Code
# ---- Uneditible ----
path = "../example_data/"
upload_files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
print(upload_files)
# ---- Uneditible ----
###Output
['output.csv']
###Markdown
**Upload file:** ___Select file to upload to prediction server.- **file_name:** file name of file to upload to prediction server. See list of available files for upload.
###Code
file_name = "output.csv"
# ---- Uneditible ----
worker_file_service.upload_file(auth, path + file_name, "/data/")
# ---- Uneditible ----
###Output
post /upload
###Markdown
**List uploaded files:** ___List the files on the prediction server again and compare with the previous list to confirm that the file was uploaded correctly.
###Code
# ---- Uneditible ----
files = worker_file_service.get_files(auth, path="./", user=username)
files = files["item"]
for file in files:
file_name = file["name"]
fn_parts = file_name.split(".")
if len(fn_parts) > 1 and fn_parts[-1] != "log":
print(file_name)
# ---- Uneditible ----
###Output
get /getFiles?path=./&[email protected]&
1718Pcard.csv
DR_Demo_LendingClub_Guardrails.csv
PCard Transactions 15-16.csv
amcd_test_data2.csv
amcs_test_data.csv
bank-full-1.csv
bank_customer.csv
bank_transactions-TIME-SERIES-7-DAY.csv
bank_transactions-TIMESERIES-590.csv
bank_transactions.csv
bank_transactions_SAMPLE.csv
bank_transactions_fs.csv
budget_messaging_featurestore.csv
combined-prediction-bank_full_1.hex
ecosystem.Ai Modules - 2021-04-07
enriched_bank_transactions_10000.csv
fnbPrismFeatureStore20201.csv
graphing_states.csv
loan.csv
models.json
module_Corpora.jpeg
module_Corpus.001.png
module_churnIntervention.jpeg
module_nextPurchase.jpeg
module_offerRecommender.jpeg
multi_personality_tiny.csv
mutli_personality.csv
occupation_variables_and_shock.csv
output.csv
patient_network_small.csv
prism_data.csv
profilesMaster.zip
properties.csv
query-impala-376379-non-null-0331.csv
query-impala-376379-non-null-0331_duration_format.csv
query-impala-376379-non-null-0331_sampler_actions_1-2.csv
query-impala-376379-non-null-0331_sampler_actions_1-2_duration.csv
query-impala-376379-non-null-0331_sampler_actions_3-4.csv
query-impala-376379-non-null-0331_sampler_actions_3-4_duration.csv
query-impala-376379-non-null-0331_sampler_actions_5-8.csv
query-impala-376379-non-null-0331_sampler_actions_5-8_duration.csv
query-impala-376379-non-null-0331_sampler_actions_9-20.csv
query-impala-376379-non-null-0331_sampler_actions_9-20_duration.csv
query-impala-376379-non-null-0331_sampler_duration_140-499.csv
query-impala-376379-non-null-0331_sampler_duration_15-69.csv
query-impala-376379-non-null-0331_sampler_duration_500-40000.csv
query-impala-376379-non-null-0331_sampler_duration_70-139.csv
query-impala-376379-non-null-0331_sampler_successful.csv
query-impala-394794-nlp_tobi_dataset_0521_hashed.csv
query-impala-398667-nlp_cc.csv
sh_behavioural_campaign_full_hashed_v2_ussd.csv
transactions.csv
ussd_durations4.csv
ussd_reduced_revisited_sample copy.csv
ussd_reduced_revisited_sample2.csv
wellnessFeatureStore.csv
wellnessInterventionThompson.json
wellnessScoreMethods.json
word-meaning-examples.csv
word_meaning_tokens.csv
###Markdown
Enrich Data
###Code
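# Enrichment overview (added note): the foreign-key lookup below joins customer attributes
# from the bank_customer collection onto each bank_transactions document, matching on the
# shared "customer" field; the export then writes the first 10,000 enriched documents to CSV.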
database = "master"
collection = "bank_transactions"
attribute = "customer"
search = "{}"
mongodbf = "master"
collectionf = "bank_customer"
attributef = "customer"
fields = "education=education,gender=gender,language=language,numberOfProducts=numberOfProducts,changeIndicatorSix=changeIndicatorSix,changeIndicatorThree=changeIndicatorThree,numberOfChildren=numberOfChildren,numberOfAddresses=numberOfAddresses,segment_enum=segment_enum,region=region,maritalStatus=maritalStatus,age=age,proprtyOwnership=proprtyOwnership"
data_munging_engine.foreign_key_lookup(auth, database, collection, attribute, search, mongodbf, collectionf, attributef, fields)
filename = "enriched_bank_transactions_10000.csv"
filetype = "csv"
database = "master"
collection = "bank_transactions"
field = "{}"
sort = "{}"
projection = "{}"
limit = 10000
data_management_engine.export_documents(auth, filename, filetype, database, collection, field, sort, projection, limit)
###Output
get /exportMongoDocuments?file_name=enriched_bank_transactions_10000.csv&file_type=csv&database=master&collection=bank_transactions&field={}&sort={}&projection={}&limit=10000&
###Markdown
File to Featurestore **Load file into feature store:** ___Load selected file into a feature store and parse the data into a frame.- **file_name:** file name of uploaded file to load into a feature store.- **featurestore_name:** name of feature store to load data into.
###Code
file_name = "enriched_bank_transactions_10000.csv"
featurestore_name = "category_recommender_bank_transactions"
# ---- Uneditible ----
hexframename, imp = functions.save_file_as_userframe(auth, file_name, featurestore_name, username)
df = pd.DataFrame(
{
"columns": imp["columnNames"],
"column_types": imp["columnTypes"]
}
)
df
# ---- Uneditible ----
###Output
get /processFileToFrameImport?file_name=enriched_bank_transactions_10000.csv&first_row_column_names=1&separator=,&
delete /deleteFrame?frame=enriched_bank_transactions_10000.hex&
post /saveUserFrame
post /processToFrameParse
###Markdown
Prepare Featurestore
###Code
# ---- Uneditible ----
frames = worker_h2o.prediction_frames(auth)
for frame in frames["frames"]:
print(frame["frame_id"]["name"])
# ---- Uneditible ----
split_ratio = 0.20
# ---- Uneditible ----
worker_h2o.split_frame(auth, hexframename, split_ratio)
# ---- Uneditible ----
# ---- Uneditible ----
frames = worker_h2o.prediction_frames(auth)
for frame in frames["frames"]:
print(frame["frame_id"]["name"])
# ---- Uneditible ----
###Output
get /predictionFrames
bank_customer.hex
bank_transactions.hex
enriched_bank_transactions_10000.hex
enriched_bank_transactions_100000_2.hex
enriched_bank_transactions_100000_8.hex
###Markdown
Build Model **Train Model:**___Set training parameters for model and train.- **predict_id:** Id for the prediction (for logging). - **description:** Description of model (for logging).- **model_id:** Id for the model (for logging).- **model_type:** Type of model to build (for logging). - **frame_name:** Name of frame used (for logging).- **frame_name_desc:** Description of frame used (for logging).- **model_purpose:** Purpose of model (for logging).- **version:** Model version (for logging).The following parameters are dependend on what is selected in the algo parameter.- **algo:** Algorithm to use to train model. (Availble algorithms: "H20-AUTOML")- **training_frame:** Data frame to use for training the model.- **validation_frame:** Data frame to use for validating the model.- **max_models:** Maximum number of models to build.- **stopping_tolerance:** (TODO)- **max_runtime_secs:** Maximum number of seconds to spend on training.- **stopping_rounds:** (TODO)- **stopping_metric:** (TODO)- **nfolds:** (TODO)- **response_column:** The column or field in the dataset to predict.- **ignored_columns:** List of columns to exclude in the model training.- **hidden:** (TODO)- **exclude_algos:** Algorithms to exclude in the automl run.
###Code
version = "0001"
model_id = featurestore_name + version
model_purpose = "Prediction of customer spend category for recommendation."
description = "Automated features store generated for " + featurestore_name
model_params = {
"predict_id": featurestore_name,
"description": description,
"model_id": model_id,
"model_type": "AUTOML",
"frame_name": hexframename,
"frame_name_desc": description,
"model_purpose": model_purpose,
"version": version,
"model_parms": {
"algo": "H2O-AUTOML",
"training_frame": "enriched_bank_transactions_100000_8.hex",
"validation_frame": "enriched_bank_transactions_100000_2.hex",
"max_models": 10,
"stopping_tolerance": 0.005,
"note_stop": "stopping_tolerance of 0.001 for 1m rows and 0.004 for 100k rows",
"max_runtime_secs": 3600,
"stopping_rounds": 10,
"sort_metric": "logloss",
"stopping_metric": "AUTO",
"nfolds": 0,
"note_folds": "nfolds=0 will disable the stacked ensemble creation process",
"response_column": "mcc_base_category",
"ignored_columns": [
],
"hidden": [
"1"
],
"exclude_algos": [
"StackedEnsemble",
]
}
}
# ---- Uneditible ----
worker_h2o.train_model(auth, model_id, "automl", json.dumps(model_params["model_parms"]))
# ---- Uneditible ----
###Output
get /buildModel?model_id=category_recommender_bank_transactions1010&model_type=automl&model_parms={"algo": "H2O-AUTOML", "training_frame": "enriched_bank_transactions_100000_8.hex", "validation_frame": "enriched_bank_transactions_100000_2.hex", "max_models": 10, "stopping_tolerance": 0.005, "note_stop": "stopping_tolerance of 0.001 for 1m rows and 0.004 for 100k rows", "max_runtime_secs": 3600, "stopping_rounds": 10, "sort_metric": "logloss", "stopping_metric": "AUTO", "nfolds": 0, "note_folds": "nfolds=0 will disable the stacked ensemble creation process", "response_column": "mcc_base_category", "ignored_columns": [], "hidden": ["1"], "exclude_algos": ["StackedEnsemble"]}&
###Markdown
**Compare Models:**___View the AutoML leaderboard to see which of the generated models performs best.
###Code
# ---- Uneditible ----
model_data = worker_h2o.get_train_model(auth, model_id, "AUTOML")
notebook_functions.RenderJSON(model_data)
# ---- Uneditible ----
# ---- Uneditible ----
sort_metric = model_data["leaderboard"]["sort_metric"]
model_names = []
for model in model_data["leaderboard"]["models"]:
model_names.append(model["name"])
model_metrics = model_data["leaderboard"]["sort_metrics"]
df = pd.DataFrame(
{
"model_names": model_names,
"model_metrics": model_metrics
}
)
df.sort_values("model_metrics", inplace=True, ascending=False)
ax = df.plot(y="model_metrics", x="model_names", kind="bar", align="center", alpha=0.5, legend=None)
plt.xticks(rotation=90)
ax.set_title("Performance of Models. Sorted Using Metric: {}".format(sort_metric))
ax.yaxis.grid(True)
# ---- Uneditible ----
###Output
_____no_output_____
###Markdown
**Save Model:**___Save model for prediction.- **best_model_id:** Select model to save out. (By default selects the best model as determined by the selected performance metric.)
###Code
best_model_id = df.iloc[0]["model_names"]
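# Note (added): the dataframe above is sorted with ascending=False; for error-style metrics
# such as logloss (the sort_metric configured earlier), lower is better, so consider sorting
# ascending before treating iloc[0] as the best model.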
# ---- Uneditible ----
h2o_name = best_model_id
zip_name = h2o_name + ".zip"
worker_h2o.download_model_mojo(auth, h2o_name)
high_level_mojo = worker_h2o.get_train_model(auth, h2o_name, "single")
model_to_save = high_level_mojo["models"][0]
model_to_save["model_identity"] = h2o_name
model_to_save["userid"] = "user"
model_to_save["timestamp"] = "time_stamp"
mts = prediction_engine.save_model(auth, model_to_save)
# ---- Uneditible ----
###Output
get /downloadModelMojo?model_id=GLM_1_AutoML_20210729_103918&
get /getModel?model_id=GLM_1_AutoML_20210729_103918&
post /saveModel
###Markdown
View Model **Variable Importance:**___View the variable importance of the saved model.
###Code
# ---- Uneditible ----
prediction_engine.get_user_model(auth, h2o_name)
stats = worker_h2o.get_model_stats(auth, h2o_name, "ecosystem", "variable_importances")
var_names = []
for column in stats["columns"]:
var_names.append(column["name"])
notebook_functions.RenderJSON(stats)
df = pd.DataFrame(
{
var_names[0]: stats["data"][0],
var_names[1]: stats["data"][1],
var_names[2]: stats["data"][2],
var_names[3]: stats["data"][3]
}
)
df
# ---- Uneditible ----
###Output
get /getUserModel?model_identity=GLM_1_AutoML_20210729_103918&
get /getModelStats?model_id=GLM_1_AutoML_20210729_103918&source=ecosystem&stats_type=variable_importances&
###Markdown
Run Prediction
###Code
# Attribute/column names passed to the prediction enrichment. (Assumption: a plain list of
# names is accepted here; adjust to the exact format data_munging_engine.predicition_enrich
# expects if it differs.)
attributes = [
    "mcc_base_category", "account_type", "eff_date_day_of_year", "education",
    "changeIndicatorThree", "trns_amt", "changeIndicatorSix", "language",
    "effYearMonth", "mcc_category", "eff_date_week_and_day", "eff_date_day",
    "mcc_spend_type", "mcc_category_tree", "eff_date_public_holiday",
    "eff_date_day_weekend", "proprtyOwnership", "intl_ind", "numberOfAddresses",
    "numberOfChildren", "mcc_description", "eff_date_date", "eff_date_week_of_year",
    "region", "mcc_sub_category", "maritalStatus", "eff_date", "trait_description",
    "gender", "trns_amt_category", "eff_date_day_of_week", "trns_amt_base_category",
    "eff_date_year_month", "trns_type", "eff_date_year", "eff_date_day_of_week_no",
    "segment_enum", "trns_amt_spend_type", "trns_amt_category_tree",
    "trns_amt_sub_category", "personality_description", "numberOfProducts", "MCC",
    "eff_date_month", "trns_amt_description", "eff_date_week_of_month",
    "effReformatted", "age", "customer",
]
database = "master"
collection = "bank_transactions"
search = "{}"
sort = "{}"
predictor = zip_name
predictor_label = "prediction"
# ---- Uneditible ----
data_munging_engine.predicition_enrich(auth, database, collection, search, sort, predictor, predictor_label, attributes)
# ---- Uneditible ----
###Output
_____no_output_____ |
trainBottleNeckUNet.ipynb | ###Markdown
Validation
###Code
model.load_weights('./weights/ContextBottleNeckUnet/after_epoch2.hdf5')
h = evaluate(model,0,batch_size=2)
alps = np.array(h)
mean = np.mean(alps,axis=0)
print(mean[3])
###Output
_____no_output_____ |
stimuli/utils/upload_stims_to_s3.ipynb | ###Markdown
Upload tower stimuli to s3
###Code
# Which experiment? bucket_name is the name of the experiment and will be the name of the databases on both MongoDB and S3
bucket_name = 'gestalt-static-images' #containment
stim_version = 'Image'
import os
from glob import glob
import boto3
import botocore
from IPython.display import clear_output
import json
import pandas as pd
from PIL import Image
def list_files(paths, ext='png'):
"""Pass list of folders if there are stimuli in multiple folders.
Make sure that the containing folder is informative, as the rest of the path is ignored in naming.
Also returns filenames as uploaded to S3"""
if type(paths) is not list:
paths = [paths]
results = []
names = []
for path in paths:
results += [y for x in os.walk(path) for y in glob(os.path.join(x[0], '*.%s' % ext))]
names += [os.path.split(y)[1] for x in os.walk(path) for y in glob(os.path.join(x[0], '*.%s' % ext))]
return results,names
## helper to speed things up by not uploading images if they already exist, can be overriden
def check_exists(s3, bucket_name, stim_name):
try:
s3.Object(bucket_name,stim_name).load()
return True
except botocore.exceptions.ClientError as e:
if (e.response['Error']['Code'] == "404"):
print('The object does not exist.')
return False
else:
print('Something else has gone wrong with {}'.format(stim_name))
###Output
_____no_output_____
###Markdown
Pass list of folders if there are stimuli in multiple folders. Make sure that the containing folder is informative, as the rest of the path is ignored in naming.
###Code
## set up paths, etc.
# paths_to_stim = ['./example'] ## provide a list of full paths here.
paths_to_stim = [
# tower silhouettes
"/Users/yoni/Projects/CommonFate/scenes/voronoi/superquadric_4/scene_004/images/"
]
# paths_to_stim = glob.glob()
full_stim_paths, filenames = list_files(paths_to_stim)  # generate stim paths and filenames
# defensively drop any stray .DS_Store entries
full_stim_paths = [p for p in full_stim_paths if not p.endswith('.DS_Store')]
filenames = [f for f in filenames if f != '.DS_Store']
full_stim_paths.sort()
filenames.sort()
print('We have {} stimuli to upload.'.format(len(full_stim_paths)))
###Output
We have 64 stimuli to upload.
###Markdown
Upload to S3. This expects the `.aws/credentials` file in your home directory.
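The credentials file follows the standard INI layout; an illustrative sketch with placeholder values:
###Code
# ~/.aws/credentials (placeholder values)
# [default]
# aws_access_key_id = YOUR_ACCESS_KEY_ID
# aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
###Output
_____no_output_____
###Markdown
With credentials in place, create (or reuse) the bucket and upload each stimulus: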
###Code
reallyRun = True
if reallyRun:
## establish connection to s3
s3 = boto3.resource('s3')
## create a bucket with the appropriate bucket name
try:
b = s3.create_bucket(Bucket=bucket_name)
print('Created new bucket.')
# except NoCredentialsError:
# print("Credential missing") #.aws/credentials should be in home folder, not in repo folder
except:
b = s3.Bucket(bucket_name)
print('Bucket already exists.')
## do we want to overwrite files on s3?
overwrite = True
## set bucket and objects to public
b.Acl().put(ACL='public-read') ## sets bucket to public
## now let's loop through stim paths and actually upload to s3 (woot!)
for i,path_to_file in enumerate(full_stim_paths): # use sorted(full_stim_paths) when not using photodraw32
stim_name = filenames[i]
if ((check_exists(s3, bucket_name, stim_name)==False) | (overwrite==True)):
print('Now uploading {} as {} | {} of {}'.format(os.path.split(path_to_file)[-1],stim_name,(i+1),len(full_stim_paths)))
s3.Object(bucket_name,stim_name).put(Body=open(path_to_file,'rb')) ## upload stimuli
s3.Object(bucket_name,stim_name).Acl().put(ACL='public-read') ## set access controls
else:
print('Skipping {} | {} of {} because it already exists.'.format(os.path.split(path_to_file)[-1],(i+1),len(full_stim_paths)))
clear_output(wait=True)
print('Done!')
for my_bucket_object in b.objects.all():
print(my_bucket_object)
### add burn-in trials
### add catch trials
###Output
_____no_output_____ |
python/.ipynb | ###Markdown
**Closures**: the two generators below illustrate Python's late binding of loop variables in closures. The lambdas yielded by `gen_func1` all close over the same `i`, so they report whatever `i` holds at call time, while `gen_func2` freezes the current value by passing it through an inner function.
###Code
def gen_func1(n):
for i in range(n):
callback = lambda : i
yield callback
def gen_func2(n):
for i in range(n):
def genf(i):
return lambda: i
callback = genf(i)
yield callback
def test(func):
l = []
print(func)
for f in func(2):
print("first loop:",f())
l.append(f)
for f in l:
print("second loop:",f())
test(gen_func1)
test(gen_func2)
print([i() for i in gen_func1(5)])
print([i() for i in gen_func2(5)])
[i for i in gen_func1(2)]
[i for i in gen_func2(2)]
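# Added note (not part of the original notebook): the lambdas in gen_func1 all close
# over the same loop variable i, so once the generator is fully consumed they all
# return the last value of i (late binding). gen_func2 captures i in a fresh scope via
# genf(i), so each callback keeps the value from its own iteration. A common
# alternative is a default argument, sketched here for comparison:
def gen_func3(n):
    for i in range(n):
        yield lambda i=i: i  # i is bound at lambda-creation time
test(gen_func3)  # behaves like gen_func2: the second loop prints 0 and 1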
###Output
_____no_output_____ |
Evaluate_Predict_Showcase.ipynb | ###Markdown
Evaluation
###Code
# %matplotlib inline
import tensorflow as tf
import cv2
import itertools, os, time
import numpy as np
from Model import get_Model
from parameter import letters
import argparse
from keras import backend as K
K.set_learning_phase(0)
import matplotlib.pyplot as plt
from random import sample
import keras
prov_dict = {'AA' : 'อำนาจเจริญ', 'AB' : 'อ่างทอง', 'AC' : 'กรุงเทพมหานคร', 'AD' :
'บึงกาฬ', 'AE' : 'บุรีรัมย์', 'AF' : 'ฉะเชิงเทรา', 'AG' : 'ชัยนาท', 'AH' : 'ชัยภูมิ',
'AI' : 'จันทบุรี', 'AJ' : 'เชียงใหม่', 'AK' : 'เชียงราย',
'AL' : 'ชลบุรี', 'AM' : 'ชุมพร', 'AN' : 'กาฬสินธุ์', 'AO' : 'กำแพงเพชร',
'AP' : 'กาญจนบุรี', 'AQ' : 'ขอนแก่น', 'AR' : 'กระบี่', 'AS' : 'ลำปาง',
'AT' : 'ลำพูน', 'AU' : 'เลย', 'AV' : 'ลพบุรี', 'AW' : 'แม่ฮ่องสอน',
'AX' : 'มหาสารคาม', 'AY' : 'มุกดาหาร', 'BA' : 'นครนายก', 'BB' : 'นครปฐม',
'BC' : 'นครพนม', 'BD' : 'นครราชสีมา', 'BE' : 'นครสวรรค์', 'BF' : 'นครศรีธรรมราช',
'BG' : 'น่าน', 'BH' : 'นราธิวาส', 'BI' : 'หนองบัวลำภู', 'BJ' : 'หนองคาย',
'BK' : 'นนทบุรี', 'BL' : 'ปทุมธานี', 'BM' : 'ปัตตานี', 'BN' : 'พังงา',
'BO' : 'พัทลุง', 'BP' : 'พะเยา', 'BQ' : 'เพชรบูรณ์', 'BR' : 'เพชรบุรี',
'BS' : 'พิจิตร', 'BT' : 'พิษณุโลก', 'BU' : 'พระนครศรีอยุธยา', 'BV' : 'แพร่',
'BW' : 'ภูเก็ต', 'BX' : 'ปราจีนบุรี', 'BY' : 'ประจวบคีรีขันธ์',
'CA' : 'ระนอง', 'CB' : 'ราชบุรี', 'CC' : 'ระยอง', 'CD' : 'ร้อยเอ็ด', 'CE' : 'สระแก้ว',
'CF' : 'สกลนคร', 'CG' : 'สมุทรปราการ', 'CH' : 'สมุทรสาคร', 'CI' : 'สมุทรสงคราม',
'CJ' : 'สระบุรี', 'CK' : 'สตูล', 'CL' : 'ศรีสะเกษ', 'CM' : 'สิงห์บุรี',
'CN' : 'สงขลา', 'CO' : 'สุโขทัย', 'CP' : 'สุพรรณบุรี', 'CQ' : 'สุราษฎร์ธานี',
'CR' : 'สุรินทร์', 'CS' : 'ตาก', 'CT' : 'ตรัง', 'CU' : 'ตราด', 'CV' :
'อุบลราชธานี', 'CW' : 'อุดรธานี', 'CX' : 'อุทัยธานี', 'CY' : 'อุตรดิตถ์',
'DA' : 'ยะลา', 'DB' : 'ยโสธร'}
char_dict = {'aa' : 'ก', 'ab' : 'ข', 'ac' : 'ค', 'ad' : 'ฆ', 'ae' : 'ง', 'af' : 'จ',
'ag' : 'ฉ', 'ah' : 'ช', 'ai' : 'ซ', 'aj' : 'ฌ', 'ak' : 'ญ', 'al' : 'ฎ',
'am' : 'ฏ', 'an' : 'ฐ', 'ao' : 'ฑ', 'ap' : 'ฒ', 'aq' : 'ณ', 'ar' : 'ด',
'as' : 'ต', 'at' : 'ถ', 'au' : 'ท', 'av' : 'ธ', 'aw' : 'น', 'ax' : 'บ',
'ay' : 'ป', 'ba' : 'ผ', 'bb' : 'ฝ', 'bc' : 'พ', 'bd' : 'ฟ', 'be' : 'ภ',
'bf' : 'ม', 'bg' : 'ย', 'bh' : 'ร', 'bi' : 'ล', 'bj' : 'ว', 'bk' : 'ศ',
'bl' : 'ษ', 'bm' : 'ส', 'bn' : 'ห', 'bo' : 'ฬ', 'bp' : 'อ', 'bq' : 'ฮ'}
def decode_label(out):
# out : (1, 32, 42)
out_best = list(np.argmax(out[0, 2:], axis=1)) # get max index -> len = 32
out_best = [k for k, g in itertools.groupby(out_best)] # remove overlap value
outstr = ''
for i in out_best:
if i < len(letters):
outstr += letters[i]
return outstr
def label_to_char(label): # eng -> thai
front_num = label[0]
first_char = label[1:3]
second_char = label[3:5]
four_num = label[5:9]
province = label[9:]
try:
first_char = char_dict[first_char]
except:
first_char = '?'
try:
second_char = char_dict[second_char]
except:
second_char = '?'
try:
province = prov_dict[province]
except:
province = '????'
return front_num + first_char + second_char + four_num + '-' + province
# return front_num + first_char + second_char + four_num + province
# Get CRNN model
model = get_Model(training=False)
try:
model.load_weights('LSTM+BN5--thai-v3.hdf5')
print("...Previous weight data...")
except:
raise Exception("No weight file!")
test_dir = './DB/test/'
test_imgs = os.listdir(test_dir)
total = 0
acc = 0
letterprov_total = 0
letterprov_acc = 0
letter_acc = 0
letter_total = 0
province_acc = 0
start = time.time()
for test_img in test_imgs:
img = cv2.imread(test_dir + test_img, cv2.IMREAD_GRAYSCALE)
img_pred = img.astype(np.float32)
img_pred = cv2.resize(img_pred, (128, 64))
img_pred = (img_pred / 255.0) * 2.0 - 1.0
img_pred = img_pred.T
img_pred = np.expand_dims(img_pred, axis=-1)
img_pred = np.expand_dims(img_pred, axis=0)
net_out_value = model.predict(img_pred)
pred_texts = decode_label(net_out_value)
pred_texts_decoded = label_to_char(pred_texts)
actual_texts_decoded = label_to_char(test_img[0:-4])
# evaluation
# Letter+Province ACC
for i in range(7):
if pred_texts_decoded[i] == actual_texts_decoded[i]:
letterprov_acc += 1
letter_acc += 1
if pred_texts_decoded[8:] == actual_texts_decoded[8:]:
letterprov_acc += 1
province_acc += 1
letterprov_total += 8
letter_total += 7
# ACC
if pred_texts == test_img[0:-4]:
acc += 1
total += 1
# print(pred_texts)
# print(test_img[0:-4])
# print()
# print('Predicted: %s / True: %s' % (label_to_char(pred_texts), label_to_char(test_img[0:-4])))
print('Predicted:', pred_texts_decoded)
print('True :', actual_texts_decoded)
print('=========================================================')
# cv2.rectangle(img, (0,0), (150, 30), (0,0,0), -1)
# cv2.putText(img, pred_texts, (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255),2)
# cv2.imshow("q", img)
# if cv2.waitKey(0) == 27:
# break
# cv2.destroyAllWindows()
end = time.time()
total_time = (end - start)
print()
print("Time : ", total_time / total)
print('Num test images : ', total)
print('____________________________________')
print("ACC : ", acc / total)
print('____________________________________')
print('letter ACC : ', letter_acc / letter_total)
print('province ACC : ', province_acc / total)
print('____________________________________')
print("letter+province ACC : ", letterprov_acc / letterprov_total)
print("""Time : 0.03342
Num test images : 3300
____________________________________
ACC : 40.9%
____________________________________
letter ACC : 98.4%
province ACC : 41.1%
____________________________________
letter+province ACC : 91.3%""")
###Output
Time : 0.03342
Num test images : 3300
____________________________________
ACC : 40.9%
____________________________________
letter ACC : 98.4%
province ACC : 41.1%
____________________________________
letter+province ACC : 91.3%
###Markdown
Showcase
###Code
loaded_model = keras.models.load_model('Model_LSTM+BN5--thai-v3.h5', custom_objects=None, compile=False)
# Testing with Generated License Plate
test_dir = './DB/test/'
test_imgs = os.listdir(test_dir)
def display(img_, title=''):
img = img_
# fig = plt.figure(figsize=(10,6))
ax = plt.subplot(111)
ax.imshow(img, cmap='gray')
plt.axis('off')
plt.title(title)
plt.show()
for test_img in sample(test_imgs,10):
img = cv2.imread(test_dir + test_img, cv2.IMREAD_GRAYSCALE)
img_pred = img.astype(np.float32)
img_pred = cv2.resize(img_pred, (128, 64))
img_pred = (img_pred / 255.0) * 2.0 - 1.0
img_pred = img_pred.T
img_pred = np.expand_dims(img_pred, axis=-1)
img_pred = np.expand_dims(img_pred, axis=0)
net_out_value = loaded_model.predict(img_pred)
pred_texts = decode_label(net_out_value)
pred_texts_decoded = label_to_char(pred_texts)
actual_texts_decoded = label_to_char(test_img[0:-4])
try:
# plate = cv2.imread(test_dir+test_img)
plate = img.astype(np.float32)
plate = cv2.resize(plate, (128, 64))
plate = (plate / 255.0) * 2.0 - 1.0
display(plate, 'image')
print('True :' , actual_texts_decoded)
print('Predict :', pred_texts_decoded)
print('_______________________________________')
# plt.axis('off')
except:
pass
def display(img_, title=''):
img = img_
# fig = plt.figure(figsize=(10,6))
ax = plt.subplot(111)
ax.imshow(img, cmap='gray')
plt.axis('off')
plt.title(title)
plt.show()
plate = cv2.imread(test_dir+test_img)
display(plate, 'image')
test_dir = './DB/real_image/'
test_imgs = os.listdir(test_dir)
def display(img_, title=''):
img = img_
# fig = plt.figure(figsize=(10,6))
ax = plt.subplot(111)
ax.imshow(img, cmap='gray')
plt.axis('off')
plt.title(title)
plt.show()
for test_img in test_imgs:
img = cv2.imread(test_dir + test_img, cv2.IMREAD_GRAYSCALE)
img_pred = img.astype(np.float32)
img_pred = cv2.resize(img_pred, (128, 64))
img_pred = (img_pred / 255.0) * 2.0 - 1.0
img_pred = img_pred.T
img_pred = np.expand_dims(img_pred, axis=-1)
img_pred = np.expand_dims(img_pred, axis=0)
net_out_value = loaded_model.predict(img_pred)
pred_texts = decode_label(net_out_value)
pred_texts_decoded = label_to_char(pred_texts)
actual_texts_decoded = label_to_char(test_img[0:-4])
try:
# plate = cv2.imread(test_dir+test_img)
plate = img.astype(np.float32)
plate = cv2.resize(plate, (128, 64))
plate = (plate / 255.0) * 2.0 - 1.0
display(plate, 'image')
print('True :' , actual_texts_decoded)
print('Predict :', pred_texts_decoded)
print('_______________________________________')
# plt.axis('off')
except:
pass
###Output
_____no_output_____
###Markdown
Appendix 1
###Code
CHAR_VECTOR = "abcdefghijklmnopqrstuvwxyABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
letters = [letter for letter in CHAR_VECTOR]
def test_decode(out1):
mystr = ''
for i in out1:
if i < len(letters):
mystr += letters[i]
else:
mystr += '_'
return mystr
net_out_value.shape
loaded_model.summary()
out1 = np.argmax(net_out_value[0, 2:], axis=1)
print(out1)
test_decode(list(out1))
out2 = [k for k, g in itertools.groupby(list(out1))]
print(out2)
test_decode(out2)
mystr = ''
for i in out2:
if i < len(letters):
mystr += letters[i]
print(mystr)
label_to_char(mystr)
###Output
_____no_output_____
###Markdown
Appendix 2
###Code
inv_char = {v: k for k, v in char_dict.items()}
inv_prov = {v: k for k, v in prov_dict.items()}
def convert(label):
front_num = label[0]
first_char = label[1:2]
second_char = label[2:3]
four_num = label[3:7]
province = label[7:]
try:
first_char = inv_char[first_char]
except:
pass
try:
second_char = inv_char[second_char]
except:
pass
try:
province = inv_prov[province]
except:
pass
return print(front_num + first_char + second_char + four_num + province)
temp_str = 'Zพฎ7125ประจวบคีรีขันธ์'
convert(temp_str)
###Output
Zbcal7125BY
|
script/Kapitel 2 - Grundlegende Datentypen.ipynb | ###Markdown
ContentsThe second chapter covers the data types provided by Python and addresses the following concepts:- NoneType- Details on strings- Lists- Tuples- Sets- Dictionaries Data typesData types are the foundation for processing and storing information in computer programs. In Python, a distinction is made between scalar data types and non-scalar data types (objects). Scalar data types store only one particular kind of information; objects have an internal structure and often also provide methods for working with the data. A great advantage of Python is that the language ships with some very powerful data types out of the box. This allows many problems to be solved without using additional library functions (Chapter 5). The NoneTypeAn important, though not always entirely intuitive, programming concept is data types. In Java, for example, there is ```null```. In Python, there is the ```NoneType``` for this, whose only possible value is ```None```. ```None``` is used, for example, as the return value of functions and methods when there is no "real" return value, for instance because something was not found or because there simply is no logical return value. The return value of the ```print()``` function, for example, is ```None```. To check whether something is ```None```, you use ```is None```; to make sure that something is not ```None```, you use ```is not None```.
###Code
print(print("Hello World!") is None)
print(print("Hello World!") is not None)
###Output
Hello World!
True
Hello World!
False
###Markdown
SequencesSequences play a central role in Python. Sequences are, simply put, series of objects strung together. Sequence data types are all data types that consist of such a series of objects. These can be, for example, sequences of characters that form a string, or of arbitrary objects in a list. All sequences have in common that you can *iterate* over them, that is, go from one object to the next. In the following, we look at strings as sequences and introduce lists, tuples, and sets as further sequence data types. Details on strings Index access and slicingSo far we have always treated strings as a whole. However, you can also access the individual characters. For this, index accesses are used, which in Python are performed using square brackets. The index of the first element in Python is 0.
###Code
my_string = "Hello World!"
print(my_string[0]) # prints the first character of the string
print(my_string[10]) # prints the 10th character of the string
###Output
H
d
###Markdown
If you try to access an index that does not exist, you get an error message.
###Code
my_string[20]
###Output
_____no_output_____
###Markdown
In Python you can also use negative indices. These access the string "from the back". So -1 is the last character, -2 the second to last, etc.
###Code
print(my_string[-1]) # prints the last character of the string
print(my_string[-10]) # prints the 10th character from the end of the string
###Output
!
l
###Markdown
For accessing parts of strings, Python has the concept of *slicing*. For this, the colon ```:``` is used as an operator. The start index is part of the slice (inclusive), the end index is not part of the slice (exclusive). If you omit the start or end index, the beginning or the end of the string is used.
###Code
print(my_string[0:5]) # substring with the characters between index 0 (inclusive) and index 5 (exclusive)
print(my_string[6:11]) # substring with the characters between index 6 (inclusive) and index 11 (exclusive)
print(my_string[:5]) # substring with the characters from the start of string until index 5 (exclusive)
print(my_string[6:]) # substring with the characters from index 6 (inclusive) until the end of the string
print(my_string[:]) # complete string
###Output
Hello
World
Hello
World!
Hello World!
###Markdown
Strings are immutable. This means that an existing string cannot be changed directly. If you try anyway, you get an error message.
###Code
my_string[0] = "h"
###Output
_____no_output_____
###Markdown
The length of a string can be determined with ```len(string)```.
###Code
print(len(my_string))
###Output
12
###Markdown
Formatted stringsFor output, it is often important to prepare information as strings. Above we already saw how numbers and other strings can be appended to one another using operators. Even though this is sufficient in simple cases, you quickly reach its limits or end up with a lot of effort. Python offers several ways to create "pretty" formatted strings. format()Python provides the [Format Specification Mini-Language](https://docs.python.org/3/library/string.html#formatspec) for defining formatted strings. For this, the ```format()``` method of ```str``` is used. Areas marked by curly braces ```{}``` are then replaced by formatted string representations of objects.
###Code
print("{0} = {1}".format("pi", 3.14159265359)) # format string with index
print("{0} = {1} (approximation of {0})".format("pi", 3.14159265359)) # same index can be used multiple times
print("{pi_str} = {pi_value}".format(pi_str="pi",pi_value=3.14159265359)) # format string with names
###Output
pi = 3.14159265359
pi = 3.14159265359 (approximation of pi)
pi = 3.14159265359
###Markdown
The capabilities of ```format()``` go far beyond merely inserting values. In particular, the way a number is displayed can be configured. For example, you can define how many decimal places a number has, how signs are displayed, and whether scientific notation with an exponent should be used. This is done via a format specification, which is appended with a ```:``` after the index or the name of the parameter.
###Code
print("{0} = {1:.3f}".format("pi", 3.14159265359)) # .3f specifies that three digits after the comma are displayed (rounded)
print("{0} = {1:.5f}".format("pi", 3.14159265359)) # .5f specifies that five digits after the comma are displayed (rounded)
print("{0:e}".format(10000000)) # specifies that scientific notation is used
###Output
pi = 3.142
pi = 3.14159
1.000000e+07
###Markdown
Formatted string literalsA fairly new feature (Python 3.6) are formatted string literals. These are "strings with placeholders" and make it possible to embed the values of variables dynamically into strings. You mark a string as a formatted string literal by prefixing it with ```f``` (or ```F```). Curly braces ```{}``` mark the areas that are replaced by variables. The definition of replacements and formatting is the same as for [format()](string.format()).
###Code
value = "World"
formated_string = f"Hello {value}!" # formated string literal where {value} will be replaced by the content of the variable value
print(formated_string)
###Output
Hello World!
###Markdown
printf-style format stringsPython also supports [formatted strings with a syntax similar to printf](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting) as known from the C language. For this kind of formatting, the objects are attached to a string with a ```%```. The advantage of this syntax is that such format strings are widely known. The disadvantage is that parameters can neither be named nor reused multiple times via their name or index.
###Code
print("pi = %.3f" % 3.14159265359) # uses % notation in strings as is known from printf
print("%s = %.3f" % ("pi", 3.14159265359)) # use () in case mutliple objects are used
###Output
pi = 3.142
pi = 3.142
###Markdown
Further important string methodsStrings in Python are objects, which means there are methods that can be called directly on a string to work with it. Here are some examples of helpful methods. A complete reference can be found in the [Python documentation](https://docs.python.org/3/library/stdtypes.html#string-methods).
###Code
my_string = "Hello World!"
print(my_string.startswith("Hello")) # startswith tests if a string starts with another string
print(my_string.endswith("World")) # endswith tests if a string ends with another string
print(my_string.upper()) # upper makes all characters upper case
print(my_string.lower()) # lower makes all characters lower case
print(my_string.rjust(30)) # rjust right-adjusts string by adding spaces (ljust also available)
print(my_string.rjust(30).strip()) # strip removes all leading and trailing whitepaces
print(my_string.isnumeric()) # isnumeric checks if a string is a number (also available: isalpha, isalnum, isdigit, ...)
###Output
True
False
HELLO WORLD!
hello world!
Hello World!
Hello World!
False
###Markdown
ListsThe data type ```List``` stores an ordered sequence of objects in Python. Lists are defined using square brackets ```[]```. Each element of a list can be accessed using its index and ```[]```, just like the individual characters of strings, including access via slicing.
###Code
my_list = ["Hello","World","!"]
print(my_list)
print(my_list[0])
print(my_list[1])
print(my_list[2])
print(my_list[-1])
print(my_list[-2])
print(my_list[-3])
print(my_list[1:3])
###Output
['Hello', 'World', '!']
Hello
World
!
!
World
Hello
['World', '!']
###Markdown
The number of elements in a list can be determined with ```len```.
###Code
print(len(my_list))
###Output
3
###Markdown
If you want to know whether an object is in a list, you can use the ```in``` operator; to check whether an object is not in a list, you use ```not in```.
###Code
print("Hello" in my_list)
print("hi" not in my_list)
###Output
True
True
###Markdown
In contrast to strings, lists are mutable. This means you can replace elements with other objects, as well as add and remove elements. To replace, you can simply assign new values via index access. For adding and removing elements, lists provide the methods ```append(element)``` and ```insert(index, element)```, respectively ```remove(element)``` and ```pop(index)```. All elements can be removed with the ```clear()``` method. Individual elements can also be deleted using ```del``` and index access.
###Code
my_changed_list = ["Hello","World","!"]
my_changed_list[0] = "Hi" # changes the first element to "Hi"
print(my_changed_list)
my_changed_list.append("!") # appends a value at the end of the list
print(my_changed_list)
my_changed_list.insert(1,"beautiful") # inserts a value at the given position of the list, all following elements move back
print(my_changed_list)
my_changed_list.remove("World") # removes the first occurence of an element from a list
print(my_changed_list)
my_changed_list.pop(1) # removes an element by using the index
print(my_changed_list)
del my_changed_list[1] # removes an element with del
print(my_changed_list)
my_changed_list.clear() # removes all elements
print(my_changed_list)
###Output
['Hi', 'World', '!']
['Hi', 'World', '!', '!']
['Hi', 'beautiful', 'World', '!', '!']
['Hi', 'beautiful', '!', '!']
['Hi', '!', '!']
['Hi', '!']
[]
###Markdown
If the same element occurs multiple times in a list, only the first of these elements is removed by ```remove```. Since lists are mutable, it is not enough to simply assign a list to a variable in order to capture its current state. Instead, you have to use the ```copy()``` method for this.
###Code
my_changed_list = ["Hello","World","!"]
my_other_reference = my_changed_list # assign the same list to another variable
my_copied_list = my_changed_list.copy() # create a copy of the list
my_changed_list[0] = "Hi" # modify my_changed_list
print(my_other_reference) # the reference is changed
print(my_copied_list) # the copy remains the same
###Output
['Hi', 'World', '!']
['Hello', 'World', '!']
###Markdown
The copy of the list is a *shallow copy*, which means only the list itself is copied; the elements of the list are not copied as well. (More on copying in Chapter 7.)Further methods for working with lists are ```sort()``` for sorting the values, ```reverse()``` to invert the order of the list elements, and ```count(element)``` to count how often an element occurs in a list; a short example of these follows below. TuplesSimply put, the data type ```tuple``` gives you immutable lists. Creating tuples is similar to creating lists, except that ```()``` is used.
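A minimal sketch of the three list methods just mentioned (the values are arbitrary illustration data):
```python
numbers = [3, 1, 2, 3]
numbers.sort()            # sorts in place: [1, 2, 3, 3]
numbers.reverse()         # inverts the order: [3, 3, 2, 1]
print(numbers.count(3))   # counts occurrences of 3 -> 2
print(numbers)
```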
###Code
my_tuple = ("Hello", "World", "!") # creates a new tuple
print(my_tuple)
print(my_tuple[1])
print("Hello" in my_tuple)
print("Hi" not in my_tuple)
print(len(my_tuple))
###Output
('Hello', 'World', '!')
World
True
True
3
###Markdown
An important property of tuples is *packing* and *unpacking*. Packing is comparable to multiple assignment, except that instead of several variables only a single tuple is assigned. Unpacking does the opposite: several variables are assigned from one tuple.
###Code
my_tuple = "Hello","World","!" # creates a tuple through packing, only works with tuples
print(my_tuple)
hello,world,exclamation = my_tuple # unpacks the tuple into three variables, also works with lists
print(hello)
print(world)
print(exclamation)
###Output
('Hello', 'World', '!')
Hello
World
!
###Markdown
When unpacking, note that the number of assigned variables must match the length of the tuple, i.e., every value in the tuple must be assigned to a variable. Otherwise, you get an error message.
###Code
hello,world = my_tuple # only two variables instead of three --> error
###Output
_____no_output_____
###Markdown
The empty tuple can simply be created with empty parentheses ```()```. One-element tuples, however, are syntactically a bit awkward, since it is not enough to just put an object in parentheses. You additionally need a comma at the end.
###Code
no_tuple = ("hello")
print(type(no_tuple))
one_element_tuple = ("hello",)
print(type(one_element_tuple))
print(len(one_element_tuple))
###Output
<class 'str'>
<class 'tuple'>
1
###Markdown
SetsBoth lists and tuples can contain the same object multiple times. Sets have the property that each object is contained exactly once and, in return, they are unordered unless explicitly requested otherwise. In Python, the data type ```set``` exists for this. To define a set, you use either ```set()``` or curly braces ```{}```.
###Code
my_set = {"Hello", "Hello", "World", "!"} # creates a new set
my_set = set(["Hello", "Hello", "World", "!"]) # same as above, but creates the set from a list
print(my_set)
print("Hello" in my_set)
print("hi" not in my_set)
print(len(my_set))
###Output
{'Hello', 'World', '!'}
True
True
3
###Markdown
Sets, just like lists, are mutable. Objects can be added with ```add(element)``` and removed with ```remove(element)``` or ```discard(element)```. The difference between ```remove``` and ```discard``` is that ```remove``` raises an error if the object is not in the set. All elements can be removed with ```clear()```.
###Code
my_changing_set = {"Hello", "World", "!"}
my_changing_set.add("hi") # adds "hi" to the set
my_changing_set.add("hi") # does nothing, "hi" already part of the set
print(my_changing_set)
my_changing_set.remove("Hello") # removes "Hello" from the set
print(my_changing_set)
my_changing_set.clear()
print(my_changing_set)
my_changing_set.remove("does not exist")
###Output
_____no_output_____
###Markdown
Since the data type ```set``` represents sets in the sense of the mathematical definition, you can also perform set operations. Available are the union (```union```), intersection (```intersection```), difference (```difference```), and symmetric difference (```symmetric_difference```). In addition, you can check whether a set is a subset (```issubset```) or a superset (```issuperset```), and whether sets are disjoint (```isdisjoint```).
###Code
set1 = {1, 2, 3, 4}
set2 = {3, 4, 5, 6}
print(set1.union(set2))
print(set1.intersection(set2))
print(set1.difference(set2))
print(set1.symmetric_difference(set2))
print(set1.issubset(set2))
print(set1.issuperset(set2))
print(set1.isdisjoint(set2))
###Output
{1, 2, 3, 4, 5, 6}
{3, 4}
{1, 2}
{1, 2, 5, 6}
False
False
False
###Markdown
For most set operations there are also operators that can be used directly. For this, the similarity of set operations to logical and arithmetic operations was exploited.
###Code
print(set1 | set2) # union because of similarity to a logical or
print(set1 & set2) # intersection because of similarity to a logical and
print(set1 - set2) # difference because of similarity to an arithmetic minus
print(set1 ^ set2) # symmetric_difference because of the similarity to a logical xor
print(set1 <= set2) # issubset because of the similarity to less than or equal too
print(set1 >= set2) # issuperset because of the similartiy to greater than or equal too
###Output
{1, 2, 3, 4, 5, 6}
{3, 4}
{1, 2}
{1, 2, 5, 6}
False
False
###Markdown
DictionariesDictionaries are a data type ```dict``` that manages mappings from keys (*key*) to values (*value*). There can be only one value per key. Dictionaries, similar to sets, are defined using ```{}```. In contrast to sets, key-value pairs must be given in the form ```key: value``` or as tuples ```(key, value)``` (see the example below). Access to elements in dictionaries works similarly to accessing elements of lists using ```[]```, except that the key is used instead of the index. Since dictionaries are mutable, this can also be used to add new key-value pairs to a dict or to update existing entries.
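A small illustration of the two creation forms just described (the key-value pairs are arbitrary examples):
```python
constants = {"pi": 3.14159265359, "c": 299792458}                  # key: value form
same_constants = dict([("pi", 3.14159265359), ("c", 299792458)])   # from (key, value) tuples
print(constants == same_constants)  # True
```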
###Code
my_dict = {"pi": 3.14159265359, "c": 299792458}
print(my_dict)
print(my_dict["pi"])
print(my_dict["c"])
my_dict["e"] = 1.602e-19
my_dict["pi"] = 3.14
print(my_dict)
print("pi" in my_dict)
print("mu0" not in my_dict)
print(len(my_dict))
###Output
{'pi': 3.14159265359, 'c': 299792458}
3.14159265359
299792458
{'pi': 3.14, 'c': 299792458, 'e': 1.602e-19}
True
True
3
###Markdown
If you try to access a key that does not exist in the dictionary, you get an error message.
###Code
my_dict["mu0"]
###Output
_____no_output_____
###Markdown
Using the methods ```items()```, ```keys()```, and ```values()``` you get a *view* of all key-value pairs as tuples, all keys, and all values, respectively. From a view you can, for example, create a list or iterate over the elements with a loop (see Chapter 3). Views are dynamic, meaning they always reflect the current state of the dictionary.
###Code
key_view = my_dict.keys()
print(key_view)
print(list(key_view))
my_dict["h"] = 6.626e-34
print(key_view)
###Output
dict_keys(['pi', 'c', 'e'])
['pi', 'c', 'e']
dict_keys(['pi', 'c', 'e', 'h'])
###Markdown
Elements can be removed from a dict using the ```pop(key, default)``` method. The method returns the value of the removed key-value pair. If there is no entry for the key in the dict, the value given in the ```default``` parameter is returned. You use ```None``` as ```default``` when you want to remove keys that may or may not exist.
###Code
print(my_dict.pop("pi", None)) # removes pi from dict and returns the value
print(my_dict)
print(my_dict.pop("pi", None)) # pi not in dict anymore, returns None
###Output
3.14
{'c': 299792458, 'e': 1.602e-19, 'h': 6.626e-34}
None
###Markdown
The ```default``` parameter of ```pop``` is optional. If you omit it, you get an error message if a key does not exist.
###Code
print(my_dict.pop("pi"))
###Output
_____no_output_____ |
Coast_Corr.ipynb | ###Markdown
Coast Corr Here you can calculate per-pixel statistics of an imagery catalogue (converted to CSVs) against environmental monitoring data
###Code
from coast_corr import coast_corr, coast_corr_functions
import warnings
#warnings.filterwarnings("ignore")
import ipywidgets
import osmnx as ox
from tkinter import filedialog
from tkinter import *
from ipyleaflet import (
Map,
Marker,
TileLayer, ImageOverlay,
Polyline, Polygon, Rectangle, Circle, CircleMarker,
GeoJSON,
DrawControl, basemaps, basemap_to_tiles
)
import folium
import os
import pandas as pd
import geopandas as gpd
import requests
import numpy as np
###Output
_____no_output_____
###Markdown
Select folder containing imagery csv files, select satellite type, select environmental parameter:
###Code
root = Tk()
root.withdraw()
csv_folder = filedialog.askdirectory()
sat=ipywidgets.Select(
options=['Sentinel 2', 'Landsat 5', 'Landsat 7', 'Landsat 8','MODIS Aqua','MODIS Terra'],
value='Sentinel 2',
# rows=10,
description='Satellite:',
disabled=False
)
display(sat)
env_dat=ipywidgets.Select(
options=['Modeled Wave Energy','River Discharge', 'Ocean Water Level','HF Radar'],
value='River Discharge',
description='Environmental Data:',
disabled=False
)
display(env_dat)
###Output
_____no_output_____
###Markdown
Here we check whether you have already combined all of the CSVs of interest into a pixel-value CSV. If not, they are combined; if they have been, the combined file is loaded. If this is your first time combining them, it could take a while...
###Code
coast_corr_functions.catalogue_csv(csv_folder)
os.chdir(csv_folder)
band=csv_folder.split("/")[len(csv_folder.split("/"))-1]
path_parent = os.path.dirname(csv_folder)
csv_fold=path_parent+'/coastcorr_csv'
res_out=csv_fold+'/'+band+"_combined_csv.csv"
print("reading combined csv")
sr_df=pd.read_csv(res_out)
if env_dat.value == 'River Discharge':
# from https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
"""
response = requests response object to stream the download from
destination = local filename to write the downloaded content to
"""
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
DATASET_ID = '1Gh9NcqS0g1VI9EPMglbOl3_Cg8MFqPfb'
destination = '../data.shp'
download_file_from_google_drive(DATASET_ID, destination)
env_df= pd.read_csv(destination)
env_df = gpd.GeoDataFrame(
env_df, geometry=gpd.points_from_xy(env_df.LNG_GAGE, env_df.LAT_GAGE))
# Make an empty map
m = folium.Map(location=[35,-124.4], tiles= "Stamen Terrain", zoom_start=4)
# Show the map
m
for i in range(0,len(env_df)):
folium.Marker(
location=[env_df.iloc[i]['LAT_GAGE'], env_df.iloc[i]['LNG_GAGE']],
popup=env_df.iloc[i]['STAID'],
).add_to(m)
m
if env_dat.value == 'Modeled Wave Energy':
# from https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
"""
response = requests response object to stream the download from
destination = local filename to write the downloaded content to
"""
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
DATASET_ID = '11lbhCV-R3dkeioE6RHfiwuLMqhlWNAft'
destination = '../data.csv'
download_file_from_google_drive(DATASET_ID, destination)
env_df= pd.read_csv(destination)
env_df = gpd.GeoDataFrame(
env_df, geometry=gpd.points_from_xy(env_df.LNG_GAGE, env_df.LAT_GAGE))
# Make an empty map
m = folium.Map(location=[35,-124.4], tiles= "Stamen Terrain", zoom_start=4)
# Show the map
m
env_df=gpd.clip(env_df, polygon, keep_geom_type=False)
for i in range(0,len(env_df)):
folium.Marker(
location=[env_df.iloc[i]['LAT_GAGE'], env_df.iloc[i]['LNG_GAGE']],
popup=env_df.iloc[i]['STATID'],
).add_to(m)
station=ipywidgets.Text(
value='Name of Station',
placeholder='Type something',
description='Station:',
disabled=False
)
display(station)
m
coast_corr_functions.get_data(env_dat.value,station.value, csv_folder)
if env_dat.value == 'River Discharge':
collector='USGS'
site_path=os.path.dirname(os.path.dirname(os.path.dirname(csv_folder)))
env_fold=site_path+'/environmental_data_csv'
collec_fold=env_fold+'/'+collector
file_out= collec_fold+"/"+station.value+'.csv'
env_df=pd.read_csv(file_out)
print('downloaded data read')
###Output
_____no_output_____
###Markdown
Calculate your stats here
###Code
stat_choice=ipywidgets.Select(
options=['Mean','Median', 'Standard Deviation','Spearman Correlation','Linear Correlation'],
value='Mean',
description='Statistic for Composite:',
disabled=False
)
display(stat_choice)
binning=ipywidgets.Text(
value='24',
placeholder='Type something',
description='Data Bin Hours:',
disabled=False
)
display(binning)
int0=ipywidgets.Text(
value='.95',
placeholder='Type something',
description='Percentile Upper:',
disabled=False
)
display(int0)
int1=ipywidgets.Text(
value='1.0',
placeholder='Type something',
description='Percentile Lower:',
disabled=False
)
display(int1)
###Output
_____no_output_____
###Markdown
Navigate to source imagery folder
###Code
root = Tk()
root.withdraw()
imgs = filedialog.askdirectory()
#coast_corr_functions.data_binner(env_df, sr_df, binning.value, station.value, csv_folder, sat.value, env_dat.value)
if env_dat.value=='River Discharge':
freq=binning.value+'h'
collector='USGS'
site_path=os.path.dirname(os.path.dirname(os.path.dirname(csv_folder)))
env_fold=site_path+'/environmental_data_csv'
collec_fold=env_fold+'/'+collector
file_out= collec_fold+"/"+ station.value+'_'+freq+'.csv'
df_data=pd.read_csv(file_out)
if stat_choice.value== 'Mean':
points=(df_data.columns[3:len(df_data.columns)-21]).tolist()
df_fin = pd.DataFrame(columns=['points','stat'])
i=0
value=[]
print('Calculating mean at each position')
for i in range(0,len(points)):
try:
stat=np.nanmean(df_data.iloc[:,i+3:i+4])
value.append(stat)
except:
stat=np.nan
value.append(stat)
df_fin = pd.DataFrame(columns=['points','stat'])
df_fin['points']=points
df_fin['stat']=value
df_fin['lon']=df_fin.points.str.split('(', expand=True)[1].str.split(' ',expand=True)[0]
df_fin['lat']=df_fin.points.str.split('(', expand=True)[1].str.split(' ',expand=True)[1].str.split(')', expand=True)[0]
df_fin['lat']=df_fin['lat'].astype(float)
df_fin=df_fin[df_fin['lat']>0]
raster_creater(df_fin,imgs,csv_folder,env_dat.value,stat_choice.value,int0.value,int1.value)
###Output
_____no_output_____ |
Understanding_Python_for_Data_Analysis_Part1.ipynb | ###Markdown
**Introduction****A Presentation made for:** **Learning on the Go****Facilitator:** **Tolulade Ademisoye** This notebook is a basic record to guide folks with no idea about data science with python. It's basic material guide for begineers and **should not** be taken as a **professional text**.---Things we will cover here;1. Data Extraction Process2. Data Pre-processing3. Data Cleaning4. Exploratory Data Analysis5. Conclusions **Python Libraries for Data Science*** **Pandas** - dataframe* **Numpy** - maths* **Matplotlib** - visual plot* **Seaborn**- visual plot* **Raceplot** - Interactive Plots* **SciPy** - High level computations* **Scrapy** - web data extraction* **BeautifulSoup** - web data extractionEtc---**Machine Learning*** **TensorFlow** - framework for neural network, speech recognition etc* **Keras** - deep learning* **Pytorch** - deep learning too etc* **Scikit-learn** - contains most ml algorithms *(clustering, classification, regression, model selection, dimensionality reduction)*Etc[More on libraries:](https://www.dataquest.io/blog/15-python-libraries-for-data-science/) [Libraries](https://www.simplilearn.com/top-python-libraries-for-data-science-article) **Python Data Types**In Python, the **data type** is set when you assign a value to **a variable**:**Built-in Data Types***Variables* can store data of different types, and different types can do different things.Python has the following data types built-in by default, in these categories:**Text Type**: `str`**Numeric Types**: `int, float, complex`**Sequence Types**: `list, tuple, range`**Mapping Type**: `dict`**Set Types**: `set, frozenset`**Boolean Type**: `bool`**Binary Types**: `bytes, bytearray, memoryview`
###Code
#Print the data type of the variable x:
x = 5
print(type(x))
#Example Data Type Try it
x = "Hello World" #str
x = 20 #int
x = 20.5 #float
x = 1j #complex
x = ["apple", "banana", "cherry"] #list
x = ("apple", "banana", "cherry") #tuple
x = range(6) #range
x = {"name" : "John", "age" : 36} #dict
x = {"apple", "banana", "cherry"} #set
x = frozenset({"apple", "banana", "cherry"}) #frozenset
x = True #bool
x = b"Hello" #bytes
x = bytearray(5) #bytearray
x = memoryview(bytes(5)) #memoryview
###Output
_____no_output_____
###Markdown
**Setting the Specific Data Type**You can run this individually!
###Code
x = str("Hello World") #str
x = int(20) #int
x = float(20.5) #float
x = complex(1j) #complex
x = list(("apple", "banana", "cherry")) #list
x = tuple(("apple", "banana", "cherry")) #tuple
x = range(6) #range
x = dict(name="John", age=36) #dict
x = set(("apple", "banana", "cherry")) #set
x = frozenset(("apple", "banana", "cherry")) #frozenset
x = bool(5) #bool
x = bytes(5) #bytes
x = bytearray(5) #bytearray
x = memoryview(bytes(5)) #memoryview
###Output
_____no_output_____
###Markdown
**Python Data Structures****Lists, sets, tuples, and dictionary** are the basic data structures in the Python programming language.1. **Lists:** `List_A = [item 1, item 2, item 3….., item n]` A list is defined as an ordered collection of items.**Characteristics:*** **Lists can be nested**: Can contain any type of object. It can include another list or a sublist, no limit.* **Lists are mutable**: A user can search, add, shift, move, and delete elements from a list at their own will.---2. **Tuples**: A tuple is a built-in data structure in Python that is an ordered collection of objects. Unlike lists, tuples come with limited functionality. whereas tuples are **immutable**.`tuple_A = (item 1, item 2, item 3,…, item n)`. The use of parentheses in creating tuples is optional, but they are recommended to distinguish them.**Types:*** Single Item Tuple: E.g some_tuple = (item 1, ). use a comma after the item to enable python o differentiate between the tuple and the parentheses surrounding the object in the equation.* Empty Tuple:`Empty_tuple= ( )`**Advantages of Tuple:**Tuples are immutable, so they can be used to prevent accidental addition, modification, or removal of data unlike Lists.---3. **Sets**: A unique collection of unique elements that do not follow a specific order. Sets are used when the existence of an object in a collection of objects is more important than the number of times it appears or the order of the objects. Unlike tuples, sets are mutable – they can be modified, added, replaced, or removed. A sample set can be represented as follows:**Characteristics:**.* Sets are used to store multiple items in a single variable* **Unordered**: Items in a set do not have a defined order* **Unchanged**: Items cannot be changed after the set has been created.* **Duplicates Not Allowed**`set_a = {“item 1”, “item 2”, “item 3”,….., “item n”}`**Create a Set**:`thisset = {"apple", "banana", "cherry"}``print(thisset)`no duplicates`thisset = {"apple", "banana", "cherry", "apple"}``print(thisset)`Contain different data types. Example (String, int and boolean data types)`set1 = {"apple", "banana", "cherry"}``set2 = {1, 5, 7, 9, 3}``set3 = {True, False, False}`Combination`set1 = {"abc", 34, True, 40, "male"}`---4. **Dictionaries**:Dictionaries are used to store data values in **key:value pairs**.`thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964}``print(thisdict)`**Characteristics**:Dictionary items:* Items are ordered, * Changeable, * Does not allow duplicates.Dictionary items can be referred to by using the key name.*Check the length of dictionary*: `print(len(thisdict))`**Dict also stores different data types**:`thisdict = { "brand": "Ford", "electric": False, "year": 1964, "colors": ["red", "white", "blue"]}`---image source -see reference---**For more reading**- [reference](https://corporatefinanceinstitute.com/resources/knowledge/other/python-data-structures/)[Others](https://www.w3schools.com/python/python_sets.asp) **Data Extraction Process**The first thing critical to starting any project is to understand where your dataset lies, what platform is it stored on.---**Data Format Types:**1. CSV2. Json3. Excel 4. XML5. Txt etc---**Examples of dataset locations:*** **Kaggle datasets**; either competitions or datasets* **Google Drive*** **Local PC drive*** **API*** **Cloud databases;** PostgreSQL, MongoDB, AWS S3, Azure, etc---**How to extract/call your dataset into python:**I'll write some script for a few, the others can be researched. **Data Extraction from Kaggle**
###Code
#Data Extraction from Kaggle dataset to colab
#1. upload kaggle json API Token to google drive (in kaggle account)...
#to avoid uploading the JSON file every time the notebook is reloaded or restarted.
#2. mount drive to notebook
#3. install kaggle libray in colab & make a directory
#4. copy the “kaggle.json” file from the mounted google drive to the current instance storage
#5. download dataset- ! kaggle datasets download <name-of-dataset> or
#6 ! kaggle competitions download name of dataset (for competitions data)
## Example
#1. upload the kaggle json API
#step 2 run these
# ! pip install kaggle #install kaggle library
#! mkdir ~/.kaggle #make a directory for kaggle
#step 3
#cp /content/drive/MyDrive/kaggle.json ~/.kaggle/kaggle.json
#step 4
# !kaggle datasets download -d url_in_kaggle_after_main-domain-name or
#!kaggle datasets competitions -c url_in_kaggle_after_main-domain-name
#step 5
# ! unzip zip_file
###Output
_____no_output_____
###Markdown
**Data Extraction from Git**
###Code
#url = 'copied_raw_GH_link'
#df1 = pd.read_csv(url)
# Dataset is now stored in a Pandas Dataframe
###Output
_____no_output_____
###Markdown
**Data Extraction from Drives/Web**Good resources:[reference 1](https://towardsdatascience.com/3-ways-to-load-csv-files-into-colab-7c14fcbdcb92)[reference 2](https://towardsdatascience.com/7-ways-to-load-external-data-into-google-colab-7ba73e7d5fc7)
###Code
# Data Extraction from Google drive /local drives /web
# Method 1 -using google colab files module
#from google.colab import files
#uploaded = files.upload()
# then run this
#import io
#df2 = pd.read_csv(io.BytesIO(uploaded['Filename.csv']))
# Dataset is now stored in a Pandas Dataframe
# Method 2 -using linux ! wget from web
# !wget https://example.com/training-with-tolulade_filtered.zip (url)
# or use this to download with get and change the file name after download
# !wget https://example.com/training-with-tolulade_filtered.zip \
# -O new_file_downloaded_name_.zip
# method 3 from google drive
# mount google drive to colab
# df = pd.read_csv('file_path')
###Output
_____no_output_____
###Markdown
For this **project**, we have uploaded our raw dataset into our Google Drive folder. Next, mount your **Google Drive into Colab** (a minimal sketch follows below).
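A minimal sketch of the mount step (the mount point `/content/drive` is Colab's default; adjust the file path in the next cell to your own folder):
```python
# Mount Google Drive in Colab so the CSV can be read with pandas below.
from google.colab import drive
drive.mount('/content/drive')
```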
###Code
import pandas as pd
df = pd.read_csv('/content/drive/MyDrive/Project_Datasets/ReisparAcademy_ds3_STORES_US.csv')
df
###Output
_____no_output_____
###Markdown
Import libraries and dependencies
###Code
import numpy as np # maths
import pandas as pd # data processing,
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
# Inline backend
%matplotlib inline
mpl.style.use(['ggplot'])
import warnings
warnings.filterwarnings('ignore')
df.info() # to get info from our dataset
df.describe() #to describe the features
print(f'Number of rows: {df.shape[0]}; Number of columns: {df.shape[1]}; Number of missing values: {sum(df.isna().sum())}')
# df.isna().sum()
df.TURNOVER.value_counts()
##counting the number of occurence for Turnover column
df.SIZE.value_counts()
df.MARGIN.value_counts()
df.columns
###Output
_____no_output_____
###Markdown
EDA - Intro to Exploratory Data Analysis
###Code
sns.countplot(df['TURNOVER'])
df.set_index('STORE', inplace=True)
df.head()
# df.reset_index()
###Output
_____no_output_____
###Markdown
**Plots with Matplotlib**
###Code
# Top 10 stores that contributed the most to Sales/Revenue/turnover
df10 = df.copy(deep=True)
df.sort_values(['MARGIN'], ascending=False, axis=0, inplace=True)
# get the top 10 entries
dften = df10.head(10)
# transpose the dataframe
#dften = dften[years].transpose()
#dften.head()
df10 = df.copy(deep=True)
df10
#When deep=True (default), a new object will be created with a copy of the calling object’s data and indices.
#Modifications to the data or indices of the copy will not be reflected in the original object.
#https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.copy.html
df.sort_values(['MARGIN'], ascending=False, axis=0, inplace=True)
dften = df10.head(10)
dften
# Compare the trends of top 10 locations/stores that contributed the most to business.
dften.plot(kind='line', figsize=(14, 8))
plt.title('Size')
plt.xlabel('Stores Location')
plt.show()
#https://pandas.pydata.org/pandas-docs/version/0.15.0/visualization.html
###Output
_____no_output_____
###Markdown
Box PlotTo visualise the distribution of values within each column
###Code
df.plot(kind='box')
###Output
_____no_output_____
###Markdown
Area Plot - stacked
###Code
df.plot(kind='area')
###Output
_____no_output_____
###Markdown
Area Unstacked
###Code
df.plot(kind='area', stacked=False);
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
df.plot(kind='scatter', x='TURNOVER', y='MARGIN');
###Output
_____no_output_____
###Markdown
Scatter PlotTo plot multiple column groups in a single axes, repeat the plot method specifying the target ax. It is recommended to specify the color and label keywords to distinguish each group.
###Code
ax = df.plot(kind='scatter', x='TURNOVER', y='MARGIN',
color='DarkBlue', label='Group 1');
df.plot(kind='scatter', x='SIZE', y='STAFF',
color='DarkGreen', label='Group 2', ax=ax);
df.plot(kind='scatter', x='TURNOVER', y='MARGIN', c='STAFF', s=50);
dften.plot(kind='pie', subplots=True, figsize=(16, 8))
###Output
_____no_output_____ |
Section08/.ipynb_checkpoints/03_dag-checkpoint.ipynb | ###Markdown
Resolving dependencies in a directed acyclic graph with a topological sort
###Code
import io
import json
import requests
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
%matplotlib inline
url = ('https://github.com/PacktPublishing/Statistical-Methods---Applied-Mathematics-in-Data-Science/blob/master/Data/react.graphml?raw=true')
f = io.BytesIO(requests.get(url).content)
graph = nx.read_graphml(f)
graph
len(graph.nodes), len(graph.edges)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
nx.draw_networkx(graph, ax=ax, font_size=10)
ax.set_axis_off()
nx.is_directed_acyclic_graph(graph)
ts = list(nx.topological_sort(graph))
ts
# Each node's color is the index of the node in the
# topological sort.
colors = [ts.index(node) for node in graph.nodes]
nx.draw_shell(graph,
node_color=colors,
cmap=plt.cm.Blues,
font_size=8,
width=.5
)
from lxml.html import fromstring
import cssselect # Need to do: pip install cssselect
from requests.packages import urllib3
urllib3.disable_warnings()
fetched_packages = set()
def import_package_dependencies(graph, pkg_name,
max_depth=3, depth=0):
if pkg_name in fetched_packages:
return
if depth > max_depth:
return
fetched_packages.add(pkg_name)
url = f'https://www.npmjs.com/package/{pkg_name}'
response = requests.get(url, verify=False)
doc = fromstring(response.content)
graph.add_node(pkg_name)
for h3 in doc.cssselect('h3'):
content = h3.text_content()
if content.startswith('Dependencies'):
for dep in h3.getnext().cssselect('a'):
dep_name = dep.text_content()
print('-' * depth * 2, dep_name)
graph.add_node(dep_name)
graph.add_edge(pkg_name, dep_name)
import_package_dependencies(
graph,
dep_name,
depth=depth + 1
)
graph = nx.DiGraph()
import_package_dependencies(graph, 'react')
nx.write_graphml(graph, 'react.graphml')
###Output
_____no_output_____ |
python/python_magic_method.ipynb | ###Markdown
Magic method `__slots__` References:* [Using __slots__ - Liao Xuefeng's official site](https://www.liaoxuefeng.com/wiki/001374738125095c955c1e6d8bb493182103fac9270762a000/0013868200605560b1bd3c660bf494282ede59fee17e781000)* [python __slots__ explained (part 1) - CSDN blog](https://blog.csdn.net/sxingming/article/details/52892640)* [Python __slots__ explained - rainfd - cnblogs](https://www.cnblogs.com/rainfd/p/slots.html) Normally, after we define a class and create an instance of it, we can bind any attribute or method to that instance; this is the flexibility of dynamic languages. Define a class:
###Code
class Person:
pass
###Output
_____no_output_____
###Markdown
Bind an attribute:
###Code
p = Person()
p.name = 'Tom'
print(p.name)
###Output
Tom
###Markdown
But it has no effect on another instance:
###Code
q = Person()
try:
print(q.name)
except AttributeError as e:
print(e)
###Output
'Person' object has no attribute 'name'
###Markdown
But if we want to restrict the attributes of a class and not allow arbitrary attributes to be added, we can use the `__slots__` class variable.
###Code
class Person2:
__slots__ = ('name', 'age')
p2 = Person2()
p2.name = 'Alan'
p2.age = 32
print(p2.name, p2.age, sep=' ')
###Output
Alan 32
###Markdown
Since weight is not included in `__slots__`, the weight attribute cannot be bound; attempting to bind weight raises an AttributeError.
###Code
try:
p2.weight = 120
except AttributeError as e:
print(e)
###Output
'Person2' object has no attribute 'weight'
###Markdown
Note that when using __slots__, the attributes defined by __slots__ only apply to the current class; they have no effect on subclasses that inherit from it:
###Code
class Student(Person2):
pass
s = Student()
s.score = 100
print(s.score)
###Output
100
###Markdown
Another effect of `__slots__` is that it prevents a `__dict__` from being allocated when the class is instantiated, which saves memory.
###Code
print(p.__dict__)
try:
print(p2.__dict__)
except AttributeError as e:
print(e)
%load_ext memory_profiler
###Output
_____no_output_____
###Markdown
Without `__slots__` defined in the class, allocating 100,000 instances uses about 14 MB of memory.
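The `slots_test` module itself is not shown in this notebook; based on the memory_profiler output below, it presumably looks roughly like the following sketch (class name, attribute, and constant are assumptions):
```python
# slots_test.py (assumed contents, reconstructed for illustration only)
from memory_profiler import profile

class A:
    def __init__(self, x):
        self.x = x  # no __slots__, so every instance carries a __dict__

@profile
def test():
    f = [A(523825) for i in range(100000)]
```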
###Code
from slots_test import test
%mprun -f test test()
###Output
Filename: F:\ABao\work\github\my.summary\python\slots_test.py
Line # Mem usage Increment Line Contents
================================================
9 73.3 MiB 73.3 MiB @profile
10 def test():
11 87.3 MiB 14.0 MiB f = [A(523825) for i in range(100000)]
###Markdown
With `__slots__` defined in the class, allocating 100,000 instances uses about 2 MB of memory.
###Code
from slots_test2 import test2
%mprun -f test2 test2()
###Output
Filename: F:\ABao\work\github\my.summary\python\slots_test2.py
Line # Mem usage Increment Line Contents
================================================
11 73.6 MiB 73.6 MiB @profile
12 def test2():
13 75.8 MiB 2.2 MiB f = [A(523825) for i in range(100000)]
|
docs/memo/notebooks/lectures/ARCH_GARCH_and_GMM/.ipynb_checkpoints/notebook-checkpoint.ipynb | ###Markdown
Generalized Method of Moments with ARCH and GARCH ModelsBy Delaney Granizo-Mackenzie and Andrei Kirilenko developed as part of the Masters of Finance curriculum at MIT Sloan.Part of the Quantopian Lecture Series:* [www.quantopian.com/lectures](https://www.quantopian.com/lectures)* [github.com/quantopian/research_public](https://github.com/quantopian/research_public)Notebook released under the Creative Commons Attribution 4.0 License.AutoRegressive Conditionally Heteroskedastic (ARCH) occurs when the volatility of a time series is also autoregressive.
###Code
import cvxopt
from functools import partial
import math
import numpy as np
import scipy
from scipy import stats
import statsmodels.api as sm
from statsmodels.stats.stattools import jarque_bera
import matplotlib.pyplot as plt
###Output
C:\Users\liude\Anaconda3\envs\bt36\lib\site-packages\statsmodels\compat\pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead.
from pandas.core import datetools
###Markdown
Simulating a GARCH(1, 1) CaseWe'll start by using Monte Carlo sampling to simulate a GARCH(1, 1) process. Our dynamics will be$$\sigma_1 = \sqrt{\frac{a_0}{1-a_1-b_1}} \\\sigma_t^2 = a_0 + a_1 x_{t-1}^2+b_1 \sigma_{t-1}^2 \\x_t = \sigma_t \epsilon_t \\\epsilon \sim \mathcal{N}(0, 1)$$Our parameters will be $a_0 = 1$, $a_1=0.1$, and $b_1=0.8$. We will drop the first 10% (burn-in) of our simulated values.
###Code
# Define parameters
a0 = 1.0
a1 = 0.1
b1 = 0.8
sigma1 = math.sqrt(a0 / (1 - a1 - b1))
def simulate_GARCH(T, a0, a1, b1, sigma1):
# Initialize our values
X = np.ndarray(T)
sigma = np.ndarray(T)
sigma[0] = sigma1
for t in range(1, T):
# Draw the next x_t
X[t - 1] = sigma[t - 1] * np.random.normal(0, 1)
# Draw the next sigma_t
sigma[t] = math.sqrt(a0 + b1 * sigma[t - 1]**2 + a1 * X[t - 1]**2)
X[T - 1] = sigma[T - 1] * np.random.normal(0, 1)
return X, sigma
###Output
_____no_output_____
###Markdown
Now we'll compare the tails of the GARCH(1, 1) process with normally distributed values. We expect to see fatter tails, as the GARCH(1, 1) process will experience extreme values more often.
###Code
X, _ = simulate_GARCH(10000, a0, a1, b1, sigma1)
X = X[1000:] # Drop burn in
X = X / np.std(X) # Normalize X
def compare_tails_to_normal(X):
# Define matrix to store comparisons
A = np.zeros((2,4))
for k in range(4):
A[0, k] = len(X[X > (k + 1)]) / float(len(X)) # Estimate tails of X
A[1, k] = 1 - stats.norm.cdf(k + 1) # Compare to Gaussian distribution
return A
compare_tails_to_normal(X)
###Output
_____no_output_____
###Markdown
Sure enough, the tails of the GARCH(1, 1) process are fatter. We can also look at this graphically, although it's a little tricky to see.
###Code
plt.hist(X, bins=50)
plt.xlabel('sigma')
plt.ylabel('observations')
plt.show()
# Sample values from a normal distribution
X2 = np.random.normal(0, 1, 9000)
both = np.matrix([X, X2])
# Plot both the GARCH and normal values
plt.plot(both.T, alpha=.7);
plt.axhline(X2.std(), color='yellow', linestyle='--')
plt.axhline(-X2.std(), color='yellow', linestyle='--')
plt.axhline(3*X2.std(), color='red', linestyle='--')
plt.axhline(-3*X2.std(), color='red', linestyle='--')
plt.xlabel('time')
plt.ylabel('sigma')
plt.show()
###Output
_____no_output_____
###Markdown
What we're looking at here is the GARCH process in blue and the normal process in green. The 1 and 3 std bars are drawn on the plot. We can see that the blue GARCH process tends to cross the 3 std bar much more often than the green normal one. Testing for ARCH BehaviorThe first step is to test for ARCH conditions. To do this we run a regression on $x_t$ fitting the following model.$$x_t^2 = a_0 + a_1 x_{t-1}^2 + \dots + a_p x_{t-p}^2$$We use OLS to estimate $\hat\theta = (\hat a_0, \hat a_1, \dots, \hat a_p)$ and the covariance matrix $\hat\Omega$. We can then compute the test statistic$$F = \hat\theta \hat\Omega^{-1} \hat\theta'$$We will reject if $F$ is greater than the 95% confidence bars in the $\chi^2(p)$ distribution.To test, we'll set $p=20$ and see what we get.
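As a small complement to the cell below, the same decision rule can be written with an explicit 95% critical value instead of a p-value (a sketch; the numeric `F` here is the statistic printed in the output further down):
```python
import scipy.stats

crit = scipy.stats.chi2(20).ppf(0.95)  # 95% quantile of the chi-square distribution with p=20 dof
F = 415.04                             # F statistic computed in the cell below
print('critical value:', crit, '->', 'reject' if F > crit else 'fail to reject')
```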
###Code
X, _ = simulate_GARCH(1100, a0, a1, b1, sigma1)
X = X[100:] # Drop burn in
p = 20
# Drop the first 20 so we have a lag of p's
Y2 = (X**2)[p:]
X2 = np.ndarray((980, p))
for i in range(p, 1000):
X2[i - p, :] = np.asarray((X**2)[i-p:i])[::-1]
model = sm.OLS(Y2, X2)
model = model.fit()
theta = np.matrix(model.params)
omega = np.matrix(model.cov_HC0)
F = np.asscalar(theta * np.linalg.inv(omega) * theta.T)
print(np.asarray(theta.T).shape)
plt.plot(range(20), np.asarray(theta.T))
plt.xlabel('Lag Amount')
plt.ylabel('Estimated Coefficient for Lagged Datapoint')
print('F = ' + str(F))
chi2dist = scipy.stats.chi2(p)
pvalue = 1-chi2dist.cdf(F)
print('p-value = ' + str(pvalue))
# Finally let's look at the significance of each a_p as measured by the standard deviations away from 0
print(theta/np.diag(omega))
###Output
(20, 1)
F = 415.04240485367853
p-value = 0.0
[[ 53.55053094 54.53029866 68.12315425 78.40705562 5.549719
-8.46737952 49.25797149 19.30322987 52.70085066 43.90758715
11.32539114 -50.41055982 30.88026553 -16.01094646 69.90106052
69.52118162 38.73125475 -25.56441905 26.05043609 47.17162092]]
###Markdown
Fitting GARCH(1, 1) with MLEOnce we've decided that the data might have an underlying GARCH(1, 1) model, we would like to fit GARCH(1, 1) to the data by estimating parameters.To do this we need the log-likelihood function$$\mathcal{L}(\theta) = \sum_{t=1}^T - \ln \sqrt{2\pi} - \frac{x_t^2}{2\sigma_t^2} - \frac{1}{2}\ln(\sigma_t^2)$$To evaluate this function we need $x_t$ and $\sigma_t$ for $1 \leq t \leq T$. We have $x_t$, but we need to compute $\sigma_t$. To do this we need to make a guess for $\sigma_1$. Our guess will be $\sigma_1^2 = \hat E[x_t^2]$. Once we have our initial guess we compute the rest of the $\sigma$'s using the equation$$\sigma_t^2 = a_0 + a_1 x_{t-1}^2 + b_1\sigma_{t-1}^2$$
###Code
X, _ = simulate_GARCH(10000, a0, a1, b1, sigma1)
X = X[1000:] # Drop burn in
# Here's our function to compute the sigmas given the initial guess
def compute_squared_sigmas(X, initial_sigma, theta):
a0 = theta[0]
a1 = theta[1]
b1 = theta[2]
T = len(X)
sigma2 = np.ndarray(T)
sigma2[0] = initial_sigma ** 2
for t in range(1, T):
# Here's where we apply the equation
sigma2[t] = a0 + a1 * X[t-1]**2 + b1 * sigma2[t-1]
return sigma2
###Output
_____no_output_____
###Markdown
Let's look at the sigmas we just generated.
###Code
plt.plot(range(len(X)), compute_squared_sigmas(X, np.sqrt(np.mean(X**2)), (1, 0.5, 0.5)))
plt.xlabel('Time')
plt.ylabel('Sigma')
plt.show()
###Output
_____no_output_____
###Markdown
Now that we can compute the $\sigma_t$'s, we'll define the actual log likelihood function. This function will take as input our observations $x$ and $\theta$ and return $-\mathcal{L}(\theta)$. It is important to note that we return the negative log likelihood, as this way our numerical optimizer can minimize the function while maximizing the log likelihood.Note that we are constantly re-computing the $\sigma_t$'s in this function.
###Code
def negative_log_likelihood(X, theta):
T = len(X)
# Estimate initial sigma squared
initial_sigma = np.sqrt(np.mean(X ** 2))
# Generate the squared sigma values
sigma2 = compute_squared_sigmas(X, initial_sigma, theta)
# Now actually compute
return -sum(
[-np.log(np.sqrt(2.0 * np.pi)) -
(X[t] ** 2) / (2.0 * sigma2[t]) -
0.5 * np.log(sigma2[t]) for
t in range(T)]
)
###Output
_____no_output_____
###Markdown
Now we perform numerical optimization to find our estimate for$$\hat\theta = \arg \max_{(a_0, a_1, b_1)}\mathcal{L}(\theta) = \arg \min_{(a_0, a_1, b_1)}-\mathcal{L}(\theta)$$We have some constraints on this$$a_1 \geq 0, b_1 \geq 0, a_1+b_1 < 1$$
###Code
# Make our objective function by plugging X into our log likelihood function
objective = partial(negative_log_likelihood, X)
# Define the constraints for our minimizer
def constraint1(theta):
return np.array([1 - (theta[1] + theta[2])])
def constraint2(theta):
return np.array([theta[1]])
def constraint3(theta):
return np.array([theta[2]])
cons = ({'type': 'ineq', 'fun': constraint1},
{'type': 'ineq', 'fun': constraint2},
{'type': 'ineq', 'fun': constraint3})
# Actually do the minimization
result = scipy.optimize.minimize(objective, (1, 0.5, 0.5),
method='SLSQP',
constraints = cons)
theta_mle = result.x
print('theta MLE: ' + str(theta_mle))
###Output
C:\Users\liude\Anaconda3\envs\bt36\lib\site-packages\ipykernel_launcher.py:16: RuntimeWarning: invalid value encountered in log
app.launch_new_instance()
###Markdown
Now we would like a way to check our estimate. We'll look at two things:1. How fat the tails of the residuals are.2. How normal the residuals are under the Jarque-Bera normality test.We'll do both in our `check_theta_estimate` function.
###Code
def check_theta_estimate(X, theta_estimate):
initial_sigma = np.sqrt(np.mean(X ** 2))
sigma = np.sqrt(compute_squared_sigmas(X, initial_sigma, theta_estimate))
epsilon = X / sigma
print('Tails table')
print(compare_tails_to_normal(epsilon / np.std(epsilon)))
print('')
_, pvalue, _, _ = jarque_bera(epsilon)
print('Jarque-Bera probability normal: ' + str(pvalue))
check_theta_estimate(X, theta_mle)
###Output
Tails table
[[ 1.56333333e-01 2.27777778e-02 8.88888889e-04 0.00000000e+00]
[ 1.58655254e-01 2.27501319e-02 1.34989803e-03 3.16712418e-05]]
Jarque-Bera probability normal: 0.546690037655
###Markdown
GMM for Estimating GARCH(1, 1) ParametersWe've just computed an estimate using MLE, but we can also use Generalized Method of Moments (GMM) to estimate the GARCH(1, 1) parameters.To do this we need to define our moments. We'll use 4.1. The residual $\hat\epsilon_t = x_t / \hat\sigma_t$2. The variance of the residual $\hat\epsilon_t^2$3. The skew moment $\mu_3/\hat\sigma_t^3 = (\hat\epsilon_t - E[\hat\epsilon_t])^3 / \hat\sigma_t^3$4. The kurtosis moment $\mu_4/\hat\sigma_t^4 = (\hat\epsilon_t - E[\hat\epsilon_t])^4 / \hat\sigma_t^4$
###Code
# The n-th standardized moment
# skewness is 3, kurtosis is 4
def standardized_moment(x, mu, sigma, n):
return ((x - mu) ** n) / (sigma ** n)
###Output
_____no_output_____
###Markdown
GMM now has three steps.Start with $W$ as the identity matrix.1. Estimate $\hat\theta_1$ by using numerical optimization to minimize$$\min_{\theta \in \Theta} \left(\frac{1}{T} \sum_{t=1}^T g(x_t, \hat\theta)\right)' W \left(\frac{1}{T}\sum_{t=1}^T g(x_t, \hat\theta)\right)$$2. Recompute $W$ based on the covariances of the estimated $\theta$. (Focus more on parameters with explanatory power)$$\hat W_{i+1} = \left(\frac{1}{T}\sum_{t=1}^T g(x_t, \hat\theta_i)g(x_t, \hat\theta_i)'\right)^{-1}$$3. Repeat until $|\hat\theta_{i+1} - \hat\theta_i| < \epsilon$ or we reach an iteration threshold.Initialize $W$ and $T$ and define the objective function we need to minimize.
###Code
def gmm_objective(X, W, theta):
# Compute the residuals for X and theta
initial_sigma = np.sqrt(np.mean(X ** 2))
sigma = np.sqrt(compute_squared_sigmas(X, initial_sigma, theta))
e = X / sigma
# Compute the mean moments
m1 = np.mean(e)
m2 = np.mean(e ** 2) - 1
m3 = np.mean(standardized_moment(e, np.mean(e), np.std(e), 3))
m4 = np.mean(standardized_moment(e, np.mean(e), np.std(e), 4) - 3)
G = np.matrix([m1, m2, m3, m4]).T
return np.asscalar(G.T * W * G)
def gmm_variance(X, theta):
# Compute the residuals for X and theta
initial_sigma = np.sqrt(np.mean(X ** 2))
sigma = np.sqrt(compute_squared_sigmas(X, initial_sigma, theta))
e = X / sigma
# Compute the squared moments
m1 = e ** 2
m2 = (e ** 2 - 1) ** 2
m3 = standardized_moment(e, np.mean(e), np.std(e), 3) ** 2
m4 = (standardized_moment(e, np.mean(e), np.std(e), 4) - 3) ** 2
# Compute the covariance matrix g * g'
T = len(X)
s = np.ndarray((4, 1))
for t in range(T):
G = np.matrix([m1[t], m2[t], m3[t], m4[t]]).T
s = s + G * G.T
return s / T
###Output
_____no_output_____
###Markdown
Now we're ready to do the iterated minimization step.
###Code
# Initialize GMM parameters
W = np.identity(4)
gmm_iterations = 10
# First guess
theta_gmm_estimate = theta_mle
# Perform iterated GMM
for i in range(gmm_iterations):
# Estimate new theta
objective = partial(gmm_objective, X, W)
result = scipy.optimize.minimize(objective, theta_gmm_estimate, constraints=cons)
theta_gmm_estimate = result.x
print('Iteration ' + str(i) + ' theta: ' + str(theta_gmm_estimate))
# Recompute W
W = np.linalg.inv(gmm_variance(X, theta_gmm_estimate))
check_theta_estimate(X, theta_gmm_estimate)
###Output
Iteration 0 theta: [ 0.86764093 0.1075364 0.80997948]
Iteration 1 theta: [ 0.86779701 0.10792822 0.81055575]
Iteration 2 theta: [ 0.86779468 0.1079089 0.81053272]
Iteration 3 theta: [ 0.86779468 0.1079089 0.81053272]
Iteration 4 theta: [ 0.86779468 0.1079089 0.81053272]
Iteration 5 theta: [ 0.86779468 0.1079089 0.81053272]
Iteration 6 theta: [ 0.86779468 0.1079089 0.81053272]
Iteration 7 theta: [ 0.86779468 0.1079089 0.81053272]
Iteration 8 theta: [ 0.86779468 0.1079089 0.81053272]
Iteration 9 theta: [ 0.86779468 0.1079089 0.81053272]
Tails table
[[ 1.56333333e-01 2.27777778e-02 8.88888889e-04 0.00000000e+00]
[ 1.58655254e-01 2.27501319e-02 1.34989803e-03 3.16712418e-05]]
Jarque-Bera probability normal: 0.546350208258
###Markdown
Predicting the Future: How to actually use what we've doneNow that we've fitted a model to our observations, we'd like to be able to predict what the future volatility will look like. To do this, we can just simulate more values using our original GARCH dynamics and the estimated parameters.The first thing we'll do is compute an initial $\sigma_t$. We'll compute our squared sigmas and take the last one.
###Code
sigma_hats = np.sqrt(compute_squared_sigmas(X, np.sqrt(np.mean(X**2)), theta_mle))
initial_sigma = sigma_hats[-1]
initial_sigma
###Output
_____no_output_____
###Markdown
Now we'll just sample values walking forward.
###Code
a0_estimate = theta_gmm_estimate[0]
a1_estimate = theta_gmm_estimate[1]
b1_estimate = theta_gmm_estimate[2]
X_forecast, sigma_forecast = simulate_GARCH(100, a0_estimate, a1_estimate, b1_estimate, initial_sigma)
plt.plot(range(-100, 0), X[-100:], 'b-')
plt.plot(range(-100, 0), sigma_hats[-100:], 'r-')
plt.plot(range(0, 100), X_forecast, 'b--')
plt.plot(range(0, 100), sigma_forecast, 'r--')
plt.xlabel('Time')
plt.legend(['X', 'sigma'])
plt.show()
###Output
_____no_output_____
###Markdown
One should note that because we are moving forward using a random walk, this analysis is supposed to give us a sense of the magnitude of sigma and therefore the risk we could face. It is not supposed to accurately model future values of X. In practice you would probably want to use Monte Carlo sampling to generate thousands of future scenarios, and then look at the potential range of outputs. We'll try that now. Keep in mind that this is a fairly simplistic way of doing this analysis, and that better techniques, such as Bayesian cones, exist.
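Here is a sketch of the "thousands of scenarios" idea mentioned above, summarizing many simulated paths with percentile bands rather than the two most extreme paths plotted below (it assumes the `a0_estimate`, `a1_estimate`, `b1_estimate` and `initial_sigma` values from the previous cells):
```python
# Monte Carlo cone: simulate many forecasts and keep the 5th/95th percentiles per time step
n_scenarios = 1000
paths = np.array([
    simulate_GARCH(100, a0_estimate, a1_estimate, b1_estimate, initial_sigma)[0]
    for _ in range(n_scenarios)
])
lo, hi = np.percentile(paths, [5, 95], axis=0)
plt.fill_between(range(100), lo, hi, alpha=0.3)
plt.plot(range(100), paths.mean(axis=0))
plt.xlabel('Time')
plt.ylabel('X')
plt.show()
```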
###Code
plt.plot(range(-100, 0), X[-100:], 'b-')
plt.plot(range(-100, 0), sigma_hats[-100:], 'r-')
plt.xlabel('Time')
plt.legend(['X', 'sigma'])
max_X = [-np.inf]
min_X = [np.inf]
for i in range(100):
X_forecast, sigma_forecast = simulate_GARCH(100, a0_estimate, a1_estimate, b1_estimate, initial_sigma)
    if max(X_forecast) > max(max_X):
        max_X = X_forecast
    if min(X_forecast) < min(min_X):
        min_X = X_forecast
plt.plot(range(0, 100), X_forecast, 'b--', alpha=0.05)
plt.plot(range(0, 100), sigma_forecast, 'r--', alpha=0.05)
# Draw the most extreme X values specially
plt.plot(range(0, 100), max_X, 'g--', alpha=1.0)
plt.plot(range(0, 100), min_X, 'g--', alpha=1.0)
plt.show()
###Output
_____no_output_____ |
Theoretic stuff/Simulating from the Null II.ipynb | ###Markdown
Simulating From the Null HypothesisLoad in the data below, and follow the questions to assist with answering the quiz questions below.In the previous lesson on confidence intervals, we saw how we could simulate a sampling distribution for a statistic by bootstrapping our sample data. Alternatively, in hypothesis testing, we could simulate a sampling distribution from the null hypothesis using characteristics that would be true if our data were to have come from the null.**If you get stuck, notice there is a solution notebook available by pushing the orange jupyter icon in the top left!**
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(42)
full_data = pd.read_csv('coffee_dataset.csv')
sample_data = full_data.sample(200)
###Output
_____no_output_____
###Markdown
`1.` If you were interested in whether the average height for coffee drinkers is the same as for non-coffee drinkers, what would the null and alternative be? Place them in the cell below, and use your answer to answer the first quiz question below. H0: avg_height_coff - avg_height_nocoff = 0, H1: avg_height_coff - avg_height_nocoff != 0 `2.` If you were interested in whether the average height for coffee drinkers is less than that of non-coffee drinkers, what would the null and alternative be? Place them in the cell below, and use your answer to answer the second quiz question below. H0: avg_height_coff - avg_height_nocoff >= 0, H1: avg_height_coff - avg_height_nocoff < 0 `3.` For 10,000 iterations: bootstrap the sample data, calculate the mean height for coffee drinkers and non-coffee drinkers, and calculate the difference in means for each sample. You will want to have three arrays at the end of the iterations - one for each mean and one for the difference in means. Use the results of your sampling distribution to answer the third quiz question below.
###Code
coff_means, nocoff_means, diff_means = [], [], []
for _ in range(10000):
bootsample = sample_data.sample(200, replace=True)
mean_coff = bootsample[bootsample['drinks_coffee'] == True]['height'].mean()
mean_nocoff = bootsample[bootsample['drinks_coffee'] == False]['height'].mean()
mean_diff = mean_coff - mean_nocoff
coff_means.append(mean_coff)
nocoff_means.append(mean_nocoff)
diff_means.append(mean_diff)
np.std(nocoff_means)
np.std(coff_means)
np.std(diff_means)
plt.hist(nocoff_means, alpha=0.5)
plt.hist(coff_means, alpha=0.5);
plt.hist(diff_means, alpha=0.5);
###Output
_____no_output_____
###Markdown
The shape of each of the sampling distributions comes out to be normal owing to the Central Limit Theorem. `4.` Now, use your observed sampling distribution for the difference in means and [the docs](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.random.normal.html) to simulate what you would expect the sampling distribution to be if the null hypothesis is true. You can do this by recentering your distribution at zero. Also, calculate the observed sample mean difference in `sample_data`. Use your solutions to answer the last questions in the quiz below.
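The cell below only draws the simulated null distribution; a minimal sketch of the remaining steps (the observed difference and a two-sided p-value), using the `sample_data` and `diff_means` objects defined above:
```python
# observed difference in mean height between coffee drinkers and non-drinkers
obs_diff = (sample_data[sample_data['drinks_coffee'] == True]['height'].mean()
            - sample_data[sample_data['drinks_coffee'] == False]['height'].mean())

# two-sided p-value: how often null draws are at least as extreme as the observation
null_vals = np.random.normal(0, np.std(diff_means), 10000)
p_value = (np.abs(null_vals) >= np.abs(obs_diff)).mean()
print(obs_diff, p_value)
```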
###Code
null_vals = np.random.normal(0, np.std(diff_means), 10000)
plt.hist(null_vals);
###Output
_____no_output_____ |
Lab_environment.ipynb | ###Markdown
Introduction to Google Colab Jupyter notebook basics Code cells
###Code
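# Illustrative example only: a code cell runs ordinary Python
# (this snippet is just an assumption of what could go here).
greeting = "Hello from a Colab code cell"
print(greeting)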
###Output
_____no_output_____
###Markdown
Text cells
###Code
###Output
_____no_output_____
###Markdown
Access to the shell
###Code
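# Illustrative examples only: prefixing a line with "!" runs it in the underlying shell
# (these particular commands are just assumptions of what could go here).
!pwd
!ls -la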
###Output
_____no_output_____
###Markdown
Install Spark
###Code
!apt-get update
!apt-get install openjdk-8-jdk-headless -qq > /dev/null
!wget -q http://archive.apache.org/dist/spark/spark-2.3.1/spark-2.3.1-bin-hadoop2.7.tgz
!tar xf spark-2.3.1-bin-hadoop2.7.tgz
!pip install -q findspark
import os
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["SPARK_HOME"] = "/content/spark-2.3.1-bin-hadoop2.7"
!ls
import findspark
findspark.init()
from pyspark import SparkContext
sc = SparkContext.getOrCreate()
sc
import pyspark
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
spark
###Output
_____no_output_____ |
ICERM_workshop/surfinBH.ipynb | ###Markdown
SurfinBH (Surrogate Final BH) demoDescription: Python package for evaluating numerical relativity surrogate remnant black hole models. Webpage: https://github.com/vijayvarma392/surfinBHNotebook author: Vijay Varma, 2020. (Contact: [email protected], [vijayvarma.com](https://vijayvarma.com/))
###Code
import numpy as np
import scipy
import matplotlib.pyplot as P
%matplotlib inline
import warnings
warnings.filterwarnings("ignore") # This is just for the demo, you may want to remove this
import surfinBH
###Output
_____no_output_____
###Markdown
Installation PyPi`pip install surfinBH` Conda`conda install -c conda-forge surfinbh` Source`git clone https://github.com/vijayvarma392/surfinBHcd surfinBHgit submodule initgit submodule updatepython setup.py install` Available models
###Code
print(surfinBH.fits_collection.keys())
fit_name = 'NRSur7dq4Remnant'
print(surfinBH.fits_collection[fit_name].desc)
print(surfinBH.fits_collection[fit_name].refs)
fit_name = 'NRSur3dq8Remnant'
print(surfinBH.fits_collection[fit_name].desc)
print(surfinBH.fits_collection[fit_name].refs)
###Output
Fits for remnant mass, spin and kick veclocity for nonprecessing BBH systems. This model was called surfinBH3dq8 in the paper.
arxiv:1809.09125
###Markdown
Load your favorite model (this only needs to be done once at the start of a script)
###Code
fit = surfinBH.LoadFits('NRSur7dq4Remnant')
###Output
Loaded NRSur7dq4Remnant fit.
###Markdown
Read the documentation
###Code
help(fit)
###Output
Help on Fit7dq4 in module surfinBH._fit_evaluators.fit_7dq4 object:
class Fit7dq4(surfinBH.surfinBH.SurFinBH)
| Fit7dq4(name, load_nrsur=False)
|
| A class for the NRSur7dq4Remnant model presented in Varma et al.,
| arxiv:1905.09300, hereafter referred to as THE PAPER.
|
| This model predicts the final mass mf, final spin vector
| chif and final kick velocity vector vf, for the remnants of precessing
| binary black hole systems. The fits are done using Gaussian Process
| Regression (GPR) and also provide an error estimate along with the fit
| value.
|
| This model has been trained in the parameter space:
| q <= 4, |chiA| <= 0.8, |chiB| <= 0.8
|
| However, it extrapolates reasonably to:
| q <= 6, |chiA| <= 1, |chiB| <= 1
|
| =========================================================================
| Usage:
|
| import surfinBH
|
| # Load the fit
| fit = surfinBH.LoadFits('NRSur7dq4Remnant')
|
| We provide the following call methods:
| # remnant mass and 1-sigma error estimate
| mf, mf_err = fit.mf(q, chiA, chiB, **kwargs)
|
| # remnant spin and 1-sigma error estimate
| chif, chif_err = fit.chif(q, chiA, chiB, **kwargs)
|
| # remnant recoil kick and 1-sigma error estimate (units of c)
| vf, vf_err = fit.vf(q, chiA, chiB, **kwargs)
|
| # All of these together
| mf, chif, vf, mf_err, chif_err, vf_err
| = fit.all(q, chiA, chiB, **kwargs)
|
| The arguments for each of these call methods are as follows:
| Arguments:
| q: Mass ratio (q = mA/mB >= 1)
|
| chiA: Dimensionless spin vector of the heavier black hole at
| reference epoch.
| chiB: Dimensionless spin vector of the lighter black hole at
| reference epoch.
|
| This follows the same convention as LAL, where the spin
| components are defined as:
| \chi_z = \chi \cdot \hat{L}, where L is the orbital angular
| momentum vector at the epoch.
| \chi_x = \chi \cdot \hat{n}, where n = body2 -> body1 is the
| separation vector at the epoch. body1 is the heavier body.
| \chi_y = \chi \cdot \hat{L \cross n}.
| These spin components are frame-independent as they are defined
| using vector inner products. This is equivalent to specifying
| the spins in the coorbital frame at the reference epoch. See
| THE PAPER for a definition of the coorbital frame.
|
|
| Optional arguments:
|
| omega0: Orbital frequency used to set the reference epoch.
| Default: None.
|
| If omega0 is None, the reference epoch is assumed to be at
| t=-100 M from the peak of the waveform, see THE PAPER for
| definition of the peak.
|
| If 'omega0' is given, the reference epoch is take to be the
| time at which the orbital frequency in the coprecessing frame
| equals omega0. omega0 should be in dimensionless units of
| rad/M, where M is the total mass.
|
| See THE PAPER for how the orbital frequency is
| computed as well as the definition of the coprecessing frame.
|
| allow_extrap:
| If False, raises a warning when q > 4.1 or |chiA|,|chiB| > 0.81,
| and raises an error when q > 6.1 or |chiA|,|chiB| > 1.
| If True, allows extrapolation to any q and |chiA|,|chiB| <= 1.
| Use at your own risk.
| Default: False.
|
| Optional PN evolution arguments:
|
| If the omega0 option is used, the spins need to be evolved from omega0
| until t=-100M, where the fits will be evaluated. For the late inspiral
| part, we use the internal spin evolution of NRSur7dq4 (also described
| in THE PAPER), which is very accurate. However, this surrogate is not
| long enough for small values of omega0 as it only has data starting at
| t=-4300M. Therefore, whenever the input omega0 is smaller than
| omega_switch_IG (defined below), we use PN evolution to go from omega0
| to about t=-4300M, beyond which we use NRSur7dq4 for spin evolution.
|
| PN_approximant:
| Approximant used to do the PN spin evolution. Choose from
| 'SpinTaylorT4', 'SpinTaylorT1' or 'SpinTaylorT5'.
| Default: 'SpinTaylorT4'.
|
| PN_dt:
| Dimensionless time step size in units of M, used for the PN
| evolution. You may need to increase this if omega0 is very low.
| Default: 0.1
|
| PN_spin_order:
| Twice the PN order of spin effects. E.g., use 7 for 3.5PN.
| Default: 7
|
| PN_phase_order:
| Twice the PN order in phase. E.g., use 7 for 3.5PN.
| Default: 7
|
| t_sur_switch:
| The dimensionless time (from the peak) at which we switch from PN
| to the surrogate. Should be something larger than -4300.
| Default: -4000.
|
| omega_switch_IG:
| Initial guess for dimensionless orbital frequency, using which the
| switch will be made from PN to NRSur7dq4. This should be large
| enough to work for generic parts of the surrogate parameter space.
| You may need to increase this if the NRSur7dq4 model raises an
| exception like: "Got omega_ref=0.03 < 0.031=omega_0, too small!"
| Default: 0.03
|
| How t_sur_switch and omega_switch_IG work: The PN data is first
| generated starting at omega0, then the PN spins at omega_switch_IG
| are used to generate the NRSur7dq4 dynamics. NRSur7dq4 integrate
| the dynamics both forwards and backwards, so it will have omega and
| spins as a time series starting from -4300M. This is used to pick
| the omega0_sur and spins at t_sur_switch. Then the surrogate
| is reevaluated using omega0_sur and spins at t_sur_switch, thus
| ensuring that the switch always happens at t_sur_switch, even if
| omega_switch_IG corresponds to a later time.
|
| Inertial frame for returned values:
|
| The returned chif/vf are in the LAL inertial frame defined as follows:
| The +ve z-axis is along the orbital angular momentum at the
| reference epoch. The separation vector from the lighter BH to the
| heavier BH at the reference epoch is along the +ve x-axis. The
| y-axis completes the right-handed triad.
|
| Note that the default reference epoch corresponds to t=-100M, but
| if omega0 is given the reference epoch is taken to be the time at
| which the orbital frequency in the coprecessing frame is equal to
| omega0. This agrees with the LAL convention. See LIGO DCC document
| T1800226 for the LAL frame diagram.
|
| Method resolution order:
| Fit7dq4
| surfinBH.surfinBH.SurFinBH
| builtins.object
|
| Methods defined here:
|
| __init__(self, name, load_nrsur=False)
| name: Name of the fit excluding the surfinBH prefix. Ex: 7dq2.
| soft_param_lims: param limits beyond which to raise a warning.
| hard_param_lims: param limits beyond which to raise an error.
| aligned_spin_only: raise an error if given precessing spins.
| See _fit_evaluators.fit_7dq2.py for an example.
|
| ----------------------------------------------------------------------
| Methods inherited from surfinBH.surfinBH.SurFinBH:
|
| all(self, *args, **kwargs)
| Evaluates fit and 1-sigma error estimate for remnant mass, spin
| and kick velocity.
| Returns:
| mf, chif, vf, mf_err_est, chif_err_est, vf_err_est
|
| chif, vf, chif_err_est and vf_err_est are arrays of size 3.
|
| chif(self, *args, **kwargs)
| Evaluates fit and 1-sigma error estimate for remnant spin.
| Returns:
| chif, chif_err_est
|
| chif and chif_err_est are arrays of size 3.
|
| mf(self, *args, **kwargs)
| Evaluates fit and 1-sigma error estimate for remnant mass.
| Returns:
| mf, mf_err_est
|
| vf(self, *args, **kwargs)
| Evaluates fit and 1-sigma error estimate for remnant kick velocity.
| Returns:
| vf, vf_err_est
|
| vf and vf_err_est are arrays of size 3.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from surfinBH.surfinBH.SurFinBH:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
Evaluate the fits (using spins at t=-100 M)
###Code
# Note: By default, the reference epoch is taken to be
# at t=-100M from the waveform amplitude peak. The
# returned chif and vf are in coorbital frame at t=-100M.
# Mass ratio and component spins at reference epoch
q = 3.2
chiA = [0.5, 0.05, 0.3]
chiB = [-0.5, -0.05, 0.1]
# remnant mass and 1-sigma error estimate
mf, mf_err = fit.mf(q, chiA, chiB)
print("mf:", mf, "mf_err:", mf_err)
# remnant spin and 1-sigma error estimate
chif, chif_err = fit.chif(q, chiA, chiB)
print("chif:", chif, "chif_err:", chif_err)
# remnant recoil kick and 1-sigma error estimate (units of c)
vf, vf_err = fit.vf(q, chiA, chiB)
print("vf:", vf, "vf_err:", vf_err)
# All of these together
mf, chif, vf, mf_err, chif_err, vf_err = fit.all(q, chiA, chiB)
###Output
mf: 0.9656720610451307 mf_err: 0.00010895992605092175
chif: [0.22590287 0.05296894 0.66365497] chif_err: [0.00058071 0.00057176 0.0004123 ]
vf: [0.00143326 0.00012996 0.00275611] vf_err: [2.83343048e-05 2.66765917e-05 7.22398079e-05]
###Markdown
Evaluate the fits using spins at earlier frequencies
###Code
# Note: If omega0 is given, the reference epoch is taken to be the
# time at which the orbital frequency in the coprecessing frame is
# equal to omega0. The returned chif and vf are in the LAL interial
# frame as described in the documentation.
# Mass ratio and component spins at reference epoch
q = 3.2
chiA = [0.5, 0.05, 0.3]
chiB = [-0.5, -0.05, 0.1]
# Dimensionless orbital frequency at reference epoch (in units of rad/M)
omega0 = 7e-3
# remnant mass and 1-sigma error estimate
mf, mf_err = fit.mf(q, chiA, chiB, omega0=omega0)
print("mf:", mf, "mf_err:", mf_err)
# remnant spin and 1-sigma error estimate
chif, chif_err = fit.chif(q, chiA, chiB, omega0=omega0)
print("chif:", chif, "chif_err:", chif_err)
# remnant recoil kick and 1-sigma error estimate
vf, vf_err = fit.vf(q, chiA, chiB, omega0=omega0)
print("vf:", vf, "vf_err:", vf_err)
# All of these together
mf, chif, vf, mf_err, chif_err, vf_err = fit.all(q, chiA, chiB, omega0=omega0)
###Output
setting __package__ to gwsurrogate.new so relative imports work
__name__ = gwsurrogate.new.spline_evaluation
__package__= gwsurrogate.new
setting __package__ to gwsurrogate.new so relative imports work
setting __package__ to gwsurrogate.new so relative imports work
Loaded NRSur7dq4 model
mf: 0.9653928100919663 mf_err: 8.59577095288164e-05
chif: [0.12747008 0.00808293 0.70329037] chif_err: [0.00051558 0.00054031 0.0004079 ]
vf: [-2.52911402e-04 -1.63829495e-05 -1.87216785e-03] vf_err: [2.50307556e-05 2.18990752e-05 4.76019790e-05]
###Markdown
Exercise 1: How much energy was radiated in GW150914?
###Code
M = 65
q = 1.22
chiA = [0, 0, 0.33]
chiB = [0, 0, -0.44]
mf, mf_err = fit.mf(q, chiA, chiB)
print ("Initial mass = %.2f MSun, final mass = %.2f MSun, energy radiated = %.2f MSun"%(M, mf*M, (1-mf)*M))
###Output
Initial mass = 65.00 MSun, final mass = 61.88 MSun, energy radiated = 3.12 MSun
###Markdown
Exercise 2: What is a typical value for remnant spin magnitude?
###Code
# Remnant spin for an equal-mass nonspinning binary
q = 1
chiA = [0,0,0]
chiB = [0,0,0]
chif, chif_err = fit.chif(q, chiA, chiB)
print("chifmag:", np.linalg.norm(chif))
# Remnant spins for random binaries
chifmag_vec = []
for i in range(300):
    q = np.random.uniform(1, 4) # random mass ratio within the model's nominal training range (q <= 4)
chiAmag = np.random.uniform(0, 1)
chiBmag = np.random.uniform(0, 1)
thetaA = np.arccos(np.random.uniform(-1, 1))
thetaB = np.arccos(np.random.uniform(-1, 1))
phiA = np.random.uniform(0, 2*np.pi)
phiB = np.random.uniform(0, 2*np.pi)
chiA = chiAmag*np.array([np.sin(thetaA)*np.cos(phiA), np.sin(thetaA)*np.sin(phiA), np.cos(thetaA)])
chiB = chiBmag*np.array([np.sin(thetaB)*np.cos(phiB), np.sin(thetaB)*np.sin(phiB), np.cos(thetaB)])
chif, chif_err = fit.chif(q, chiA, chiB)
chifmag = np.linalg.norm(chif)
chifmag_vec.append(chifmag)
P.hist(chifmag_vec, bins=20)
P.xlabel('$|\chi_f|$', fontsize=18)
###Output
_____no_output_____
###Markdown
Exercise 3: What is the highest possible kick?
###Code
max_kick = 0
params = None
for i in range(300):
q = 1 # kicks are maximized for equal-mass, maximally spinning systems
chimag = 1
thetaA = np.arccos(np.random.uniform(-1, 1))
thetaB = np.arccos(np.random.uniform(-1, 1))
phiA = np.random.uniform(0, 2*np.pi)
phiB = np.random.uniform(0, 2*np.pi)
chiA = chimag*np.array([np.sin(thetaA)*np.cos(phiA), np.sin(thetaA)*np.sin(phiA), np.cos(thetaA)])
chiB = chimag*np.array([np.sin(thetaB)*np.cos(phiB), np.sin(thetaB)*np.sin(phiB), np.cos(thetaB)])
vf, vf_err = fit.vf(q, chiA, chiB)
vfmag = np.linalg.norm(vf)
if vfmag > max_kick:
max_kick = vfmag
params = [q, chiA, chiB]
print ("Maximum kick found = %.3f c = %.3f km/s"%(max_kick, max_kick*scipy.constants.c/1e3))
print ("For q=%.2f chiA=[%.3f, %.3f, %.3f] chiB=[%.3f, %.3f, %.3f]"%(params[0],
params[1][0], params[1][1], params[1][2], params[2][0], params[2][1], params[2][2]))
###Output
Maximum kick found = 0.013 c = 4017.618 km/s
For q=1.00 chiA=[0.080, 0.362, 0.929] chiB=[0.017, -0.584, 0.811]
|
notebooks/vvl/plot_vvl_test.ipynb | ###Markdown
Plot results from VVL test with SSH = 0These results were generated with the following setup:- Salmon bank spill at -122.86 48.38- currents west above 2m and north below 2m- constant wave period (5), wave height (0.8), wcc (0.001), StokesU = StokesV = 0- currents $PROJECT/rmueller/MIDOSS/forcing/vvl_testing/currents_west_above2_north_below2.hdf5- /scratch/dlatorne/MIDOSS/forcing/vvl_test/e3t.hdf5Both e3t.hdf5 and t.hdf5 were re-created for this run to try and troubleshoot the "land point" error.
###Code
# imports for graphic display
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib.colors as colors
import cmocean as cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
%matplotlib inline
# imports for data manipulation
import numpy as np
import xarray as xr
from salishsea_tools import viz_tools, utilities
import numpy.ma as ma
import datetime
# basic computations
from scipy import stats
###Output
_____no_output_____
###Markdown
Load data - Results on Smelt using SSH from /scratch/dlatorne/MIDOSS/forcing/vvl_test/t.hdf5- Transferred from Graham at /home/rmueller/project/rmueller/MIDOSS/results/testvvl_10oct19b
###Code
#vvl0 = xr.open_dataset('/home/rmueller/Projects/MIDOSS/results/vvl/testvvl_10oct19b/Lagrangian_AKNS_crude_vvl_test_de0ae8ae112b_ssh0.nc')
###Output
_____no_output_____
###Markdown
- Plotted on Shuga using SSH defined as constant = 0 in Hydrodynamic.dat- Results transferred from Graham at /home/rmueller/project/rmueller/MIDOSS/results/testvvl_10oct19_full
###Code
vvl0 = xr.open_dataset('/Users/rmueller/Projects/MIDOSS/results/vvl/Lagrangian_AKNS_crude_vvl_test_de0ae8ae112b_ssh0.nc')
vvl0
###Output
_____no_output_____
###Markdown
Extract surface concentrations and calculate depth-integrated values
###Code
vvl0_conc_surf = vvl0.OilConcentration_3D[:,39,:,:]
vvl0_conc_surf
# loop through time values and sum all bottom levels
time = vvl0.time.values
nx = np.arange(vvl0.grid_x.size)
ny = np.arange(vvl0.grid_y.size)
vvl0_conc_depth = np.zeros((168,896,396), dtype=float)
# loop through time and sum concentrations over depth
for t in range(time.size):
vvl0_conc_instant= vvl0.OilConcentration_3D[t,:,:,:]
vvl0_conc_depth[t,:,:] = vvl0_conc_instant.sum(dim='grid_z')
###Output
_____no_output_____
###Markdown
Sum values over time to show complete spread over time
###Code
# calculate integrated values over time
vvl0_conc_surf_t = vvl0_conc_surf.sum(dim='time')
vvl0_conc_t = vvl0_conc_depth.sum(axis=0)
vvl0_conc_t.shape
###Output
_____no_output_____
###Markdown
Plot up results
###Code
fig = plt.figure(figsize=(8*1.4,10*1.4))
ax1 = fig.add_subplot(111)
# convert xarray into numpy using ".values" in order to gain access to different visualization tools
mappable = ax1.pcolormesh(vvl0_conc_surf_t.values, vmin = 0, vmax = 100, cmap = cm.cm.balance)
fig = plt.figure(figsize=(8*1.4,10*1.4))
ax1 = fig.add_subplot(111)
# convert xarray into numpy using ".values" in order to gain access to different visualization tools
mappable = ax1.pcolormesh(vvl0_conc_surf_t.values, vmin = 0, vmax = 100, cmap = cm.cm.balance)
# add land mask to ax1 and ax2
viz_tools.plot_land_mask(ax1,'/home/rmueller/Projects/MIDOSS/MIDOSS-MOHID-grid/AfterNEMOBathy201702.nc', color = 'burlywood')
fig = plt.figure(figsize=(8*1.4,10*1.4))
ax1 = fig.add_subplot(111)
# convert xarray into numpy using ".values" in order to gain access to different visualization tools
mappable = ax1.pcolormesh(vvl0_conc_t, cmap = cm.cm.balance)
ax1.set_title('3D concentrations added over depth and then time', fontsize=20)
fig = plt.figure(figsize=(8*1.4,10*1.4))
ax1 = fig.add_subplot(111)
mappable = ax1.pcolormesh(vvl0_conc_t, cmap = cm.cm.balance)
# add land mask to ax1 and ax2
viz_tools.plot_land_mask(ax1,'/home/rmueller/Projects/MIDOSS/MIDOSS-MOHID-grid/AfterNEMOBathy201702.nc', color = 'burlywood')
ax1.set_title('3D concentrations added over depth and then time', fontsize=20)
###Output
_____no_output_____ |
pandas/7_sorting.ipynb | ###Markdown
Sort dataframes
###Code
import numpy as np
import pandas as pd
# Restricting number of displaying rows, just for convenience
pd.set_option('max_rows', 8)
###Output
_____no_output_____
###Markdown
Load data
###Code
films = pd.read_csv('data/movie.csv')
###Output
_____no_output_____
###Markdown
Sorting The `sort_values()` method lets us sort the rows of a dataframe by several variables. Imagine you want to sort films by IMDB score, starting from the most popular, and then by budget with the cheapest ones at the top. Just pass lists of these columns and sort orders to the method. `sort_values()` has the following parameters:* `by` - column name or list of column names* `ascending` - boolean or list of booleans giving the order of sorting: False - from highest to lowest, True - from lowest to highest* `inplace` - whether to sort in place* `kind` - sorting algorithm, default quicksort; for multiple columns in `by` the default is mergesort* `na_position` - 'first' or 'last' - where to put NA, default is 'last'
###Code
# Sort rows by IMDB score first and then by budget in each group of rows formed by previous
# sort (e.g. having same score)
(films.sort_values(['imdb_score', 'budget'], ascending=[False, True])
.loc[:, ['movie_title', 'imdb_score', 'budget']])
###Output
_____no_output_____
###Markdown
Selecting edge values There are two quite convenient methods for selecting the rows with the top or bottom values. Both `nlargest()` and `nsmallest()` have these parameters:* `n` - number of rows to select* `columns` - column name or list of column names by which sorting will be performed* `keep` - which row to choose in case of duplicates - first (default) or last
###Code
# Take 100 films with largest budget
films.nlargest(100, 'budget').loc[:, 'movie_title':'imdb_score']
# Take 100 films with lowest budget and arrange them according to IMDB score
films.nsmallest(100, ['budget', 'imdb_score']).loc[:, 'movie_title':'imdb_score']
# Take 100 films with highest budget and after it get 5 films from this subset with lowest IMDB score
films.nlargest(100, 'budget').nsmallest(5, 'imdb_score').loc[:, ['movie_title', 'budget', 'imdb_score']]
###Output
_____no_output_____ |
_ipynb/airbnb_dataExtraction.ipynb | ###Markdown
IntroductionAirbnb has thousands of places available for renting all over the world, with lots of options and price ranges available for travelers. A regular Airbnb listing will have a main picture, a title, and a price rate. Guests can also leave a review after their stay, so others can read it and get a better idea of what to expect from the place.So there is a variety of data that can be collected. Now the question would be: is there a hidden data treasure worth millions somewhere in this data? Probably not, but it's worth exploring for the sake of learning. (.. and for passing my college course)Without further ado, this project has the following:1. Data extraction: Airbnb non-official API. [airbnb01](posts/airbnb01/)2. Image recognition and Sentiment Analysis. [airbnb02](posts/airbnb02/)3. Data exploration and Dashboard design. [airbnb03](posts/airbnb03/) Data ExtractionFirst it is important to mention there is no official release of an API from Airbnb. However, it is still possible to get JSON responses from the URL, and this unofficial API has all that very well documented [here](https://stevesie.com/apps/airbnb-api).So using the "unofficial airbnb API" we can fetch the data and store it in a .csv file. One of the fields has the URL to access the main picture of each listing, which is what will later be used in our image recognition analysis. (Spoiler Alert: Using Azure cognitive services.)
###Code
# unofficial Airbnb API: https://stevesie.com/apps/airbnb-api
import airbnb
import pandas as pd
api = airbnb.Api(randomize=True)
# Pagianation for the API calls
page = 0
# Initialize a dictionary and data frame to store listings details.
place_dtl = {}
columns = ['id','city','neighborhood','name','lat','lng','person_capacity','space_type','picture_url',
'price_rate']
airbnb_df = pd.DataFrame(columns=columns)
# Exploring data to build the schema for the dataframe
# data = api.get_homes('Toronto, ON', items_per_grid=50, offset=500)
# len(data['explore_tabs'][0]['sections'][0]['listings'])
###Output
_____no_output_____
###Markdown
Listing details extractionNothing too fancy, just storing details in the dataframe and later exporting it to **airbnb_listing.csv**.
###Code
while page <= 300:
data = api.get_homes('Toronto, ON', items_per_grid=50, offset=page)
for h in data['explore_tabs'][0]['sections'][0]['listings']:
try:
place_dtl['id'] = h['listing']['id']
place_dtl['city'] = h['listing']['city']
place_dtl['neighborhood'] = h['listing']['neighborhood']
place_dtl['name'] = h['listing']['name']
place_dtl['lat'] = h['listing']['lat']
place_dtl['lng'] = h['listing']['lng']
place_dtl['person_capacity'] = h['listing']['person_capacity']
place_dtl['space_type'] = h['listing']['space_type']
place_dtl['price_rate'] = h['pricing_quote']['rate']['amount']
place_dtl['picture_url'] = h['listing']['picture_url']
airbnb_df.loc[len(airbnb_df)] = place_dtl
except:
continue
print(f"last id from page: {page} : {place_dtl['id']}")
page += 50
airbnb_df.to_csv ('airbnb_listing.csv', index = None, header=True)
###Output
last id from page: 0 : 40204854
last id from page: 50 : 25719877
last id from page: 100 : 17682704
last id from page: 150 : 29945949
last id from page: 200 : 12836500
last id from page: 250 : 34899472
last id from page: 300 : 43948056
###Markdown
Reviews details extractionA similar process but now for the reviews.
###Code
comment_dtl = {}
r_columns = ['listing_id','author','rating','comments']
reviews_df = pd.DataFrame(columns=r_columns)
# I will read the listing
listing = pd.read_csv('airbnb_listing.csv')
for i in listing['id']:
reviews = api.get_reviews(i, limit=10)
for r in reviews['reviews']:
comment_dtl['listing_id'] = i
comment_dtl['author'] = r['author']['smart_name']
comment_dtl['rating'] = r['rating']
comment_dtl['comments'] = r['comments']
reviews_df.loc[len(reviews_df)] = comment_dtl
reviews_df.to_csv ('airbnb_reviews.csv', index=None, header=True)
###Output
_____no_output_____
###Markdown
The datasetsFinally we will check what we got.
###Code
listing = pd.read_csv('airbnb_listing.csv')
reviews = pd.read_csv('airbnb_reviews.csv')
###Output
_____no_output_____
###Markdown
Listing dataset: airbnb_listing.csvThese are the first 3 rows for the listing dataset:
###Code
listing.iloc[:, 0:9].head(3)
###Output
_____no_output_____
###Markdown
Reviews dataset: airbnb_reviews.csv We have collected a maximum of 10 reviews per listing (matching the `limit=10` used above).
###Code
reviews.loc[reviews['listing_id'] == 28103946]
###Output
_____no_output_____ |
Lectures/Lecture_0_paths_betweenness_and_centrality_metrics.ipynb | ###Markdown
Lecture 0: Paths, betweenness and centrality metrics
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
The centrality of the Medici FamilyOne of the main results regarding the Florentine Families network is the fact that the Medici family was extremely central. Let's load our edge list again and make some calculations... Load the data
###Code
ff_am=np.genfromtxt('../Data/florentine_families.dat', skip_header=41, dtype='i8')[:16]
ff_fam=np.genfromtxt('../Data/florentine_families.dat', skip_header=4, skip_footer=53, dtype='U50')
aux=ff_am[ff_fam!='PUCCI'].T
ff_am=aux[ff_fam!='PUCCI']
ff_am.shape
np.sum(ff_am, axis=0)
np.all(ff_am.T==ff_am)
ff_fam=ff_fam[ff_fam!='PUCCI']
l_ff=len(ff_fam)
l_ff
###Output
_____no_output_____
###Markdown
Convert it to adjacency list
###Code
np.where(ff_am[0]==1)[0]
ff_adl=[]
for i_ffam, f in enumerate(ff_am):
ff_adl.append(np.where(f==1)[0])
ff_adl
###Output
_____no_output_____
###Markdown
Breadth-first algorithm for distances
###Code
source=np.random.randint(l_ff)
ff_fam[source]
distance=-1*np.ones(l_ff, dtype='i8')
# negatives values are easy to find and for sure are a wrongly defined distance
d=0
distance[source]=d
print(ff_fam[source])
# get the nearest neighbours
nn=ff_adl[source]
nn
ff_fam[nn]
d+=1
# add 1 to the distance and assign the new distance to the nn
distance[nn]=d
distance
d+=1
for i_n, n in enumerate(nn):
aux=ff_adl[n]
aux=aux[distance[aux]==-1]
# get the nn of the nn for which we do not have already calculated the distance
distance[aux]=d
if i_n==0:
new_nn=aux
else:
new_nn=np.concatenate((new_nn, aux))
distance
np.vstack((ff_fam, distance)).T
nn=new_nn
d+=1
for i_n, n in enumerate(nn):
aux=ff_adl[n]
aux=aux[distance[aux]==-1]
distance[aux]=d
if i_n==0:
new_nn=aux
else:
new_nn=np.concatenate((new_nn, aux))
np.vstack((ff_fam, distance)).T
###Output
_____no_output_____
###Markdown
... and so on... Exercise: build your own function calculating the distances from a given node (input of the function) Suppose we are starting with the node "source".1. assign distance=0 to source (since it is at distance 0 from itself);2. for any vertex $i$ whose distance is $d$, look for its neighbors. For any $j$ in the neighbors: a. if $d[j]$ has not been assigned yet, then $d[j]=d[i]+1$.
###Code
def bf_dist(source, adl):
_adl=adl
ll=len(_adl)
distance=-1*np.ones(ll, dtype='i8')
d=0
distance[source]=d
_source=np.array([source])
while len(np.where(distance==-1)[0])>0:
d+=1
for i_n, n in enumerate(_source):
aux=_adl[n]
aux=aux[distance[aux]==-1]
distance[aux]=d
if i_n==0:
new_source=aux
else:
new_source=np.concatenate((new_source, aux))
_source=new_source
return distance
%timeit bf_dist(source, ff_adl)
###Output
172 µs ± 2.62 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
###Markdown
M.E.J. Newman solution An effective implementation of this algorithm, as proposed by M.E.J. Newman in his book, can be obtained by using a queue, i.e. a _first-in/first-out_ buffer. Let us start filling the queue with the source node. We are going to use two pointers, one pointing at the beginning (the reading pointer), one at the end (the writing one). It works as follows:1. place the label of the source vertex in the first element of the queue and set the pointer "read_p" to it. 2. place the writing pointer "write_p" in the position following "read_p";3. define a distance array;4. set the distance array entry for the source to zero. 5. if read_p==write_p, the algorithm is finished. Otherwise, read the element pointed by "read_p" (*new source*) and increase the value of "read_p" by one;6. set d (auxiliary variable) equal to the value of the distance of the *new source*;7. for each of the neighbours of *new source*, check if it has been already assigned a value in the distance array. If not: a.) set its distance from the source (in the distance array) to d+1; b.) add it to the queue in the position "write_p"; c.) add 1 to the value of "write_p";8. repeat from 5.
###Code
def MEJ_bf_dist(source, adl):
_adl=adl
ll=len(_adl)
_distance=-1*np.ones(ll, dtype='i8')
d=0
_distance[source]=d
queue=np.zeros(ll, dtype='i8')
read_p=0
write_p=1
queue[read_p]=source
l_nn=ll
while read_p!=write_p:
_source=queue[read_p]
_d=_distance[_source]
_nn=_adl[_source]
_nn=_nn[_distance[_nn]==-1]
_distance[_nn]=_d+1
l_nn=len(_nn)
queue[write_p:write_p+l_nn]=_nn
write_p=write_p+l_nn
read_p+=1
return _distance
MEJ_bf_dist(source, ff_adl)==bf_dist(source, ff_adl)
np.all(MEJ_bf_dist(source, ff_adl)==bf_dist(source, ff_adl))
%timeit bf_dist(source, ff_adl)
%timeit MEJ_bf_dist(source, ff_adl)
###Output
133 µs ± 5.38 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
###Markdown
Actually, we can complicate the routine in order to calculate the number of shortest paths connecting the source with any other node in the network Suppose we are starting with the node "source".1. assign distance=0 to source (since it is at distance 0 from itself);2. assign weight=1 to source (it will be clear what is the sense of those weights);3. for any vertex $i$ whose distance is $d$, look for its neighbors. For any $j$ in the neighbors: a. if $d[j]$ has not been assigned yet, then $d[j]=d[i]+1$ and $w[j]=w[i]$; b. if $d[j]$ has been already assigned AND $d[j]=d[i]+1$, then $w[j]+=w[i]$;
###Code
def MEJ_bf_dist_w(source, adl):
_adl=adl
ll=len(_adl)
_distance=-1*np.ones(ll, dtype='i8')
_weights=np.zeros(ll, dtype='i8')
d=0
_distance[source]=d
_weights[source]=1
queue=np.zeros(ll, dtype='i8')
read_p=0
write_p=1
queue[read_p]=source
l_nn=ll
while read_p!=write_p:
_source=queue[read_p]
_d=_distance[_source]
_w=_weights[_source]
_nn=_adl[_source]
# using numpy
new_nn=_nn[_distance[_nn]==-1]
old_nn=_nn[_distance[_nn]==_d+1]
_distance[new_nn]=_d+1
_weights[new_nn]=_w
_weights[old_nn]+=_w
l_nn=len(new_nn)
queue[write_p:write_p+l_nn]=new_nn
write_p=write_p+l_nn
# still the same from here
read_p+=1
return _distance, _weights
dist, wei=MEJ_bf_dist_w(source, ff_adl)
def MEJ_bf_dist_w_2(source, adl):
_adl=adl
ll=len(_adl)
_distance=-1*np.ones(ll, dtype='i8')
_weights=np.zeros(ll, dtype='i8')
d=0
_distance[source]=d
_weights[source]=1
queue=np.zeros(ll, dtype='i8')
read_c=0
write_c=1
queue[read_c]=source
l_nn=ll
while read_c!=write_c:
_source=queue[read_c]
_d=_distance[_source]
_w=_weights[_source]
_nn=_adl[_source]
# using python
for n in _nn:
if _distance[n]==-1:
_distance[n]=_d+1
_weights[n]=_w
queue[write_c]=n
write_c+=1
elif _distance[n]==_d+1:
_weights[n]+=_w
# still the same from here
read_c+=1
return _distance, _weights
np.all(MEJ_bf_dist_w(source, ff_adl)[0]==MEJ_bf_dist_w_2(source, ff_adl)[0])
np.all(MEJ_bf_dist_w(source, ff_adl)[1]==MEJ_bf_dist_w_2(source, ff_adl)[1])
%timeit MEJ_bf_dist_w_2(source, ff_adl)
%timeit MEJ_bf_dist_w(source, ff_adl)
np.vstack((ff_fam, dist)).T
np.vstack((ff_fam, wei)).T
###Output
_____no_output_____
###Markdown
 The node betweenness
###Code
# create a dictionary of dictionary (slightly more efficient than a matrix)
ddd={}
www={}
for fff in range(l_ff):
cacca=MEJ_bf_dist_w_2(fff, ff_adl)
ddd[fff]=cacca[0]
www[fff]=cacca[1]
nb=np.zeros(l_ff)
for fff in range(l_ff):
dist_f=ddd[fff]
weig_f=www[fff]
# these are the distance and number of shortests path dictionaries for the node fff
for source in range(l_ff-1):
if source!=fff:
dist_s=ddd[source]
weig_s=www[source]
#for any other node in the network, define the same dictionaries.
# the source is going to be one of the endpoints of the shortest path
# for the calculation of the betweenness of fff
for target in range(source+1, l_ff):
# this is needed in order to avoid considering twice the same couple
if target!=fff:
if dist_s[fff]+dist_f[target]==dist_s[target]:
# fff is a node in the shortest path between source and target
sp_through_fff=weig_f[source]*weig_f[target]
nb[fff]+=sp_through_fff/weig_s[target]
###Output
_____no_output_____
###Markdown

###Code
def node_betweenness(_adl, _ll):
# create a dictionary of dictionary (slightly more efficient than a matrix)
ddd={}
www={}
for fff in range(_ll):
cacca=MEJ_bf_dist_w_2(fff, _adl)
ddd[fff]=cacca[0]
www[fff]=cacca[1]
nb=np.zeros(_ll)
for fff in range(_ll):
dist_f=ddd[fff]
weig_f=www[fff]
for source in range(_ll-1):
if source!=fff:
dist_s=ddd[source]
weig_s=www[source]
for target in range(source+1, _ll):
if target!=fff:
if dist_s[fff]+dist_f[target]==dist_s[target]:
#print(ff_fam[source], ff_fam[fff], ff_fam[target], weig_f[source]*weig_f[target]/weig_s[target],weig_f[source],weig_f[target],weig_s[target])
nb[fff]+=weig_f[source]*weig_f[target]/weig_s[target]
return nb
np.all(node_betweenness(ff_adl, l_ff)==nb)
%timeit node_betweenness(ff_adl, l_ff)
###Output
2.96 ms ± 630 ns per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
The wheel Actually, NetworkX already calculates it efficiently, aka do not reinvent the wheel!
###Code
import networkx as nx
where_0=np.where(ff_am==1)
who=np.vstack((where_0[0], where_0[1])).T
edges=[]
for w in who:
if w[0]<w[1]:
edges.append((ff_fam[w[0]],ff_fam[w[1]]))
G=nx.Graph()
G.add_edges_from(edges)
bet=nx.betweenness_centrality(G, normalized=False)
bet
[abs(bet[ff_fam[i]]-nb[i]) for i in range(l_ff)]
%timeit nx.betweenness_centrality(G, normalized=False)
%timeit node_betweenness(ff_adl, l_ff)
###Output
3 ms ± 1.13 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
Losing by something more than a factor of 2 is not that bad... Something more from NetworkX for plots
###Code
import matplotlib
from matplotlib import cm
import matplotlib.pyplot as plt
%matplotlib inline
bc_norm=nx.betweenness_centrality(G, normalized=True)
bc_norm
###Output
_____no_output_____
###Markdown
Let us color the nodes according to their node betweenness! As you remember there were the official colors...... and even the color maps...... of different types! [Click here for the whole list](https://matplotlib.org/3.1.1/gallery/color/colormap_reference.html)
###Code
def red_me(color):
return cm.get_cmap('YlOrRd')(color)
def plasma_me(color):
return cm.get_cmap('plasma')(color)
pos=nx.kamada_kawai_layout(G)
plt.figure(figsize=(12, 8))
plt.title('Betwenness centrality', fontsize=20)
nx.draw_networkx_edges(G, pos)
rgb=[red_me(bc_norm[n]) for n in list(G.nodes)]
nx.draw_networkx_nodes(G, pos, list(G.nodes), node_size = 200, node_color =rgb)
nx.draw_networkx_labels(G, pos)
plt.show()
###Output
_____no_output_____
###Markdown
Actually, other centrality measures are present...
###Code
ec_norm=nx.eigenvector_centrality(G)
plt.figure(figsize=(12, 8))
plt.title('Eigenvector centrality', fontsize=20)
nx.draw_networkx_edges(G, pos)
rgb=[red_me(ec_norm[n]) for n in list(G.nodes)]
nx.draw_networkx_nodes(G, pos, list(G.nodes), node_size = 200, node_color =rgb)
nx.draw_networkx_labels(G, pos)
plt.show()
dc_norm=nx.degree_centrality(G)
plt.figure(figsize=(12, 8))
plt.title('Degree centrality', fontsize=20)
nx.draw_networkx_edges(G, pos)
rgb=[red_me(dc_norm[n]) for n in list(G.nodes)]
nx.draw_networkx_nodes(G, pos, list(G.nodes), node_size = 200, node_color =rgb)
nx.draw_networkx_labels(G, pos)
plt.show()
cc_norm=nx.closeness_centrality(G)
plt.figure(figsize=(12, 8))
plt.title('Closeness centrality', fontsize=20)
nx.draw_networkx_edges(G, pos)
rgb=[red_me(cc_norm[n]) for n in list(G.nodes)]
nx.draw_networkx_nodes(G, pos, list(G.nodes), node_size = 200, node_color =rgb)
nx.draw_networkx_labels(G, pos)
plt.show()
###Output
_____no_output_____
###Markdown
Actually, it is a mess to see all the plots, one by one...
###Code
titles=['Degree centrality', 'Eigenvector centrality', 'Betwenness centrality', 'Closeness centrality']
def centralities(G, n):
if n==0:
return nx.degree_centrality(G)
elif n==1:
return nx.eigenvector_centrality(G)
elif n==2:
return nx.betweenness_centrality(G, normalized=True)
else:
return nx.closeness_centrality(G)
pos=nx.kamada_kawai_layout(G)
#plt.figure(figsize=(10, 10))
fig, ax_lst = plt.subplots(2, 2, figsize=(15,10))
fig.suptitle('Centrality measures', fontsize=20)
# cancel the frame
for t in enumerate(titles):
x=t[0]%2
y=t[0]//2
ax_lst[x,y].set_title(t[1], fontsize=14)
nx.draw_networkx_edges(G, pos, ax=ax_lst[x, y])
centrality=centralities(G, t[0])
rgb=[red_me(centrality[n]) for n in list(G.nodes)]
nx.draw_networkx_nodes(G, pos, list(G.nodes), node_size = 200, node_color =rgb, ax=ax_lst[x, y])
nx.draw_networkx_labels(G, pos, ax=ax_lst[x, y])
ax_lst[x, y].axis('off')
# cancel the frame
plt.show()
pos=nx.kamada_kawai_layout(G)
#plt.figure(figsize=(10, 10))
fig, ax_lst = plt.subplots(2, 2, figsize=(15,10))
fig.suptitle('Centrality measures', fontsize=20)
# cancel the frame
for t in enumerate(titles):
x=t[0]%2
y=t[0]//2
ax_lst[x,y].set_title(t[1], fontsize=14)
nx.draw_networkx_edges(G, pos, ax=ax_lst[x, y])
centrality=centralities(G, t[0])
rgb=[plasma_me(centrality[n]) for n in list(G.nodes)]
nx.draw_networkx_nodes(G, pos, list(G.nodes), node_size = 200, node_color =rgb, ax=ax_lst[x, y])
nx.draw_networkx_labels(G, pos, ax=ax_lst[x, y])
ax_lst[x, y].axis('off')
# cancel the frame
plt.show()
###Output
_____no_output_____ |
.ipynb_checkpoints/ModuleBreakdown-checkpoint.ipynb | ###Markdown
Tutorial to generate predictions while setting interaction parameters.
###Code
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import pickle
import seaborn as sb
from predict_by_model import *
from GenerateLambdas import *
###Output
_____no_output_____
###Markdown
First, this tutorial will break down how the interaction parameters are generated in the main function of the module. We will predict C. diff engraftment on mice for which we have 16S community data. Community data is given as a .tsv file called *Cdiff_mice_high_vs_low_risk.species.tsv*, whose columns correspond to mice, rows correspond to taxa, and entries correspond to read counts.To do this, we will use the predictions of pairwise metabolic modeling with resource allocation constraints as "observed" pairwise experiments. These "observations" can be found in *Pairwise_Chemostat.xlsx*.For this data, we require a function to parse the taxa labels into something that matches our pairwise equilibrium data.
###Code
def GetStrn(strg):
s1 = strg.split(";")[-1]
s2 = "_".join(s1.split("__")[1:])
return s2
###Output
_____no_output_____
###Markdown
We're going to load and parse some communities (this tutorial will only use one of them).
###Code
miceData = pd.read_csv("Cdiff_mice_high_vs_low_risk.species.tsv",sep = '\t')
Experiments = miceData.columns[np.where(['WK' in colnm for colnm in miceData.columns])]
species = miceData.species.apply(GetStrn)
###Output
_____no_output_____
###Markdown
To generate interaction parameters from the bottom up, we must provide a set of "observed" equilibrium relative abundances from pairwise growth experiments. This file should have rows \& columns corresponding to taxa, and include all taxa present in the communities, if possible. The module will attempt to match the taxa names from the community data file (miceData) to these labels, and will return the number found and the proportion of reads covered by those found.
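A quick, hedged sanity check on that file before running the matching step (it assumes the sheet is named `Relative_Abundance`, as the comment in the next cell states):
```python
# peek at the pairwise "observations": expect a square table with taxa on both axes
obs = pd.read_excel("Pairwise_Chemostat.xlsx", sheet_name="Relative_Abundance", index_col=0)
print(obs.shape)
print(obs.index[:5].tolist())
```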
###Code
ObservationFile = "Pairwise_Chemostat.xlsx" #This should be an excel file with a sheet labeled "Relative_Abundance"
###Output
_____no_output_____
###Markdown
We have a set of experiments; we'll only use one for this notebook:
###Code
Experiments
Exp = "H1_4WK"
###Output
_____no_output_____
###Markdown
We first trim the community by read percentage. This will give us our initial species list and its read proportions.
###Code
by_proportion = miceData[Exp]/sum(miceData[Exp])
spec_list = list(species[by_proportion>0.001])
by_proportion.index = species
experiment = Experiment()
experiment.Community = spec_list
experiment.Invader = None
###Output
_____no_output_____
###Markdown
Next, we generate the interaction parameters using the pairwise equilibrium:
###Code
LambdaMat,foundList = GenerateLambdasFromExcel(experiment,version = "Equilibrium",File = ObservationFile)
###Output
Bacteroides_salanitronis not found
Barnesiella_viscericola not found
Prevotella_buccalis not found
Prevotella_sp._109 not found
Alistipes_obesi not found
Clostridia_bacterium_UC5.1-1D1 not found
[Ruminococcus]_torques not found
[Clostridium]_celerecrescens not found
Erysipelatoclostridium_ramosum not found
###Markdown
Now, we have a matrix of interactions:
###Code
LambdaMat
###Output
_____no_output_____
###Markdown
We can explore any community by making a similar pandas dataframe, with column/row labels corresponding to the names of the community members. We next simulate the community to equilibrium (or a time cutoff).
###Code
CommunityEquilibrium,fullSim = predict_justComm(LambdaMat,verb = False)
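# The same call works for any hand-built interaction matrix (hedged sketch with made-up values):
# toy_lambda = pd.DataFrame([[0.0, 0.6], [0.4, 0.0]],
#                           index=["Taxon_A", "Taxon_B"], columns=["Taxon_A", "Taxon_B"])
# toy_equilibrium, toy_sim = predict_justComm(toy_lambda, verb=False)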
fig,ax = plt.subplots(figsize = (10,10))
keys = list(CommunityEquilibrium.keys())
# get values in the same order as keys, and parse percentage values
vals = [float(CommunityEquilibrium[k]) for k in keys]
sb.barplot(x=keys, y=vals, ax = ax)
ax.set_xticklabels(keys, rotation = 90)
###Output
_____no_output_____ |
notebooks/01-4-1-distill-to-lstm.ipynb | ###Markdown
Reference: * https://github.com/huggingface/nlp/blob/master/notebooks/Overview.ipynb
###Code
class SST2Dataset(torch.utils.data.Dataset):
def __init__(self, entries_dict):
super().__init__()
self.entries_dict = entries_dict
def __len__(self):
return len(self.entries_dict["label"])
def __getitem__(self, idx):
return (
self.entries_dict["input_ids"][idx],
self.entries_dict["attention_mask"][idx].sum(), #input_lengths
{
"label": self.entries_dict["label"][idx],
"logits": self.entries_dict["logits"][idx]
}
)
train_dict, valid_dict, test_dict = torch.load(str(CACHE_DIR / "distill-dicts.jbl"))
# Instantiate a PyTorch Dataloader around our dataset
train_loader = torch.utils.data.DataLoader(SST2Dataset(train_dict), batch_size=64, shuffle=True)
valid_loader = torch.utils.data.DataLoader(SST2Dataset(valid_dict), batch_size=64, drop_last=False)
test_loader = torch.utils.data.DataLoader(SST2Dataset(test_dict), batch_size=64, drop_last=False)
ALPHA = 0
DISTILL_OBJECTIVE = torch.nn.MSELoss()
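# distill_loss (below) mixes two terms: an MSE between the student logits and the cached teacher
# logits (the distillation signal), and a standard cross-entropy against the hard labels.
# With ALPHA = 0 the hard-label term is switched off, so training matches the teacher logits only.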
def distill_loss(logits, targets):
distill_part = DISTILL_OBJECTIVE(
logits.reshape(-1), targets["logits"].reshape(-1)
) / 2
classification_part = F.cross_entropy(
logits, targets["label"]
)
return ALPHA * classification_part + (1-ALPHA) * distill_part
bert_model = BertForSequenceClassification.from_pretrained(str(CACHE_DIR / "sst2_bert_uncased")).cpu()
bert_model.bert.embeddings.word_embeddings.weight.shape
# Note: apex does not support weight dropping
model = get_sequence_model(
voc_size=bert_model.bert.embeddings.word_embeddings.weight.shape[0],
emb_size=bert_model.bert.embeddings.word_embeddings.weight.shape[1],
pad_idx = 0,
dropoute = 0,
rnn_hid = 768,
rnn_layers = 3,
bidir = True,
dropouth = 0.25,
dropouti = 0.25,
wdrop = 0,
unit_type = "lstm",
fcn_layers = [512, 2],
fcn_dropouts = [0.25, 0.25],
use_attention = True
)
model
# Copy the embedding weights to the LSTM model
try:
model.embeddings.encoder.emb.weight.data = bert_model.bert.embeddings.word_embeddings.weight.data
except:
model.embeddings.encoder.weight.data = bert_model.bert.embeddings.word_embeddings.weight.data
# Freeze the embedding layer
for param in model.embeddings.encoder.parameters():
param.requires_grad = False
model = model.cuda()
# Use only leaf tensors
parameters = [x for x in model.parameters() if x.is_leaf and x.requires_grad]
del bert_model
optimizer = torch.optim.Adam(parameters, lr=1e-3, betas=(0.8, 0.99))
# optimizer = torch.optim.RMSprop(parameters, lr=0.01)
if APEX_AVAILABLE:
model, optimizer = amp.initialize(
model, optimizer, opt_level="O1"
)
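# TransposeCallback (below) flips each token batch from (batch, seq_len) to (seq_len, batch),
# which is presumably the layout the recurrent model above expects.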
class TransposeCallback(Callback):
def on_batch_inputs(self, bot, input_tensors, targets):
input_tensors = [input_tensors[0].transpose(1, 0), input_tensors[1]]
return input_tensors, targets
class DistillTop1Accuracy(Top1Accuracy):
def __call__(self, truth, pred):
truth = truth["label"]
return super().__call__(truth, pred)
total_steps = len(train_loader) * 10
checkpoints = CheckpointCallback(
keep_n_checkpoints=1,
checkpoint_dir=CACHE_DIR / "distill_model_cache/",
monitor_metric="loss"
)
lr_durations = [
int(total_steps*0.2),
int(np.ceil(total_steps*0.8))
]
break_points = [0] + list(np.cumsum(lr_durations))[:-1]
callbacks = [
MovingAverageStatsTrackerCallback(
avg_window=len(train_loader) // 8,
log_interval=len(train_loader) // 10
),
LearningRateSchedulerCallback(
MultiStageScheduler(
[
LinearLR(optimizer, 0.01, lr_durations[0]),
CosineAnnealingLR(optimizer, lr_durations[1])
],
start_at_epochs=break_points
)
),
checkpoints,
TransposeCallback()
]
bot = BaseBot(
log_dir = CACHE_DIR / "distill_logs",
model=model,
train_loader=train_loader,
valid_loader=valid_loader,
clip_grad=10.,
optimizer=optimizer, echo=True,
criterion=distill_loss,
callbacks=callbacks,
pbar=False, use_tensorboard=False,
use_amp=APEX_AVAILABLE,
metrics=(DistillTop1Accuracy(),)
)
print(total_steps)
bot.train(
total_steps=total_steps,
checkpoint_interval=len(train_loader) // 2
)
bot.load_model(checkpoints.best_performers[0][1])
checkpoints.remove_checkpoints(keep=0)
# TARGET_DIR = CACHE_DIR / "sst2_bert_uncased"
# TARGET_DIR.mkdir(exist_ok=True)
# bot.model.save_pretrained(TARGET_DIR)
bot.eval(valid_loader)
bot.eval(test_loader)
# tokenizer.pad_token_id
###Output
_____no_output_____ |
Unithon.ipynb | ###Markdown
###Code
!pip install spacy
!pip install newsapi-python
!python -m spacy download en_core_web_lg
from newsapi import NewsApiClient
import pandas as pd
import numpy as np
import pickle
import spacy
from datetime import datetime, timedelta
from tqdm import tqdm
from sklearn.cluster import DBSCAN
from sklearn.metrics import pairwise_distances_argmin_min
import en_core_web_lg
nlp_eng = en_core_web_lg.load()
newsapi = NewsApiClient(api_key='ea370bd108064122b8d0eb309c6c85bd')
def get_past_articles(past=30):
past_articles = dict()
for past_days in range(1, past):
from_day = str(datetime.now() - timedelta(days=past_days))
to_day = str(datetime.now() - timedelta(days=past_days - 1))
past_articles.update({from_day:to_day})
return past_articles
def get_articles(query, past=30):
past_articles = get_past_articles(past)
all_articles = []
for i,j in tqdm(past_articles.items()):
for pag in tqdm(range(1,6)):
pag_articles = newsapi.get_everything(q=query, language='en', from_param=i, to=j, sort_by='relevancy', page=pag)['articles']
if len(pag_articles) == 0: break
all_articles.extend(pag_articles)
return all_articles
temp = newsapi.get_everything(q='New York', language='en', from_param='2020-03-21', to='2020-04-20', sort_by='relevancy', page=2)
filename = 'articlesCOVID.pckl'
pickle.dump(temp, open(filename, 'wb'))
filename = 'articlesCOVID.pckl'
loaded_model = pickle.load(open(filename, 'rb'))
filepath = 'articlesCOVID.pckl'
pickle.dump(loaded_model, open(filepath, 'wb'))
articles = pd.read_pickle(filepath)
print(articles)
# 'articles' is the single NewsAPI response loaded above, so iterate its 'articles' list directly
dados = []
for e in articles['articles']:
    title = e['title']
    description = e['description']
    content = e['content']
    dados.append({'title': title, 'date': e['publishedAt'], 'desc': description, 'content': content})
df = pd.DataFrame(dados)
df = df.dropna()
df.head()
titles = [article['title'] for article in articles['articles']]
dates = [article['publishedAt'] for article in articles['articles']]
descriptions = [article['description'] for article in articles['articles']]
df=pd.DataFrame({'title': titles, 'date': dates, 'desc': descriptions})
df = df.drop_duplicates(subset='title').reset_index(drop=True)
df = df.dropna()
df.head()
nlp = spacy.load('en_core_web_lg')
sent_vecs={}
docs=[]
for title in tqdm(df.title):
doc = nlp(title)
docs.append(doc)
sent_vecs.update({title: doc.vector})
sentences = list(sent_vecs.keys())
vectors = list(sent_vecs.values())
x = np.array(vectors)
n_classes = {}
for i in tqdm(np.arange(0.001, 1, 0.002)):
dbscan = DBSCAN(eps=i, min_samples=2, metric='cosine').fit(x)
n_classes.update({i: len(pd.Series(dbscan.labels_).value_counts())})
dbscan = DBSCAN(eps=0.08, min_samples=2, metric='cosine').fit(x)
results = pd.DataFrame({'label': dbscan.labels_, 'sent': sentences})
example_result = results[results.label == 59].sent.tolist()
event_df = df[df.title.isin(example_result)][['date', 'title']]
event_df['date'] = pd.to_datetime(event_df.date)
event_df = event_df.sort_values(by='date').dropna()
def get_mean_vector(sents):
a = np.zeros(300)
for sent in sents:
a = a + nlp(sent).vector
return a/len(sents)
def get_central_vector(sents):
vecs = []
for sent in sents:
doc = nlp(sent)
vecs.append(doc.vector)
mean_vec = get_mean_vector(sents)
index = pairwise_distances_argmin_min(np.array([mean_vec]), vecs)[0][0]
return sents[index]
###Output
_____no_output_____ |
notebooks/NLCPerformanceEval.ipynb | ###Markdown
Notebook for testing performance of NLC classification[Watson Developer Cloud](https://www.ibm.com/watsondevelopercloud) is a platform of cognitive services that leverage machine learning techniques to help partners and clients solve a variety business problems. Furthermore, several of the WDC services fall under the **supervised learning** suite of machine learning algorithms, that is, algorithms that learn by example. This begs the questions: "How many examples should we provide?" and "When is my solution ready for prime time?"It is critical to understand that training a machine learning solution is an iterative process where it is important to continually improve the solution by providing new examples and measuring the performance of the trained solution. In this notebook, we show how you can compute important Machine Learning metrics (accuracy, precision, recall, confusion_matrix) to judge the performance of your solution. For more details on these various metrics, please consult the **[Is Your Chatbot Ready for Prime-Time?](https://developer.ibm.com/dwblog/2016/chatbot-cognitive-performance-metrics-accuracy-precision-recall-confusion-matrix/)** blog. The notebook assumes you have already created a Watson [Natural Language Classifier](https://www.ibm.com/watson/developercloud/nl-classifier.html) instance and trained a classifier. To leverage this notebook, you need to provide the following information* Credentials for your NLC instance (username and password)* id for your trained classifier (this is returned when you train an NLC classifier)* csv file with your text utterances and corresponding class labels* results csv file to write the results to
###Code
# Only run this cell if you don't have pandas_ml or watson_developer_cloud installed
!pip install pandas_ml
# You can specify the latest verion of watson_developer_cloud (1.0.0 as of November 20, 2017)
!pip install -I watson-developer-cloud==1.0.0
#Import utilities
import json
import sys
import codecs
import unicodecsv as csv
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import pandas_ml
from pandas_ml import ConfusionMatrix
from watson_developer_cloud import NaturalLanguageClassifierV1
###Output
_____no_output_____
###Markdown
Provide the path to the parms file which includes credentials to access your NLC service as well as the input test csv file and the output csv file to write the output results to.
###Code
# Sample parms file data
#{
# "url": "https://gateway.watsonplatform.net/natural-language-classifier/api/v1/classifiers",
# "user":"YOUR_NLC_USERNAME",
# "password": "YOUR_NLC_PASSWORD",
# "nlc_id":"YOUR_NLC_CLASSIFIER_ID",
# "test_csv_file": "COMPLETE_PATH_TO_YOUR_TEST_CSV_FILE",
# "results_csv_file": "COMPLETE PATH TO RESULTS FILE (any file you can write to)",
# "confmatrix_csv_file": "COMPLETE PATH TO CONFUSION MATRIX FILE (any file you can write to)"
#}
# Provide complete path to the file which includes all required parms
# A sample parms file is included (example_parms.json)
nlcParmsFile = 'COMPLETE PATH TO YOUR PARMS FILE'
parms = ''
with open(nlcParmsFile) as parmFile:
parms = json.load(parmFile)
url=parms['url']
user=parms['user']
password=parms['password']
nlc_id=parms['nlc_id']
test_csv_file=parms['test_csv_file']
results_csv_file=parms['results_csv_file']
confmatrix_csv_file=parms['confmatrix_csv_file']
json.dumps(parms)
# Create an object for your NLC instance
natural_language_classifier = NaturalLanguageClassifierV1(
username=user,
password=password)
###Output
_____no_output_____
###Markdown
Define useful methods to classify using trained NLC classifier.
###Code
# Given a text string and a pointer to NLC instance and classifierID, get back NLC response
def getNLCresponse(nlc_instance,classifierID,string):
# remove newlines from input text as that causes WCS to return an error
string = string.replace("\n","")
classes = nlc_instance.classify(classifierID, string)
return classes
# Process multiple text utterances (provided via csv file) in batch. Effectively, read the csv file and for each text
# utterance, get NLC response. Aggregate and return results.
def batchNLC(nlc_instance,classifierID,csvfile):
test_classes=[]
nlcpredict_classes=[]
nlcpredict_confidence=[]
text=[]
i=0
print ('reading csv file: ', csvfile)
with open(csvfile, 'rb') as csvfile:
# For better handling of utf8 encoded text
csvReader = csv.reader(csvfile, encoding="utf-8-sig")
for row in csvReader:
print(row)
# Assume input text is 2 column csv file, first column is text
# and second column is the label/class/intent
# Sometimes, the text string includes commas which may split
# the text across multiple colmns. The following code handles that.
if len(row) > 2:
qelements = row[0:len(row)-1]
utterance = ",".join(qelements)
test_classes.append(row[len(row)-1])
else:
utterance = row[0]
test_classes.append(row[1])
utterance = utterance.replace('\r', ' ')
print ('i: ', i, ' testing row: ', utterance)
#test_classes.append(row['class'])
#print 'analyzing row: ', i, ' text: ', row['text']
nlc_response = getNLCresponse(nlc_instance,classifierID,utterance)
if nlc_response['classes']:
nlcpredict_classes.append(nlc_response['classes'][0]['class_name'])
nlcpredict_confidence.append(nlc_response['classes'][0]['confidence'])
else:
nlcpredict_classes.append('')
nlcpredict_confidence.append(0)
text.append(utterance)
i = i+1
if(i%250 == 0):
print("")
print("Processed ", i, " records")
if(i%10 == 0):
sys.stdout.write('.')
print("")
print("Finished processing ", i, " records")
return test_classes, nlcpredict_classes, nlcpredict_confidence, text
# Plot confusion matrix as an image
def plot_conf_matrix(conf_matrix):
plt.figure()
plt.imshow(conf_matrix)
plt.show()
# Print confusion matrix to a csv file
def confmatrix2csv(conf_matrix,labels,csvfile):
with open(csvfile, 'wb') as csvfile:
csvWriter = csv.writer(csvfile)
row=list(labels)
row.insert(0,"")
csvWriter.writerow(row)
for i in range(conf_matrix.shape[0]):
row=list(conf_matrix[i])
row.insert(0,labels[i])
csvWriter.writerow(row)
# This is an optional step to quickly test response from NLC for a given utterance
#testQ='can I reset my password'
#results = getNLCresponse(natural_language_classifier,nlc_id,testQ)
#print(json.dumps(results, indent=2))
###Output
_____no_output_____
###Markdown
Call NLC on the specified csv file and collect results.
###Code
test_classes,nlcpredict_classes,nlcpredict_conf,text=batchNLC(natural_language_classifier,nlc_id,test_csv_file)
# print results to csv file including original text, the correct label,
# the predicted label and the confidence reported by NLC.
csvfileOut=results_csv_file
with open(csvfileOut, 'wb') as csvOut:
outrow=['text','true class','NLC Predicted class','Confidence']
csvWriter = csv.writer(csvOut,dialect='excel')
csvWriter.writerow(outrow)
for i in range(len(text)):
outrow=[text[i],test_classes[i],nlcpredict_classes[i],str(nlcpredict_conf[i])]
csvWriter.writerow(outrow)
# Compute confusion matrix
labels=list(set(test_classes))
nlc_confusion_matrix = confusion_matrix(test_classes, nlcpredict_classes, labels)
nlcConfMatrix = ConfusionMatrix(test_classes, nlcpredict_classes)
# Print out confusion matrix with labels to csv file
confmatrix2csv(nlc_confusion_matrix,labels,confmatrix_csv_file)
%matplotlib inline
nlcConfMatrix.plot()
# Compute accuracy of classification
acc=accuracy_score(test_classes, nlcpredict_classes)
print('Classification Accuracy: ', acc)
# print precision, recall and f1-scores for the different classes
print(classification_report(test_classes, nlcpredict_classes, labels=labels))
#Optional if you would like each of these metrics separately
#[precision,recall,fscore,support]=precision_recall_fscore_support(test_classes, nlcpredict_classes, labels=labels)
#print("precision: ", precision)
#print("recall: ", recall)
#print("f1 score: ", fscore)
#print("support: ", support)
###Output
_____no_output_____ |
speechtotext.ipynb | ###Markdown
Transcribing audio using the Cloud Speech to Text API. This notebook shows how you can use the Google Cloud Speech to Text API to transcribe audio and the Google Cloud Natural Language API to categorize the text. We'll use the Google Cloud Client Libraries to do this. Let's transcribe the State of the Union speech by President Barack Obama. Here's an excerpt from that speech:
###Code
with open('data/speech.wav', 'rb') as fh:
audio_content = fh.read()
Audio(audio_content)
###Output
_____no_output_____
###Markdown
Use the client library to transcribe this audio.
###Code
from google.cloud import speech
def transcribe(audio_content):
client = speech.SpeechClient()
audio = speech.types.RecognitionAudio(content=audio_content)
config = speech.types.RecognitionConfig(
encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=24000,
language_code='en-US')
response = client.recognize(config, audio)
return response
response = transcribe(audio_content)
response
###Output
_____no_output_____
###Markdown
Print the transcriptThe API returns the transcript in parts because it may have different confidences in each phrase.
###Code
transcript = ''.join([result.alternatives[0].transcript for result in response.results])
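# Each alternative also carries a confidence score, e.g. response.results[0].alternatives[0].confidence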
transcript
###Output
_____no_output_____
###Markdown
Classifying the text using the Natural Language APIThe Natural Language API does text analysis and can pull out important entities and classify the overall document.
###Code
from google.cloud import language
language_client = language.LanguageServiceClient()
document = language.types.Document(
content=transcript,
type=language.enums.Document.Type.PLAIN_TEXT)
response = language_client.annotate_text(document, features={'extract_entities': True, 'classify_text': True})
response
###Output
_____no_output_____ |
wastewater_based_epidemiology202160884659.ipynb | ###Markdown
Simulation of SEIARV model
###Code
import pandas as pd
from wbepi import models as md
import numpy as np
import matplotlib.pyplot as plt
# for details of SEIARV model, see wbepi.models
ctrl_C = lambda t: 0.5
ctrl_D = lambda t: 0.8
wbe_model = md.SEIARV(ctrl_C, ctrl_D)
A = wbe_model.ode_sol()
plt.plot(A["tspan"], A["solution"][:, 2])
plt.show()
###Output
_____no_output_____
###Markdown
Sensitivity Analysis
###Code
from wbepi import sensitivity as st
from wbepi import basic_models as md
import numpy as np
import matplotlib.pyplot as plt
# SIR model as a test
prob = {"num_vars": 2,
"names": ["beta", "gamma"],
# "groups": ["g1", "g1", "g2", "g2", "g3"],
"bounds": [[0.1, 0.9], [0.1, 0.5]],
"dists": ["unif", "unif"]
}
Nsum = 1000
X = st.LHS(prob, Nsum)
tspan1 = np.arange(0, 200, 0.1)
Y = np.zeros((len(tspan1), Nsum))
for i in np.arange(Nsum):
wbe_model = md.SIR(beta=X[i, 0], gamma=X[i, 1])
A = wbe_model.ode_sol()
#print(A["solution"])
Y[:, i] = A["solution"][:, 1]
Sen_results1 = np.zeros((len(tspan1), 2))
Sen_results2 = np.zeros((len(tspan1), 2))
for j in np.arange(len(tspan1)):
K = Y[j, :]
Sen_results1[j, :] = st.PRCC(X, K)
Sen_results2[j, :] = st.RBD_FAST(prob, X, K)
#Si1 = morris.analyze(prob, X, K, conf_level=0.95, print_to_console=False, num_levels=4)
#Sen_results2[j,:]=Si["S1"]
plt.figure(1)
plt.plot(A["tspan"], Sen_results1[:, 0], label="beta_method_one")
plt.plot(A["tspan"], Sen_results2[:, 0], label="beta_method_two")
plt.plot(A["tspan"], Sen_results1[:, 1], label="gamma_method_one")
plt.plot(A["tspan"], Sen_results2[:, 1], label="gamma_method_two")
plt.show()
###Output
_____no_output_____
###Markdown
Parameter Estimation by nonlinear least squares method and approximate Bayesian methodPackage: lmfit, pyABC NLES methodSIR Model as a test
###Code
from wbepi import basic_models as md
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from lmfit import Parameters, minimize, report_fit
import pandas as pd
# Method One: Nonlinear Least Square Method
## test_data generation
para_test_model = md.SIR(beta=0.2, gamma=0.1, t0=0, dt=5, tend=150)
test_data = para_test_model.ode_sol()
plt.figure(1)
sns.set_theme(style="darkgrid")
plt.plot(test_data["tspan"], test_data["solution"][:, 1])
plt.show()
## parameter estimation by using lmfit
para_estimated = Parameters()
para_estimated.add('beta', value=0.01, min=0, max=1)
para_estimated.add('gamma', value=0.02, min=0, max=1)
# define error function
def error(para):
para_model = md.SIR(beta=para["beta"], gamma=para["gamma"], t0=0, dt=5, tend=150)
model_data = para_model.ode_sol()
mse = model_data["solution"][:, 1] - test_data["solution"][:, 1] # only data-data needed
return mse
# Parameter estimation
out = minimize(error, para_estimated)
report_fit(out.params)
print(error(out.params))
# Show fitting results
result_model = md.SIR(beta=out.params["beta"], gamma=out.params["gamma"], t0=0, dt=1, tend=150)
result_data = result_model.ode_sol()
plt.figure(2)
sns.set_theme(style="darkgrid")
plt.plot(test_data["tspan"], test_data["solution"][:, 1], "o")
plt.plot(result_data["tspan"], result_data["solution"][:, 1])
plt.show()
from wbepi import models as md
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from lmfit import Parameters, minimize, report_fit
import pandas as pd
# Data import
source_data = pd.read_csv("./Data/albany_merge_data.csv")
Fit_data = pd.DataFrame(index=source_data.index, columns=["date", "confirmed", "newlyconfirmed",
"water_virus"])
Fit_data["date"] = pd.to_datetime(source_data["date"])
Fit_data["confirmed"] = source_data["confirmed"].values
Fit_data["newlyconfirmed"] = source_data["newlyconfirmed"].values
Fit_data["water_virus"] = source_data["Est. concentration"].values
beta = 0.4
ca = 0.1
cr = 0.9
k = 5
# initial values
initvalue_data = {"initS": 32711, "initE": k, "initA": k, "initI": k, "initD": 38,
"initR1": 0, "initR2": 0,
"initW": 2}
timepara_data = {"t0": 0, "dt": 1, "tend": int(len(Fit_data.index))}
# Parameters
para_set = Parameters()
para_set.add("ca", value=ca, vary=False)
para_set.add("cr", value=cr, vary=False)
para_set.add('N', value=32711, vary=False)
para_set.add('beta', value=beta, vary=False) # Estimated
para_set.add('k_E', value=0.55, vary=False)
para_set.add('k_A', value=0.55, vary=False)
para_set.add('sigma', value=1 / 5.8, vary=False)
para_set.add('rho', value=0.31, vary=False)
para_set.add('gamma_I', value=1 / 12, vary=False)
para_set.add('gamma_A', value=1 / 10, vary=False)
para_set.add('gamma_R', value=1 / 13, vary=False)
para_set.add('detecting_A', value=0.3, vary=False) # estimated
para_set.add('detecting_I', value=0.5, vary=False) # estimated
para_set.add('log_p_0', value=3, vary=False) # estimated
para_set.add('p_E', value=0.1, vary=False)
para_set.add('p_A', value=0.1, vary=False)
para_set.add('p_R', value=0.1, vary=False)
para_set.add('water_volume', value=3000, vary=False)
para_set.add('waning_rate', value=1.44, vary=False)
# Model solution
def solution(initvalue, timepara, para):
ctrl1 = lambda t: np.power(para["cr"], t) + para["ca"]
para_model = md.SEIARW(ctrl=ctrl1, initS=initvalue["initS"], initE=initvalue["initE"], initA=initvalue["initA"],
initI=initvalue["initI"], initD=initvalue["initD"], initR1=initvalue["initR1"],
initR2=initvalue["initR2"], initW=initvalue["initW"],
t0=timepara["t0"], dt=timepara["dt"], tend=timepara["tend"],
N=para["N"],
beta=para["beta"],
k_E=para["k_E"], k_A=para["k_A"], sigma=para["sigma"], rho=para["rho"],
gamma_I=para["gamma_I"], gamma_A=para["gamma_A"], gamma_R=para["gamma_R"],
detecting_A=para["detecting_A"], detacting_I=para["detecting_I"],
p_0=np.power(10, para["log_p_0"]), p_E=para["p_E"], p_A=para["p_A"], p_R=para["p_R"],
water_volume=para["water_volume"], waning_rate=para["waning_rate"])
model_data = para_model.ode_sol()
print("basic reproduction number:", para_model.BRN())
return model_data
result_data = solution(initvalue_data, timepara_data, para_set)
plt.figure(2)
sns.set_theme(style="darkgrid")
plt.plot(Fit_data.index, Fit_data["confirmed"], "o")
plt.plot(result_data["tspan"], result_data["solution"][:, 4])
plt.show()
plt.figure(3)
sns.set_theme(style="darkgrid")
plt.plot(Fit_data.index, Fit_data["newlyconfirmed"], "o")
plt.plot(result_data["tspan"], result_data["newlyconfirmed"])
plt.show()
plt.figure(4)
sns.set_theme(style="darkgrid")
plt.plot(Fit_data.index, Fit_data["water_virus"], "o")
plt.plot(result_data["tspan"], result_data["solution"][:, 7])
plt.show()
###Output
basic reproduction number: 1.919642857142857
###Markdown
Parameter Estimation
###Code
from wbepi import models as md
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from lmfit import Parameters, minimize, report_fit
import pandas as pd
import matplotlib as mp
# Data import
source_data = pd.read_csv("./Data/albany_merge_data.csv")
Fit_data = pd.DataFrame(index=source_data.index, columns=["date", "confirmed", "newlyconfirmed",
"water_virus"])
Fit_data["date"] = pd.to_datetime(source_data["date"])
Fit_data["confirmed"] = source_data["confirmed"].values
Fit_data["newlyconfirmed"] = source_data["newlyconfirmed"].values
Fit_data["water_virus"] = source_data["Est. concentration"].values
print(Fit_data.head())
# initial values
initvalue = {"initS": 32711, "initE": 5, "initA": 5, "initI": 5, "initD": 38,
"initR1": 0, "initR2": 0,
"initW": 2}
timepara = {"t0": 0, "dt": 1, "tend": int(len(Fit_data.index))}
# Parameters
para_set = Parameters()
para_set.add("ca", value=0.3, min=0, max=1)
para_set.add("cr", value=0.3, min=0, max=1)
para_set.add('N', value=32711, vary=False)
para_set.add('beta', value=0.01, min=0, max=10) # Estimated
para_set.add('k_E', value=0.55, vary=False)
para_set.add('k_A', value=0.55, vary=False)
para_set.add('sigma', value=1 / 5.8, vary=False)
para_set.add('rho', value=0.31, vary=False)
para_set.add('gamma_I', value=1 / 12, vary=False)
para_set.add('gamma_A', value=1 / 10, vary=False)
para_set.add('gamma_R', value=1 / 13, vary=False)
para_set.add('detecting_A', value=0.3, min=0.2, max=0.5) # estimated
para_set.add('detecting_I', value=0.5, min=0.5, max=1) # estimated
para_set.add('log_p_0', value=8.5, min=8.5, max=10) # estimated
para_set.add('p_E', value=0.1, vary=False)
para_set.add('p_A', value=0.1, vary=False)
para_set.add('p_R', value=0.1, vary=False)
para_set.add('water_volume', value=3000, vary=False)
para_set.add('waning_rate', value=1.44, vary=False)
# estimated
# define error function
def error(para):
ctrl = lambda t: np.power(para["cr"], t) + para["ca"]
para_model = md.SEIARW(ctrl=ctrl, initS=initvalue["initS"], initE=initvalue["initE"], initA=initvalue["initA"],
initI=initvalue["initI"], initD=initvalue["initD"], initR1=initvalue["initR1"],
initR2=initvalue["initR2"], initW=initvalue["initW"],
t0=timepara["t0"], dt=timepara["dt"], tend=timepara["tend"],
N=para["N"],
beta=para["beta"],
k_E=para["k_E"], k_A=para["k_A"], sigma=para["sigma"], rho=para["rho"],
gamma_I=para["gamma_I"], gamma_A=para["gamma_A"], gamma_R=para["gamma_R"],
detecting_A=para["detecting_A"], detacting_I=para["detecting_I"],
p_0=np.power(10, para["log_p_0"]), p_E=para["p_E"], p_A=para["p_A"], p_R=para["p_R"],
water_volume=para["water_volume"], waning_rate=para["waning_rate"])
model_data = para_model.ode_sol()
A=Fit_data["confirmed"].values>10
B=Fit_data["newlyconfirmed"].values>1
C=Fit_data["water_virus"].values>100
mse_1 = (model_data["solution"][A, 4] - Fit_data["confirmed"].values[A])
mse_2 = (model_data["newlyconfirmed"][B]- Fit_data["newlyconfirmed"].values[B])
mse_3 = (model_data["solution"][C, 7] - Fit_data["water_virus"].values[C])
return np.append(mse_1,mse_2)
# Parameter estimation
out = minimize(error, para_set)
report_fit(out.params)
print(error(out.params))
# Show fitting results
ctrl = lambda t: np.power(out.params["cr"], t) + out.params["ca"]
result_model = md.SEIARW(ctrl=ctrl, initS=initvalue["initS"], initE=initvalue["initE"], initA=initvalue["initA"],
initI=initvalue["initI"], initD=initvalue["initD"], initR1=initvalue["initR1"],
initR2=initvalue["initR2"], initW=initvalue["initW"],
t0=timepara["t0"], dt=timepara["dt"], tend=timepara["tend"],
N=out.params["N"],
beta=out.params["beta"],
k_E=out.params["k_E"], k_A=out.params["k_A"], sigma=out.params["sigma"], rho=out.params["rho"],
gamma_I=out.params["gamma_I"], gamma_A=out.params["gamma_A"], gamma_R=out.params["gamma_R"],
detecting_A=out.params["detecting_A"], detacting_I=out.params["detecting_I"],
p_0=np.exp(out.params["log_p_0"]), p_E=out.params["p_E"], p_A=out.params["p_A"],
p_R=out.params["p_R"],
water_volume=out.params["water_volume"], waning_rate=out.params["waning_rate"])
result_data = result_model.ode_sol()
print("basic reproduction number:", result_model.BRN())
plt.figure(2)
sns.set_theme(style="darkgrid")
plt.plot(Fit_data["date"], Fit_data["confirmed"], "o",label="Real")
plt.plot(Fit_data["date"], result_data["solution"][:, 4],label="Fit")
plt.xticks(rotation=60)
#x_major_locator=MultipleLocator(10)  # set the x-axis tick interval and store it in a variable
#y_major_locator=MultipleLocator(10)  # set the y-axis tick interval to 10 and store it in a variable
ax = plt.gca()  # ax is the Axes instance holding both coordinate axes
ax.xaxis.set_major_locator(mp.ticker.MultipleLocator(30))  # avoid overcrowded ticks
plt.legend(loc='upper left', borderaxespad=0.)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.title('Confirmed Cases')
plt.savefig('./Fig/Confirmed_fit202162291315.png', dpi=100, bbox_inches='tight')
plt.show()
plt.figure(3)
sns.set_theme(style="darkgrid")
plt.plot(Fit_data["date"], Fit_data["newlyconfirmed"], "o",label="Real")
plt.plot(Fit_data["date"], result_data["newlyconfirmed"],label="Fit")
plt.xticks(rotation=60)
#x_major_locator=MultipleLocator(10)  # set the x-axis tick interval and store it in a variable
#y_major_locator=MultipleLocator(10)  # set the y-axis tick interval to 10 and store it in a variable
ax = plt.gca()  # ax is the Axes instance holding both coordinate axes
ax.xaxis.set_major_locator(mp.ticker.MultipleLocator(30))  # avoid overcrowded ticks
plt.legend(loc='upper left', borderaxespad=0.)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.title('Daily Confirmed Cases')
plt.savefig('./Fig/daily_confirmed_fit202162291315.png', dpi=100, bbox_inches='tight')
plt.show()
plt.figure(4)
sns.set_theme(style="darkgrid")
plt.plot(Fit_data["date"], Fit_data["water_virus"], "o",label="Real")
plt.plot(Fit_data["date"], result_data["solution"][:, 7],label="Fit")
plt.xticks(rotation=60)
ax = plt.gca()  # ax is the Axes instance holding both coordinate axes
ax.xaxis.set_major_locator(mp.ticker.MultipleLocator(30))  # avoid overcrowded ticks
plt.legend(loc='upper left', borderaxespad=0.)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.title('Wastewater virus concentration')
plt.savefig('./Fig/wastewater_virus_fit202162291315.png', dpi=100, bbox_inches='tight')
plt.show()
###Output
date confirmed newlyconfirmed water_virus
0 2020-07-07 43 5.0 2.0
1 2020-07-09 45 0.0 0.0
2 2020-07-14 52 3.0 0.0
3 2020-07-16 54 1.0 0.0
4 2020-07-21 73 6.0 0.0
[[Variables]]
ca: 0.46253108 (init = 0.3)
cr: 0.96388939 (init = 0.3)
N: 32711 (fixed)
beta: 0.36328650 (init = 0.01)
k_E: 0.55 (fixed)
k_A: 0.55 (fixed)
sigma: 0.1724138 (fixed)
rho: 0.31 (fixed)
gamma_I: 0.08333333 (fixed)
gamma_A: 0.1 (fixed)
gamma_R: 0.07692308 (fixed)
detecting_A: 0.34399602 (init = 0.3)
detecting_I: 0.50000000 (init = 0.5)
log_p_0: 8.50000000 (init = 8.5)
p_E: 0.1 (fixed)
p_A: 0.1 (fixed)
p_R: 0.1 (fixed)
water_volume: 3000 (fixed)
waning_rate: 1.44 (fixed)
[-5.00000000e+00 -3.50327968e+00 -7.87255664e+00 -7.53446890e+00
-2.41543811e+01 -2.35195033e+01 -2.34779625e+01 -2.49069005e+01
-2.56957427e+01 -2.07374802e+01 -1.49242487e+01 -1.01452629e+01
-1.62860867e+01 -1.02286951e+01 -1.98520307e+01 -9.03289083e+00
-8.64704357e+00 -9.57050899e+00 -3.56809635e+01 -2.88592108e+01
-5.89907054e+01 -3.59670819e+01 -4.86876626e+01 -2.10609118e+01
-8.70058079e+01 -5.44531060e+01 -8.33464656e+01 -4.56434223e+01
-1.01316185e+02 -5.83522445e+01 -4.47547823e+01 3.45711904e+00
-8.87515160e+01 -3.54313544e+01 -1.86483569e+01 3.95168075e+01
-4.30304796e+01 1.96021244e+01 3.92949684e+01 1.05917568e+02
-4.67013947e+00 6.53835185e+01 1.27923324e+02 2.00788561e+02
2.70814410e+02 3.45833324e+02 -7.33236302e+01 3.17452416e+00
3.31598964e+01 1.10466863e+02 -3.01066830e+02 -2.23599153e+02
-6.11282903e+02 -5.34264949e+02 -5.07685569e+02 -4.31677907e+02
-6.51367517e+02 -5.76872012e+02 -6.31300827e+02 -5.58755034e+02
-5.19327237e+02 -4.49101614e+02 -4.84153945e+02 -4.16551746e+02
-3.50354447e+02 -3.09613619e+02 -2.46373237e+02 -2.51669989e+02
-1.91533596e+02 -1.39987165e+02 -8.30475729e+01 -7.07258083e+01
-1.70273876e+01 -9.29527197e+01 -7.24975136e+01 -1.02653139e+02
-6.04070064e+01 -5.67429313e+01 -3.26414875e+01 -3.40803406e+01
-6.03457380e+00 2.52299488e+00 1.46215316e+01 3.42919146e+01
5.65664570e+01 6.84786804e+01 9.70630859e+01 1.06354933e+02
1.35390060e+02 1.47204713e+02 1.73835371e+02 1.92318600e+02
2.15690923e+02 2.30988699e+02 2.49248013e+02 2.56504585e+02
2.76793677e+02 2.80150025e+02 2.89607770e+02 2.81200405e+02
2.66960724e+02 2.28920783e+02 2.33111868e+02 2.05564464e+02
1.98308241e+02 1.76372032e+02 1.70783823e+02 1.37570750e+02
1.18759095e+02 6.73742839e+01 3.74408942e+01 2.98266016e+00
-7.97751768e+00 -2.04175614e+01 -1.73162025e+01 -3.86529674e+01
-3.74081613e+01 -3.45628507e+01 -4.30988459e+01 -5.09986818e+01
-5.00000000e+00 -3.69276959e-01 -3.61991223e+00 5.71062077e-01
7.05739163e+00 6.38584726e+00 6.07653458e+00 -4.11045455e+00
1.48217527e+01 1.08685055e+01 1.30236235e+01 5.27941931e+00
7.62675081e+00 1.80551038e+01 2.05527019e+01 1.06640405e-01
2.70304336e+00 3.13272374e+01 3.39639403e+01 3.15974622e+01
3.42119013e+01 3.87913650e+01 4.13201615e+01 3.17829975e+01
3.41651644e+01 4.44527129e+01 4.66326040e+01 4.76928440e+01
4.96225998e+01 6.65398052e+01 6.78652376e+01 4.58430462e+01
4.64981543e+01 6.69853723e+01 6.73069664e+01 -8.75336926e+01
-8.75323227e+01 -1.86837507e+01 -1.89820455e+01 2.65793800e+01
2.60076620e+01 4.13103897e+01 4.04955045e+01 6.15711852e+01
6.05457930e+01 5.54277973e+01 5.42256232e+01 5.09476685e+01
4.96021993e+01 4.81972990e+01 5.27408283e+01 5.12403816e+01
5.07032487e+01 4.91363925e+01 3.83217646e+01 3.66984207e+01
3.50746679e+01 3.84552061e+01 4.28443751e+01 4.52461321e+01
3.56640751e+01 3.31014439e+01 3.75611469e+01 3.70457668e+01
2.95575687e+01 1.40985368e+01 2.82745424e+01 2.09122234e+01
2.68146526e+01 2.34832293e+01 2.33723231e+01 2.12593148e+01
1.82565716e+01 1.04577454e+01 2.59263503e+00 7.60318959e-01
2.96005918e+00 7.19108448e+00 4.45259676e+00 -2.56223057e-01
6.06379031e+00 7.41179119e+00 7.78692751e+00 -4.81165535e+00
-6.93338973e+00 -5.45823400e+00 -1.09601778e+01 3.55995630e+00
-5.33676493e+00 4.24480615e+00 5.84531059e+00 4.46400482e+00
3.10016411e+00]
basic reproduction number: 1.7281064981052505
|
solutions by participants/ex1/ex1-KrishanuPodder-73cost.ipynb | ###Markdown
Exercise 1 - Toffoli gate Historical backgroundForty years ago, a ragtag group of 50 thinkers arranged themselves for a photo on the lawn of MIT’s Endicott House. Few at the Physics of Computation Conference, jointly organized by MIT and IBM, thought they were making history in 1981. It was arguably the birthplace of physics of computing, and especially the now-burgeoning field of quantum computing, as a serious subject worthy of a textbook or university course.It was at this conference where Feynman uttered his now-famous quote: “Nature isn’t classical, dammit, and if you want to make a simulation of nature, you’d better make it quantum mechanical, and by golly it’s a wonderful problem, because it doesn’t look so easy.”[1] Earlier this month, we celebrated the 40th anniversary of this important conference. You can read more [here](https://youtu.be/GR6ANm6Z0yk).One of the themes discussed at the conference was reversible computing, which Tommaso Toffoli and Edward Fredkin at MIT had been thinking about in the years prior.[2-3] Toffoli came up with a reversible version of the AND/NAND gate (which is now called the Toffoli gate, or controlled-controlled-NOT gate). Because the NAND gate is universal in classical computing, the Toffoli gate is a universal reversible logic gate. Quantum computing is a special form of reversible computing; any reversible gate can be implemented on a quantum computer, and hence the Toffoli gate is also a quantum logic gate. However, the Toffoli gate alone is not a universal gate for quantum computing. In this exercise, we will explore the Toffoli gate and universal gate sets for quantum computers. References1. Feynman, Richard P. "Simulating physics with computers." Int. J. Theor. Phys 21.6/7 (1982).1. Toffoli, Tommaso. "Reversible computing." International colloquium on automata, languages, and programming. Springer, Berlin, Heidelberg, 1980.1. Fredkin, Edward, and Tommaso Toffoli. "Conservative logic." International Journal of Theoretical Physics 21.3 (1982): 219-253. Classical logic gatesIn classical computation, one often used model is Boolean logic, or classical logic gates. Such gates represent Boolean functions, functions with only binary (0,1) input and output. One interesting aspect of Boolean logic is that all possible binary functions can be formed using a combination of only a small number of different logic gates. Such sets are called functionally complete sets. One famous such set is AND and NOT. These two gates are enough to express all possible functions. The same is true for OR and NOT. There exist smaller sets, such as NAND and NOR, which alone are universal; nevertheless, the functions AND, NOT and OR are often seen as the basic building blocks of classical computation.**Goal**Construct a Toffoli gate using the basis gate set (CX, RZ, SX and X gates) of IBM Quantum systems. This exercise aims to teach you the basic concepts of quantum gates and how to construct quantum circuits1. visually using the Circuit Composer widget1. programmatically using Qiskit. If you are already familiar with quantum gates and Qiskit, you can jump directly to the problem.
###Code
# Getting rid of unnecessary warnings
import warnings
from matplotlib.cbook import MatplotlibDeprecationWarning
warnings.filterwarnings('ignore', category=MatplotlibDeprecationWarning)
# Importing standard Qiskit libraries
from qiskit import QuantumCircuit, execute, Aer, IBMQ, QuantumRegister, ClassicalRegister
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
from ibm_quantum_widgets import *
# Useful to have pi
import math
pi=math.pi
###Output
_____no_output_____
###Markdown
What are quantum circuits?Quantum circuits are models for quantum computation in which a computation is a sequence of quantum gates. Quantum gates often represent rotations on the Bloch sphere. Let's take a look at some of the popular quantum gates. X gate The X-gate is represented by the Pauli-X matrix: $X = \begin{pmatrix}0 & 1 \\1 & 0 \\\end{pmatrix}$An X gate equates to a rotation around the X-axis of the Bloch sphere by $\pi$ radians. It maps $|0\rangle$ to $|1\rangle$ and $|1\rangle$ to $|0\rangle$. It is the quantum equivalent of the NOT gate for classical computers and is sometimes called a bit-flip.
###Code
x_gate=QuantumCircuit(1) # Create a quantum circuit with 1 qubit
x_gate.x(0)
x_gate.draw(output='mpl')
backend = Aer.get_backend('statevector_simulator')
result = execute(x_gate, backend).result().get_statevector()
plot_bloch_multivector(result)
###Output
_____no_output_____
###Markdown
SX gateThe SX gate equates to a rotation around the X-axis of the Bloch sphere by $\pi/2$. It is called SX gate to indicate that it is the square-root of the X gate. Applying this gate twice produces the standard Pauli-X gate. The opposite of the SX is the SX dagger, which is a rotation by $\pi/2$ in the opposite direction.$SX = \frac{1}{2}\begin{pmatrix}1+i & 1-i \\1-i & 1+i \\\end{pmatrix}$
###Code
sx_gate = QuantumCircuit(1)
sx_gate.sx(0)
sx_gate.draw(output='mpl')
backend = Aer.get_backend('statevector_simulator')
result = execute(sx_gate, backend).result().get_statevector()
plot_bloch_multivector(result)
###Output
_____no_output_____
###Markdown
RZ gateThe Rz gate performs a rotation of $\phi$ around the Z-axis direction (Where $\phi$ is a real number). It has the matrix below:$RZ = \begin{pmatrix}1 & 0 \\0 & e ^{i \phi } \\\end{pmatrix}$
###Code
rz_gate = QuantumCircuit(1)
rz_gate.rz(pi/2, 0)
rz_gate.draw(output='mpl')
backend = Aer.get_backend('statevector_simulator')
result = execute(rz_gate, backend).result().get_statevector()
plot_bloch_multivector(result)
###Output
_____no_output_____
###Markdown
Since the rotation is around the Z-axis one would not see a difference when we apply it to the default state $|0\rangle$, so we use the state which was generated by applying the SX gate instead and apply the RZ to it.
###Code
rz_gate.sx(0)
rz_gate.rz(pi/2, 0)
rz_gate.draw(output='mpl')
backend = Aer.get_backend('statevector_simulator')
result = execute(rz_gate, backend).result().get_statevector()
plot_bloch_multivector(result)
###Output
_____no_output_____
###Markdown
Hadamard gateA Hadamard gate represents a rotation of $\pi$ about the axis that is in the middle of the X-axis and Z-axis.It maps the basis state $|0\rangle$ to $\frac{|0\rangle + |1\rangle}{\sqrt{2}}$, which means that a measurement will have equal probabilities of being `1` or `0`, creating a 'superposition' of states. This state is also written as $|+\rangle$. What the Hadamard does is to transform between the $|0\rangle$ $|1\rangle$ and the $|+\rangle$ $|-\rangle$ base. $H = \frac{1}{\sqrt{2}}\begin{pmatrix}1 & 1 \\1 & -1 \\\end{pmatrix}$
###Code
# Let's do an H-gate on a |0> qubit
h_gate = QuantumCircuit(1)
h_gate.h(0)
h_gate.draw(output='mpl')
# Let's see the result
backend = Aer.get_backend('statevector_simulator')
result = execute(h_gate, backend).result().get_statevector()
plot_bloch_multivector(result)
###Output
_____no_output_____
###Markdown
CX gate (CNOT gate)The controlled NOT (or CNOT or CX) gate acts on two qubits. It performs the NOT operation (equivalent to applying an X gate) on the second qubit only when the first qubit is $|1\rangle$ and otherwise leaves it unchanged. Note: Qiskit numbers the bits in a string from right to left.$CX = \begin{pmatrix}1 & 0 & 0 & 0 \\0 & 1 & 0 & 0 \\0 & 0 & 0 & 1 \\0 & 0 & 1 & 0 \\\end{pmatrix}$
###Code
cx_gate = QuantumCircuit(2)
cx_gate.cx(0,1)
cx_gate.draw(output='mpl')
###Output
_____no_output_____
###Markdown
CCX gate (Toffoli gate)The CCX gate (controlled controlled X Gate) is also called a Toffoli gate. The CCX gate is a three-bit gate, with two controls and one target as their input and output. If the first two bits are in the state $|1\rangle$, it applies a Pauli-X (or NOT) on the third bit. Otherwise, it does nothing. Note: Qiskit numbers the bits in a string from right to left.$CCX = \begin{pmatrix}1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\\end{pmatrix}$
###Code
ccx_gate = QuantumCircuit(3)
ccx_gate.ccx(0,1,2)
ccx_gate.draw(output='mpl')
###Output
_____no_output_____
###Markdown
Create logical gates with the help of quantum gates NOT gateThe NOT gate flips the value of a bit and, as was mentioned before, an X gate can be considered a NOT gate. The truth table for a NOT gate looks like this:| Input | Output || --- | --- | | 1 | 0 || 0 | 1 |
###Code
not_gate=QuantumCircuit(1,1) # Create a quantum circuit with 1 qubit and 1 classical bit
not_gate.x(0)
not_gate.measure(0,0)
not_gate.draw(output='mpl')
###Output
_____no_output_____
###Markdown
AND gateThe output of an AND is true if and only if both inputs are true. The truth table for an AND Gate looks like this:| A (Input) | B (Input) | Output || --- | --- | --- || 0 | 0 | 0 | | 0 | 1 | 0 || 1 | 0 | 0 || 1 | 1 | 1 |With a Toffoli gate, we can get the result of an AND gate by interpreting the two control bits as the input bits and the target bit as the output bit.
###Code
and_gate=QuantumCircuit(3,1) # Create a quantum circuit with 3 qubits and 1 classical bit
and_gate.ccx(0,1,2)
and_gate.measure(2,0)
and_gate.draw(output='mpl')
###Output
_____no_output_____
###Markdown
OR gateAn OR gate returns true if at least one of the input gates is true.The truth table for an OR Gate looks like this:| A (Input) | B (Input) | Output || --- | --- | --- || 0 | 0 | 0 | | 0 | 1 | 1 || 1 | 0 | 1 || 1 | 1 | 1 |
###Code
or_gate=QuantumCircuit(3,1) # Create a quantum circuit with 3 qubits and 1 classical bit
or_gate.cx(1,2)
or_gate.cx(0,2)
or_gate.ccx(0,1,2)
or_gate.measure(2,0)
or_gate.draw(output='mpl')
###Output
_____no_output_____
###Markdown
Using the Circuit Composer widgetYou might be familiar with the IBM quantum circuit composer, where you can generate circuits while using a graphical interface. With the new circuit composer widget, the same functionality can be used in a jupyter notebook. You can learn more about how to use the widget [here](https://quantum-computing.ibm.com/lab/docs/iql/composer-widget)**Exercise 1a:** Build a NOR (a negated OR) gate using circuit composer This exercise is intended to encourage you to play around a bit with the composer. It is not graded. You can skip ahead if you want. Executing the cell below will show you a composer, where you can add gates by dragging them onto the correct place.
###Code
from ibm_quantum_widgets import CircuitComposer
editor = CircuitComposer()
editor
# This code is being generated automatically by the IBM Quantum Circuit Composer widget.
# It changes in every update of the widget, so any modifications done in this cell will be lost.
# State: synchronized
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from numpy import pi
qreg_q = QuantumRegister(3, 'q')
creg_c = ClassicalRegister(3, 'c')
circuit = QuantumCircuit(qreg_q, creg_c)
circuit.cx(qreg_q[1], qreg_q[2])
circuit.cx(qreg_q[0], qreg_q[2])
circuit.ccx(qreg_q[0], qreg_q[1], qreg_q[2])
circuit.measure(qreg_q[2], creg_c[0])
circuit.x(qreg_q[2])
circuit.measure(qreg_q[2], creg_c[0])
circuit.draw(output='mpl')
#extra
# This code is being generated automatically by the IBM Quantum Circuit Composer widget.
# It changes in every update of the widget, so any modifications done in this cell will be lost.
# State: synchronized
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from numpy import pi
qreg_q = QuantumRegister(3, 'q')
creg_c = ClassicalRegister(3, 'c')
circuit = QuantumCircuit(qreg_q, creg_c)
circuit.cx(qreg_q[1], qreg_q[2])
circuit.cx(qreg_q[0], qreg_q[2])
circuit.ccx(qreg_q[0], qreg_q[1], qreg_q[2])
circuit.measure(qreg_q[2], creg_c[0])
circuit.x(qreg_q[2])
circuit.measure(qreg_q[2], creg_c[0])
###Output
_____no_output_____
###Markdown
Use the Circuit Composer widget to work with an existing circuitWe can also use the circuit composer to open a previously created circuit. With the code below we will open the circuit created above, which represents the OR gate. You can use this to check if you constructed your OR correctly.You can edit the opened circuit by drag and drop. Try to delete the measurement at the end.
###Code
from ibm_quantum_widgets import CircuitComposer
editor2 = CircuitComposer(circuit=or_gate)
editor2
# This code is being generated automatically by the IBM Quantum Circuit Composer widget.
# It changes in every update of the widget, so any modifications done in this cell will be lost.
# State: synchronized
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from numpy import pi
qreg_q = QuantumRegister(3, 'q')
creg_c = ClassicalRegister(1, 'c')
circuit = QuantumCircuit(qreg_q, creg_c)
circuit.cx(qreg_q[1], qreg_q[2])
circuit.cx(qreg_q[0], qreg_q[2])
circuit.ccx(qreg_q[0], qreg_q[1], qreg_q[2])
circuit.measure(qreg_q[2], creg_c[0])
circuit.measure(qreg_q[2], creg_c[0])
###Output
_____no_output_____
###Markdown
In the example below we store the last circuit from the composer in the variable qc2, and then apply an X to the output and measure again.
###Code
qc2 = editor2.circuit
qc2.x(2)
qc2.measure((2), (0))
qc2.draw(output='mpl')
###Output
_____no_output_____
###Markdown
We have now created a NOR, the negation of an OR; it should be identical to the circuit you constructed yourself in the first exercise. Composite quantum gates and their costA real quantum computer normally does not have physical implementations of all gates. Instead, they use a small set of base gates which form a universal gate set: similar to the classical case, this is a set of instructions which can be used to implement all possible operations. For this reason, quantum circuits must be transpiled into basis gates before execution. This is usually done automatically by the Qiskit transpiler when a quantum circuit is sent to an IBM Quantum system. But for learning purposes, you are expected to construct the circuit by hand using the basis gates. The basis gates for IBM Quantum systems are typically CX, ID, RZ, SX and X gates. You can see the [`ibmq_mumbai` system](https://quantum-computing.ibm.com/services?skip=0&systems=all&system=ibmq_mumbai) for an example. Now let's take a look at the circuit below:
###Code
qc = QuantumCircuit(2)
qc.sxdg(0)
qc.t(1)
qc.draw(output='mpl')
###Output
_____no_output_____
###Markdown
Now let's show how a decomposition of the above circuit for a quantum computer, using only the base gates, could look.
###Code
qc = QuantumCircuit(2)
qc.sx(0)
qc.sx(0)
qc.sx(0)
qc.rz(pi/4,1)
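# Quick check of the cost formula introduced below (a sketch: CNOTs count 10, every other gate 1).
ops = qc.count_ops()
print("cost =", 10 * ops.get('cx', 0) + sum(n for gate, n in ops.items() if gate != 'cx'))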
qc.draw(output='mpl')
###Output
_____no_output_____
###Markdown
As you can see, we now use only the base gates, but for this reason more total gates are used. The more gates a circuit has, the more complex it is to run. So, when we want to calculate the cost of a circuit, we consider the number of gates used. However, not all gates are considered equally expensive, so when we calculate the cost of a circuit, we use the following formula:$$Cost = 10 N_{CNOT} + N_{other}$$where $N_{CNOT}$ is the number of CNOT gates and $N_{other}$ is the number of other gates. Hadamard gateAs said, all operations can be expressed just using the base gates. As an example we show how to construct a Hadamard gate using our base gate set. We don't have a base gate which does a direct rotation around the axis that is in the middle of the X-axis and Z-axis, so we instead use rotations around the X-axis and Z-axis to get the same result. Can you guess what rotations we need to do?
###Code
q=QuantumRegister(1)
c=ClassicalRegister(1)
qc=QuantumCircuit(q,c)
qc.rz(pi/2, 0)
qc.sx(0)
qc.rz(pi/2, 0)
qc.draw(output='mpl')
###Output
_____no_output_____
###Markdown
As you might remember, this is the circuit we had above, when we visualized the rotation of the RZ gate. Above we saw that the first RZ does nothing, when we are in the $|0\rangle$ or $|1\rangle$ states. So it might feel a bit useless. However, if we are in the $|+\rangle$ and $|-\rangle$ states the first rotation has an effect. We have the opposite scenario, since after applying the SX gate we are again in the $|0\rangle$ or $|1\rangle$ state and then the second RZ has no effect. Controlled rotationWe have seen above the Controlled NOT, lets now show one example on how one can build a controlled rotation around the Y-axis. The rotation $\theta$ can be any rotation, it does not have to be $\pi$, this is just an example.
###Code
qc = QuantumCircuit(2)
theta = pi # Theta can be anything (pi chosen arbitrarily)
qc.ry(theta/2,1)
qc.cx(0,1)
qc.ry(-theta/2,1)
qc.cx(0,1)
qc.draw(output='mpl')
###Output
_____no_output_____
###Markdown
When one goes through this circuit, one can see that if the first qubit is 0 then the two rotations cancel each other out and nothing happens. On the other hand, if the first qubit is 1 we will get a state which is equal to applying the rotation $\theta / 2$ two times which forms our initial rotation $\theta$. This works since the X-axis and Y-axis are orthogonal. For rotation around other axes you might need to use other tricks. Controlled controlled rotationAbove we saw an example on how one can do a controlled rotation around the $Y$-axis.Now we assume we have a controlled rotation (around the axis we want) and want to build from that a double controlled rotation, which is only applied if both control qubits are 1 similar to the CCX gate.
###Code
qc = QuantumCircuit(3)
theta = pi # Theta can be anything (pi chosen arbitrarily)
qc.cp(theta/2,1,2)
qc.cx(0,1)
qc.cp(-theta/2,1,2)
qc.cx(0,1)
qc.cp(theta/2,0,2)
qc.draw()
###Output
_____no_output_____
###Markdown
In this circuit if both the first and second qubit are 0, then nothing happens at all. If only the second qubit is one, first we apply a rotation by $\pi/2$ and afterwards a rotation by $-\pi/2$ which cancel each other out. If only the first qubit is 1, then the second qubit will also be 1 after the first CX, so a rotation by $-\pi/2$ will be applied and afterwards a rotation by $\pi/2$ will be applied, and these two rotations cancel each other out again. If both the first and the second qubit are 1 then first there will be a rotation by $\pi/2$, then the second qubit will become 0 so the next rotation does not apply, and then it is flipped back to 1. Afterwards another rotation by $\pi/2$ is applied since the first qubit is 1. So we have two times a rotation of $\pi/2$ which form together a rotation of $\pi$. The problemWe have seen above how to construct a Hadamard gate with our base set, and now we want to build a Toffoli gate as well. Why the Toffoli gate? As mentioned above, the Toffoli gate is also a universal gate for classical computation the same way the NAND gate is, but it is reversible. Further, it builds a simple universal gate set for quantum computation if combined with the Hadamard gate. We have seen some examples on how to express more complex gates using basis gates; we now want to use the knowledge gained to construct a Toffoli gate only using our basis gates. In order to solve this exercise, the above examples on how to construct and use controlled rotations will come in handy. The biggest challenge is to construct the needed controlled rotations. You can use the code below using the composer widget to construct your circuit. As a reminder, the basis gates for IBM Quantum systems are CX, RZ, SX and X gates, so no other gates are allowed. Of course, we also want to try to minimize the cost. $$Cost = 10 N_{CNOT} + N_{other}$$
###Code
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit import IBMQ, Aer, execute
from ibm_quantum_widgets import CircuitComposer
editorEx = CircuitComposer()
editorEx
##### Build your quantum circuit here using the composer widget.
# You can also build your circuit programmatically using Qiskit code
circuit = QuantumCircuit(3)
# WRITE YOUR CODE BETWEEN THESE LINES - START
def gate_ry(theta,num):
circuit.sx(num)
circuit.sx(num)
circuit.sx(num)
circuit.rz(pi/2, num)
circuit.sx(num)
circuit.rz(pi/2, num)
circuit.rz(-theta, num)
circuit.rz(pi/2, num)
circuit.sx(num)
circuit.rz(pi/2, num)
circuit.sx(num)
def gate_h(num):
circuit.rz(pi/2, num)
circuit.sx(num)
circuit.rz(pi/2, num)
def gate_t(num):
circuit.rz(pi/4,num)
def gate_tdg(num):
circuit.rz(-pi/4,num)
gate_h(2)
circuit.cx(1,2)
gate_tdg(2)
circuit.cx(0,2)
gate_t(2)
circuit.cx(1,2)
gate_tdg(2)
circuit.cx(0,2)
gate_t(2)
gate_h(2)
gate_t(1)
circuit.cx(0,1)
gate_t(0)
gate_tdg(1)
circuit.cx(0,1)
# WRITE YOUR CODE BETWEEN THESE LINES - END
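# Optional rough check of the exercise cost (Cost = 10*N_CNOT + N_other) on the raw gate counts.
ops = circuit.count_ops()
print("estimated cost:", 10 * ops.get('cx', 0) + sum(n for gate, n in ops.items() if gate != 'cx'))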
# Checking the resulting circuit
qc = editorEx.circuit
qc = circuit # Uncomment this line if you want to submit the circuit built using Qiskit code
qc.draw(output='mpl')
# Check your answer using following code
from qc_grader import grade_ex1
grade_ex1(qc)
# Submit your answer. You can re-submit at any time.
from qc_grader import submit_ex1
submit_ex1(qc)
###Output
Submitting your answer for ex1. Please wait...
Success 🎉! Your answer has been submitted.
|
notebooks/03_Supervised_Classification.ipynb | ###Markdown
Supervised ClassificationThere are many different types of models we can use for classification, and each of these models has types of problems it is well suited to. The goal of this notebook is to identify algorithms that will effectively classify our dataset, which we can then investigate further.
###Code
%load_ext autoreload
%autoreload 1
%matplotlib inline
import numpy as np
from sklearn import (
cluster,
decomposition,
preprocessing,
discriminant_analysis,
tree,
neighbors,
naive_bayes,
)
from sklearn import model_selection
import sklearn
import umap
from sdanalysis.order import (
compute_neighbours,
relative_orientations,
relative_distances,
)
from sdanalysis.figures import plot_frame
import matplotlib.pyplot as plt
import joblib
import warnings
warnings.filterwarnings("ignore")
# Import project tools
import sys
sys.path.append("../src")
%aimport trimer
%aimport figures
from trimer import read_all_files, classify_mols
from figures import plot_clustering, plot_confusion_matrix
# Configure Bokeh to output the figures to the notebook
from bokeh.io import output_notebook, show
output_notebook()
###Output
_____no_output_____
###Markdown
The choice of the 100th frame of the trajectory is to choose a configuration in which there is sufficient thermal motion in the crystal region. This motion is to be indicative of the range of values for the crystal. At high temperatures, the 1st (0 indexed) frame of the dump dataset includes significant melting of the crystal, which is not at all useful for the training of this algorithm.
###Code
var_snaps = read_all_files(
"../data/simulation/dataset/output/", index=100, pattern="trajectory-*.gsd"
)
variables = [v for v, _ in var_snaps]
snaps = [s for _, s in var_snaps]
###Output
_____no_output_____
###Markdown
Loading the training dataWe need to load in the training dataset we created in the first notebook. At this point we are interested in two sets of data, - $X$, the input data which is the orientation of the six nearest neighbours - $y$, the true labelled classification of the data.
###Code
classes = np.concatenate(
[classify_mols(snap, v.crystal) for snap, v in zip(snaps, variables)]
)
orientations = np.concatenate(
[relative_orientations(s.box, s.position, s.orientation) for s in snaps]
)
mask = classes < 4
X = orientations[mask]
y = classes[mask]
###Output
_____no_output_____
###Markdown
Collating the modelsThe first step here is creating a list of models we would like to test. An excellent property of scikit-learn is that all the algorithms have the same API, allowing us to treat them all in the same way. This is not an exhaustive list of all the possible classifiers in scikit-learn, just a smattering for comparison. For a more exhaustive list check out [the scikit-learn documentation](http://scikit-learn.org/stable/supervised_learning.html#supervised-learning), and feel free to add more to the list.
###Code
ml_models = {
"LR": sklearn.linear_model.LogisticRegression(max_iter=200),
"SGD": sklearn.linear_model.SGDClassifier(tol=1e-3, max_iter=1000),
"LDA": sklearn.discriminant_analysis.LinearDiscriminantAnalysis(),
"DT": sklearn.tree.DecisionTreeClassifier(),
"KNN": sklearn.neighbors.KNeighborsClassifier(),
"NB": sklearn.naive_bayes.GaussianNB(),
# 'SVM': svm.SVC(),
# 'NN': sklearn.neural_network.MLPClassifier(max_iter=500)
}
###Output
_____no_output_____
###Markdown
Testing the ModelsWith a collection of models to test, we now need some method of testing the models to compare them. To perform the initial screening of datasets we are going to break our training data into two groups, - the training set, comprising 80% of the molecules - the validation set, comprising the remaining 20%. This division of the dataset gives us a set of data previously unseen by the algorithms, giving us a method of testing whether the algorithm is actually learning the underlying features, or just 'memorising' the training data. This division of data will be through a random selection so as not to bias the selection of data.
###Code
validation_size = 0.20
seed = 7
selected = sklearn.model_selection.train_test_split(
X, y, test_size=validation_size, random_state=seed
)
X_train, X_validation, y_train, y_validation = selected
###Output
_____no_output_____
###Markdown
To get an idea of the models which warrant further investigation, we can iterate through each of our models. Each model is scored by breaking the training data into `n_splits` folds, using one of these splits for testing and the remaining splits for training. This process is referred to as *cross validation* and typically the number of splits is 10. For the purposes of this running in a reasonable amount of time, `n_splits` is set to 2.
###Code
scoring = "balanced_accuracy"
n_splits = 2
# Typically n_splits would be 10 but it runs much slower
n_splits = 10
###Output
_____no_output_____
###Markdown
Unsorted Orientations
###Code
# Iterate through each model in our dictionary of models
for name, model in ml_models.items():
kfold = sklearn.model_selection.KFold(n_splits=n_splits, random_state=seed)
cv_results = sklearn.model_selection.cross_val_score(
model, X_train, y_train, cv=kfold, scoring=scoring
)
print(f"{name:5s}: {cv_results.mean():.5f} ± {cv_results.std():.5f}")
###Output
LR : 0.79423 ± 0.00340
SGD : 0.79029 ± 0.00735
LDA : 0.79086 ± 0.00350
DT : 0.95977 ± 0.00210
KNN : 0.96855 ± 0.00201
NB : 0.81018 ± 0.00585
###Markdown
Sorted Orientations
###Code
X = np.sort(X, axis=1)
selected = model_selection.train_test_split(
X, y, test_size=validation_size, random_state=seed
)
X_train, X_validation, y_train, y_validation = selected
scoring = "balanced_accuracy"
n_splits = 2
# Typically n_splits would be 10 but it runs much slower
n_splits = 10
# Iterate through each model in our dictionary of models
for name, model in ml_models.items():
kfold = model_selection.KFold(n_splits=n_splits, random_state=seed)
cv_results = model_selection.cross_val_score(
model, np.sort(X_train, axis=1), y_train, cv=kfold, scoring=scoring
)
print(f"{name:5s}: {cv_results.mean():.5f} ± {cv_results.std():.5f}")
###Output
LR : 0.92370 ± 0.00278
SGD : 0.86042 ± 0.07395
LDA : 0.93184 ± 0.00207
DT : 0.96418 ± 0.00231
KNN : 0.96829 ± 0.00200
NB : 0.89795 ± 0.00190
###Markdown
Out of all the algorithms tested, there are three that stand out - K-Nearest Neighbours (KNN), - Decision Tree (DT), - Neural Network (NN) with accuracies in excess of 95%. So with these three algorithms it is likely worth tweaking them slightly from the default parameters in an effort to improve performance. It is also worth understanding which classes each of these algorithms is strongest at classifying. For this additional data we are going to be using a [confusion matrix](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html). In a confusion matrix, the diagonal elements represent the correct classifications, while the off diagonal elements are the values which were incorrectly classified.
###Code
# Cross-validate the plain KNN classifier on the sorted orientations
knn_model = neighbors.KNeighborsClassifier()
kfold = model_selection.KFold(n_splits=n_splits, random_state=seed)
cv_results = model_selection.cross_val_score(
    knn_model, np.sort(X_train, axis=1), y_train, cv=kfold, scoring=scoring
)
print(f"KNN  : {cv_results.mean():.5f} ± {cv_results.std():.5f}")
# Baseline accuracy from always predicting the liquid class (label 0)
sklearn.metrics.accuracy_score(y_validation, np.zeros_like(y_validation))
y_pred = ml_models["KNN"].fit(X_train, y_train).predict(X_validation)
plot_confusion_matrix(y_validation, y_pred, classes=["liq", "p2", "p2gg", "pg"])
plt.savefig("../figures/confusion_matrix_knn.pdf")
y_pred = ml_models["DT"].fit(X_train, y_train).predict(X_validation)
plot_confusion_matrix(y_validation, y_pred, classes=["liq", "p2", "p2gg", "pg"])
plt.savefig("../figures/confusion_matrix_dt.pdf")
# y_pred = ml_models["NN"].fit(X_train, y_train).predict(X_validation)
# plot_confusion_matrix(y_validation, y_pred, classes=['liq', 'p2', 'p2gg', 'pg'])
# plt.savefig("../figures/confusion_matrix_nn.pdf")
###Output
_____no_output_____
###Markdown
It is interesting to note that all of the models have the most difficulty with the liquid/crystal characterisation, with the largest proportion of false positives being crystal incorrectly classified as liquid. To make the model we have created persistent, it needs to be saved, which is done using `joblib`.
###Code
knn = ml_models["KNN"]
knn.fit(X_train, y_train)
joblib.dump(knn, "../models/knn-trimer.pkl")
dt = ml_models["DT"]
dt.fit(X_train, y_train)
joblib.dump(dt, "../models/dt-trimer.pkl")
# nn = ml_models['NN']
# nn.fit(X_train, y_train)
# joblib.dump(nn, '../models/nn-trimer.pkl')
###Output
_____no_output_____
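###Markdown
 As a small added sketch, the persisted classifier can be loaded again in a later analysis with `joblib.load` (assuming the same relative path used above):
###Code
# Reload the saved KNN model and check it still predicts on a few validation samples
knn_loaded = joblib.load("../models/knn-trimer.pkl")
knn_loaded.predict(X_validation[:5])
###Output
_____no_output_____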
###Markdown
Visualisation of Errors
###Code
# for nneighbours in [1, 2, 5, 10, 20, 40]:
# knn = sklearn.neighbors.KNeighborsClassifier(n_neighbors=nneighbours)
# kfold = sklearn.model_selection.KFold(n_splits=10, random_state=seed)
# cv_results = sklearn.model_selection.cross_val_score(knn, X_train, y_train, cv=kfold, scoring=scoring)
# print(f'{nneighbours}: {cv_results.mean():.5f} ± {cv_results.std():.5f}')
X_s = np.sort(X_validation, axis=1)
X_reduced = umap.UMAP(random_state=42).fit_transform(X_s)
figures.plot_dimensionality_reduction(X_reduced, knn.predict(X_validation))
figures.plot_dimensionality_reduction(X_reduced, y_validation)
###Output
_____no_output_____ |
PyTorch/Data processing.ipynb | ###Markdown
TODO[X] extract frames from video (.mp4)[X] create PyTorch Dataset (no data augmentation for now)[X] separate into training, validation dataset[ ] dataloader for pretrained MobileNetv3 model(http://pytorch.org/vision/stable/models.html: All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded in to a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225])
###Code
import cv2
###Output
_____no_output_____
###Markdown
**extract frames from video and save them as .jpeg**source: https://stackoverflow.com/questions/33311153/python-extracting-and-saving-video-frames List of supported formats for extracted images: https://docs.opencv.org/3.4/d4/da8/group__imgcodecs.html#ga288b8b3da0892bd651fce07b3bbd3a56
###Code
vidcap = cv2.VideoCapture('/home/advo/dev/DMS_android/PyTorch/Resources/VID_20210530_214905012.mp4')
success,image = vidcap.read()
count = 0
while success and count < 20:
cv2.imwrite("frame%d.jpg" % count, image) # save frame as JPEG file
success,image = vidcap.read()
print('Read a new frame: ', success)
count += 1
###Output
_____no_output_____
###Markdown
**Implement DataLoader**Source: https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
###Code
# from __future__ import print_function, division
import os
import torch
import torchvision
from torchvision import datasets, transforms, models
generic_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# driver_dataset = DriverFaceDataset(csv_file="/home/advo/dev/DMS_android/PyTorch/Resources/labels.csv",
# root_dir="/home/advo/dev/DMS_android/PyTorch/Resources/images",
# transform=generic_transform)
driver_dataset = datasets.ImageFolder('/home/advo/dev/DMS_android/PyTorch/Resources/images', transform=generic_transform)
# Split dataset into training and validation
size_train = int(0.8 * len(driver_dataset))
size_val = len(driver_dataset) - size_train
driver_dataset_train, driver_dataset_val = torch.utils.data.dataset.random_split(driver_dataset, [size_train, size_val])
train_loader = torch.utils.data.DataLoader(driver_dataset_train, batch_size=4, shuffle=True, num_workers=0)
val_loader = torch.utils.data.DataLoader(driver_dataset_val, batch_size=4, shuffle=True, num_workers=0)
mobilenet_v3_small = models.mobilenet_v3_small(pretrained=True)
print(mobilenet_v3_small)
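# Added sketch: run one batch from the DataLoader through the pretrained model to confirm
# that the normalized 3x224x224 tensors produced by generic_transform are accepted.
mobilenet_v3_small.eval()
with torch.no_grad():
    images, labels = next(iter(train_loader))
    outputs = mobilenet_v3_small(images)
print(outputs.shape)  # (batch_size, 1000) ImageNet logits; a new classifier head would be needed for custom classes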
###Output
_____no_output_____ |
preprocessing/2.rescaling.ipynb | ###Markdown
We can see at the bottom right two points with extremely large GrLivArea that are of a low price. These values are huge outliers. Therefore, we can safely delete them.
###Code
#Deleting outliers
train = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index)
#Check the graphic again
fig, ax = plt.subplots()
ax.scatter(train['GrLivArea'], train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
###Output
_____no_output_____
###Markdown
**SalePrice** is the variable we need to predict. So let's do some analysis on this variable first.
###Code
sns.distplot(train['SalePrice'] , fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(train['SalePrice'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
#Now plot the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')
#Get also the QQ-plot
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
plt.show()
###Output
mu = 180932.92 and sigma = 79467.79
###Markdown
The target variable is right skewed. As (linear) models love normally distributed data, we need to transform this variable and make it more normally distributed. **Log-transformation of the target variable**
###Code
#We use the numpy fuction log1p which applies log(1+x) to all elements of the column
#Check the new distribution
sns.distplot(np.log1p(train["SalePrice"]) , fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(np.log1p(train["SalePrice"]))
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
#Now plot the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')
#Get also the QQ-plot
fig = plt.figure()
res = stats.probplot(np.log1p(train["SalePrice"]), plot=plt)
plt.show()
###Output
mu = 12.02 and sigma = 0.40
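###Markdown
An added aside: because a model fitted on the transformed target predicts log-prices, `np.expm1` (the inverse of `np.log1p`) maps such predictions back to the original SalePrice scale.
###Code
#expm1 undoes log1p, so transformed values can be converted back to prices
log_price = np.log1p(train["SalePrice"])
np.allclose(np.expm1(log_price), train["SalePrice"])
###Output
_____no_output_____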
###Markdown
The skew now seems corrected and the data appears more normally distributed. Let's try another scaling.
###Code
from sklearn.preprocessing import Normalizer
l1 = Normalizer('l1').fit_transform(train["SalePrice"].values.reshape((-1,1)))[:,0]
l2 = Normalizer('l2').fit_transform(train["SalePrice"].values.reshape((-1,1)))[:,0]
maxs = Normalizer('max').fit_transform(train["SalePrice"].values.reshape((-1,1)))[:,0]
l1
l2
maxs
from sklearn.preprocessing import MinMaxScaler
zero_to_one = MinMaxScaler().fit_transform(train["SalePrice"].values.reshape((-1,1)))[:,0]
zero_to_one
#0 - 1 scaled
#Check the new distribution
sns.distplot(zero_to_one , fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(zero_to_one)
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
#Now plot the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')
#Get also the QQ-plot
fig = plt.figure()
res = stats.probplot(zero_to_one, plot=plt)
plt.show()
from sklearn.preprocessing import StandardScaler
standardized = StandardScaler().fit_transform(train["SalePrice"].values.reshape((-1,1)))[:,0]
standardized
#standardization
#Check the new distribution
sns.distplot(standardized, fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(standardized)
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
#Now plot the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')
#Get also the QQ-plot
fig = plt.figure()
res = stats.probplot(standardized, plot=plt)
plt.show()
from scipy.stats import boxcox
xt, _ = boxcox(train["SalePrice"].values)
xt
#Check the new distribution
sns.distplot(xt, fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(xt)
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
#Now plot the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')
#Get also the QQ-plot
fig = plt.figure()
res = stats.probplot(xt, plot=plt)
plt.show()
###Output
mu = 7.84 and sigma = 0.16
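###Markdown
An added note: unlike log1p, the Box-Cox transform is only invertible if the fitted lambda is kept, so it is worth storing it instead of discarding it (this sketch assumes `scipy.special.inv_boxcox` is available, SciPy >= 0.16).
###Code
from scipy.special import inv_boxcox
xt, lmbda = boxcox(train["SalePrice"].values) #keep lambda for the inverse transform
np.allclose(inv_boxcox(xt, lmbda), train["SalePrice"].values)
###Output
_____no_output_____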
|
convolutional-neural-networks/week1/Convolution_model_Application_v1a.ipynb | ###Markdown
Convolutional Neural Networks: ApplicationWelcome to Course 4's second assignment! In this notebook, you will:- Implement helper functions that you will use when implementing a TensorFlow model- Implement a fully functioning ConvNet using TensorFlow **After this assignment you will be able to:**- Build and train a ConvNet in TensorFlow for a classification problem We assume here that you are already familiar with TensorFlow. If you are not, please refer the *TensorFlow Tutorial* of the third week of Course 2 ("*Improving deep neural networks*"). Updates to Assignment If you were working on a previous version* The current notebook filename is version "1a". * You can find your work in the file directory as version "1".* To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory. List of Updates* `initialize_parameters`: added details about tf.get_variable, `eval`. Clarified test case.* Added explanations for the kernel (filter) stride values, max pooling, and flatten functions.* Added details about softmax cross entropy with logits.* Added instructions for creating the Adam Optimizer.* Added explanation of how to evaluate tensors (optimizer and cost).* `forward_propagation`: clarified instructions, use "F" to store "flatten" layer.* Updated print statements and 'expected output' for easier visual comparisons.* Many thanks to Kevin P. Brown (mentor for the deep learning specialization) for his suggestions on the assignments in this course! 1.0 - TensorFlow modelIn the previous assignment, you built helper functions using numpy to understand the mechanics behind convolutional neural networks. Most practical applications of deep learning today are built using programming frameworks, which have many built-in functions you can simply call. As usual, we will start by loading in the packages.
###Code
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
from cnn_utils import *
%matplotlib inline
np.random.seed(1)
###Output
_____no_output_____
###Markdown
Run the next cell to load the "SIGNS" dataset you are going to use.
###Code
# Loading the data (signs)
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
###Output
_____no_output_____
###Markdown
As a reminder, the SIGNS dataset is a collection of 6 signs representing numbers from 0 to 5. The next cell will show you an example of a labelled image in the dataset. Feel free to change the value of `index` below and re-run to see different examples.
###Code
# Example of a picture
index = 6
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
###Output
y = 2
###Markdown
In Course 2, you had built a fully-connected network for this dataset. But since this is an image dataset, it is more natural to apply a ConvNet to it. To get started, let's examine the shapes of your data.
###Code
X_train = X_train_orig/255.
X_test = X_test_orig/255.
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
conv_layers = {}
###Output
number of training examples = 1080
number of test examples = 120
X_train shape: (1080, 64, 64, 3)
Y_train shape: (1080, 6)
X_test shape: (120, 64, 64, 3)
Y_test shape: (120, 6)
###Markdown
1.1 - Create placeholdersTensorFlow requires that you create placeholders for the input data that will be fed into the model when running the session.**Exercise**: Implement the function below to create placeholders for the input image X and the output Y. You should not define the number of training examples for the moment. To do so, you could use "None" as the batch size; it will give you the flexibility to choose it later. Hence X should be of dimension **[None, n_H0, n_W0, n_C0]** and Y should be of dimension **[None, n_y]**. [Hint: search for the tf.placeholder documentation](https://www.tensorflow.org/api_docs/python/tf/placeholder).
###Code
# GRADED FUNCTION: create_placeholders
def create_placeholders(n_H0, n_W0, n_C0, n_y):
"""
Creates the placeholders for the tensorflow session.
Arguments:
n_H0 -- scalar, height of an input image
n_W0 -- scalar, width of an input image
n_C0 -- scalar, number of channels of the input
n_y -- scalar, number of classes
Returns:
X -- placeholder for the data input, of shape [None, n_H0, n_W0, n_C0] and dtype "float"
Y -- placeholder for the input labels, of shape [None, n_y] and dtype "float"
"""
### START CODE HERE ### (≈2 lines)
X = tf.placeholder(dtype="float", shape=(None, n_H0, n_W0, n_C0))
Y = tf.placeholder(dtype="float", shape=(None, n_y))
### END CODE HERE ###
return X, Y
X, Y = create_placeholders(64, 64, 3, 6)
print ("X = " + str(X))
print ("Y = " + str(Y))
###Output
X = Tensor("Placeholder:0", shape=(?, 64, 64, 3), dtype=float32)
Y = Tensor("Placeholder_1:0", shape=(?, 6), dtype=float32)
###Markdown
**Expected Output** X = Tensor("Placeholder:0", shape=(?, 64, 64, 3), dtype=float32) Y = Tensor("Placeholder_1:0", shape=(?, 6), dtype=float32) 1.2 - Initialize parametersYou will initialize weights/filters $W1$ and $W2$ using `tf.contrib.layers.xavier_initializer(seed = 0)`. You don't need to worry about bias variables as you will soon see that TensorFlow functions take care of the bias. Note also that you will only initialize the weights/filters for the conv2d functions. TensorFlow initializes the layers for the fully connected part automatically. We will talk more about that later in this assignment.**Exercise:** Implement initialize_parameters(). The dimensions for each group of filters are provided below. Reminder - to initialize a parameter $W$ of shape [1,2,3,4] in Tensorflow, use:```pythonW = tf.get_variable("W", [1,2,3,4], initializer = ...)``` tf.get_variable()[Search for the tf.get_variable documentation](https://www.tensorflow.org/api_docs/python/tf/get_variable). Notice that the documentation says:```Gets an existing variable with these parameters or create a new one.```So we can use this function to create a tensorflow variable with the specified name, but if the variables already exist, it will get the existing variable with that same name.
###Code
# GRADED FUNCTION: initialize_parameters
def initialize_parameters():
"""
Initializes weight parameters to build a neural network with tensorflow. The shapes are:
W1 : [4, 4, 3, 8]
W2 : [2, 2, 8, 16]
Note that we will hard code the shape values in the function to make the grading simpler.
Normally, functions should take values as inputs rather than hard coding.
Returns:
parameters -- a dictionary of tensors containing W1, W2
"""
tf.set_random_seed(1) # so that your "random" numbers match ours
### START CODE HERE ### (approx. 2 lines of code)
# W1 = None
# W2 = None
W1 = tf.get_variable("W1", [4, 4, 3, 8], initializer =tf.contrib.layers.xavier_initializer(seed = 0) )
W2 = tf.get_variable("W2", [2, 2, 8, 16], initializer =tf.contrib.layers.xavier_initializer(seed = 0) )
### END CODE HERE ###
parameters = {"W1": W1,
"W2": W2}
return parameters
tf.reset_default_graph()
with tf.Session() as sess_test:
parameters = initialize_parameters()
init = tf.global_variables_initializer()
sess_test.run(init)
print("W1[1,1,1] = \n" + str(parameters["W1"].eval()[1,1,1]))
print("W1.shape: " + str(parameters["W1"].shape))
print("\n")
print("W2[1,1,1] = \n" + str(parameters["W2"].eval()[1,1,1]))
print("W2.shape: " + str(parameters["W2"].shape))
###Output
W1[1,1,1] =
[ 0.00131723 0.14176141 -0.04434952 0.09197326 0.14984085 -0.03514394
-0.06847463 0.05245192]
W1.shape: (4, 4, 3, 8)
W2[1,1,1] =
[-0.08566415 0.17750949 0.11974221 0.16773748 -0.0830943 -0.08058
-0.00577033 -0.14643836 0.24162132 -0.05857408 -0.19055021 0.1345228
-0.22779644 -0.1601823 -0.16117483 -0.10286498]
W2.shape: (2, 2, 8, 16)
###Markdown
** Expected Output:**```W1[1,1,1] = [ 0.00131723 0.14176141 -0.04434952 0.09197326 0.14984085 -0.03514394 -0.06847463 0.05245192]W1.shape: (4, 4, 3, 8)W2[1,1,1] = [-0.08566415 0.17750949 0.11974221 0.16773748 -0.0830943 -0.08058 -0.00577033 -0.14643836 0.24162132 -0.05857408 -0.19055021 0.1345228 -0.22779644 -0.1601823 -0.16117483 -0.10286498]W2.shape: (2, 2, 8, 16)``` 1.3 - Forward propagationIn TensorFlow, there are built-in functions that implement the convolution steps for you.- **tf.nn.conv2d(X,W, strides = [1,s,s,1], padding = 'SAME'):** given an input $X$ and a group of filters $W$, this function convolves $W$'s filters on X. The third parameter ([1,s,s,1]) represents the strides for each dimension of the input (m, n_H_prev, n_W_prev, n_C_prev). Normally, you'll choose a stride of 1 for the number of examples (the first value) and for the channels (the fourth value), which is why we wrote the value as `[1,s,s,1]`. You can read the full documentation on [conv2d](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d).- **tf.nn.max_pool(A, ksize = [1,f,f,1], strides = [1,s,s,1], padding = 'SAME'):** given an input A, this function uses a window of size (f, f) and strides of size (s, s) to carry out max pooling over each window. For max pooling, we usually operate on a single example at a time and a single channel at a time. So the first and fourth value in `[1,f,f,1]` are both 1. You can read the full documentation on [max_pool](https://www.tensorflow.org/api_docs/python/tf/nn/max_pool).- **tf.nn.relu(Z):** computes the elementwise ReLU of Z (which can be any shape). You can read the full documentation on [relu](https://www.tensorflow.org/api_docs/python/tf/nn/relu).- **tf.contrib.layers.flatten(P)**: given a tensor "P", this function takes each training (or test) example in the batch and flattens it into a 1D vector. * If a tensor P has the shape (m,h,w,c), where m is the number of examples (the batch size), it returns a flattened tensor with shape (batch_size, k), where $k=h \times w \times c$. "k" equals the product of all the dimension sizes other than the first dimension. * For example, given a tensor with dimensions [100,2,3,4], it flattens the tensor to be of shape [100, 24], where 24 = 2 * 3 * 4. You can read the full documentation on [flatten](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/flatten).- **tf.contrib.layers.fully_connected(F, num_outputs):** given the flattened input F, it returns the output computed using a fully connected layer. You can read the full documentation on [full_connected](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/fully_connected).In the last function above (`tf.contrib.layers.fully_connected`), the fully connected layer automatically initializes weights in the graph and keeps on training them as you train the model. Hence, you did not need to initialize those weights when initializing the parameters. Window, kernel, filterThe words "window", "kernel", and "filter" are used to refer to the same thing. This is why the parameter `ksize` refers to "kernel size", and we use `(f,f)` to refer to the filter size. Both "kernel" and "filter" refer to the "window." **Exercise**Implement the `forward_propagation` function below to build the following model: `CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED`. You should use the functions above. 
In detail, we will use the following parameters for all the steps: - Conv2D: stride 1, padding is "SAME" - ReLU - Max pool: Use an 8 by 8 filter size and an 8 by 8 stride, padding is "SAME" - Conv2D: stride 1, padding is "SAME" - ReLU - Max pool: Use a 4 by 4 filter size and a 4 by 4 stride, padding is "SAME" - Flatten the previous output. - FULLYCONNECTED (FC) layer: Apply a fully connected layer without a non-linear activation function. Do not call the softmax here. This will result in 6 neurons in the output layer, which then get passed later to a softmax. In TensorFlow, the softmax and cost function are lumped together into a single function, which you'll call in a different function when computing the cost.
###Code
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
"""
Implements the forward propagation for the model:
CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
Note that for simplicity and grading purposes, we'll hard-code some values
such as the stride and kernel (filter) sizes.
Normally, functions should take these values as function parameters.
Arguments:
X -- input dataset placeholder, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "W2"
the shapes are given in initialize_parameters
Returns:
Z3 -- the output of the last LINEAR unit
"""
# Retrieve the parameters from the dictionary "parameters"
W1 = parameters['W1']
W2 = parameters['W2']
### START CODE HERE ###
# # CONV2D: stride of 1, padding 'SAME'
# Z1 = None
# # RELU
# A1 = None
# # MAXPOOL: window 8x8, stride 8, padding 'SAME'
# P1 = None
# # CONV2D: filters W2, stride 1, padding 'SAME'
# Z2 = None
# # RELU
# A2 = None
# # MAXPOOL: window 4x4, stride 4, padding 'SAME'
# P2 = None
# # FLATTEN
# F = None
# # FULLY-CONNECTED without non-linear activation function (not not call softmax).
# # 6 neurons in output layer. Hint: one of the arguments should be "activation_fn=None"
# Z3 = None
# CONV2D: stride of 1, padding 'SAME'
Z1 = tf.nn.conv2d(X,W1, strides = [1,1,1,1], padding = 'SAME')
# RELU
A1 = tf.nn.relu(Z1)
# MAXPOOL: window 8x8, stride 8, padding 'SAME'
P1 = tf.nn.max_pool(A1, ksize = [1,8,8,1], strides = [1,8,8,1], padding = 'SAME')
# CONV2D: filters W2, stride 1, padding 'SAME'
Z2 = tf.nn.conv2d(P1,W2, strides = [1,1,1,1], padding = 'SAME')
# RELU
A2 = tf.nn.relu(Z2)
# MAXPOOL: window 4x4, stride 4, padding 'SAME'
P2 = tf.nn.max_pool(A2, ksize = [1,4,4,1], strides = [1,4,4,1], padding = 'SAME')
# FLATTEN
F = tf.contrib.layers.flatten(P2)
# FULLY-CONNECTED without non-linear activation function (not not call softmax).
# 6 neurons in output layer. Hint: one of the arguments should be "activation_fn=None"
Z3 = tf.contrib.layers.fully_connected(F, 6,activation_fn=None)
### END CODE HERE ###
return Z3
tf.reset_default_graph()
with tf.Session() as sess:
np.random.seed(1)
X, Y = create_placeholders(64, 64, 3, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
init = tf.global_variables_initializer()
sess.run(init)
a = sess.run(Z3, {X: np.random.randn(2,64,64,3), Y: np.random.randn(2,6)})
print("Z3 = \n" + str(a))
###Output
Z3 =
[[-0.44670227 -1.57208765 -1.53049231 -2.31013036 -1.29104376 0.46852064]
[-0.17601591 -1.57972014 -1.4737016 -2.61672091 -1.00810647 0.5747785 ]]
###Markdown
**Expected Output**:```Z3 = [[-0.44670227 -1.57208765 -1.53049231 -2.31013036 -1.29104376 0.46852064] [-0.17601591 -1.57972014 -1.4737016 -2.61672091 -1.00810647 0.5747785 ]]``` 1.4 - Compute costImplement the compute cost function below. Remember that the cost function helps the neural network see how much the model's predictions differ from the correct labels. By adjusting the weights of the network to reduce the cost, the neural network can improve its predictions.You might find these two functions helpful: - **tf.nn.softmax_cross_entropy_with_logits(logits = Z, labels = Y):** computes the softmax cross entropy loss. This function computes both the softmax activation function and the resulting loss. You can check the full documentation [softmax_cross_entropy_with_logits](https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits).- **tf.reduce_mean:** computes the mean of elements across dimensions of a tensor. Use this to calculate the mean of the losses over all the examples to get the overall cost. You can check the full documentation [reduce_mean](https://www.tensorflow.org/api_docs/python/tf/reduce_mean). Details on softmax_cross_entropy_with_logits (optional reading)* Softmax is used to format outputs so that they can be used for classification. It assigns a value between 0 and 1 for each category, where the sum of all prediction values (across all possible categories) equals 1.* Cross Entropy compares the model's predicted classifications with the actual labels and results in a numerical value representing the "loss" of the model's predictions.* "Logits" are the result of multiplying the weights and adding the biases. Logits are passed through an activation function (such as a relu), and the result is called the "activation."* The function named `softmax_cross_entropy_with_logits` takes logits as input (and not activations); then uses the model to predict using softmax, and then compares the predictions with the true labels using cross entropy. These are done with a single function to optimize the calculations.**Exercise**: Compute the cost below using the function above.
###Code
# GRADED FUNCTION: compute_cost
def compute_cost(Z3, Y):
"""
Computes the cost
Arguments:
Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (number of examples, 6)
Y -- "true" labels vector placeholder, same shape as Z3
Returns:
cost - Tensor of the cost function
"""
### START CODE HERE ### (1 line of code)
# cost = None
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = Z3, labels = Y))
### END CODE HERE ###
return cost
tf.reset_default_graph()
with tf.Session() as sess:
np.random.seed(1)
X, Y = create_placeholders(64, 64, 3, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
cost = compute_cost(Z3, Y)
init = tf.global_variables_initializer()
sess.run(init)
a = sess.run(cost, {X: np.random.randn(4,64,64,3), Y: np.random.randn(4,6)})
print("cost = " + str(a))
###Output
cost = 2.91034
###Markdown
**Expected Output**: ```cost = 2.91034``` 1.5 Model Finally you will merge the helper functions you implemented above to build a model. You will train it on the SIGNS dataset. **Exercise**: Complete the function below. The model below should:- create placeholders- initialize parameters- forward propagate- compute the cost- create an optimizerFinally you will create a session and run a for loop for num_epochs, get the mini-batches, and then for each mini-batch you will optimize the function. [Hint for initializing the variables](https://www.tensorflow.org/api_docs/python/tf/global_variables_initializer) Adam OptimizerYou can use `tf.train.AdamOptimizer(learning_rate = ...)` to create the optimizer. The optimizer has a `minimize(loss=...)` function that you'll call to set the cost function that the optimizer will minimize.For details, check out the documentation for [Adam Optimizer](https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer) Random mini batchesIf you took course 2 of the deep learning specialization, you implemented `random_mini_batches()` in the "Optimization" programming assignment. This function returns a list of mini-batches. It is already implemented in the `cnn_utils.py` file and imported here, so you can call it like this:```Pythonminibatches = random_mini_batches(X, Y, mini_batch_size = 64, seed = 0)```(You will want to choose the correct variable names when you use it in your code). Evaluating the optimizer and costWithin a loop, for each mini-batch, you'll use the `tf.Session` object (named `sess`) to feed a mini-batch of inputs and labels into the neural network and evaluate the tensors for the optimizer as well as the cost. Remember that we built a graph data structure and need to feed it inputs and labels and use `sess.run()` in order to get values for the optimizer and cost.You'll use this kind of syntax:```output_for_var1, output_for_var2 = sess.run( fetches=[var1, var2], feed_dict={var_inputs: the_batch_of_inputs, var_labels: the_batch_of_labels} )```* Notice that `sess.run` takes its first argument `fetches` as a list of objects that you want it to evaluate (in this case, we want to evaluate the optimizer and the cost). * It also takes a dictionary for the `feed_dict` parameter. * The keys are the `tf.placeholder` variables that we created in the `create_placeholders` function above. * The values are the variables holding the actual numpy arrays for each mini-batch. * The sess.run outputs a tuple of the evaluated tensors, in the same order as the list given to `fetches`. For more information on how to use sess.run, see the documentation [tf.Sesssionrun](https://www.tensorflow.org/api_docs/python/tf/Sessionrun) documentation.
###Code
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.009,
num_epochs = 100, minibatch_size = 64, print_cost = True):
"""
Implements a three-layer ConvNet in Tensorflow:
CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
Arguments:
X_train -- training set, of shape (None, 64, 64, 3)
Y_train -- test set, of shape (None, n_y = 6)
X_test -- training set, of shape (None, 64, 64, 3)
Y_test -- test set, of shape (None, n_y = 6)
learning_rate -- learning rate of the optimization
num_epochs -- number of epochs of the optimization loop
minibatch_size -- size of a minibatch
print_cost -- True to print the cost every 100 epochs
Returns:
train_accuracy -- real number, accuracy on the train set (X_train)
test_accuracy -- real number, testing accuracy on the test set (X_test)
parameters -- parameters learnt by the model. They can then be used to predict.
"""
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables
tf.set_random_seed(1) # to keep results consistent (tensorflow seed)
seed = 3 # to keep results consistent (numpy seed)
(m, n_H0, n_W0, n_C0) = X_train.shape
n_y = Y_train.shape[1]
costs = [] # To keep track of the cost
# Create Placeholders of the correct shape
### START CODE HERE ### (1 line)
X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
# X, Y = None
### END CODE HERE ###
# Initialize parameters
### START CODE HERE ### (1 line)
parameters = initialize_parameters()
# parameters = None
### END CODE HERE ###
# Forward propagation: Build the forward propagation in the tensorflow graph
### START CODE HERE ### (1 line)
Z3 = forward_propagation(X, parameters)
# Z3 = None
### END CODE HERE ###
# Cost function: Add cost function to tensorflow graph
### START CODE HERE ### (1 line)
cost = compute_cost(Z3, Y)
# cost = None
### END CODE HERE ###
# Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer that minimizes the cost.
### START CODE HERE ### (1 line)
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss=cost)
# optimizer = None
### END CODE HERE ###
# Initialize all the variables globally
init = tf.global_variables_initializer()
# Start the session to compute the tensorflow graph
with tf.Session() as sess:
# Run the initialization
sess.run(init)
# Do the training loop
for epoch in range(num_epochs):
minibatch_cost = 0.
num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
seed = seed + 1
minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
"""
# IMPORTANT: The line that runs the graph on a minibatch.
# Run the session to execute the optimizer and the cost.
# The feedict should contain a minibatch for (X,Y).
"""
### START CODE HERE ### (1 line)
_ , temp_cost = sess.run(fetches=[optimizer, cost],feed_dict={X: minibatch_X,Y: minibatch_Y})
# _ , temp_cost = None
### END CODE HERE ###
minibatch_cost += temp_cost / num_minibatches
# Print the cost every epoch
if print_cost == True and epoch % 5 == 0:
print ("Cost after epoch %i: %f" % (epoch, minibatch_cost))
if print_cost == True and epoch % 1 == 0:
costs.append(minibatch_cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
# Calculate the correct predictions
predict_op = tf.argmax(Z3, 1)
correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print(accuracy)
train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
print("Train Accuracy:", train_accuracy)
print("Test Accuracy:", test_accuracy)
return train_accuracy, test_accuracy, parameters
###Output
_____no_output_____
###Markdown
Run the following cell to train your model for 100 epochs. Check if your cost after epoch 0 and 5 matches our output. If not, stop the cell and go back to your code!
###Code
_, _, parameters = model(X_train, Y_train, X_test, Y_test)
###Output
Cost after epoch 0: 1.917929
Cost after epoch 5: 1.506757
Cost after epoch 10: 0.955359
Cost after epoch 15: 0.845802
Cost after epoch 20: 0.701174
Cost after epoch 25: 0.571977
Cost after epoch 30: 0.518435
Cost after epoch 35: 0.495806
Cost after epoch 40: 0.429827
Cost after epoch 45: 0.407291
Cost after epoch 50: 0.366394
Cost after epoch 55: 0.376922
Cost after epoch 60: 0.299491
Cost after epoch 65: 0.338870
Cost after epoch 70: 0.316400
Cost after epoch 75: 0.310413
Cost after epoch 80: 0.249549
Cost after epoch 85: 0.243457
Cost after epoch 90: 0.200031
Cost after epoch 95: 0.175452
###Markdown
**Expected output**: although it may not match perfectly, your expected output should be close to ours and your cost value should decrease. **Cost after epoch 0 =** 1.917929 **Cost after epoch 5 =** 1.506757 **Train Accuracy =** 0.940741 **Test Accuracy =** 0.783333 Congratulations! You have finished the assignment and built a model that recognizes SIGN language with almost 80% accuracy on the test set. If you wish, feel free to play around with this dataset further. You can actually improve its accuracy by spending more time tuning the hyperparameters, or using regularization (as this model clearly has a high variance). Once again, here's a thumbs up for your work!
###Code
fname = "images/thumbs_up.jpg"
# Note: ndimage.imread and scipy.misc.imresize were removed in newer SciPy releases;
# this cell assumes the older SciPy version shipped with the course environment.
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(64,64))
plt.imshow(my_image)
###Output
_____no_output_____ |
Milk Yield. No Lactation - High Lactation model.ipynb | ###Markdown
Code for the paper "*A Deep Learning Algorithm Predicts Milk Yield and Production Stage of Dairy Cows utilizing Ultrasound Echotexture Analysis of the Mammary Gland*". It presents the model construction and the produced results for the “High Lactation – No Lactation” binary classification problem.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from tensorflow.keras import models
from tensorflow.keras import layers
###Output
_____no_output_____
###Markdown
Reading the file containing labeling info. The "name" column corresponds to an animal and the "dmy" column corresponds to the animal's milk yield.
###Code
df = pd.read_csv("milk_yield_labels.csv")
print("Length of dataframe containing labeling info: ",len(df))
df.head()
###Output
Length of dataframe containing labeling info: 384
###Markdown
Reading the echotexture variable dataset. Each row contains the echotexture variables retrieved from the ultrasound images associated with the "name" variable.
###Code
df_features = pd.read_csv("echotexture_features.csv") #echotexture_features.csv
print ("Length of dataframe containing features: ",len(df_features))
# Deleting unnecessary column
del df_features['Unnamed: 0']
df_features.head()
###Output
Length of dataframe containing features: 2960
###Markdown
Seven echotexture variables are selected and they are concatenated into a new vector of 56 elements (8 photos * 7 variables for each new sample).
###Code
selected_variable = 'Gradient_Mean_value'
selected_variable2 = 'Homogenity'
selected_variable3 = 'Correlation'
selected_variable4 = 'Percentage_non-zero_Gradients'
selected_variable5 = 'St._Deviation'
selected_variable6 = 'Gradient_Variance'
selected_variable7 = 'Entropy'
# Group every 8 consecutive image rows per animal: concatenate the 7 selected echotexture
# variables of the 8 images into one 56-element vector, then append the milk yield (dmy) and name.
features = []
counter = 1
element = []
for index, row in df_features.iterrows():
if counter % 8 == 0:
name = row['name'][:-4]
dmy = df.loc[df['name'] == name]['dmy'].values
element.append(row[selected_variable])
element.append(row[selected_variable2])
element.append(row[selected_variable3])
element.append(row[selected_variable4])
element.append(row[selected_variable5])
element.append(row[selected_variable6])
element.append(row[selected_variable7])
element.append(dmy[0])
element.append(name)
features.append(element)
element = []
else:
element.append(row[selected_variable])
element.append(row[selected_variable2])
element.append(row[selected_variable3])
element.append(row[selected_variable4])
element.append(row[selected_variable5])
element.append(row[selected_variable6])
element.append(row[selected_variable7])
counter += 1
# Creating a new dataframe of 56 elements extracted from echotecture variables + 'dmy', 'name'
col = ['A1','xA1','cA1','vA1','bA1','nA1','mA1', 'A2','xA2',\
'cA2','vA2','bA2','nA2','mA2', 'B1', 'xB1','cB1','vB1','bB1','nB1','mB1', \
'B2','xB2','cB2','vB2','bB2','nB2','mB2',\
'C1','xC1','cC1','vC1','bC1','nC1','mC1',\
'C2','xC2','cC2','vC2','bC2','nC2','mC2', \
'D1','xD1','cD1','vD1','bD1','nD1','mD1',\
'D2','xD2','cD2','vD2','bD2','nD2','mD2','dmy','name']
df2 = pd.DataFrame(features,columns = col)
print("length of dataframe: ",len(df2))
print(df2.tail(5))
###Output
length of dataframe: 370
A1 xA1 cA1 vA1 bA1 nA1 \
365 135.425474 0.202683 0.000101 0.868294 99.281745 11901.791118
366 134.004899 0.203152 0.000101 0.868317 99.582672 11963.869876
367 134.992778 0.202277 0.000100 0.868629 99.722616 11895.902234
368 136.768464 0.204036 0.000101 0.868243 99.597098 11976.791325
369 135.736027 0.203498 0.000101 0.869119 99.284208 11887.479240
mA1 A2 xA2 cA2 ... mD1 D2 \
365 3.750551 135.468471 0.202734 0.000101 ... 3.688899 136.916793
366 3.787019 134.134725 0.203516 0.000101 ... 3.707755 135.913483
367 3.816579 135.088029 0.203429 0.000101 ... 3.671907 136.097414
368 3.773866 136.562914 0.202071 0.000101 ... 3.763866 139.411495
369 3.753509 135.526978 0.203840 0.000101 ... 3.718130 138.275239
xD2 cD2 vD2 bD2 nD2 mD2 dmy \
365 0.203876 0.000102 0.867878 99.104370 11823.489361 3.612658 0
366 0.202524 0.000102 0.866935 99.048586 11852.480801 3.680110 39.8
367 0.203201 0.000102 0.868081 99.089212 11909.039067 3.707199 36.3
368 0.203000 0.000100 0.867682 99.794817 12000.547920 3.743171 40
369 0.203114 0.000101 0.868951 99.365916 11892.847640 3.701680 16.0
name
365 2196_6
366 2196_10
367 2196_11
368 2196_12
369 2196_9
[5 rows x 58 columns]
###Markdown
Some more data cleansing.
###Code
#Remove elements with ''-'' appearing in some values of the dmy.
df2.drop(df2[df2['dmy'] == '-'].index, inplace = True)
df2['dmy'] = df2['dmy'].astype(float)
print("Length of dataframe after dropping rows: ",len(df2))
###Output
Length of dataframe after dropping rows: 367
###Markdown
Label creation by grouping together features with daily milk yield lower than 10 for the No-Lactation class and greater than 45 for the High-Lactation class.
###Code
labels = []
for _,row in df2.iterrows():
if row['dmy'] >= 0 and row['dmy'] <= 10:
labels.append(0)
elif row['dmy'] <= 1000 and row['dmy'] > 45:
labels.append(1)
else:
# All the other cases are of no interest
labels.append(-1)
df2['labels'] = labels
print("Length of dataframe: ", len(df2))
# Drop rows with label value -1
index_names = df2[ df2['labels'] == -1 ].index
df2.drop(index_names , inplace=True)
print("New length of dataframe after dropping classes between No and High Lactation: ", len(df2))
df2.head()
###Output
Length of dataframe: 367
New length of dataframe after dropping classes between No and High Lactation: 181
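###Markdown
 As an added sanity check, we can inspect how many samples fall into each class after the thresholding above, since the stratified splits used below assume both classes are reasonably represented.
###Code
# 0 = No Lactation (dmy <= 10), 1 = High Lactation (dmy > 45)
df2['labels'].value_counts()
###Output
_____no_output_____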
###Markdown
Model creation - Classes selection - Training - Validation - Testing
###Code
model = models.Sequential()
# Input - Layer
model.add(layers.Dense(50, activation = "relu", input_shape=(56, )))
# Hidden - Layers
model.add(layers.Dropout(0.3, noise_shape=None, seed=None))
model.add(layers.Dense(50, activation = "relu"))
# Output- Layer
model.add(layers.Dense(1, activation = "sigmoid"))
model.summary()
model.compile(
optimizer = 'sgd',
loss = "binary_crossentropy",
metrics = ["accuracy"]
)
all_features = ['A1','xA1','cA1','vA1','bA1','nA1','mA1', 'A2','xA2',\
'cA2','vA2','bA2','nA2','mA2', 'B1', 'xB1','cB1','vB1','bB1','nB1','mB1', \
'B2','xB2','cB2','vB2','bB2','nB2','mB2',\
'C1','xC1','cC1','vC1','bC1','nC1','mC1',\
'C2','xC2','cC2','vC2','bC2','nC2','mC2', \
'D1','xD1','cD1','vD1','bD1','nD1','mD1',\
'D2','xD2','cD2','vD2','bD2','nD2','mD2']
X = df2[all_features].values.tolist()
Y = df2[['labels']].values.tolist()
X = np.array(X)
Y = [y[0] for y in Y]
# Splitting to training, validation and test sets.
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.25,stratify=Y)
X_test, final_x_test, y_test, final_y_test = train_test_split(X_test, y_test, test_size=0.5,stratify=y_test)
# Data transformation and normalization
y_train = np.array(y_train)
y_test = np.array(y_test)
final_y_test= np.array(final_y_test)
scaler = StandardScaler()
scaler.fit(X_train)
X_train_TR = scaler.transform(X_train)
X_test_TR = scaler.transform(X_test)
final_x_test_TR = scaler.transform(final_x_test)
#Train the model
epochs = 50
batch_size = 14
results = model.fit(
X_train_TR, y_train,
epochs = epochs,
batch_size = batch_size,
validation_data = (X_test_TR, y_test)
)
# Validation set
print("-----Validation set------")
print("Validation Accuracy: ", results.history["val_accuracy"][-1])
pred = model.predict(X_test_TR)
rounded = []
for p in pred:
rounded.append(np.round(p))
print(classification_report(y_test, rounded))
# Test set
print("-----Unseen test set predictions------")
pred = model.predict(final_x_test_TR)
rounded = []
for p in pred:
rounded.append(np.round(p))
print(classification_report(final_y_test, rounded))
print("Confusion Matrix of unseen test set: \n", confusion_matrix(final_y_test, rounded))
model_name = 'milk_yield'+"_ep_"+str(epochs)+"_bss_"+str(batch_size)
# "Loss"
plt.plot(results.history['loss'])
plt.plot(results.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig('loss_valloss_'+model_name+'.png', dpi=600)
plt.show()
# "Accuracy"
plt.plot(results.history['accuracy'])
plt.plot(results.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig('acc_vallacc_'+model_name+'.png', dpi=600)
plt.show()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense (Dense) (None, 50) 2850
_________________________________________________________________
dropout (Dropout) (None, 50) 0
_________________________________________________________________
dense_1 (Dense) (None, 50) 2550
_________________________________________________________________
dense_2 (Dense) (None, 1) 51
=================================================================
Total params: 5,451
Trainable params: 5,451
Non-trainable params: 0
_________________________________________________________________
Train on 135 samples, validate on 23 samples
Epoch 1/50
135/135 [==============================] - 1s 7ms/sample - loss: 0.6783 - accuracy: 0.5630 - val_loss: 0.6365 - val_accuracy: 0.6522
Epoch 2/50
135/135 [==============================] - 0s 296us/sample - loss: 0.6489 - accuracy: 0.6222 - val_loss: 0.6146 - val_accuracy: 0.6522
Epoch 3/50
135/135 [==============================] - 0s 296us/sample - loss: 0.6160 - accuracy: 0.6667 - val_loss: 0.6010 - val_accuracy: 0.6522
Epoch 4/50
135/135 [==============================] - 0s 296us/sample - loss: 0.6018 - accuracy: 0.6963 - val_loss: 0.5891 - val_accuracy: 0.7391
Epoch 5/50
135/135 [==============================] - 0s 355us/sample - loss: 0.5875 - accuracy: 0.7259 - val_loss: 0.5746 - val_accuracy: 0.7826
Epoch 6/50
135/135 [==============================] - 0s 415us/sample - loss: 0.5759 - accuracy: 0.7407 - val_loss: 0.5642 - val_accuracy: 0.7391
Epoch 7/50
135/135 [==============================] - 0s 355us/sample - loss: 0.5891 - accuracy: 0.7481 - val_loss: 0.5529 - val_accuracy: 0.7391
Epoch 8/50
135/135 [==============================] - 0s 355us/sample - loss: 0.5605 - accuracy: 0.7704 - val_loss: 0.5424 - val_accuracy: 0.7826
Epoch 9/50
135/135 [==============================] - 0s 415us/sample - loss: 0.5499 - accuracy: 0.8000 - val_loss: 0.5316 - val_accuracy: 0.7826
Epoch 10/50
135/135 [==============================] - 0s 355us/sample - loss: 0.5284 - accuracy: 0.8000 - val_loss: 0.5219 - val_accuracy: 0.7826
Epoch 11/50
135/135 [==============================] - 0s 355us/sample - loss: 0.5568 - accuracy: 0.7704 - val_loss: 0.5114 - val_accuracy: 0.7826
Epoch 12/50
135/135 [==============================] - 0s 378us/sample - loss: 0.5311 - accuracy: 0.7778 - val_loss: 0.5020 - val_accuracy: 0.7826
Epoch 13/50
135/135 [==============================] - 0s 474us/sample - loss: 0.5352 - accuracy: 0.7481 - val_loss: 0.4933 - val_accuracy: 0.7826
Epoch 14/50
135/135 [==============================] - 0s 423us/sample - loss: 0.5258 - accuracy: 0.7704 - val_loss: 0.4864 - val_accuracy: 0.7826
Epoch 15/50
135/135 [==============================] - 0s 415us/sample - loss: 0.5147 - accuracy: 0.7556 - val_loss: 0.4789 - val_accuracy: 0.7826
Epoch 16/50
135/135 [==============================] - 0s 415us/sample - loss: 0.4905 - accuracy: 0.8296 - val_loss: 0.4716 - val_accuracy: 0.7826
Epoch 17/50
135/135 [==============================] - 0s 355us/sample - loss: 0.4901 - accuracy: 0.8000 - val_loss: 0.4647 - val_accuracy: 0.7826
Epoch 18/50
135/135 [==============================] - 0s 355us/sample - loss: 0.4693 - accuracy: 0.8370 - val_loss: 0.4580 - val_accuracy: 0.7826
Epoch 19/50
135/135 [==============================] - 0s 415us/sample - loss: 0.4770 - accuracy: 0.7852 - val_loss: 0.4517 - val_accuracy: 0.7826
Epoch 20/50
135/135 [==============================] - 0s 355us/sample - loss: 0.4730 - accuracy: 0.8222 - val_loss: 0.4449 - val_accuracy: 0.7826
Epoch 21/50
135/135 [==============================] - 0s 415us/sample - loss: 0.4784 - accuracy: 0.7926 - val_loss: 0.4392 - val_accuracy: 0.7826
Epoch 22/50
135/135 [==============================] - 0s 415us/sample - loss: 0.4628 - accuracy: 0.8074 - val_loss: 0.4331 - val_accuracy: 0.7826
Epoch 23/50
135/135 [==============================] - 0s 414us/sample - loss: 0.4657 - accuracy: 0.7630 - val_loss: 0.4286 - val_accuracy: 0.7826
Epoch 24/50
135/135 [==============================] - 0s 408us/sample - loss: 0.4608 - accuracy: 0.8296 - val_loss: 0.4235 - val_accuracy: 0.7826
Epoch 25/50
135/135 [==============================] - 0s 415us/sample - loss: 0.4427 - accuracy: 0.8593 - val_loss: 0.4188 - val_accuracy: 0.7826
Epoch 26/50
135/135 [==============================] - 0s 504us/sample - loss: 0.4449 - accuracy: 0.8000 - val_loss: 0.4148 - val_accuracy: 0.7826
Epoch 27/50
135/135 [==============================] - 0s 415us/sample - loss: 0.4518 - accuracy: 0.8074 - val_loss: 0.4102 - val_accuracy: 0.7826
Epoch 28/50
135/135 [==============================] - 0s 355us/sample - loss: 0.4673 - accuracy: 0.8148 - val_loss: 0.4060 - val_accuracy: 0.8261
Epoch 29/50
135/135 [==============================] - 0s 296us/sample - loss: 0.4360 - accuracy: 0.8074 - val_loss: 0.4029 - val_accuracy: 0.8261
Epoch 30/50
135/135 [==============================] - 0s 415us/sample - loss: 0.4593 - accuracy: 0.7778 - val_loss: 0.3998 - val_accuracy: 0.8261
Epoch 31/50
135/135 [==============================] - 0s 355us/sample - loss: 0.4331 - accuracy: 0.8148 - val_loss: 0.3961 - val_accuracy: 0.8261
Epoch 32/50
135/135 [==============================] - 0s 355us/sample - loss: 0.4196 - accuracy: 0.8148 - val_loss: 0.3926 - val_accuracy: 0.8261
Epoch 33/50
135/135 [==============================] - 0s 355us/sample - loss: 0.4299 - accuracy: 0.8074 - val_loss: 0.3906 - val_accuracy: 0.8261
Epoch 34/50
135/135 [==============================] - 0s 296us/sample - loss: 0.4159 - accuracy: 0.8296 - val_loss: 0.3873 - val_accuracy: 0.8261
Epoch 35/50
135/135 [==============================] - 0s 237us/sample - loss: 0.4146 - accuracy: 0.8148 - val_loss: 0.3846 - val_accuracy: 0.8696
Epoch 36/50
135/135 [==============================] - 0s 296us/sample - loss: 0.4072 - accuracy: 0.8074 - val_loss: 0.3821 - val_accuracy: 0.8696
Epoch 37/50
135/135 [==============================] - 0s 356us/sample - loss: 0.4451 - accuracy: 0.8000 - val_loss: 0.3797 - val_accuracy: 0.8696
Epoch 38/50
135/135 [==============================] - 0s 350us/sample - loss: 0.4105 - accuracy: 0.8222 - val_loss: 0.3775 - val_accuracy: 0.8696
Epoch 39/50
135/135 [==============================] - 0s 355us/sample - loss: 0.4028 - accuracy: 0.7852 - val_loss: 0.3762 - val_accuracy: 0.8261
Epoch 40/50
135/135 [==============================] - 0s 304us/sample - loss: 0.4039 - accuracy: 0.8222 - val_loss: 0.3745 - val_accuracy: 0.8696
Epoch 41/50
135/135 [==============================] - 0s 355us/sample - loss: 0.4107 - accuracy: 0.8222 - val_loss: 0.3717 - val_accuracy: 0.8696
Epoch 42/50
135/135 [==============================] - 0s 356us/sample - loss: 0.3957 - accuracy: 0.8296 - val_loss: 0.3696 - val_accuracy: 0.8696
Epoch 43/50
135/135 [==============================] - 0s 414us/sample - loss: 0.4075 - accuracy: 0.8370 - val_loss: 0.3677 - val_accuracy: 0.8696
Epoch 44/50
135/135 [==============================] - 0s 415us/sample - loss: 0.3912 - accuracy: 0.8296 - val_loss: 0.3664 - val_accuracy: 0.8696
Epoch 45/50
135/135 [==============================] - 0s 415us/sample - loss: 0.4011 - accuracy: 0.8370 - val_loss: 0.3640 - val_accuracy: 0.8696
Epoch 46/50
135/135 [==============================] - 0s 355us/sample - loss: 0.3970 - accuracy: 0.8148 - val_loss: 0.3632 - val_accuracy: 0.8696
Epoch 47/50
135/135 [==============================] - 0s 415us/sample - loss: 0.3785 - accuracy: 0.8296 - val_loss: 0.3620 - val_accuracy: 0.8696
Epoch 48/50
135/135 [==============================] - 0s 355us/sample - loss: 0.4103 - accuracy: 0.8222 - val_loss: 0.3608 - val_accuracy: 0.8696
Epoch 49/50
135/135 [==============================] - 0s 355us/sample - loss: 0.3816 - accuracy: 0.8370 - val_loss: 0.3601 - val_accuracy: 0.8696
Epoch 50/50
135/135 [==============================] - 0s 355us/sample - loss: 0.3809 - accuracy: 0.8222 - val_loss: 0.3593 - val_accuracy: 0.8696
-----Validation set------
Validation Accuracy: 0.8695652
precision recall f1-score support
0 0.91 0.83 0.87 12
1 0.83 0.91 0.87 11
accuracy 0.87 23
macro avg 0.87 0.87 0.87 23
weighted avg 0.87 0.87 0.87 23
-----Unseen test set predictions------
precision recall f1-score support
0 0.92 0.92 0.92 12
1 0.91 0.91 0.91 11
accuracy 0.91 23
macro avg 0.91 0.91 0.91 23
weighted avg 0.91 0.91 0.91 23
Confusion Matrix of unseen test set:
[[11 1]
[ 1 10]]
###Markdown
Plotting the ROC curve.
###Code
from sklearn import metrics
from sklearn.metrics import roc_curve,roc_auc_score
def plot_roc_curve(fpr,tpr,model_name):
plt.plot(fpr,tpr)
plt.axis([0,1,0,1])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.savefig('roc_curve'+model_name+'.png', dpi=600)
plt.show()
fpr, tpr, thresholds = roc_curve(final_y_test, rounded)  # alternatively: roc_curve(y_unseen_stacked, predictions)
plot_roc_curve(fpr, tpr, model_name)
###Output
_____no_output_____ |
pytorch_advanced/3_semantic_segmentation/3-7_PSPNet_training.ipynb | ###Markdown
3.7 Running Training and Validation - In this notebook we run training and validation of PSPNet. The computation is carried out on an AWS GPU machine; it takes about 12 hours on a p2.xlarge instance. Learning goals: 1. Be able to implement training and validation of PSPNet. 2. Understand fine-tuning for semantic segmentation. Preparation: Following the book, download the pretrained model file "pspnet50_ADE20K.pth" and place it in the "weights" folder.
###Code
# Import packages
import random
import math
import time
import pandas as pd
import numpy as np
import torch
import torch.utils.data as data
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import torch.optim as optim
# Initial settings
# Setup seeds
torch.manual_seed(1234)
np.random.seed(1234)
random.seed(1234)
###Output
_____no_output_____
###Markdown
Creating the DataLoader
###Code
from utils.dataloader import make_datapath_list, DataTransform, VOCDataset
# Create the lists of file paths
rootpath = "./data/VOCdevkit/VOC2012/"
train_img_list, train_anno_list, val_img_list, val_anno_list = make_datapath_list(
rootpath=rootpath)
# Create the Datasets
# Mean and standard deviation of the (RGB) color channels
color_mean = (0.485, 0.456, 0.406)
color_std = (0.229, 0.224, 0.225)
train_dataset = VOCDataset(train_img_list, train_anno_list, phase="train", transform=DataTransform(
input_size=475, color_mean=color_mean, color_std=color_std))
val_dataset = VOCDataset(val_img_list, val_anno_list, phase="val", transform=DataTransform(
input_size=475, color_mean=color_mean, color_std=color_std))
# Create the DataLoaders
batch_size = 8
train_dataloader = data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True)
val_dataloader = data.DataLoader(
val_dataset, batch_size=batch_size, shuffle=False)
# Collect them in a dictionary
dataloaders_dict = {"train": train_dataloader, "val": val_dataloader}
###Output
_____no_output_____
###Markdown
Creating the Network Model
###Code
from utils.pspnet import PSPNet
# Create PSPNet for fine-tuning
# Use a model pretrained on the ADE20K dataset; ADE20K has 150 classes
net = PSPNet(n_classes=150)
# Load the ADE20K pretrained parameters
state_dict = torch.load("./weights/pspnet50_ADE20K.pth")
net.load_state_dict(state_dict)
# Replace the classification convolution layers with ones that output 21 classes
n_classes = 21
net.decode_feature.classification = nn.Conv2d(
in_channels=512, out_channels=n_classes, kernel_size=1, stride=1, padding=0)
net.aux.classification = nn.Conv2d(
in_channels=256, out_channels=n_classes, kernel_size=1, stride=1, padding=0)
# Initialize the replaced convolution layers. Xavier initialization is used because the activation function is a sigmoid.
def weights_init(m):
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:  # if there is a bias term
nn.init.constant_(m.bias, 0.0)
net.decode_feature.classification.apply(weights_init)
net.aux.classification.apply(weights_init)
print('Network setup complete: loaded the pretrained weights')
net
###Output
_____no_output_____
###Markdown
Defining the Loss Function
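PSPNet returns both a main output and an auxiliary output, and the loss below combines the cross-entropy of the two. As a sketch of what the class computes (0.4 is the default `aux_weight`):
$$ L = \mathrm{CE}(\mathrm{output}, \mathrm{target}) + 0.4\,\mathrm{CE}(\mathrm{output}_{aux}, \mathrm{target}) $$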
###Code
# Configure the loss function
class PSPLoss(nn.Module):
"""PSPNetの損失関数のクラスです。"""
def __init__(self, aux_weight=0.4):
super(PSPLoss, self).__init__()
self.aux_weight = aux_weight  # weight of the auxiliary loss
def forward(self, outputs, targets):
"""
損失関数の計算。
Parameters
----------
outputs : PSPNetの出力(tuple)
(output=torch.Size([num_batch, 21, 475, 475]), output_aux=torch.Size([num_batch, 21, 475, 475]))。
targets : [num_batch, 475, 475]
正解のアノテーション情報
Returns
-------
loss : テンソル
損失の値
"""
loss = F.cross_entropy(outputs[0], targets, reduction='mean')
loss_aux = F.cross_entropy(outputs[1], targets, reduction='mean')
return loss+self.aux_weight*loss_aux
criterion = PSPLoss(aux_weight=0.4)
###Output
_____no_output_____
###Markdown
Setting Up the Optimizer
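The scheduler defined below applies a polynomial ("poly") learning-rate decay. As a sketch, the multiplicative factor applied to each parameter group's base learning rate at a given epoch is
$$ \mathrm{factor}(epoch) = \left(1 - \frac{epoch}{30}\right)^{0.9} $$
with 30 being the `max_epoch` hard-coded in `lambda_epoch`.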
###Code
# Since this is fine-tuning, keep the learning rates small
optimizer = optim.SGD([
{'params': net.feature_conv.parameters(), 'lr': 1e-3},
{'params': net.feature_res_1.parameters(), 'lr': 1e-3},
{'params': net.feature_res_2.parameters(), 'lr': 1e-3},
{'params': net.feature_dilated_res_1.parameters(), 'lr': 1e-3},
{'params': net.feature_dilated_res_2.parameters(), 'lr': 1e-3},
{'params': net.pyramid_pooling.parameters(), 'lr': 1e-3},
{'params': net.decode_feature.parameters(), 'lr': 1e-2},
{'params': net.aux.parameters(), 'lr': 1e-2},
], momentum=0.9, weight_decay=0.0001)
# Scheduler settings
def lambda_epoch(epoch):
max_epoch = 30
return math.pow((1-epoch/max_epoch), 0.9)
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_epoch)
###Output
_____no_output_____
###Markdown
Running Training and Validation
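Note that the loop below accumulates gradients over `batch_multiplier = 3` minibatches before each `optimizer.step()`, so the effective batch size is roughly batch_size × batch_multiplier = 8 × 3 = 24; dividing the loss by `batch_multiplier` keeps the gradient magnitude comparable to a single large batch.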
###Code
# Create a function that trains the model
def train_model(net, dataloaders_dict, criterion, scheduler, optimizer, num_epochs):
# Check whether a GPU is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("使用デバイス:", device)
# Move the network to the GPU
net.to(device)
# If the network is largely static, this speeds up computation
torch.backends.cudnn.benchmark = True
# Number of images
num_train_imgs = len(dataloaders_dict["train"].dataset)
num_val_imgs = len(dataloaders_dict["val"].dataset)
batch_size = dataloaders_dict["train"].batch_size
# Set the iteration counter
iteration = 1
logs = []
# multiple minibatch
batch_multiplier = 3
# Loop over epochs
for epoch in range(num_epochs):
# Save the start time
t_epoch_start = time.time()
t_iter_start = time.time()
epoch_train_loss = 0.0  # sum of the training loss over the epoch
epoch_val_loss = 0.0  # sum of the validation loss over the epoch
print('-------------')
print('Epoch {}/{}'.format(epoch+1, num_epochs))
print('-------------')
# Training and validation loop for each epoch
for phase in ['train', 'val']:
if phase == 'train':
net.train()  # set the model to training mode
scheduler.step()  # update the optimization scheduler
optimizer.zero_grad()
print('(train)')
else:
if((epoch+1) % 5 == 0):
net.eval()  # set the model to evaluation mode
print('-------------')
print('(val)')
else:
# Run validation only once every 5 epochs
continue
# Loop that pulls minibatches from the dataloader
count = 0 # multiple minibatch
for imges, anno_class_imges in dataloaders_dict[phase]:
# Skip minibatches of size 1 because they cause an error in batch normalization
# Commented out, as issue #186 showed this is unnecessary
# if imges.size()[0] == 1:
# continue
# Send the data to the GPU if one is available
imges = imges.to(device)
anno_class_imges = anno_class_imges.to(device)
# Parameter update for the multiple-minibatch (gradient accumulation) scheme
if (phase == 'train') and (count == 0):
optimizer.step()
optimizer.zero_grad()
count = batch_multiplier
# Forward computation
with torch.set_grad_enabled(phase == 'train'):
outputs = net(imges)
loss = criterion(
outputs, anno_class_imges.long()) / batch_multiplier
# Backpropagation during training
if phase == 'train':
loss.backward()  # compute the gradients
count -= 1 # multiple minibatch
if (iteration % 10 == 0):  # display the loss once every 10 iterations
t_iter_finish = time.time()
duration = t_iter_finish - t_iter_start
print('イテレーション {} || Loss: {:.4f} || 10iter: {:.4f} sec.'.format(
iteration, loss.item()/batch_size*batch_multiplier, duration))
t_iter_start = time.time()
epoch_train_loss += loss.item() * batch_multiplier
iteration += 1
# During validation
else:
epoch_val_loss += loss.item() * batch_multiplier
# Loss and accuracy for each phase of the epoch
t_epoch_finish = time.time()
print('-------------')
print('epoch {} || Epoch_TRAIN_Loss:{:.4f} ||Epoch_VAL_Loss:{:.4f}'.format(
epoch+1, epoch_train_loss/num_train_imgs, epoch_val_loss/num_val_imgs))
print('timer: {:.4f} sec.'.format(t_epoch_finish - t_epoch_start))
t_epoch_start = time.time()
# Save the logs
log_epoch = {'epoch': epoch+1, 'train_loss': epoch_train_loss /
num_train_imgs, 'val_loss': epoch_val_loss/num_val_imgs}
logs.append(log_epoch)
df = pd.DataFrame(logs)
df.to_csv("log_output.csv")
# Save the final network
torch.save(net.state_dict(), 'weights/pspnet50_' +
str(epoch+1) + '.pth')
# Run training and validation
num_epochs = 30
train_model(net, dataloaders_dict, criterion, scheduler, optimizer, num_epochs=num_epochs)
###Output
使用デバイス: cuda:0
-------------
Epoch 1/30
-------------
(train)
イテレーション 10 || Loss: 0.3835 || 10iter: 83.2019 sec.
イテレーション 20 || Loss: 0.2189 || 10iter: 50.9118 sec.
イテレーション 30 || Loss: 0.1510 || 10iter: 50.8032 sec.
イテレーション 40 || Loss: 0.1658 || 10iter: 50.7695 sec.
イテレーション 50 || Loss: 0.0886 || 10iter: 50.6645 sec.
イテレーション 60 || Loss: 0.0728 || 10iter: 50.6198 sec.
イテレーション 70 || Loss: 0.1165 || 10iter: 50.9016 sec.
イテレーション 80 || Loss: 0.1351 || 10iter: 50.4392 sec.
イテレーション 90 || Loss: 0.2174 || 10iter: 50.6154 sec.
イテレーション 100 || Loss: 0.0904 || 10iter: 50.5267 sec.
イテレーション 110 || Loss: 0.1408 || 10iter: 50.4316 sec.
イテレーション 120 || Loss: 0.0668 || 10iter: 50.5083 sec.
イテレーション 130 || Loss: 0.1251 || 10iter: 50.7642 sec.
イテレーション 140 || Loss: 0.1467 || 10iter: 50.5163 sec.
イテレーション 150 || Loss: 0.0794 || 10iter: 50.5443 sec.
イテレーション 160 || Loss: 0.1435 || 10iter: 50.4825 sec.
イテレーション 170 || Loss: 0.2098 || 10iter: 50.4970 sec.
イテレーション 180 || Loss: 0.1435 || 10iter: 50.5844 sec.
-------------
epoch 1 || Epoch_TRAIN_Loss:0.1771 ||Epoch_VAL_Loss:0.0000
timer: 1056.2730 sec.
-------------
Epoch 2/30
-------------
(train)
イテレーション 190 || Loss: 0.0997 || 10iter: 33.7635 sec.
イテレーション 200 || Loss: 0.0866 || 10iter: 50.4115 sec.
イテレーション 210 || Loss: 0.0708 || 10iter: 50.5687 sec.
イテレーション 220 || Loss: 0.0456 || 10iter: 50.5168 sec.
イテレーション 230 || Loss: 0.0542 || 10iter: 50.6751 sec.
イテレーション 240 || Loss: 0.1099 || 10iter: 50.5361 sec.
イテレーション 250 || Loss: 0.0664 || 10iter: 50.3739 sec.
イテレーション 260 || Loss: 0.0586 || 10iter: 50.3346 sec.
イテレーション 270 || Loss: 0.0648 || 10iter: 50.5255 sec.
イテレーション 280 || Loss: 0.0506 || 10iter: 50.5362 sec.
イテレーション 290 || Loss: 0.0584 || 10iter: 50.6339 sec.
イテレーション 300 || Loss: 0.1094 || 10iter: 50.6676 sec.
イテレーション 310 || Loss: 0.0826 || 10iter: 50.4802 sec.
イテレーション 320 || Loss: 0.0723 || 10iter: 50.4925 sec.
イテレーション 330 || Loss: 0.0507 || 10iter: 50.7134 sec.
イテレーション 340 || Loss: 0.0833 || 10iter: 50.6054 sec.
イテレーション 350 || Loss: 0.0687 || 10iter: 50.8262 sec.
イテレーション 360 || Loss: 0.0571 || 10iter: 50.8524 sec.
-------------
epoch 2 || Epoch_TRAIN_Loss:0.0919 ||Epoch_VAL_Loss:0.0000
timer: 1022.4001 sec.
-------------
Epoch 3/30
-------------
(train)
イテレーション 370 || Loss: 0.0670 || 10iter: 17.0645 sec.
イテレーション 380 || Loss: 0.0644 || 10iter: 50.7432 sec.
イテレーション 390 || Loss: 0.0569 || 10iter: 50.5637 sec.
イテレーション 400 || Loss: 0.0435 || 10iter: 50.5550 sec.
イテレーション 410 || Loss: 0.0569 || 10iter: 50.4836 sec.
イテレーション 420 || Loss: 0.0935 || 10iter: 50.4567 sec.
イテレーション 430 || Loss: 0.1124 || 10iter: 50.6227 sec.
イテレーション 440 || Loss: 0.0750 || 10iter: 50.4561 sec.
イテレーション 450 || Loss: 0.0613 || 10iter: 50.5616 sec.
イテレーション 460 || Loss: 0.0669 || 10iter: 50.4179 sec.
イテレーション 470 || Loss: 0.0575 || 10iter: 50.5301 sec.
イテレーション 480 || Loss: 0.0471 || 10iter: 50.5939 sec.
イテレーション 490 || Loss: 0.0730 || 10iter: 50.7252 sec.
イテレーション 500 || Loss: 0.0639 || 10iter: 50.6068 sec.
イテレーション 510 || Loss: 0.0773 || 10iter: 50.5694 sec.
イテレーション 520 || Loss: 0.0845 || 10iter: 50.5048 sec.
イテレーション 530 || Loss: 0.0612 || 10iter: 50.6143 sec.
イテレーション 540 || Loss: 0.0436 || 10iter: 50.6987 sec.
-------------
epoch 3 || Epoch_TRAIN_Loss:0.0784 ||Epoch_VAL_Loss:0.0000
timer: 1022.9392 sec.
-------------
Epoch 4/30
-------------
(train)
イテレーション 550 || Loss: 0.1612 || 10iter: 0.2970 sec.
イテレーション 560 || Loss: 0.0372 || 10iter: 50.6669 sec.
イテレーション 570 || Loss: 0.0570 || 10iter: 50.9205 sec.
イテレーション 580 || Loss: 0.0980 || 10iter: 50.5683 sec.
イテレーション 590 || Loss: 0.0679 || 10iter: 50.7622 sec.
イテレーション 600 || Loss: 0.0668 || 10iter: 50.7143 sec.
イテレーション 610 || Loss: 0.0637 || 10iter: 50.6980 sec.
イテレーション 620 || Loss: 0.0278 || 10iter: 50.7760 sec.
イテレーション 630 || Loss: 0.1320 || 10iter: 50.6662 sec.
イテレーション 640 || Loss: 0.0860 || 10iter: 50.7256 sec.
イテレーション 650 || Loss: 0.0636 || 10iter: 50.6873 sec.
イテレーション 660 || Loss: 0.0748 || 10iter: 50.7949 sec.
イテレーション 670 || Loss: 0.0438 || 10iter: 50.7084 sec.
イテレーション 680 || Loss: 0.1267 || 10iter: 50.9041 sec.
イテレーション 690 || Loss: 0.0850 || 10iter: 50.7844 sec.
イテレーション 700 || Loss: 0.0778 || 10iter: 50.7264 sec.
イテレーション 710 || Loss: 0.0659 || 10iter: 50.6722 sec.
イテレーション 720 || Loss: 0.0436 || 10iter: 50.6910 sec.
イテレーション 730 || Loss: 0.0661 || 10iter: 50.7440 sec.
-------------
epoch 4 || Epoch_TRAIN_Loss:0.0714 ||Epoch_VAL_Loss:0.0000
timer: 1025.6763 sec.
-------------
Epoch 5/30
-------------
(train)
イテレーション 740 || Loss: 0.0746 || 10iter: 39.5013 sec.
イテレーション 750 || Loss: 0.0995 || 10iter: 50.7592 sec.
イテレーション 760 || Loss: 0.1130 || 10iter: 50.6954 sec.
イテレーション 770 || Loss: 0.0331 || 10iter: 50.6704 sec.
イテレーション 780 || Loss: 0.0709 || 10iter: 50.7510 sec.
イテレーション 790 || Loss: 0.0830 || 10iter: 50.7307 sec.
イテレーション 800 || Loss: 0.0665 || 10iter: 50.6446 sec.
イテレーション 810 || Loss: 0.0518 || 10iter: 50.7922 sec.
イテレーション 820 || Loss: 0.0583 || 10iter: 50.8131 sec.
イテレーション 830 || Loss: 0.0921 || 10iter: 50.7893 sec.
イテレーション 840 || Loss: 0.0990 || 10iter: 50.8343 sec.
イテレーション 850 || Loss: 0.1138 || 10iter: 50.9268 sec.
イテレーション 860 || Loss: 0.0342 || 10iter: 50.8274 sec.
イテレーション 870 || Loss: 0.0757 || 10iter: 50.8006 sec.
イテレーション 880 || Loss: 0.0332 || 10iter: 50.6353 sec.
イテレーション 890 || Loss: 0.0353 || 10iter: 50.5433 sec.
イテレーション 900 || Loss: 0.0350 || 10iter: 50.8261 sec.
イテレーション 910 || Loss: 0.0546 || 10iter: 50.7091 sec.
-------------
(val)
-------------
epoch 5 || Epoch_TRAIN_Loss:0.0666 ||Epoch_VAL_Loss:0.0802
timer: 1347.2602 sec.
-------------
Epoch 6/30
-------------
(train)
イテレーション 920 || Loss: 0.0559 || 10iter: 22.6745 sec.
イテレーション 930 || Loss: 0.0487 || 10iter: 50.4602 sec.
イテレーション 940 || Loss: 0.0906 || 10iter: 50.8003 sec.
イテレーション 950 || Loss: 0.0604 || 10iter: 50.9325 sec.
イテレーション 960 || Loss: 0.0573 || 10iter: 50.7144 sec.
イテレーション 970 || Loss: 0.0493 || 10iter: 50.7799 sec.
イテレーション 980 || Loss: 0.0817 || 10iter: 50.8684 sec.
イテレーション 990 || Loss: 0.0916 || 10iter: 50.5585 sec.
イテレーション 1000 || Loss: 0.1501 || 10iter: 50.6191 sec.
イテレーション 1010 || Loss: 0.0450 || 10iter: 50.6775 sec.
イテレーション 1020 || Loss: 0.0680 || 10iter: 50.6985 sec.
イテレーション 1030 || Loss: 0.0592 || 10iter: 50.5961 sec.
イテレーション 1040 || Loss: 0.0649 || 10iter: 50.6750 sec.
イテレーション 1050 || Loss: 0.0574 || 10iter: 50.4274 sec.
イテレーション 1060 || Loss: 0.0709 || 10iter: 50.5833 sec.
イテレーション 1070 || Loss: 0.0381 || 10iter: 50.7320 sec.
イテレーション 1080 || Loss: 0.0499 || 10iter: 50.6843 sec.
イテレーション 1090 || Loss: 0.0608 || 10iter: 50.6675 sec.
-------------
epoch 6 || Epoch_TRAIN_Loss:0.0613 ||Epoch_VAL_Loss:0.0000
timer: 1024.6171 sec.
-------------
Epoch 7/30
-------------
(train)
イテレーション 1100 || Loss: 0.0614 || 10iter: 5.8334 sec.
イテレーション 1110 || Loss: 0.1200 || 10iter: 50.7367 sec.
イテレーション 1120 || Loss: 0.0713 || 10iter: 50.6147 sec.
イテレーション 1130 || Loss: 0.0755 || 10iter: 50.5701 sec.
イテレーション 1140 || Loss: 0.0827 || 10iter: 50.6069 sec.
イテレーション 1150 || Loss: 0.0365 || 10iter: 50.8411 sec.
イテレーション 1160 || Loss: 0.0608 || 10iter: 50.9119 sec.
イテレーション 1170 || Loss: 0.0546 || 10iter: 50.7145 sec.
イテレーション 1180 || Loss: 0.0517 || 10iter: 51.0666 sec.
イテレーション 1190 || Loss: 0.0936 || 10iter: 50.4782 sec.
イテレーション 1200 || Loss: 0.0612 || 10iter: 50.7285 sec.
イテレーション 1210 || Loss: 0.0683 || 10iter: 50.7413 sec.
イテレーション 1220 || Loss: 0.0631 || 10iter: 50.5807 sec.
イテレーション 1230 || Loss: 0.0512 || 10iter: 50.9287 sec.
イテレーション 1240 || Loss: 0.0585 || 10iter: 50.9585 sec.
イテレーション 1250 || Loss: 0.0479 || 10iter: 50.6934 sec.
イテレーション 1260 || Loss: 0.0975 || 10iter: 50.8956 sec.
イテレーション 1270 || Loss: 0.0371 || 10iter: 50.7362 sec.
イテレーション 1280 || Loss: 0.0696 || 10iter: 50.9284 sec.
-------------
epoch 7 || Epoch_TRAIN_Loss:0.0602 ||Epoch_VAL_Loss:0.0000
timer: 1026.3847 sec.
-------------
Epoch 8/30
-------------
(train)
イテレーション 1290 || Loss: 0.0402 || 10iter: 45.1252 sec.
イテレーション 1300 || Loss: 0.0530 || 10iter: 50.6068 sec.
イテレーション 1310 || Loss: 0.0382 || 10iter: 50.6951 sec.
イテレーション 1320 || Loss: 0.0914 || 10iter: 50.5856 sec.
イテレーション 1330 || Loss: 0.0579 || 10iter: 50.7187 sec.
イテレーション 1340 || Loss: 0.0602 || 10iter: 50.7291 sec.
イテレーション 1350 || Loss: 0.0652 || 10iter: 50.8340 sec.
イテレーション 1360 || Loss: 0.0697 || 10iter: 50.8346 sec.
イテレーション 1370 || Loss: 0.0794 || 10iter: 50.9175 sec.
|
demo/Introduction-to-GraphBLAS-with-Python.ipynb | ###Markdown
Introduction to GraphBLAS with PythonThe GraphBLAS is an API for [sparse matrix](https://en.wikipedia.org/wiki/Sparse_matrix) and vector operations to solve graph problems using [Linear Algebra](https://en.wikipedia.org/wiki/Linear_algebra). pygraphblas wraps the GraphBLAS API using [CFFI](https://cffi.readthedocs.io/en/latest/). This notebook is an introduction to the GraphBLAS and also a tutorial on how to work with graphs in Python using the pygraphblas library.For a mathematical introduction see [Mathematical Foundations of the GraphBLAS](https://people.eecs.berkeley.edu/~aydin/GraphBLAS-Math.pdf).There are several examples of common algorithms implemented with pygraphblas: - [PageRank](./PageRank.ipynb) - [Betweeness Centrality](./BetweenessCentrality.ipynb) - [K-Truss Subgraphs](./K-Truss.ipynb) - [Triangle Counting](./Triangle-Counting.ipynb) - [RadiX-Net Topologies](./RadiX-Net-with-pygraphblas.ipynb) - [User Defined Types](./User-Defined-Types.ipynb) - [Log Semiring](./Log-Semiring.ipynb)The following is a brief introduction to some of pygraphblas' features. Over time the goal will be to improve the documentation and examples. For any suggestions, please [open an issue](https://github.com/Graphegon/pygraphblas/issues) so we can discuss it. Matrices can represent GraphsThe key concept of the GraphBLAS is that [a matrix can represent a graph](https://en.wikipedia.org/wiki/Adjacency_matrix) and [Linear Algebra](https://en.wikipedia.org/wiki/Linear_algebra) can be used to operate on graph matrices. In the conventional sense, a graph is a set of nodes that have edges connecting them. The same connectivity can be represented as a matrix where edges are weight values at the intersection of rows and columns. The rows represent the outgoing node, and the columns represent the incoming node.By relying on the mathematical foundations of graph theory and abstract algebra, graph problems can be expressed in a closed, composable algebraic language. Instead of thinking of problems one node and edge at a time, GraphBLAS encourages you to think of and operate on the graph as a whole object using well understood mathematical techniques.For expedience we'll import the entire pygraphblas library into this notebook with `import *`. This is normally not a recommended practice. This won't affect the examples here, but keep it in mind.
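Before diving in, here is a toy illustration of the adjacency-matrix idea described above (a hypothetical 3-node example in plain Python, not yet using pygraphblas): a graph with edges 0→1 and 1→2 can be written as
```python
# rows are source nodes, columns are destination nodes; a nonzero entry is an edge weight
A = [[0, 1, 0],
     [0, 0, 1],
     [0, 0, 0]]
```
where the entry at row i, column j holds the weight of the edge from node i to node j (here simply 1).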
###Code
from pygraphblas import *
from pygraphblas.gviz import draw, draw_matrix_op, draw_graph_op
###Output
_____no_output_____
###Markdown
Matrices can be created in several different ways; for example, literal lists of data can be used to construct them, as shown here from three lists: a list of outgoing nodes, a list of incoming nodes, and a list of weights. The first two lists must be integers, and pygraphblas automatically makes a matrix of the correct type based on the type of the values in the third list, in this case integers as well:
###Code
A = Matrix.from_lists(
[0, 0, 1, 3, 3, 4, 1, 5],
[1, 3, 2, 4, 5, 2, 5, 4],
[9, 3, 8, 6, 1, 4, 7, 2],)
draw(A)
###Output
_____no_output_____
###Markdown
Adding MatricesAdding two matrices with the plus operator (`+`) combines the two graphs into a union of their shapes; a combining operator is applied wherever both operands have an edge in common. The default operator is `graphblas.plus`, which adds the two weights, but many other GraphBLAS binary operators can be used.
###Code
B = Matrix.from_lists(
[0, 0, 1, 3, 3, 4, 1, 5],
[2, 3, 3, 2, 5, 4, 5, 4],
[9, 3, 8, 6, 2, 4, 5, 2],)
C = A + B
draw_graph_op(A, '+', B, C)
###Output
_____no_output_____
###Markdown
Using a different operatorTo specify a different combining operator for elementwise addition, you can use the `with` syntax; inside the with block the specified operator is used.
###Code
with UINT64.MIN:
C = A + B
draw_graph_op(A, '+', B, C)
###Output
_____no_output_____
###Markdown
Elementwise MultiplicationMultiplying two matrices with the star operator (`*`) does elementwise multiplication, which returns only the intersection of common edges in the two graphs. Note how this contrasts with elementwise addition above, which takes the union. Only the edges in common between the two graphs are operated on. The default operator is `graphblas.times`, which multiplies their values.
###Code
C = A * B
draw_graph_op(A, '*', B, C)
###Output
_____no_output_____
###Markdown
Elementwise Multiplication using different operatorsLike addition, elementwise multiplication can use different operators using `with` syntax.
###Code
with INT64.MIN:
C = A * B
draw_graph_op(A, '*', B, C)
###Output
_____no_output_____
###Markdown
Matrix Vector multiplicationMatrices can be multiplied on the right by vectors; this is similar to taking a step "backwards" in a Breadth First Search from the nodes specified in the right operand. The type of the output is the same as that of the left operand.
###Code
v = Vector.from_lists([4],[True], A.nrows)
draw_graph_op(A, '@', v, A @ v)
###Output
_____no_output_____
###Markdown
In order to search forward through a graph with matrix vector multiplication, use the transpose of the input matrix:
###Code
draw_graph_op(A.transpose(), '@', v, A.transpose() @ v)
###Output
_____no_output_____
###Markdown
Vector Matrix MultiplicationA vector can be multiplied on the right by a matrix. This is a fundamental "step" in a Breadth First Search (BFS) across the graph; the result is the set of edges adjacent to the input. This is mathematically identical to the operation `A.transpose() @ v` shown above. Note that the type of the result is the same as that of the left operand, in this case boolean:
###Code
v = Vector.from_lists([3],[True], A.nrows)
y = v @ A
draw_graph_op(v, '@', A, y)
###Output
_____no_output_____
###Markdown
Row vs Column Vector Matrix MultiplicationSince `Av == vA'`, which direction to go depends on your style and problem, but it's good to keep these equivalences in mind:
###Code
(A @ v == v @ A.transpose()).reduce_bool() and (v @ A == A.transpose() @ v).reduce_bool()
###Output
_____no_output_____
###Markdown
Matrix MultiplicationThe matrix multiplication operator (`@`) is also used to multiply two matrices. The common data pattern is multiplying the rows of the left operand by the columns of the right operand and then summing the results. In the results below the `A` matrix will be multiplied by itself. This squaring of an adjacency matrix is often used to find the [Friend of a Friend](https://en.wikipedia.org/wiki/Friend_of_a_friend) graph, where the result's edges join nodes to nodes two steps away:
###Code
draw_graph_op(A, '@', A, A @ A)
###Output
_____no_output_____
###Markdown
SemiringsA different [Semiring](https://en.wikipedia.org/wiki/Semiring) can be used during matrix multiplication to get different results. A semiring defines two binary operators that are substituted for the multiplication "inner" matrix multiply operation and the addition "outer" operation. The next example uses the `min_plus` semiring.
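Informally, with the `min_plus` semiring the product below is computed as
$$ C_{ij} = \min_{k}\,(A_{ik} + B_{kj}) $$
so edge weights are added along each two-step path and the minimum is kept, which is why this semiring appears in shortest-path computations.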
###Code
draw_graph_op(A, '@', A, A.min_plus(A))
###Output
_____no_output_____
###Markdown
TransposeA matrix can be transposed with the `transpose()` method. This effectively reverses the direction of edges in a graph.
###Code
draw(A.transpose())
###Output
_____no_output_____
###Markdown
Symmetric GraphsAdding a graph to its transpose makes it symmetric: every edge has a corresponding reverse edge.
###Code
draw_graph_op(A, '+', A.transpose(), A + A.transpose())
###Output
_____no_output_____
###Markdown
Random GraphsRandom graphs can be constructed with the `random` Matrix class method.
###Code
draw(Matrix.random(UINT8, 6,6,10, no_diagonal=True, seed=42), show_weight=True)
###Output
_____no_output_____
###Markdown
Breadth First SearchMatrix multiplication is the basic operation for taking a step across a graph. This single movement can be combined with a loop to step across an entire graph, tracking the distance from a beginning node to every other node in the graph. This is called a *Breadth First Search*.
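Informally, each pass of the loop below assigns the current `level` to the newly reached nodes and then computes the next frontier as `q = v @ matrix` restricted (via the complemented mask) to nodes that have not yet been assigned a level; the loop stops once the frontier is empty.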
###Code
def bfs(matrix, start):
"""Given a graph and a starting position, return a vector of hops to all other nodes."""
v = Vector.sparse(UINT8, matrix.nrows) # the result vector
q = Vector.sparse(BOOL, matrix.nrows) # a vector to keep track of known nodes
q[start] = True
level = 1
while q.reduce_bool() and level <= matrix.nrows:
v.assign_scalar(level, mask=q) # Assign the current level to known (q) nodes in v
v.vxm(matrix, mask=v, out=q,
desc=descriptor.RC) # complement the mask to find only the next unknown nodes
level += 1 # increment level
return v
draw(A, show_weight=False, label_vector=bfs(A, 0))
###Output
_____no_output_____
###Markdown
Shortest Path LengthUsing the `min_plus` semiring a similar approach can be used to find the shortest path length between two nodes. Note how instead of a mask/reduce operation, an equality comparison `iseq` is used to determine if a vector changes from one operation to the next.This simple algorithm only returns the shortest path length. Often it's helpful to also compute the actual path and count the number of hops as well. This is demonstrated in the [User Defined Types](./User-Defined-Types.ipynb) notebook.
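Informally, each iteration of the loop below performs a Bellman–Ford-style relaxation
$$ v_j \leftarrow \min\!\left(v_j,\ \min_i\,(v_i + A_{ij})\right) $$
where the outer `min` comes from the `accum=INT64.min` accumulator and the inner `min_plus` product extends the known path lengths by one edge.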
###Code
def sssp(A, start):
v = Vector.sparse(A.type, A.nrows)
v[start] = 0
for _ in range(A.nrows):
v.min_plus(A, out=v, accum=INT64.min)
return v
draw(A, label_vector=sssp(A, 0))
###Output
_____no_output_____ |
rrnumpy/ipynb/d1vector.ipynb | ###Markdown
One-dimensional array: Vector
###Code
import numpy as np
v1 = np.array([1, 3, 5, 7, 9])
v2 = np.array([0, 2, 4, 6, 8])
###Output
_____no_output_____
###Markdown
Scalar product (dot product) of vectors with the same shapeAlgebraic definition:$$ \vec{a} \cdot \vec{b} = \sum_{i=1}^{n}{a_ib_i} = a_1b_1 + a_2b_2 + \dots + a_nb_n $$Geometric definition:$$ \vec{a} \cdot \vec{b} = |\vec{a}| \, |\vec{b}| \cos\theta $$
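A quick way to sanity-check the algebraic definition with plain NumPy (a small illustrative snippet reusing the `v1` and `v2` defined above; `manual` is just a throwaway name):
```python
manual = np.sum(v1 * v2)   # elementwise products, then sum
assert manual == v1 @ v2 == np.dot(v1, v2)
```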
###Code
assert v1.ndim == 1 and v2.ndim == 1 and v1.shape == v2.shape
v1 @ v2 # 或者 np.dot(v1, v2)
###Output
_____no_output_____ |
manuscript/Supplement/JupyterNotebooks/Allison/S1_Testing_Allison_et_al_2019.ipynb | ###Markdown
This notebook compares the outputs from VESIcal to the excel spreadsheet provided by Allison- This notebook uses the Excel spreadsheet entitled: "S1_Testing_Allison_et_al_2019.xlsx"- Test 1 compares saturation pressures from the spreadsheet of Allison et al. (2019) for the sunset crater composition at variable CO$_2$ contents (H$_2$O=0 wt%).- Test 2 compares saturation pressures from the spreadsheet of Allison et al. (2019) to those calculated by VESIcal for all 6 models at 100, 5000 and 8000 ppm CO$_2$ (and H$_2$O=0 wt%). Note, the SFVF composition is evaluated at 7000 ppm, as at 8000 ppm, the pressure exceeds the maximum allowed by the Allison et al. (2019) spreadsheet.
###Code
import VESIcal as v
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from IPython.display import display, HTML
import pandas as pd
import matplotlib as mpl
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import statsmodels.api as sm
from statsmodels.sandbox.regression.predstd import wls_prediction_std
%matplotlib inline
sns.set(style="ticks", context="poster",rc={"grid.linewidth": 1,"xtick.major.width": 1,"ytick.major.width": 1, 'patch.edgecolor': 'black'})
plt.style.use("seaborn-colorblind")
plt.rcParams["font.family"] = 'arial'
plt.rcParams["font.size"] =12
plt.rcParams["mathtext.default"] = "regular"
plt.rcParams["mathtext.fontset"] = "dejavusans"
plt.rcParams['patch.linewidth'] = 1
plt.rcParams['axes.linewidth'] = 1
plt.rcParams["xtick.direction"] = "in"
plt.rcParams["ytick.direction"] = "in"
plt.rcParams["ytick.direction"] = "in"
plt.rcParams["xtick.major.size"] = 6 # Sets length of ticks
plt.rcParams["ytick.major.size"] = 4 # Sets length of ticks
plt.rcParams["ytick.labelsize"] = 12 # Sets size of numbers on tick marks
plt.rcParams["xtick.labelsize"] = 12 # Sets size of numbers on tick marks
plt.rcParams["axes.titlesize"] = 14 # Overall title
plt.rcParams["axes.labelsize"] = 14 # Axes labels
plt.rcParams["legend.fontsize"]= 14
###Output
_____no_output_____
###Markdown
Test 1 - Saturation pressures for variable CO$_2$ contents (Sunset Crater, 0 wt% H$_2$O)- this test compares saturation pressures from the spreadsheet of Allison et al. (2019) to those calculated by VESIcal for the Sunset Crater composition.
###Code
myfile_Test1 = v.BatchFile('S1_Testing_Allison_et_al_2019.xlsx', sheet_name='SunsetCrater_VariableCarbon_0W') # This loads the Sunset Crater composition and the pressures calculated using the Allison spreadsheet
data_Test1 = myfile_Test1.data
satPs_wtemps_Allison_Carbon_Test1=myfile_Test1.calculate_saturation_pressure(temperature="Temp", model='AllisonCarbon_sunset')
# Linear Regression
X_Test1=satPs_wtemps_Allison_Carbon_Test1['Press'] # Convert MPa from their supplement to bars
Y_Test1=satPs_wtemps_Allison_Carbon_Test1['SaturationP_bars_VESIcal']
mask_Test1 = (X_Test1>-1) & (Y_Test1>-1) #& (XComb<7000) # This gets rid of Nans
X_Test1noNan=X_Test1[mask_Test1].values.reshape(-1, 1)
Y_Test1noNan=Y_Test1[mask_Test1].values.reshape(-1, 1)
lr=LinearRegression()
lr.fit(X_Test1noNan,Y_Test1noNan)
Y_pred_Test1=lr.predict(X_Test1noNan)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (12,5)) # adjust dimensions of figure here
ax1.set_xlabel('P$_{Sat}$ Allison Spreadsheet (bar)')
ax1.set_ylabel('P$_{Sat}$ VESIcal (bar)')
ax1.plot(X_Test1noNan,Y_pred_Test1, color='red', linewidth=0.5, zorder=1) # This plots the best fit line
ax1.scatter(X_Test1, Y_Test1, s=50, edgecolors='k', facecolors='silver', marker='o', zorder=5)
# This bit plots the regression parameters on the graph
I='Intercept= ' + str(np.round(lr.intercept_, 3))[1:-1]
G='Gradient= ' + str(np.round(lr.coef_, 3))[2:-2]
R='R$^2$= ' + str(np.round(r2_score(Y_Test1noNan, Y_pred_Test1), 6))
ax1.text(3000, 2000, R)
ax1.text(3000, 1800, G)
ax1.text(3000, 1600, I)
ax2.hist(100*X_Test1/Y_Test1)
ax2.set_xticks([100, 100.1, 100.2])
ax2.set_yticks(np.linspace(0.2, 2, 10))
ax2.set_xlabel('% Difference (Spreadsheet/VESIcal)')
ax2.set_ylabel('# of measurements')
ax2.set_ylim([0,2])
ax1.annotate("a)", xy=(0.02, 0.95), xycoords="axes fraction", fontsize=14)
ax2.annotate("b)", xy=(0.02, 0.95), xycoords="axes fraction", fontsize=14)
fig.savefig('Allison_Test1.png', transparent=True)
###Output
_____no_output_____
###Markdown
Test 2 - Saturation pressures for variable CO$_2$ contents (0 wt% H$_2$O) for all 6 compositions- this test compares saturation pressures from the spreadsheet of Allison et al. (2019) to those calculated by VESIcal for all 6 compositions at 100, 5000 and 10,000 ppm CO$_2$ (and H$_2$O=0 wt%)
###Code
myfile_Test2 = v.BatchFile('S1_Testing_Allison_et_al_2019.xlsx', sheet_name='Diff_Models_VariableCarbon_0W') # This loads the compositions and the pressures calculated using the Allison spreadsheet
data_Test2 = myfile_Test2.get_data()
satPs_wtemps_Allison_Carbon_Test2=myfile_Test2.calculate_saturation_pressure(temperature="Temp", model='AllisonCarbon_sunset')
# This calculates the saturation pressures using each model
satPs_Allison_Carbon_Test2_Sunset=myfile_Test2.calculate_saturation_pressure(temperature="Temp", model='AllisonCarbon_sunset')
satPs_Allison_Carbon_Test2_SFVF=myfile_Test2.calculate_saturation_pressure(temperature="Temp", model='AllisonCarbon_sfvf')
satPs_Allison_Carbon_Test2_Erebus=myfile_Test2.calculate_saturation_pressure(temperature="Temp", model='AllisonCarbon_erebus')
satPs_Allison_Carbon_Test2_Vesuvius=myfile_Test2.calculate_saturation_pressure(temperature="Temp", model='AllisonCarbon_vesuvius')
satPs_Allison_Carbon_Test2_Etna=myfile_Test2.calculate_saturation_pressure(temperature="Temp", model='AllisonCarbon_etna')
satPs_Allison_Carbon_Test2_Stromboli=myfile_Test2.calculate_saturation_pressure(temperature="Temp", model='AllisonCarbon_stromboli')
# Combines outputs from different models to compare to the pressures estimated in the spreadsheet of Allison et al ('Press column of input data')
a=np.concatenate((satPs_Allison_Carbon_Test2_Sunset.loc[satPs_Allison_Carbon_Test2_Sunset.Location=='SunsetCrater', ['SaturationP_bars_VESIcal']].values,
satPs_Allison_Carbon_Test2_SFVF.loc[satPs_Allison_Carbon_Test2_SFVF.Location=='SFVF', ['SaturationP_bars_VESIcal']].values,
satPs_Allison_Carbon_Test2_Erebus.loc[satPs_Allison_Carbon_Test2_Erebus.Location=='Erebus', ['SaturationP_bars_VESIcal']].values,
satPs_Allison_Carbon_Test2_Vesuvius.loc[satPs_Allison_Carbon_Test2_Vesuvius.Location=='Vesuvius', ['SaturationP_bars_VESIcal']].values,
satPs_Allison_Carbon_Test2_Etna.loc[satPs_Allison_Carbon_Test2_Etna.Location=='Etna', ['SaturationP_bars_VESIcal']].values,
satPs_Allison_Carbon_Test2_Stromboli.loc[satPs_Allison_Carbon_Test2_Stromboli.Location=='Stromboli', ['SaturationP_bars_VESIcal']].values
))
Y_syn2=a.reshape(-1, 1)
X_syn2=satPs_Allison_Carbon_Test2_Sunset['Press'].values.reshape(-1, 1)
lr=LinearRegression()
lr.fit(X_syn2,Y_syn2)
Y_pred_syn2=lr.predict(X_syn2)
I='Intercept= ' + str(np.round(lr.intercept_, 5))[1:-1]
G='Gradient= ' + str(np.round(lr.coef_, 5))[2:-2]
R='R$^2$= ' + str(np.round(r2_score(Y_syn2, Y_pred_syn2), 5))
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize = (16,5)) # adjust dimensions of figure here
ax2.set_xlabel('P$_{Sat}$ Allison Spreadsheet (bar)')
ax2.set_ylabel('P$_{Sat}$ VESIcal (bar)')
ax1.plot(X_syn2,Y_pred_syn2, color='red', linewidth=0.5, zorder=1) # This plots the best fit line
# This bit plots the regression parameters on the graph
I='Intercept= ' + str(np.round(lr.intercept_, 2))[1:-1]
G='Gradient= ' + str(np.round(lr.coef_, 3))[2:-2]
R='R$^2$= ' + str(np.round(r2_score(Y_Test1noNan, Y_pred_Test1), 7))
ax1.text(500, 3800, I)
ax1.text(500, 4400, G)
ax1.text(500, 5000, R)
ax1.annotate("a)", xy=(0.02, 0.95), xycoords="axes fraction", fontsize=14)
ax2.annotate("b)", xy=(0.02, 0.95), xycoords="axes fraction", fontsize=14)
ax3.annotate("c)", xy=(0.02, 0.95), xycoords="axes fraction", fontsize=14)
ax1.scatter(satPs_Allison_Carbon_Test2_Sunset.loc[satPs_Allison_Carbon_Test2_Sunset.Location=='SunsetCrater', ['Press']],
satPs_Allison_Carbon_Test2_Sunset.loc[satPs_Allison_Carbon_Test2_Sunset.Location=='SunsetCrater', ['SaturationP_bars_VESIcal']],
s=50, label='Sunset', marker='o', facecolor='green', edgecolor='k', zorder=7)
ax1.scatter(satPs_Allison_Carbon_Test2_Etna.loc[satPs_Allison_Carbon_Test2_Sunset.Location=='Etna', ['Press']],
satPs_Allison_Carbon_Test2_Etna.loc[satPs_Allison_Carbon_Test2_Sunset.Location=='Etna', ['SaturationP_bars_VESIcal']],
s=50, label='Etna', marker='o', facecolor='cyan', edgecolor='k', zorder=2)
ax1.scatter(satPs_Allison_Carbon_Test2_Stromboli.loc[satPs_Allison_Carbon_Test2_Stromboli.Location=='Stromboli', ['Press']],
satPs_Allison_Carbon_Test2_Stromboli.loc[satPs_Allison_Carbon_Test2_Stromboli.Location=='Stromboli', ['SaturationP_bars_VESIcal']],
s=50, label='Stromboli', marker='o', facecolor='yellow', edgecolor='k', zorder=3)
ax1.scatter(satPs_Allison_Carbon_Test2_Vesuvius.loc[satPs_Allison_Carbon_Test2_Vesuvius.Location=='Vesuvius', ['Press']],
satPs_Allison_Carbon_Test2_Vesuvius.loc[satPs_Allison_Carbon_Test2_Vesuvius.Location=='Vesuvius', ['SaturationP_bars_VESIcal']],
s=50, label='Vesuvius', marker='o', facecolor='red', edgecolor='k', zorder=4)
ax1.scatter(satPs_Allison_Carbon_Test2_SFVF.loc[satPs_Allison_Carbon_Test2_SFVF.Location=='SFVF', ['Press']],
satPs_Allison_Carbon_Test2_SFVF.loc[satPs_Allison_Carbon_Test2_SFVF.Location=='SFVF', ['SaturationP_bars_VESIcal']],
s=50, label='SFVF', marker='o', facecolor='magenta', edgecolor='k', zorder=5)
ax1.scatter(satPs_Allison_Carbon_Test2_Erebus.loc[satPs_Allison_Carbon_Test2_Erebus.Location=='Erebus', ['Press']],
satPs_Allison_Carbon_Test2_Erebus.loc[satPs_Allison_Carbon_Test2_SFVF.Location=='Erebus', ['SaturationP_bars_VESIcal']],
s=50, label='Erebus', marker='o', facecolor='white', edgecolor='k', zorder=6)
ax2.hist(100.*X_syn2/Y_syn2)
ax3.scatter(100*(satPs_Allison_Carbon_Test2_Sunset.loc[satPs_Allison_Carbon_Test2_Sunset.Location=='Sunset', ['Press']].values)/(satPs_Allison_Carbon_Test2_Sunset.loc[satPs_Allison_Carbon_Test2_Sunset.Location=='Sunset', ['SaturationP_bars_VESIcal']].values),
satPs_Allison_Carbon_Test2_Sunset.loc[satPs_Allison_Carbon_Test2_Sunset.Location=='Sunset', ['Press']],
s=50, label='Sunset', marker='o', facecolor='green', edgecolor='k', zorder=6)
ax3.scatter(100*(satPs_Allison_Carbon_Test2_Etna.loc[satPs_Allison_Carbon_Test2_Etna.Location=='Etna', ['Press']].values)/(satPs_Allison_Carbon_Test2_Etna.loc[satPs_Allison_Carbon_Test2_Etna.Location=='Etna', ['SaturationP_bars_VESIcal']].values),
satPs_Allison_Carbon_Test2_Etna.loc[satPs_Allison_Carbon_Test2_Etna.Location=='Etna', ['Press']],
s=50, label='Etna', marker='o', facecolor='cyan', edgecolor='k', zorder=6)
ax3.scatter(100*(satPs_Allison_Carbon_Test2_Stromboli.loc[satPs_Allison_Carbon_Test2_Stromboli.Location=='Stromboli', ['Press']].values)/(satPs_Allison_Carbon_Test2_Stromboli.loc[satPs_Allison_Carbon_Test2_Stromboli.Location=='Stromboli', ['SaturationP_bars_VESIcal']].values),
satPs_Allison_Carbon_Test2_Stromboli.loc[satPs_Allison_Carbon_Test2_Stromboli.Location=='Stromboli', ['Press']],
s=50, label='Stromboli', marker='o', facecolor='yellow', edgecolor='k', zorder=6)
ax3.scatter(100*(satPs_Allison_Carbon_Test2_Vesuvius.loc[satPs_Allison_Carbon_Test2_Vesuvius.Location=='Vesuvius', ['Press']].values)/(satPs_Allison_Carbon_Test2_Vesuvius.loc[satPs_Allison_Carbon_Test2_Vesuvius.Location=='Vesuvius', ['SaturationP_bars_VESIcal']].values),
satPs_Allison_Carbon_Test2_Vesuvius.loc[satPs_Allison_Carbon_Test2_Vesuvius.Location=='Vesuvius', ['Press']],
s=50, label='Vesuvius', marker='o', facecolor='red', edgecolor='k', zorder=6)
ax3.scatter(100.*(satPs_Allison_Carbon_Test2_SFVF.loc[satPs_Allison_Carbon_Test2_SFVF.Location=='SFVF', ['Press']].values)/(satPs_Allison_Carbon_Test2_SFVF.loc[satPs_Allison_Carbon_Test2_SFVF.Location=='SFVF', ['SaturationP_bars_VESIcal']].values),
satPs_Allison_Carbon_Test2_SFVF.loc[satPs_Allison_Carbon_Test2_SFVF.Location=='SFVF', ['Press']],
s=50, label='SFVF', marker='o', facecolor='magenta', edgecolor='k', zorder=6)
ax3.scatter(100.*(satPs_Allison_Carbon_Test2_Erebus.loc[satPs_Allison_Carbon_Test2_Erebus.Location=='Erebus', ['Press']].values)/(satPs_Allison_Carbon_Test2_Erebus.loc[satPs_Allison_Carbon_Test2_SFVF.Location=='Erebus', ['SaturationP_bars_VESIcal']].values),
satPs_Allison_Carbon_Test2_Erebus.loc[satPs_Allison_Carbon_Test2_Erebus.Location=='Erebus', ['Press']],
s=50, label='Erebus', marker='o', facecolor='white', edgecolor='k', zorder=6)
ax3.set_xlabel('% Difference (Spreadsheet/VESIcal)')
ax3.set_xlabel('% Difference (Spreadsheet/VESIcal)')
ax3.set_ylabel('P$_{Sat}$ Allison Spreadsheet (bar)')
ax2.set_ylabel('# of measurements')
ax2.set_xlim([99.5, 100.5])
legend = ax1.legend()
legend.get_frame().set_facecolor('none')
ax1.legend(loc='lower right')
ax1.set_xlabel('P$_{Sat}$ Allison Spreadsheet (bar)')
ax1.set_ylabel('P$_{Sat}$ VESIcal (bar)')
plt.subplots_adjust(left=0.125, bottom=None, right=0.9, top=None, wspace=0.3, hspace=None)
fig.savefig('Allison_Test2.png', transparent=True)
###Output
_____no_output_____ |
notebooks/deeplearning.ai/03-logistic-regression-python3.ipynb | ###Markdown
Logistic Regression with a Neural Network mindsetWelcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.**Instructions:**- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.**You will learn to:**- Build the general architecture of a learning algorithm, including: - Initializing parameters - Calculating the cost function and its gradient - Using an optimization algorithm (gradient descent) - Gather all three functions above into a main model function, in the right order. 1 - Packages First, let's run the cell below to import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.- [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.
###Code
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
# OR have this in 'lr_utils.py' and:
# from lr_utils import load_dataset
def load_dataset():
train_dataset = h5py.File('/root/datasets/train_catvnoncat.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('/root/datasets/test_catvnoncat.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
%matplotlib inline
###Output
_____no_output_____
###Markdown
2 - Overview of the Problem set **Problem Statement**: You are given a dataset ("data.h5") containing: - a training set of m_train images labeled as cat (y=1) or non-cat (y=0) - a test set of m_test images labeled as cat or non-cat - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.Let's get more familiar with the dataset. Load the data by running the following code.
###Code
# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
###Output
_____no_output_____
###Markdown
We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images.
###Code
# Example of a picture
index = 25
plt.imshow(train_set_x_orig[index])
print ("y = " +
str(train_set_y[:,index]) +
", it's a '" +
classes[np.squeeze(train_set_y[:,index])].decode("utf-8") +
"' picture.")
###Output
y = [1], it's a 'cat' picture.
###Markdown
Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs. **Exercise:** Find the values for: - m_train (number of training examples) - m_test (number of test examples) - num_px (= height = width of a training image)Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.
###Code
print(train_set_x_orig.shape)
print(train_set_y.shape)
print(test_set_x_orig.shape)
print(test_set_y.shape)
print(classes.shape)
### START CODE HERE ### (≈ 3 lines of code)
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
###Output
Number of training examples: m_train = 209
Number of testing examples: m_test = 50
Height/Width of each image: num_px = 64
Each image is of size: (64, 64, 3)
train_set_x shape: (209, 64, 64, 3)
train_set_y shape: (1, 209)
test_set_x shape: (50, 64, 64, 3)
test_set_y shape: (1, 50)
###Markdown
**Expected Output for m_train, m_test and num_px**: **m_train** 209 **m_test** 50 **num_px** 64 For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.**Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1).A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use: ```pythonX_flatten = X.reshape(X.shape[0], -1).T X.T is the transpose of X```
###Code
# Reshape the training and test examples
### START CODE HERE ### (≈ 2 lines of code)
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
### END CODE HERE ###
# (209, 64, 64, 3) to (12288, 209)
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
###Output
train_set_x_flatten shape: (12288, 209)
train_set_y shape: (1, 209)
test_set_x_flatten shape: (12288, 50)
test_set_y shape: (1, 50)
sanity check after reshaping: [17 31 56 22 33]
###Markdown
**Expected Output**: **train_set_x_flatten shape** (12288, 209) **train_set_y shape** (1, 209) **test_set_x_flatten shape** (12288, 50) **test_set_y shape** (1, 50) **sanity check after reshaping** [17 31 56 22 33] To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel). Let's standardize our dataset.
###Code
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
###Output
_____no_output_____
###Markdown
**What you need to remember:**Common steps for pre-processing a new dataset are:- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)- Reshape the datasets such that each example is now a vector of size (num_px \* num_px \* 3, 1)- "Standardize" the data 3 - General Architecture of the learning algorithm It's time to design a simple algorithm to distinguish cat images from non-cat images.You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why **Logistic Regression is actually a very simple Neural Network!****Mathematical expression of the algorithm**:For one example $x^{(i)}$:$$z^{(i)} = w^T x^{(i)} + b \tag{1}$$$$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$ $$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$The cost is then computed by summing over all training examples:$$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$**Gradient Descent**:Find `w` and `b` that minimize `J(w,b)`.And repeat $w = w - \alpha \frac{\partial J(w,b)}{\partial w}$ where $\alpha$ is the learning rate.In Python, write as $w = w - \alpha dw$.$$w = w - \alpha \frac{\partial J(w,b)}{\partial w}$$$$b = b - \alpha \frac{\partial J(w,b)}{\partial b}$$**Key steps**:In this exercise, you will carry out the following steps: - Initialize the parameters of the model - Learn the parameters for the model by minimizing the cost - Use the learned parameters to make predictions (on the test set) - Analyse the results and conclude 4 - Building the parts of our algorithm The main steps for building a Neural Network are:1. Define the model structure (such as number of input features) 2. Initialize the model's parameters3. Loop: - Calculate current loss (forward propagation) - Calculate current gradient (backward propagation) - Update parameters (gradient descent)You often build 1-3 separately and integrate them into one function we call `model()`. 4.1 - Helper functions**Exercise**: Using your code from "Python Basics", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().
###Code
# GRADED FUNCTION: sigmoid
def sigmoid(z):
"""
Compute the sigmoid of z
Arguments:
z -- A scalar or numpy array of any size.
Return:
s -- sigmoid(z)
"""
### START CODE HERE ### (≈ 1 line of code)
s = 1/(1+np.exp(-z))
### END CODE HERE ###
return s
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
###Output
sigmoid([0, 2]) = [ 0.5 0.88079708]
###Markdown
**Expected Output**: **sigmoid([0, 2])** [ 0.5 0.88079708] 4.2 - Initializing parameters**Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.
###Code
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
"""
This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.
Argument:
dim -- size of the w vector we want (or number of parameters in this case)
Returns:
w -- initialized vector of shape (dim, 1)
b -- initialized scalar (corresponds to the bias)
"""
### START CODE HERE ### (≈ 1 line of code)
w = np.zeros((dim, 1))
b = 0
### END CODE HERE ###
assert(w.shape == (dim, 1))
assert(isinstance(b, float) or isinstance(b, int))
return w, b
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
###Output
w = [[ 0.]
[ 0.]]
b = 0
###Markdown
**Expected Output**: ** w ** [[ 0.] [ 0.]] ** b ** 0 For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1). 4.3 - Forward and Backward propagationNow that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters.**Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.**Hints**:Forward Propagation:- You get X- You compute $A = \sigma(w^T X + b) = (a^{(0)}, a^{(1)}, ..., a^{(m-1)}, a^{(m)})$- You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$Here are the two formulas you will be using: $$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$$$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$**Gradient Descent**:Find `w` and `b` that minimize `J(w,b)`.And repeat $w = w - \alpha \frac{\partial J(w,b)}{\partial w}$ where $\alpha$ is the learning rate.In Python, write as $w = w - \alpha dw$.$$w = w - \alpha \frac{\partial J(w,b)}{\partial w}$$$$b = b - \alpha \frac{\partial J(w,b)}{\partial b}$$
###Code
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
"""
Implement the cost function and its gradient for the propagation explained above
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
Return:
cost -- negative log-likelihood cost for logistic regression
dw -- gradient of the loss with respect to w, thus same shape as w
db -- gradient of the loss with respect to b, thus same shape as b
Tips:
- Write your code step by step for the propagation. np.log(), np.dot()
"""
m = X.shape[1]
# FORWARD PROPAGATION (FROM X TO COST)
### START CODE HERE ### (≈ 2 lines of code)
A = sigmoid(np.dot(w.T,X) + b) # compute activation
cost = -1/m * np.sum((Y*np.log(A)) + ((1-Y)*np.log(1-A))) # compute cost
### END CODE HERE ###
# BACKWARD PROPAGATION (TO FIND GRAD)
### START CODE HERE ### (≈ 2 lines of code)
dw = 1/m * np.dot(X, (A-Y).T)
db = 1/m * np.sum(A-Y)
### END CODE HERE ###
assert(dw.shape == w.shape)
assert(db.dtype == float)
cost = np.squeeze(cost)
assert(cost.shape == ())
grads = {"dw": dw,
"db": db}
return grads, cost
w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
###Output
dw = [[ 0.99993216]
[ 1.99980262]]
db = 0.499935230625
cost = 6.00006477319
###Markdown
**Expected Output**: ** dw ** [[ 0.99993216] [ 1.99980262]] ** db ** 0.499935230625 ** cost ** 6.000064773192205 d) Optimization- You have initialized your parameters.- You are also able to compute a cost function and its gradient.- Now, you want to update the parameters using gradient descent.**Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate.
###Code
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
"""
This function optimizes w and b by running a gradient descent algorithm
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- True to print the loss every 100 steps
Returns:
params -- dictionary containing the weights w and bias b
grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
Tips:
You basically need to write down two steps and iterate through them:
1) Calculate the cost and the gradient for the current parameters. Use propagate().
2) Update the parameters using gradient descent rule for w and b.
"""
costs = []
for i in range(num_iterations):
# Cost and gradient calculation (≈ 1-4 lines of code)
### START CODE HERE ###
grads, cost = propagate(w, b, X, Y)
### END CODE HERE ###
# Retrieve derivatives from grads
dw = grads["dw"]
db = grads["db"]
# update rule (≈ 2 lines of code)
### START CODE HERE ###
w = w - (learning_rate * dw)
b = b - (learning_rate * db)
### END CODE HERE ###
# Record the costs
if i % 100 == 0:
costs.append(cost)
# Print the cost every 100 training examples
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
params = {"w": w,
"b": b}
grads = {"dw": dw,
"db": db}
return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = True)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
###Output
Cost after iteration 0: 6.000065
w = [[ 0.1124579 ]
[ 0.23106775]]
b = 1.55930492484
dw = [[ 0.90158428]
[ 1.76250842]]
db = 0.430462071679
###Markdown
**Expected Output**: **w** [[ 0.1124579 ] [ 0.23106775]] **b** 1.55930492484 **dw** [[ 0.90158428] [ 1.76250842]] **db** 0.430462071679 **Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions: 1. Calculate $\hat{Y} = A = \sigma(w^T X + b)$ 2. Convert the entries of A into 0 (if activation $\leq$ 0.5) or 1 (if activation $>$ 0.5), and store the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this).
###Code
# GRADED FUNCTION: predict
def predict(w, b, X):
'''
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
'''
m = X.shape[1]
Y_prediction = np.zeros((1,m))
w = w.reshape(X.shape[0], 1)
# Compute vector "A" predicting the probabilities of a cat being present in the picture
### START CODE HERE ### (≈ 1 line of code)
A = sigmoid(np.dot(w.T, X) + b)
### END CODE HERE ###
for i in range(A.shape[1]):
# Convert probabilities A[0,i] to actual predictions p[0,i]
### START CODE HERE ### (≈ 4 lines of code)
Y_prediction[0, i] = 0 if A[0, i] <= 0.5 else 1
### END CODE HERE ###
assert(Y_prediction.shape == (1, m))
return Y_prediction
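# Note: the thresholding loop above can also be vectorized, e.g. with
# Y_prediction = (A > 0.5).astype(float), which avoids the explicit for loop.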
print ("predictions = " + str(predict(w, b, X)))
###Output
predictions = [[ 1. 1.]]
###Markdown
**Expected Output**: **predictions** [[ 1. 1.]] **What to remember:**You've implemented several functions that:- Initialize (w,b)- Optimize the loss iteratively to learn parameters (w,b): - computing the cost and its gradient - updating the parameters using gradient descent- Use the learned (w,b) to predict the labels for a given set of examples 5 - Merge all functions into a model You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.**Exercise:** Implement the model function. Use the following notation: - Y_prediction for your predictions on the test set - Y_prediction_train for your predictions on the train set - w, costs, grads for the outputs of optimize()
###Code
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
"""
Builds the logistic regression model by calling the function you've implemented previously
Arguments:
X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
print_cost -- Set to true to print the cost every 100 iterations
Returns:
d -- dictionary containing information about the model.
"""
### START CODE HERE ###
# initialize parameters with zeros (≈ 1 line of code)
w, b = initialize_with_zeros(X_train.shape[0])
# Gradient descent (≈ 1 line of code)
parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
# Retrieve parameters w and b from dictionary "parameters"
w = parameters["w"]
b = parameters["b"]
# Predict test/train set examples (≈ 2 lines of code)
Y_prediction_test = predict(w, b, X_test)
Y_prediction_train = predict(w, b, X_train)
### END CODE HERE ###
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train" : Y_prediction_train,
"w" : w,
"b" : b,
"learning_rate" : learning_rate,
"num_iterations": num_iterations}
return d
###Output
_____no_output_____
###Markdown
Run the following cell to train your model.
###Code
d = model(train_set_x,
train_set_y,
test_set_x,
test_set_y,
num_iterations=2000,
learning_rate=0.005,
print_cost=True)
###Output
Cost after iteration 0: 0.693147
Cost after iteration 100: 0.584508
Cost after iteration 200: 0.466949
Cost after iteration 300: 0.376007
Cost after iteration 400: 0.331463
Cost after iteration 500: 0.303273
Cost after iteration 600: 0.279880
Cost after iteration 700: 0.260042
Cost after iteration 800: 0.242941
Cost after iteration 900: 0.228004
Cost after iteration 1000: 0.214820
Cost after iteration 1100: 0.203078
Cost after iteration 1200: 0.192544
Cost after iteration 1300: 0.183033
Cost after iteration 1400: 0.174399
Cost after iteration 1500: 0.166521
Cost after iteration 1600: 0.159305
Cost after iteration 1700: 0.152667
Cost after iteration 1800: 0.146542
Cost after iteration 1900: 0.140872
train accuracy: 99.04306220095694 %
test accuracy: 70.0 %
###Markdown
**Expected Output**: **Train Accuracy** 99.04306220095694 % **Test Accuracy** 70.0 % **Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 70%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week! Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.
###Code
# Example of a picture that was wrongly classified.
index = 15
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
print ("y = " +
str(test_set_y[0,index]) +
", you predicted that it is a \"" +
classes[int(d["Y_prediction_test"][0,index])].decode("utf-8") +
"\" picture.")
###Output
y = 1, you predicted that it is a "cat" picture.
###Markdown
Let's also plot the cost function and the gradients.
###Code
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
###Output
_____no_output_____
###Markdown
**Interpretation**:You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting. 6 - Further analysis (optional/ungraded exercise) Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$. Choice of learning rate **Reminder**:In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens.
###Code
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
###Output
learning rate is: 0.01
train accuracy: 99.52153110047847 %
test accuracy: 68.0 %
-------------------------------------------------------
learning rate is: 0.001
train accuracy: 88.99521531100478 %
test accuracy: 64.0 %
-------------------------------------------------------
learning rate is: 0.0001
train accuracy: 68.42105263157895 %
test accuracy: 36.0 %
-------------------------------------------------------
###Markdown
**Interpretation**: - Different learning rates give different costs and thus different prediction results.- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost). - A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.- In deep learning, we usually recommend that you: - Choose the learning rate that best minimizes the cost function. - If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.) 7 - Test with your own image (optional/ungraded exercise) Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Change your image's name in the following code 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
###Code
image1 = np.array(ndimage.imread("/root/datasets/gray-cat.jpeg", flatten=False))
plt.imshow(image1)
image1_resized = scipy.misc.imresize(image1, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
image1_pred = predict(d["w"], d["b"], image1_resized)
print("y = " +
str(np.squeeze(image1_pred)) +
", your algorithm predicts a \"" +
classes[int(np.squeeze(image1_pred)),].decode("utf-8") +
"\" picture.")
image2 = np.array(ndimage.imread("images/gray-cat-resized.jpg", flatten=False))
plt.imshow(image2)
image2_resized = scipy.misc.imresize(image2, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
image2_pred = predict(d["w"], d["b"], image2_resized)
print("y = " +
str(np.squeeze(image2_pred)) +
", your algorithm predicts a \"" +
classes[int(np.squeeze(image2_pred)),].decode("utf-8") +
"\" picture.")
###Output
y = 1.0, your algorithm predicts a "cat" picture.
|
0-Syllabus/.ipynb_checkpoints/ENGR-1330-2022-1-Syllabus-checkpoint.ipynb | ###Markdown
Download this page as a jupyter notebook at [ENGR-1330-2021-3-Syllabus](http://54.243.252.9/engr-1330-webroot/0-Syllabus/ENGR-1330-2021-3-Syllabus.ipynb) **The syllabus changes from time-to-time** The webpage may become obsolete; the jupyter notebook is always current.
###Code
%%html
<style>
table {margin-left: 0 !important;}
</style>
###Output
_____no_output_____ |
black_melody.ipynb | ###Markdown
Step 1: import the pre-built catalog. Let's import the pre-built catalog as a pandas dataframe. Note: `.iloc[:, 1:]` drops the first column, which is an index column written out by the last export (`to_csv`).
###Code
# import the catalog (pandas is used throughout this notebook)
import pandas as pd
dfold = pd.read_csv('black_melody.csv').iloc[:, 1:]
dfold.head(1)
# check the columns
dfold.columns
# look up for some parameters
dfold[['Name','D_pc','spectype_vis','Binarity','K1_km/s','Porb_day','fm_Msun','MBH_Msun','X-ray_flux_erg/s','gamma_km/s']]
# modify some element of source MWC 656
# source = 'LB-1' # specify which source you are going to check
# setcol = 'spectype_vis' # specify which parameter you are going to update
# vupdate = 'B' # specify the value to update
# row = dfold[dfold['Name'] == source] # find out the row in which your source recorded
# dfold.loc[row.index, setcol] = vupdate # update the target element
# dfold[setcol] # check the updated value
# add a new column
# dfold.iloc[:,12]
# newcol = ['ROSAT_0.1-2.4keV', 'Chandra_0.5-7.0keV','Swift(XRT)_0.3-10.0keV ','ROSAT_0.1-2.4keV', 'Swift(XRT)_0.3-2.0keV',None,None]
# dfold.insert(loc = 13, column = 'X-ray_window', value = newcol)
# dfold.iloc[:,0:15]
# export the augmented table
# dfold.to_csv('black_melody.csv')
###Output
_____no_output_____
###Markdown
Step 2: add data for a new BH. Please fill in the square brackets of each parameter (if available); otherwise just leave it as None.
###Code
dfnew = pd.DataFrame(
{
###############################################################################
# basic information
"Ra":[None], #ra in J2000
"Dec":[None], #dec in J2000
"Name":[None], # source name
"Identifier":[None], # common indentifier
"Binarity":[None], # the type of the system; binary system or triple system
"D_pc":[None], # distance in the unit of pc
"D_err1_pc":[None], # distance lower error bar
"D_err2_pc":[None], # distance upper error bar
"Vmag":[None], # visual magnitude of the system
"Vmag_err":[None], # visual magnitude uncertainty
"EBV":[None], # E(B-V)
"EBV_err":[None], # E(B-V) uncertainty
"X-ray_flux_erg/s":[None], # X-ray flux
"X-ray_window":[None], # X-ray window of observation
###############################################################################
# observed parameters
"K1_km/s":[None], # RV curve semi-amplitude of the BH's visible companion (inner companion if it is a triple system)
"K1_err1_km/s":[None], # RV curve semi-amplitude lower error bar of the BH's visible companion
"K1_err2_km/s":[None], # RV curve semi-amplitude upper error bar of the BH's visible companion
"Porb_day":[None], # orbital period of the system (inner binary if it is a triple system)
"Porb_err1_day":[None], # orbital period lower error bar
"Porb_err2_day":[None], # orbital period upper error bar
"gamma_km/s":[None], # sysmetic velocity of the system
"gamma_err1_km/s":[None], # sysmetic velocity lower error bar
"gamma_err2_km/s":[None], # sysmetic velocity upper error bar
"omega_degree":[None], # argument of periapsis
"omega_err1_degree":[None], # argument of periapsis lower error bar
"omega_err2_degree":[None], # argument of periapsis upper error bar
"Tconj_MJD":[None], # MJD at the conjunction phase
"Tconj_err1_MJD":[None], # MJD lower error bar
"Tconj_err2_MJD":[None], # MJD upper error bar
"e":[None], # eccentricity of the system
"e_err1":[None], # eccentricity lower error bar
"e_err2":[None], # eccentricity upper error bar
"fm_Msun":[None], # mass function of the BH (minimum BH mass)
"fm_err1_Msun":[None], # mass function lower error bar
"fm_err2_Msun":[None], # mass function upper error bar
"vsini_km/s":[None], # the rotational broadening of the star
"vsini_err1_km/s":[None], # the rotational broadening lower error bar
"vsini_err2_km/s":[None], # the rotational broadening upper error bar
"K_Halpha_km/s":[None], # the RV curve semi-amplitude of the Halpha emission line
"K_Halpha_err1_km/s":[None], # the Halpha RV curve semi-amplitude lower error bar
"K_Halpha_err2_km/s":[None], # the Halpha RV curve semi-amplitude upper error bar
"K_HeII_km/s":[None], # the RV curve semi-amplitude of the HeII emission line
"K_HeII_err1_km/s":[None], # the HeII RV curve semi-amplitude lower error bar
"K_HeII_err2_km/s":[None], # the HeII RV curve semi-amplitude upper error bar
###############################################################################
# orbital solution
"MBH_Msun":[None], # BH mass
"MBH_err1_Msun":[None], # BH mass lower error bar
"MBH_err2_Msun":[None], # BH mass upper error bar
"inclin_degree":[None], # inclination angle of the system
"inclin_err1_degree":[None], # inclination angle lower error bar
"inclin_err2_degree":[None], # inclination angle upper error bar
"filling":[None], # Roche lobe filling factor of the visible companion; filling = R_vis / R_L
"filling_err1":[None], # Roche lobe filling factor lower error bar
"filling_err2":[None], # Roche lobe filling factor upper error bar
###############################################################################
# the following parameters refer to the visible companion of the inner binary, if this is a triple system
"spectype_vis":[None], # spectral type of the visible companion
"Teff_vis_K":[None], # effective temperature of the visible companion
"Teff_vis_err1_K":[None], # effective temperature lower error bar
"Teff_vis_err2_K":[None], # effective temperature upper error bar
"logg_vis_dex":[None], # surface gravity of the visible companion
"logg_vis_err1_dex":[None], # surface gravity lower error bar
"logg_vis_err2_dex":[None], # surface gravity upper error bar
"FeH_vis_dex":[None], # metalicity of the visible companion
"FeH_vis_err1_dex":[None], # metalicity lower error bar
"FeH_vis_err2_dex":[None], # metalicity upper error bar
"M_vis_Msun":[None], # mass of the visible companion
"M_vis_err1_Msun":[None], # mass lower error bar
"M_vis_err2_Msun":[None], # mass upper error bar
"L_vis_Lsun":[None], # bolometric luminosity of the visible companion
"L_vis_err1_Lsun":[None], # bolometric luminosity lower error bar
"L_vis_err2_Lsun":[None], # bolometric luminosity upper error bar
"R_vis_Rsun":[None], # radius of the visible companion
"R_vis_err1_Rsun":[None], # radius lower error bar
"R_vis_err2_Rsun":[None], # radius upper error bar
###############################################################################
    # the following parameters refer to the outer companion if this is a triple system
    "spectype_out3":[None], # spectral type of the outer companion
    "Teff_out3_K":[None], # effective temperature of the outer companion in triple system
    "Teff_out3_err1_K":[None], # effective temperature lower error bar
    "Teff_out3_err2_K":[None], # effective temperature upper error bar
    "logg_out3_dex":[None], # surface gravity of the outer companion in triple system
    "logg_out3_err1_dex":[None], # surface gravity lower error bar
    "logg_out3_err2_dex":[None], # surface gravity upper error bar
    "FeH_out3_dex":[None], # metallicity of the outer companion in triple system
    "FeH_out3_err1_dex":[None], # metallicity lower error bar
    "FeH_out3_err2_dex":[None], # metallicity upper error bar
    "M_out3_Msun":[None], # mass of the outer companion in triple system
    "M_out3_err1_Msun":[None], # mass lower error bar
    "M_out3_err2_Msun":[None], # mass upper error bar
    "L_out3_Lsun":[None], # bolometric luminosity of the outer companion in triple system
    "L_out3_err1_Lsun":[None], # bolometric luminosity lower error bar
    "L_out3_err2_Lsun":[None], # bolometric luminosity upper error bar
    "R_out3_Rsun":[None], # radius of the outer companion in triple system
    "R_out3_err1_Rsun":[None], # radius lower error bar
    "R_out3_err2_Rsun":[None], # radius upper error bar
###############################################################################
"ads1":[None], # ads link of the paper
"ads2":[None], # ads link of the paper
"simbad":[None], # simbad link of the source
"Note1":[None], # notes
"Note2":[None], # notes
}
)
dfnew.head()
###Output
_____no_output_____
###Markdown
Step 3: check the data and update our catalog. Merge/join your new row with the pre-built catalog, check that your new inputs are valid by visual inspection, and if everything looks OK, export it.
###Code
# join the data; note that if the old and new dataframes have mismatched column names, pd.concat will not fail but will produce extra NaN-filled columns, so inspect the result
frames = [dfold, dfnew]
result = pd.concat(frames)
result.head(7)
# export the updated catalog
result.to_csv('black_melody.csv')
# this cell is intentionally left blank
###Output
_____no_output_____
###Markdown
Step 0: backup block. Empty sheet for updating. Do not edit this sheet unless necessary; only copy it!
###Code
dfnew = pd.DataFrame(
{
###############################################################################
# basic information
"Ra":[None], #ra in J2000
"Dec":[None], #dec in J2000
"Name":[None], # source name
"Identifier":[None], # common indentifier
"Binarity":[None], # the type of the system; binary system or triple system
"D_pc":[None], # distance in the unit of pc
"D_err1_pc":[None], # distance lower error bar
"D_err2_pc":[None], # distance upper error bar
"Vmag":[None], # visual magnitude of the system
"Vmag_err":[None], # visual magnitude uncertainty
"EBV":[None], # E(B-V)
"EBV_err":[None], # E(B-V) uncertainty
"X-ray_flux_erg/s":[None], # X-ray flux
"X-ray_window":[None], # X-ray window of observation
###############################################################################
# observed parameters
"K1_km/s":[None], # RV curve semi-amplitude of the BH's visible companion (inner companion if it is a triple system)
"K1_err1_km/s":[None], # RV curve semi-amplitude lower error bar of the BH's visible companion
"K1_err2_km/s":[None], # RV curve semi-amplitude upper error bar of the BH's visible companion
"Porb_day":[None], # orbital period of the system (inner binary if it is a triple system)
"Porb_err1_day":[None], # orbital period lower error bar
"Porb_err2_day":[None], # orbital period upper error bar
"gamma_km/s":[None], # sysmetic velocity of the system
"gamma_err1_km/s":[None], # sysmetic velocity lower error bar
"gamma_err2_km/s":[None], # sysmetic velocity upper error bar
"omega_degree":[None], # argument of periapsis
"omega_err1_degree":[None], # argument of periapsis lower error bar
"omega_err2_degree":[None], # argument of periapsis upper error bar
"Tconj_MJD":[None], # MJD at the conjunction phase
"Tconj_err1_MJD":[None], # MJD lower error bar
"Tconj_err2_MJD":[None], # MJD upper error bar
"e":[None], # eccentricity of the system
"e_err1":[None], # eccentricity lower error bar
"e_err2":[None], # eccentricity upper error bar
"fm_Msun":[None], # mass function of the BH (minimum BH mass)
"fm_err1_Msun":[None], # mass function lower error bar
"fm_err2_Msun":[None], # mass function upper error bar
"vsini_km/s":[None], # the rotational broadening of the star
"vsini_err1_km/s":[None], # the rotational broadening lower error bar
"vsini_err2_km/s":[None], # the rotational broadening upper error bar
"K_Halpha_km/s":[None], # the RV curve semi-amplitude of the Halpha emission line
"K_Halpha_err1_km/s":[None], # the Halpha RV curve semi-amplitude lower error bar
"K_Halpha_err2_km/s":[None], # the Halpha RV curve semi-amplitude upper error bar
"K_HeII_km/s":[None], # the RV curve semi-amplitude of the HeII emission line
"K_HeII_err1_km/s":[None], # the HeII RV curve semi-amplitude lower error bar
"K_HeII_err2_km/s":[None], # the HeII RV curve semi-amplitude upper error bar
###############################################################################
# orbital solution
"MBH_Msun":[None], # BH mass
"MBH_err1_Msun":[None], # BH mass lower error bar
"MBH_err2_Msun":[None], # BH mass upper error bar
"inclin_degree":[None], # inclination angle of the system
"inclin_err1_degree":[None], # inclination angle lower error bar
"inclin_err2_degree":[None], # inclination angle upper error bar
"filling":[None], # Roche lobe filling factor of the visible companion; filling = R_vis / R_L
"filling_err1":[None], # Roche lobe filling factor lower error bar
"filling_err2":[None], # Roche lobe filling factor upper error bar
###############################################################################
# the following parameters refer to the visible companion of the inner binary, if this is a triple system
"spectype_vis":[None], # spectral type of the visible companion
"Teff_vis_K":[None], # effective temperature of the visible companion
"Teff_vis_err1_K":[None], # effective temperature lower error bar
"Teff_vis_err2_K":[None], # effective temperature upper error bar
"logg_vis_dex":[None], # surface gravity of the visible companion
"logg_vis_err1_dex":[None], # surface gravity lower error bar
"logg_vis_err2_dex":[None], # surface gravity upper error bar
"FeH_vis_dex":[None], # metalicity of the visible companion
"FeH_vis_err1_dex":[None], # metalicity lower error bar
"FeH_vis_err2_dex":[None], # metalicity upper error bar
"M_vis_Msun":[None], # mass of the visible companion
"M_vis_err1_Msun":[None], # mass lower error bar
"M_vis_err2_Msun":[None], # mass upper error bar
"L_vis_Lsun":[None], # bolometric luminosity of the visible companion
"L_vis_err1_Lsun":[None], # bolometric luminosity lower error bar
"L_vis_err2_Lsun":[None], # bolometric luminosity upper error bar
"R_vis_Rsun":[None], # radius of the visible companion
"R_vis_err1_Rsun":[None], # radius lower error bar
"R_vis_err2_Rsun":[None], # radius upper error bar
###############################################################################
# the following parameters refer to the outter companion if this is a triple system
"spectype_out3":[None], # spectral type of the outter companion
"Teff_out3_K":[None], # effective temperature of the outter companion in triple system
"Teff_out3_err1_K":[None], # effective temperature lower error bar
"Teff_out3_err2_K":[None], # effective temperature upper error bar
"logg_out3_dex":[None], # surface gravity of the outter companion in triple system
"logg_out3_err1_dex":[None], # surface gravity lower error bar
"logg_out3_err2_dex":[None], # surface gravity upper error bar
"FeH_out3_dex":[None], # metalicity of the outter companion in triple system
"FeH_out3_err1_dex":[None], # metalicity lower error bar
"FeH_out3_err2_dex":[None], # metalicity upper error bar
"M_out3_Msun":[None], # mass of the outter companion in triple system
"M_out3_err1_Msun":[None], # mass lower error bar
"M_out3_err2_Msun":[None], # mass upper error bar
"L_out3_Lsun":[None], # bolometric luminosity of the outter companion in triple system
"L_out3_err1_Lsun":[None], # bolometric luminosity lower error bar
"L_out3_err2_Lsun":[None], # bolometric luminosity upper error bar
"R_out3_Rsun":[None], # radius of the outter companion in triple system
"R_out3_err1_Rsun":[None], # radius lower error bar
"R_out3_err2_Rsun":[None], # radius upper error bar
###############################################################################
"ads1":[None], # ads link of the paper
"ads2":[None], # ads link of the paper
"simbad":[None], # simbad link of the source
"Note1":[None], # notes
"Note2":[None], # notes
}
)
dfnew.head()
###Output
_____no_output_____ |
automated_quality_check.ipynb | ###Markdown
read and merge files
###Code
for file in files:
f = pd.read_excel(file,engine='openpyxl',index_col=0)
f = f[~f.index.isna()]
if 'responses' in globals():
responses = pd.concat([responses,f],axis=0)
else:
responses = f
###Output
_____no_output_____
###Markdown
entry has been individually checked
###Code
individual_check = responses['Checked'].replace(1,'Passed').replace(np.nan,'Failed')
individual_check.name = 'individual_check'
###Output
_____no_output_____
###Markdown
iso codes
###Code
iso_codes = [list(pycountry.countries)[i].alpha_2 for i in range(len(list(pycountry.countries)))]
def check_iso(code):
if code in iso_codes:
return 'Passed'
else:
return 'Failed'
is_iso_code = pd.Series(responses['Country:'].apply(check_iso),name='iso_code')
###Output
_____no_output_____
###Markdown
numeric values
###Code
def check_float(number):
try:
if np.isnan(float(number)):
return 'Failed'
else:
return 'Passed'
except:
return 'Failed'
value_is_numeric = pd.Series(responses['4a. Numeric value, e.g., 0.3, 1.5, 3'].apply(check_float),name='numeric_value')
###Output
_____no_output_____
###Markdown
self calculated value = reproduced value (leave this for the end) Consistency between 3 used metric and 10 method of power calculation categorise metrics and power related components
###Code
power_related_metrics = ['power_density','installed_power_density','output_power_density','power per unit area',
'capacity_density']
energy_related_metrics = ['energy_density','surface_performance_ratio','energy_yields',
'aperture_specific_net_electrical_output']
land_related_metrics = ['land_use_efficiency','land_requirements','total_impact_area','direct_impact_area_permanent',
'direct_impact_area_temporary','direct_impact_area','land_transformation','land_use_footprints',
'land_use_requirements','area_requirements','direct_land_requirements','land_occupation',
'spatial_footprint','land-use intensity','land use intensity',' land use intensity',
'land_use_intensity','land_use','land_area','area_required_by_system']
other_metrics = ['land_use per vehicle mile traveled (based on land_use_intensity)','land-use per vehicle mile',
'land-use impact (total habitat developed)']
power_related_components_general = ['nameplate (installed) capacity','nameplate (installed) capacity DC',
'nameplate_capacity','nominal nameplate capacity','peak_rated_power',
'peak_capacity','author assumes a typical power per unit area of 2.5\u2009W\u2009m−2',
'commercial module output','module']
power_related_components_capacity_factor = ['nameplate capacity multiplied by capacity factor',
'nameplate (installed) capacity multiplied by capacity factor',
'wind-density multiplied by capacity factor',
'wind density multiplied by capacity factor',
'solar constant/insolation multiplied by capacity factor/efficiency',
'typical solar insolation at average-insolation location',
'typical solar insolation at high-insolation location']
energy_related_components = ['estimated energy generation (unsure what it means)','modelled energy generation',
'simulated using flow-sheet computer program based softbeen simulated using flow-sheet computer program based software Cycle-Tempo',
'reported energy generation','net energy generation (electricity generation after substracting energy needed for manufacturing/dismantling, construction/operation, and transportation)',
'experimentally measured','annual energy production','net output']
unclear_components = ['unclear','number of turbines of a particular type','no power component','no power']
metrics_classification = pd.Series(['power']*len(power_related_metrics)+
['energy']*len(energy_related_metrics)+
['other']*(len(land_related_metrics)+len(other_metrics)),
index = power_related_metrics+energy_related_metrics+land_related_metrics+other_metrics)
component_classification = pd.Series(['power']*(len(power_related_components_general)+len(power_related_components_capacity_factor))+
['energy']*len(energy_related_components)+
['other']*len(unclear_components),
index = power_related_components_general+power_related_components_capacity_factor+energy_related_components+unclear_components)
###Output
_____no_output_____
###Markdown
check metrics
###Code
def check_metric(data):
metric = metrics_classification[data['3. Metrics used:']]
component = component_classification[data['10. Power-related component of land-use requirements is represented by:']]
if metric =='other' or component =='other':
return 'Unclear'
if metric == component:
return 'Passed'
else:
return 'Failed'
metric_fits_powercalculation = pd.Series(responses.apply(check_metric,axis=1),name='metric_fits_powercalculation')
###Output
_____no_output_____
###Markdown
Unit match metrics prepare unit columns merge columns
###Code
def merge_col(line):
return line.dropna().values
measurement_unit = responses[['4c-1. Measurement unit:','4c-2. Measurement unit:']].apply(merge_col,axis=1)
###Output
_____no_output_____
###Markdown
there are two entries where there are values in both columns
###Code
(measurement_unit.apply(len)!=1).sum()
measurement_unit[(measurement_unit.apply(len)!=1)]
###Output
_____no_output_____
###Markdown
only use second value
###Code
measurement_unit[measurement_unit.apply(len)!=1] = measurement_unit[measurement_unit.apply(len)!=1].apply(lambda x: [x[1]])
measurement_unit = measurement_unit.apply(lambda x: x[0])
###Output
_____no_output_____
###Markdown
unify units - replace similar units with a single spelling
###Code
replace = pd.DataFrame({'replace':['acres/MW', 'm²/GWh', 'ha/GWh/y', 'W m-2', 'W/m²', 'WP/m2', 'W_p/m²', 'We m-2', 'We m−2', 'We/m²', 'W_e/m2', 'w/ft2', 'kW_e/m²', 'MWi km−2', 'GW_e/m2', 'kWh/year/m²', 'kWh/m²year', 'kWh/m2/year', 'kWh/m²/year', 'MWh/year/m²', 'GWh/yr/m2', 'GJ/m2/year', 'rho_e W_e/m2', 'm^2', 'm2/VPM (Vehicl mile traveled)'],
'with': ['acre/MW', 'm2/GWh', 'ha/GWh/year', 'W/m2', 'W/m2', 'Wp/m2', 'Wp/m2', 'We/m2', 'We/m2', 'We/m2', 'We/m2', 'W/ft2', 'kWe/m2', 'MWi/km2', 'GWe/m2', 'kWh/year/m2', 'kWh/year/m2', 'kWh/year/m2', 'kWh/year/m2', 'MWh/year/m2', 'GWh/year/m2', 'GJ/year/m2', 'rhoe We/m2', 'm2', 'm2/VPM (Vehicle mile traveled)']})
for i in range(len(replace)):
measurement_unit = measurement_unit.replace(replace['replace'][i],replace['with'][i])
###Output
_____no_output_____
###Markdown
categorise units
###Code
footprint_power_related = ['m2/W','m2/Wp','m2/kW','m2/kWp','m2/MW','ha/MW','ha/MWp','acre/MW','acre/MW-DC',
'acre/MW-AC','km2/MW']
footprint_energy_related = ['m2/MWh','m2/GWh','km2/GWh','km2/TWh']
footprint_annual_energy_related = ['m2/MWh/year','ha/MWh/year','ha/GWh/year','ha/TWh/year','km2/TWh/year']
power_density = ['W/m2','Wp/m2','We/m2','W/ft2','kW/m2','kWp/m2','kWe/m2','kW/ha','kW/acre','MW/m2',
'MW/ha','MWp/ha','MW/km2','MWi/km2','MW/acre','GWe/m2']
energy_density = ['Wh/cm2/day','kWh/year/m2','kWh/year/acre','kWh/year/ft2','kWh/year/ha','MWh/year/acre',
'MWh/year/m2','MWh/year/ha','GWh/year/m2','GWh/year/km2','GJ/year/m2','TWh/year/km2']
unclear_units = ['MJ/m2','GJ/unit/year','km2 year/GWh','kW/ft','km/GWh','m2/VPM (Vehicle mile traveled)',
'm2','rhoe We/m2','MW/h/ha']
metrics_classification = pd.Series(['power']*len(power_related_metrics)+
['energy']*len(energy_related_metrics)+
['footprint']*len(land_related_metrics)+
['other']*len(other_metrics),
index = power_related_metrics+energy_related_metrics+land_related_metrics+other_metrics)
unit_classification = pd.Series(['power']*len(power_density)+
['energy']*len(energy_density)+
['footprint']*(len(footprint_power_related)+len(footprint_energy_related)+len(footprint_annual_energy_related))+
['other']*len(unclear_units),
index = power_density+energy_density+footprint_power_related+footprint_energy_related+footprint_annual_energy_related+unclear_units)
###Output
_____no_output_____
###Markdown
check units
###Code
def check_unit(data):
metric = metrics_classification[data['Metric']]
unit = unit_classification[data['Unit']]
if metric =='other' or unit =='other':
return 'Unclear'
if metric == unit:
return 'Passed'
else:
return 'Failed'
metric_unit = pd.DataFrame({'Metric':responses['3. Metrics used:'].values,
'Unit':measurement_unit.values},
index=responses.index)
metric_fits_unit = pd.Series(metric_unit.apply(check_unit,axis=1),name='metric_fits_unit')
###Output
_____no_output_____
###Markdown
Check range of values unify units add combined measurement units as column
###Code
responses2 = pd.concat([responses.loc[:,:'4c-2. Measurement unit:'],
pd.Series(measurement_unit,name='4c. Measurement unit:'),
responses.loc[:,'4d. Type of value':]],axis=1)
###Output
_____no_output_____
###Markdown
conversion
###Code
m2peracre = 4046.86
m2perft2 = 0.092903  # square metres per square foot (1 m2 = 10.7639 ft2); dividing by this converts per-ft2 values to per-m2
WattUnits = pd.DataFrame({'start': ['W/ft2', 'kW/m2', 'kW/ha','kW/acre', 'MW/m2', 'MW/ha','MW/km2','MW/acre'],
                      'target': 'W/m2',
                      'factor': [1/m2perft2, 1000, 0.1, 1000/m2peracre, 10**6, 100, 1, 10**6/m2peracre]})  # 1 MW/m2 = 10**6 W/m2
OtherWattUnits = pd.DataFrame({'start': ['kWp/m2', 'MWp/ha', 'kWe/m2', 'GWe/m2', 'MWi/km2'],
                       'target': ['Wp/m2', 'Wp/m2', 'We/m2', 'We/m2', 'Wi/m2'],
                       'factor': [1000, 100, 1000, 10**9, 1]})  # 1 GWe/m2 = 10**9 We/m2
WatthUnits = pd.DataFrame({'start': ['Wh/cm2/day', 'kWh/year/acre', 'kWh/year/ft2','kWh/year/ha','MWh/year/acre','MWh/year/m2','MWh/year/ha','GWh/year/m2','GWh/year/km2','GJ/year/m2','TWh/year/km2'],
'target': 'kWh/year/m2',
'factor': [3650, 1/m2peracre, 1/m2perft2, 1/10**4, 1000/m2peracre, 1000, 0.1, 10**6, 1, 10**6/3600, 1000]})
AreaPowerUnits = pd.DataFrame({'start': ['m2/W', 'm2/Wp', 'm2/MW', 'ha/MW', 'ha/MWp', 'acre/MW', 'acre/MW-DC', 'acre/MW-AC', 'km2/MW'],
'target': ['m2/kW', 'm2/kWp', 'm2/kW', 'm2/kW', 'm2/kWp', 'm2/kW', 'm2/kW-DC', 'm2/kW-AC', 'm2/kW'],
'factor': [1000, 1000, 1/1000, 10, 10, m2peracre/1000, m2peracre/1000, m2peracre/1000, 1000]})
AreaEnergyUnits = pd.DataFrame({'start': ['m2/MWh','m2/GWh','km2/GWh','km2/TWh','m2/MWh/year','ha/MWh/year','ha/GWh/year','ha/TWh/year','km2/TWh/year'],
                       'target': ['m2/kWh','m2/kWh','m2/kWh', 'm2/kWh', 'm2/kWh/year','m2/kWh/year','m2/kWh/year','m2/kWh/year','m2/kWh/year'],
                       'factor': [1/1000, 1/10**6, 1, 1/1000, 1/1000, 10, 0.01, 1/10**5, 1/1000]})  # 1 km2/TWh/year = 10**6 m2 per 10**9 kWh/year
unit_conversion = pd.concat([WattUnits,OtherWattUnits,WatthUnits,AreaPowerUnits,AreaEnergyUnits],axis=0)
new_unit = responses2['4c. Measurement unit:'].map(unit_conversion.set_index('start').target)
factor = responses2['4c. Measurement unit:'].map(unit_conversion.set_index('start').factor)
# fill in lines where unit stays the same
new_unit[new_unit.isna()] = responses2['4c. Measurement unit:'][new_unit.isna()]
factor = factor.fillna(1)
new_value = responses2['4a. Numeric value, e.g., 0.3, 1.5, 3'][value_is_numeric=='Passed'].apply(float)*factor[value_is_numeric=='Passed']
responses3 = pd.concat([responses2.loc[:,:'4a. Numeric value, e.g., 0.3, 1.5, 3'],
pd.Series(responses2.index.map(new_value),name='4a-1. Converted value',index=responses2.index),
responses2.loc[:,'4b. Is power-related component of the land-use requirement expressed as energy e.g., ha/GWh/year?':'4c. Measurement unit:'],
pd.Series(new_unit,name='4c-3. Converted measurement unit'),
responses2.loc[:,'4d. Type of value':]],axis=1)
###Output
_____no_output_____
###Markdown
find outliers only look for outliers where there are more than 10 values
###Code
frequency_units = responses3['4c-3. Converted measurement unit'].groupby(responses3['4c-3. Converted measurement unit']).count()
def find_outliers(data):
    # Flag values outside mean +/- 3 standard deviations as outliers
    stdev = np.std(data)
    avg = np.mean(data)
    lower_limit = avg - stdev*3
    upper_limit = avg + stdev*3
    # Label each value 'Passed' (inside the range) or 'Failed' (outlier)
    def test_if_in_range(value):
        return in_range(lower_limit,upper_limit,value)
    return data.apply(test_if_in_range)
def in_range(lower,upper,value):
if (value > lower) & (value < upper):
return 'Passed'
else:
return 'Failed'
outliers = pd.concat([find_outliers(responses3[responses3['4c-3. Converted measurement unit']==unit]['4a-1. Converted value']) for unit in frequency_units.index[(frequency_units>10)].values],axis=0)
value_within_range = pd.Series(responses3.index.map(outliers).fillna('Too few values or not standardised'),name='value_range',index=responses3.index)
###Output
_____no_output_____
###Markdown
check if matches: DOI + first Author + scopus_id + year of publication
###Code
rev_file = pd.read_csv(review_path + 'SCOPUS_DOI2.csv',encoding = "utf-8",dtype=str).dropna().drop_duplicates()
mapped_SCOPUS = responses3['1b. DOI link'].map(rev_file.set_index('DOI').SCOPUS)
scopusID_fits_doi = pd.Series(responses3.index.map(mapped_SCOPUS.dropna()==responses3['1a. SCOPUS ID'].apply(str)[mapped_SCOPUS.notna()]),
index=responses3.index,name='scopusID_fits_doi'
).fillna('Not available').replace(True,'Passed').replace(False,'Failed')
results_quality_check = pd.concat([individual_check,is_iso_code,value_is_numeric,metric_fits_powercalculation,metric_fits_unit,value_within_range,scopusID_fits_doi],axis=1)
results_quality_check.to_csv(path + 'results_quality_check.csv')
results_quality_check
###Output
_____no_output_____
###Markdown
split results per person and remove passed rows and columns
###Code
def clean_and_save(data,name):
passed_results = (data == 'Passed').sum(axis=1)
not_available_results = (data == 'Not available').sum(axis=1)
data2 = data[passed_results+not_available_results<data.shape[1]]
passed_results = (data2 == 'Passed').sum(axis=0)
not_available_results = (data2 == 'Not available').sum(axis=0)
data3 = data2.loc[:,passed_results+not_available_results<data2.shape[0]]
data3.to_csv(output_path + name + '_result_automated_quality_check.csv')
return
comments = pd.DataFrame(np.nan * np.ones(shape=results_quality_check.shape),
columns=results_quality_check.columns.values + '_comment',
index=results_quality_check.index)
results_quality_check_with_comments = pd.concat([results_quality_check,comments],axis=1)
for name in responses['Reviewed by'].unique():
print(name)
d = results_quality_check_with_comments[responses['Reviewed by']==name]
clean_and_save(d,name)
###Output
Luis
Johannes
Peter
Olga
Claude
Michael
Sebastian
Katharina
###Markdown
also split results per person including new values
###Code
for name in responses['Reviewed by'].unique():
print(name)
responses[responses['Reviewed by']==name].to_csv(output_path + name + '_results.csv')
###Output
Luis
Johannes
Peter
Olga
Claude
Michael
Sebastian
Katharina
###Markdown
extract bibtex info
###Code
import urllib.request
url = 'http://enviroinfo.eu/sites/default/files/pdfs/vol8514/0093.pdf'
req = urllib.request.Request(url)
req.add_header('Accept', 'application/x-bibtex')
try:
with urllib.request.urlopen(req) as f:
bibtex = f.read().decode()
print(bibtex)
except:
print('no doi')
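# Note: the Accept: application/x-bibtex header is honoured by DOI resolvers (i.e. requests to
# https://doi.org/<doi>), not by arbitrary publisher or PDF links like the URL above, so for most
# entries the request would need to target the DOI URL for BibTeX to come back.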
clean_bibtex = pd.Series(str.replace(bibtex,"'","").replace("},","").replace("{","").split("\n\t"))[1:]
clean_bibtex.apply(lambda x: str.split(x," = ")[1]).values
pd.Series(clean_bibtex.apply(lambda x: str.split(x," = ")[1]).values,
index = clean_bibtex.apply(lambda x: str.split(x," = ")[0]))
###Output
_____no_output_____ |
Practical_statistics/Hypothesis_Testing/Drawing Conclusions.ipynb | ###Markdown
Calculating ErrorsHere are two datasets that represent two of the examples you have seen in this lesson. One dataset is based on the parachute example, and the second is based on the judicial example. Neither of these datasets is based on real people.Use the exercises below to assist in answering the quiz questions at the bottom of this page.
###Code
import numpy as np
import pandas as pd
jud_data = pd.read_csv('judicial_dataset_predictions.csv')
par_data = pd.read_csv('parachute_dataset.csv')
jud_data.head()
par_data.head()
###Output
_____no_output_____
###Markdown
`1.` Above, you can see the actual and predicted columns for each of the datasets. Using the **jud_data**, find the proportion of errors for the dataset, and furthermore, the percentage of errors of each type. Use the results to answer the questions in quiz 1 below. **Hint for quiz:** an error is any time the prediction doesn't match an actual value. Additionally, there are Type I and Type II errors to think about. We also know we can minimize one type of error by maximizing the other type of error. If we predict all individuals as innocent, how many of the guilty are incorrectly labeled? Similarly, if we predict all individuals as guilty, how many of the innocent are incorrectly labeled?
###Code
# overall proportion of incorrect predictions (any mismatch between actual and predicted)
jud_data[jud_data['actual'] != jud_data['predicted']].shape[0]/jud_data.shape[0]
# proportion predicted guilty although actually innocent
jud_data.query("actual == 'innocent' and predicted == 'guilty'").count()[0]/jud_data.shape[0]
# proportion predicted innocent although actually guilty
jud_data.query("actual == 'guilty' and predicted == 'innocent'").count()[0]/jud_data.shape[0]
# proportion of individuals who are actually innocent (the error rate if everyone were predicted guilty)
jud_data[jud_data['actual'] == 'innocent'].shape[0]/jud_data.shape[0]
###Output
_____no_output_____
###Markdown
`2.` Using the **par_data**, find the proportion of errors for the dataset, and furthermore, the percentage of errors of each type. Use the results to answer the questions in quiz 2 below.These should be very similar operations to those you performed in the previous question.
###Code
# overall proportion of incorrect predictions (any mismatch between actual and predicted)
par_data[par_data['actual'] != par_data['predicted']].shape[0]/par_data.shape[0]
# proportion predicted to fail although the parachute actually opens
par_data.query("actual == 'opens' and predicted == 'fails'").count()[0]/par_data.shape[0]
# proportion predicted to open although the parachute actually fails
par_data.query("actual == 'fails' and predicted == 'opens'").count()[0]/par_data.shape[0]
# proportion of parachutes that actually open (the error rate if every parachute were predicted to fail)
par_data[par_data['actual'] == 'opens'].shape[0]/par_data.shape[0]
###Output
_____no_output_____ |
project2-mem-master/6_Model_Optimization/hyperparameter_tuning.ipynb | ###Markdown
Hyperparameter optimizationWe run **gridsearchCV** and **randomsearchCV** to get the best parameters possible. We still run the model on the same features. Results: 1. GridSearchCV: [XGBoost](xgb) - AUC score: 0.884893 - Parameters: {'learning_rate': 0.3, 'loss': 'deviance', 'max_depth': 11, 'max_leaf_nodes': 1, 'n_estimators': 110, 'subsample': 1.0}2. GridSearchCV: [KNN](knn)- AUC score: 0.878967- Parameters: {'algorithm': 'auto', 'leaf_size': 20, 'metric': 'minkowski', 'n_neighbors': 4, 'p': 3, 'weights': 'distance'}3. GridSearchCV: [Random Forest](rf)- AUC score: 0.872383 - Parameters: {'bootstrap': False, 'class_weight': 'balanced', 'criterion': 'entropy', 'max_depth': 20, 'max_features': 0.4, 'max_leaf_nodes': 5, 'min_samples_leaf': 20, 'min_samples_split': 14, 'n_estimators': 100}4. RandomSearchCV: [SVC](svc) - AUC score: 0.850234 - Parameters: {'kernel': 'rbf', 'gamma': 1.0672387970376063, 'class_weight': 'balanced', 'C': 0.8914369396699439}5. GridSearchCV: [Logistic Regression](lr) - AUC score: 0.847899 - Parameters: {'C': 5, 'class_weight': , 'dual': False, 'max_iter': 90, 'solver': 'lbfgs', 'verbose': 0, 'warm_start': True}6. GridSearchCV: [MLP](mlp) - AUC score: 0.847720- Parameters: {'activation': 'identity', 'alpha': 0.0003, 'hidden_layer_sizes': (20, 40), 'learning_rate': 'constant', 'solver': 'lbfgs', 'verbose': True}
###Code
import pandas as pd
import numpy as np
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
from sklearn.metrics import make_scorer, roc_auc_score
# Import GridSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn import metrics, model_selection
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support
data = pd.read_csv('all_model_data.csv', index_col = 0)
data.shape
###Output
_____no_output_____
###Markdown
Gridsearch CV 1. Logistic Regression Back to [results](t)
###Code
# select x and y
X = data[['ProdRelPageRatio_Scaled_Bin','totalFracAdmin_Scaled','Administrative_Duration_Scaled'
,'BounceRates_Norm_Scaled', 'ExitRates_Scaled','SpecialDay_1.0']]
y = data.Revenue
# we will use AUC to check validity of hyperparameters
scorer = make_scorer(roc_auc_score)
# Split the `digits` data into two equal sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123, shuffle = True)
# balance the data
sm = SMOTE(random_state=123, sampling_strategy = 'minority')
x_train_res, y_train_res = sm.fit_sample(X_train, y_train)
# create options for gridsearch (it will iterate through all these options)
dual=[True,False]  # note: dual=True is only supported by the liblinear solver, so those candidates will fail to fit with lbfgs/newton-cg
C = [3,5,7]
max_iter=[90,100,110]
solver = ['lbfgs','newton-cg']
verbose = [0,1,2]
warm_start = [True, False]
class_weight = [dict,'balanced',None]  # note: the bare built-in dict type here (rather than an actual weight dictionary) is almost certainly unintended
param_grid = dict(dual=dual,C=C,max_iter=max_iter,solver=solver,warm_start=warm_start,class_weight=class_weight,
verbose=verbose)
# Create a classifier with the parameter candidates
grid = GridSearchCV(estimator=LogisticRegression(random_state=123), param_grid=param_grid, n_jobs=-1,scoring=scorer,
cv = 3)
# fit grid to the model
grid_result = grid.fit(x_train_res, y_train_res)
# Summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
###Output
Best: 0.847899 using {'C': 5, 'class_weight': <class 'dict'>, 'dual': False, 'max_iter': 90, 'solver': 'lbfgs', 'verbose': 0, 'warm_start': True}
###Markdown
2. Random Search: SVC
###Code
X = data[['Month_bin_2','Month_bin_4','Month_bin_1','totalFracProd_Bin',
'ProdRelPageRatio_Scaled_Bin','BounceExitAvg_Norm_Scaled','totalFracInfo_Scaled']]
y = data.Revenue
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import make_scorer, roc_auc_score
# Split the `digits` data into two equal sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123, shuffle = True)
# balance the data
sm = SMOTE(random_state=123, sampling_strategy = 'minority')
x_train_res, y_train_res = sm.fit_sample(X_train, y_train)
# Initialize the random number generator
np.random.seed(123)
# Create range of values to choose randomly from
C1 = np.random.normal(1,0.1,1).astype(float)
kernel = np.random.choice(['rbf','sigmoid'],1)
gamma = np.random.uniform(0.1,1.5,1)
class_weight = np.random.choice([dict,'balanced'],1)
# join the parameter grid into a dictionary
param_grid1 = dict(C=C1,kernel=kernel,gamma=gamma,class_weight=class_weight)
# innitialize the model
rfr = SVC(random_state = 123)
# use auc to score
scorer = make_scorer(roc_auc_score)
# innitialize random search, put param grid in, use cv=3, use all processors
random = RandomizedSearchCV(estimator=rfr, param_distributions=param_grid1, cv = 3, n_jobs=-1,scoring=scorer)
#fit the model
random_result = random.fit(x_train_res, y_train_res)
# Summarize results
print("Best: %f using %s" % (random_result.best_score_, random_result.best_params_))
###Output
/opt/tljh/user/lib/python3.6/site-packages/sklearn/model_selection/_search.py:281: UserWarning: The total space of parameters 1 is smaller than n_iter=10. Running 1 iterations. For exhaustive searches, use GridSearchCV.
% (grid_size, self.n_iter, grid_size), UserWarning)
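###Markdown
As the warning above notes, drawing a single value for each hyperparameter gives a search space of size 1, so the "random" search only ever evaluates one candidate. A sketch of how continuous distributions could be passed instead, so that RandomizedSearchCV samples a fresh combination per iteration (the parameter ranges below are illustrative assumptions, not the ones behind the reported results):
###Code
# hedged sketch: let RandomizedSearchCV sample from distributions rather than single fixed values
from scipy.stats import uniform
param_dist = {'C': uniform(0.5, 1.5),        # samples C in [0.5, 2.0)
              'gamma': uniform(0.1, 1.4),    # samples gamma in [0.1, 1.5)
              'kernel': ['rbf', 'sigmoid'],
              'class_weight': ['balanced', None]}
random_sketch = RandomizedSearchCV(estimator=SVC(random_state=123), param_distributions=param_dist,
                                   n_iter=20, cv=3, n_jobs=-1, scoring=scorer, random_state=123)
# random_sketch.fit(x_train_res, y_train_res)  # would evaluate 20 sampled candidates
###Output
_____no_output_____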
###Markdown
3.Grid Search: MLP
###Code
X = data[['PageValues_Scaled_Bin', 'ExitRates_Scaled']]
y = data.Revenue
# we will use AUC to check validity of hyperparameters
scorer = make_scorer(roc_auc_score)
# Split the `digits` data into two equal sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123, shuffle = True)
# balance the data
sm = SMOTE(random_state=123, sampling_strategy = 'minority')
x_train_res, y_train_res = sm.fit_sample(X_train, y_train)
# Set the parameter candidates
hidden_layer_sizes=[(20,40),(20,40,80),(40,80)]
activation = ['identity','logistic','relu']
solver = ['lbfgs','solver']  # note: 'solver' is not a valid MLPClassifier solver ('sgd' or 'adam' was probably intended), so those candidates will error out
alpha = [0.0003,0.0005,0.0007]
max_iter = [200,300,400]
learning_rate = ['constant', 'invscaling', 'adaptive']
#max_fun = [15000,17000]
verbose = [True,False]
# create param grid (join them in the dictionary)
param_grid = dict(hidden_layer_sizes=hidden_layer_sizes,activation=activation,solver=solver,alpha=alpha,
learning_rate=learning_rate,verbose=verbose,max_iter=max_iter)
# Create a classifier with the parameter candidates
grid1 = GridSearchCV(estimator=MLPClassifier(random_state=123), param_grid=param_grid, n_jobs=-1, scoring = scorer, cv = 3)
# Train the classifier on training data
grid_results1 = grid1.fit(x_train_res, y_train_res)
# Summarize results
print("Best: %f using %s" % (grid_results1.best_score_, grid_results1.best_params_))
###Output
Best: 0.847720 using {'activation': 'identity', 'alpha': 0.0003, 'hidden_layer_sizes': (20, 40), 'learning_rate': 'constant', 'solver': 'lbfgs', 'verbose': True}
###Markdown
4. Grid Search: XGBoost
###Code
X = data[['PageValues_Norm_Scaled','AdminBounceRatio_Norm_Scaled','ProdRelExitRatio_Norm_Scaled',
'Month_bin_4','Month_bin_2','VisitorType_bin_2','Informational_Duration_Scaled','totalFracProd_Bin']]
y = data.Revenue
# we will use AUC to check validity of hyperparameters
scorer = make_scorer(roc_auc_score)
# Split the `digits` data into two equal sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123, shuffle = True)
# balance the data
sm = SMOTE(random_state=123, sampling_strategy = 'minority')
x_train_res, y_train_res = sm.fit_sample(X_train, y_train)
# Set the parameter candidates
loss = ['deviance', 'exponential']
learning_rate = [0.3,0.4,0.5]
n_estimators = [110,120,130]
subsample = [0.8,1.0,1.2]
max_depth = [9,11,13]
max_leaf_nodes = [1,2,None]
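# note: 'loss' and 'max_leaf_nodes' are GradientBoostingClassifier-style names; XGBClassifier does
# not use them (its closest equivalents are 'objective' and 'max_leaves'), so they most likely have
# no effect here and only inflate the number of grid combinations that get fitted.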
# create param grid (join them in the dictionary)
param_grid = dict(loss=loss,learning_rate=learning_rate,n_estimators=n_estimators,subsample=subsample,
max_depth=max_depth,max_leaf_nodes=max_leaf_nodes)
# Create a classifier with the parameter candidates
grid2 = GridSearchCV(estimator=XGBClassifier(random_state=123), param_grid=param_grid, n_jobs=-1, scoring = scorer, cv = 3)
# Train the classifier on training data
grid_results2 = grid2.fit(x_train_res, y_train_res)
# Summarize results
print("Best: %f using %s" % (grid_results2.best_score_, grid_results2.best_params_))
###Output
Best: 0.884893 using {'learning_rate': 0.3, 'loss': 'deviance', 'max_depth': 11, 'max_leaf_nodes': 1, 'n_estimators': 110, 'subsample': 1.0}
###Markdown
5. Grid Search: Random Forest
###Code
X = data[['ProductRelated_Duration_Scaled','BounceRates_Scaled','PageValues_Scaled','totalFracAdmin_Scaled',
'Month_bin_2','ExitRates_Scaled']]
y = data.Revenue
# we will use AUC to check validity of hyperparameters
scorer = make_scorer(roc_auc_score)
# Split the `digits` data into two equal sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123, shuffle = True)
# balance the data
sm = SMOTE(random_state=123, sampling_strategy = 'minority')
x_train_res, y_train_res = sm.fit_sample(X_train, y_train)
# Set the parameter candidates
n_estimators= [100]
max_depth= [10, 20]
max_leaf_nodes= [2,5]
class_weight= [None,'balanced']
bootstrap = [True, False]
criterion=['entropy','gini']
max_features=['auto',0.4]
min_samples_leaf=[15,20]
min_samples_split=[12,14]
# create param grid (join them in the dictionary)
param_grid = dict(n_estimators=n_estimators, max_depth=max_depth, max_leaf_nodes=max_leaf_nodes, class_weight=class_weight,
bootstrap=bootstrap,criterion=criterion,max_features=max_features,min_samples_leaf=min_samples_leaf,
min_samples_split=min_samples_split)
# Create a classifier with the parameter candidates
clf = GridSearchCV(estimator=RandomForestClassifier(), param_grid=param_grid, n_jobs=-1, scoring = scorer, cv=3)
# Train the classifier on training data
grid_result= clf.fit(x_train_res, y_train_res)
# Print out the results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
###Output
Best: 0.872383 using {'bootstrap': False, 'class_weight': 'balanced', 'criterion': 'entropy', 'max_depth': 20, 'max_features': 0.4, 'max_leaf_nodes': 5, 'min_samples_leaf': 20, 'min_samples_split': 14, 'n_estimators': 100}
###Markdown
6. Grid Search: KNN
###Code
X = data[['PageValues_Norm_Scaled','ExitRates_Scaled','totalFracProd_Scaled']]
y = data.Revenue
# we will use AUC to check validity of hyperparameters
scorer = make_scorer(roc_auc_score)
# Split the data into training (80%) and test (20%) sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123, shuffle = True)
# balance the data
sm = SMOTE(random_state=123, sampling_strategy = 'minority')
x_train_res, y_train_res = sm.fit_sample(X_train, y_train)
# Set the parameter candidates
n_neighbors= [4,5,6]
weights= ['uniform','distance']
algorithm= ['auto', 'ball_tree','kd_tree']
leaf_size=[20,30,40]
p=[3,4]
metric= ['minkowski']
# create param grid (join them in the dictionary)
param_grid = dict(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm, leaf_size=leaf_size, p=p, metric=metric)
# Create a classifier with the parameter candidates
clf = GridSearchCV(estimator=KNeighborsClassifier(), param_grid=param_grid, n_jobs=-1, scoring = scorer, cv=10)
# Train the classifier on training data
grid_result= clf.fit(x_train_res, y_train_res)
# Print out the results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
###Output
Best: 0.878967 using {'algorithm': 'auto', 'leaf_size': 20, 'metric': 'minkowski', 'n_neighbors': 4, 'p': 3, 'weights': 'distance'}
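The grid searches above tune each model on the SMOTE-balanced training data only; the `X_test`/`y_test` split created earlier is never used in these cells. As a minimal follow-up sketch (not part of the original run, and assuming `grid_result`, `X_test`, and `y_test` from the KNN cell are still in scope), the tuned model could be checked on the untouched held-out set like this:

```python
# Sketch: evaluate the refit best estimator on the held-out (unbalanced) test set.
from sklearn.metrics import roc_auc_score, classification_report

best_knn = grid_result.best_estimator_              # GridSearchCV refits this on the training data
test_proba = best_knn.predict_proba(X_test)[:, 1]   # probability of the positive class
print("Held-out AUC:", roc_auc_score(y_test, test_proba))
print(classification_report(y_test, best_knn.predict(X_test)))
```

The same pattern applies to the MLP, XGBoost, and Random Forest searches above.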
|
new_beginning/information_analysis_yeast.ipynb | ###Markdown
Load data PPI
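The cells below rely on a handful of imports and directory constants that are not defined anywhere in the notebook as shown. The following is a minimal setup sketch, not the author's original setup: the three `*_DIRECTORY` values are placeholders that must point at local copies of the data, and `graco` is the graphlet-correlation package whose `orbits`, `coefficients`, and distance helpers are called in later cells.

```python
# Assumed setup (sketch): imports and path constants used by the cells below.
import random
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.spatial.distance import squareform
from statsmodels.distributions.empirical_distribution import ECDF
from goatools import obo_parser
import graco  # graphlet correlation vectors / distances

RAW_DATA_DIRECTORY = "data/raw"              # placeholder path
NETWORK_DIRECTORY = "data/networks"          # placeholder path
ANNOTATION_DIRECTORY = "data/annotations"    # placeholder path
```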
###Code
PPI_nx = nx.read_edgelist(f"{NETWORK_DIRECTORY}/PPI_BioGRID.txt")
GDV = graco.orbits(PPI_nx)
GCV = graco.coefficients(GDV)
###Output
_____no_output_____
###Markdown
Annotations
###Code
aspect = 'BP'
PPI = nx.read_edgelist(f"{NETWORK_DIRECTORY}/PPI_BioGRID.txt")
annotation_df = pd.read_csv(f"{ANNOTATION_DIRECTORY}/GO_{aspect}_BioGRID-SGD.csv")
go_dag = obo_parser.GODag(f"{RAW_DATA_DIRECTORY}/go-basic.obo")
gene_population = set(PPI.nodes())
GO_population = set(annotation_df.GO_ID)
# Conversion dictionaries
GO2genes = pd.Series({go_id: set(genes.Systematic_ID) for go_id, genes in annotation_df.groupby('GO_ID')},
name='nb_genes')
gene2GO = {gene : set(go_ids.GO_ID) for gene, go_ids in annotation_df.groupby('Systematic_ID')}
global_GO_counter = GO2genes.apply(len)
###Output
_____no_output_____
###Markdown
Action
###Code
abundand_GO = [go_id for go_id,gene_list in GO2genes.items() if len(gene_list) > 99]
print(len(abundand_GO))
Eq = ['D','A']
distance = 'cityblock'
D = graco.GCV_distance(GCV[Eq], distance)
all_genes = set(GCV[Eq].dropna().index)
len(all_genes), len(GCV)
GCV[Eq]
D.isna().any().any()
GO_id = abundand_GO[0]
GO_genes = GO2genes[GO_id] & set(GCV[Eq].dropna().index)
nGO_genes = (gene_population - GO_genes) & set(GCV[Eq].dropna().index)
assert len(all_genes) == len(GO_genes) + len(nGO_genes)
k = len(GO_genes)
mu_list = []
var_list = []
for i in range(1000):
sample_genes = random.sample(all_genes,k)
sample_dists = squareform(D.loc[sample_genes,sample_genes])
mu_list.append( np.mean(sample_dists))
var_list.append(np.var( sample_dists))
fig, ax = plt.subplots(figsize=(9,6))
ax.hist(mu_list, bins=50);
ax.set_xlim(0,1)
GO_dists = squareform(D.loc[GO_genes,GO_genes])
GO_mu = np.mean(GO_dists)
GO_var = np.var( GO_dists)
ecdf_mu = ECDF(mu_list )
ecdf_var = ECDF(var_list)
ecdf_mu( GO_mu ), ecdf_var(GO_var)
sample_genes = random.sample(all_genes,k)
sample_dists = squareform(D.loc[sample_genes,sample_genes])
prototype = GCV.loc[all_genes, Eq].mean()
for gene in all_genes:
break
graco.distance(GCV.loc[gene,Eq], prototype, distance)
def get_mu_and_var_dist(k, all_genes, D, sample_size=100):
    """Null distribution of the mean pairwise distance for random gene sets of size k."""
    mu_list = []
    var_list = []
    for _ in range(sample_size):
        sample_genes = random.sample(all_genes, k)
        sample_dists = squareform(D.loc[sample_genes, sample_genes])
        mu_list.append(np.mean(sample_dists))
        #var_list.append(np.var(sample_dists))
    return mu_list
sample_size = 100
significant_GO_list = []
for GO_id in abundand_GO:
GO_genes = GO2genes[GO_id] & set(GCV[Eq].dropna().index)
nGO_genes = (gene_population - GO_genes) & set(GCV[Eq].dropna().index)
k = len(GO_genes)
assert len(all_genes) == len(GO_genes) + len(nGO_genes)
mu_list = []
#var_list = []
for _ in range(sample_size):
sample_genes = random.sample(all_genes,k)
sample_dists = squareform(D.loc[sample_genes,sample_genes])
mu_list.append( np.mean(sample_dists))
#var_list.append(np.var( sample_dists))
GO_dists = squareform(D.loc[GO_genes,GO_genes])
GO_mu = np.mean(GO_dists)
#GO_var = np.var( GO_dists)
#ecdf_mu = ECDF(mu_list )
#ecdf_var = ECDF(var_list)
#p_mu = ecdf_mu( GO_mu )
#p_var = ecdf_var(GO_var)
if GO_mu < sorted(mu_list)[1]:
print(k, go_dag[GO_id].name)
significant_GO_list.append(GO_id)
#print(f"p-value mu : {p_mu :.9f}")
#print(f"p-value var: {p_var:.9f}")
len(significant_GO_list), len(abundand_GO)
GO_id = significant_GO_list[0]
GO_genes = GO2genes[GO_id] & set(GCV[Eq].dropna().index)
nGO_genes = (gene_population - GO_genes) & set(GCV[Eq].dropna().index)
k = len(GO_genes)
assert len(all_genes) == len(GO_genes) + len(nGO_genes)
mu_list = []
#var_list = []
for _ in range(2**10):
sample_genes = random.sample(all_genes,k)
sample_dists = squareform(D.loc[sample_genes,sample_genes])
mu_list.append( np.mean(sample_dists))
#var_list.append(np.var( sample_dists))
GO_dists = squareform(D.loc[GO_genes,GO_genes])
GO_mu = np.mean(GO_dists)
fig, ax = plt.subplots(figsize=(9,6))
bars, pos, sth = ax.hist(mu_list, bins=50, density=True)
ax.plot([GO_mu,GO_mu], [0,max(bars)]);
fig, ax = plt.subplots(figsize=(9,6))
bars, pos, sth = ax.hist(var_list, bins=50, density=True)
ax.plot([GO_var,GO_var], [0,max(bars)]);
data = GCV[Eq].dropna()
data['hue'] = 0
GO_data = data.loc[GO_genes]
GO_data['hue'] = 1
sample_data = data.loc[sample_genes]
sample_data['hue'] = 2
data = data.append([GO_data,sample_data])
data
fig, ax = plt.subplots(figsize=(12,9))
fig.patch.set_alpha(0)
ax.patch.set_alpha(0)
ax.axis('off')
sns.stripplot(x='hue', y='2',data=data, ax=ax);
GO_genes = GO2genes['GO:0006281']
GCV[Eq].loc[GO_genes]
len(set(GCV.columns.droplevel([depth-1])))
ax = sns.jointplot(data=data, x='1', y='2')
genes = data[data['1']>0.5]
genes
annotation_df[(annotation_df.Systematic_ID.isin(genes.index)) & (annotation_df.Level == 1)].GO_ID.unique()
len(annotation_df[annotation_df.Level == 1].GO_ID.unique())
for go_id in annotation_df[annotation_df.Level == 1].GO_ID.unique():
print(go_dag[go_id].name)
n = 123
m = 45
nx.barabasi_albert_graph(n,m).number_of_edges()
m*(n-m)
n = 5000
rho = 0.005
n/2 - np.sqrt(n**2/4 - n*(n-1)*rho/2)
nx.density(nx.barabasi_albert_graph(n,25))
###Output
_____no_output_____ |
luentokirjat/Lecture 2.ipynb | ###Markdown
Repetition is the mother of learning, and learning happens all the time, so let's go back to the previous lecture. We talked about what the profile of a data science professional really is: do you pick your own niche of expertise, or do you have a little bit of everything? On this topic, a Harvard Business Review [article](https://hbr.org/2019/03/why-data-science-teams-need-generalists-not-specialists?utm_campaign=hbr&utm_source=twitter&utm_medium=social) appeared in the Slack feed: "There are other downsides to functional specialization. It can lead to loss of accountability and passion from the workers. Smith himself criticizes the division of labor, suggesting that it leads to the dulling of talent—that workers become ignorant and insular as their roles are confined to a few repetitive task. While specialization may provide process efficiencies it is less likely to inspire workers." --Eric Colson is Chief Algorithms Officer at Stitch Fix. Prior to that he was Vice President of Data Science and Engineering at Netflix.
The data science process: Philip Guo walks through the data science workflow in an [ACM blog post](https://cacm.acm.org/blogs/blog-cacm/169199-data-science-workflow-overview-and-challenges/fulltext). The figure presents the four main phases of the work: data preparation, the back-and-forth between analysis and reflection on the results, and finally communicating the results to the audience in a suitable form.  A final remark on the data science workflow concerns ETL and DAD. Vincent Granville describes in a [blog post](https://www.datasciencecentral.com/profiles/blogs/data-scientist-versus-data-engineer) how ETL (Extract/Transform/Load) is the data engineer's process and DAD (Discover/Access/Distill) the data scientist's process. What is the difference? While the data engineer focuses on software engineering, database design, and making sure the data flows smoothly through the pipeline and produces the key summaries of the data stream, the data scientist focuses on understanding the value the data creates. This does not remove the need to understand the pipeline, but it makes creating value from data the central element.
Business relevance: how can the business relevance of data analytics be justified? It has been [shown](https://link.springer.com/article/10.1007/s10796-016-9720-4) that data analytics capabilities (identification, acquisition, integration, delivery, and analytics tooling) together with organizational readiness (strategy, management support, resourcing) improve a company's performance. The point is above all that analytics can increase understanding of a business process, not that the goal is to replace people. This understanding is supported by the [four types of analytics](https://journals.sagepub.com/doi/pdf/10.1177/0256090920130401), which are divided into the following themes:
- Descriptive analytics
- Diagnostic analytics
- Predictive analytics
- Prescriptive analytics

 Descriptive analytics tries to explain what has happened: it is about identifying and modelling the relevant variables in order to build a picture of what is going on in the process. Diagnostic analytics tries to explain why something happened: the work is exploratory description and visualization of current, partly still unknown data, so that we can better understand why a given event occurred. Predictive analytics tries, as the name says, to anticipate a future business situation, such as sales; most commonly this means modelling time-series data into the future. A good way to frame the question is: what is going to happen? Prescriptive analytics tries to answer the question of what should be done about it. 
With the business goals in the background, prescriptive analytics aims to produce an understanding of how the current business process could be optimized towards those goals. An example of predictive analytics: at the 2018 AnacondaCon conference, one of the more interesting talks focused on how to build a large-scale system for predictive maintenance. This is clearly a form of predictive analytics, i.e. it answers the question of what is likely to happen. The example also contains a prescriptive analytics layer, so note that the four-field classification presented above is not a "rigid" taxonomy but a thinking tool.
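As a tiny numerical illustration of the "predictive" quadrant (this sketch is not part of the lecture material; the series and numbers are made up), predictive analytics in its simplest form is just fitting a model to historical data and extrapolating it forward. The conference talk in the next cell then shows what this looks like at industrial scale.

```python
# Toy sketch: fit a linear trend to a short monthly sales series and extrapolate 3 months ahead.
import numpy as np
import pandas as pd

sales = pd.Series([120, 132, 129, 141, 150, 158],
                  index=pd.period_range("2019-01", periods=6, freq="M"))
t = np.arange(len(sales))
slope, intercept = np.polyfit(t, sales.values, deg=1)   # least-squares trend line

future_t = np.arange(len(sales), len(sales) + 3)
forecast = intercept + slope * future_t
print(forecast.round(1))   # a naive answer to "what is going to happen?"
```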
###Code
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/fA0yfemf5n0" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
###Output
_____no_output_____
###Markdown
Another good example of predictive analytics comes from using social media data to anticipate demand. ["Changes in the number of forum posts about veganism anticipated changes in the sales volume of plant-based milk products with a two-week lag, according to a study by the University of Helsinki Centre for Consumer Society Research. An article on the topic by four researchers has been published in the latest issue of the Finnish Economic Journal (Kansantaloudellinen aikakauskirja)."](https://www.kauppalehti.fi/uutiset/some-keskustelut-ennakoivat-menekin-kasvua/bcdf0ca7-6971-48b0-9e03-6d9b46e76006)
What is the current state of analytics in companies? Although the [report](https://www.bain.com/contentassets/5672af3b82f84aa2a80ca732fa8ea06c/bain20_brief_the_value_of_big_data.pdf) is already somewhat old, it gives a good picture of companies' analytics capabilities. Figure 1 of the report summarizes the results of a large survey as follows:
- Only 36% of companies have a dedicated data analytics team.
- Only 19% of companies have a clear and reliable data process.
- Only 23% of companies have a clear analytics strategy.
- Only 38% of companies use modern analytics tools.

What does this mean in practice? A good description of the current state across different themes is also offered by [Sivarajah et al.](https://www.sciencedirect.com/science/article/pii/S014829631630488Xf0005) They do not use the same four-field division as above, but they describe the methods of the different archetypes (both current ones and future possibilities) in a very interesting way. Highly recommended reading, or at least worth skimming through!
From business relevance to methods: crawlers and scrapers (web crawler & web scraping). A crawler (also called a spider or spiderbot) is a robot that systematically works through web addresses, typically in order to index pages. As a starting point, the crawler is given a set of web page addresses by the user. The crawler visits these pages and grows its search space by adding the hyperlinks found on them to its own crawl queue. A listing of different crawlers is available [here](https://bigdata-madesimple.com/top-50-open-source-web-crawlers-for-data-mining/).
 A crawler's behaviour is controlled by restricting which pages it processes (selection), deciding when it should revisit pages it has already seen (revisit policy), preventing it from overloading the sites it visits (politeness), and coordinating the work of several crawlers (parallelization). A scraper (web scraping, web harvesting, or web data extraction) is a tool for collecting information from web pages automatically. Practical implementations are usually combinations of a crawler and a scraper, in which the crawler builds the search space by adding hyperlinks to the queue and the scraper collects information from the pages according to the given parameters. Most commonly a scraper tries to identify meta elements in the structure of the web pages and use them to store the information the user is interested in; other techniques include, for example, searches based on semantic structure or recognition based on page layout. A good listing of different scrapers for different use cases is available [here](https://www.scraperapi.com/blog/the-10-best-web-scraping-tools). On this course we will take a closer look at [Scrapy](http://www.scrapy.org). Scrapy is an open-source crawler and scraper with which it is reasonably easy to implement different kinds of data collection processes, and these can be integrated into a Python workflow. 
The video below is the first part of a good series of tutorial videos on using Scrapy.
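To make the selection/revisit/politeness ideas above concrete, here is a minimal crawler-plus-scraper sketch. It is illustrative only, not the course implementation (the course uses Scrapy, shown in the video and shell example below), and it assumes the third-party packages `requests` and `beautifulsoup4` are installed.

```python
# Minimal breadth-first crawler with a simple "scrape the page title" step.
import time
from collections import deque
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

def crawl(seed_urls, max_pages=20, delay=1.0):
    queue = deque(seed_urls)      # frontier of URLs still to visit
    seen = set(seed_urls)         # selection: never enqueue the same URL twice
    titles = {}
    while queue and len(titles) < max_pages:
        url = queue.popleft()
        time.sleep(delay)         # politeness: do not hammer the server
        try:
            html = requests.get(url, timeout=10).text
        except requests.RequestException:
            continue
        soup = BeautifulSoup(html, "html.parser")
        titles[url] = soup.title.string if soup.title else ""   # "scraping" step
        for a in soup.find_all("a", href=True):                  # "crawling" step
            link = urljoin(url, a["href"])
            if link.startswith("http") and link not in seen:
                seen.add(link)
                queue.append(link)
    return titles
```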
###Code
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/ve_0h4Y8nuI" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
###Output
_____no_output_____
###Markdown
Let's try a short example that illustrates what the process can produce. We will do the example in shell mode: `scrapy shell`. Let's fetch, say, the first page of the Verkkokauppa.com phone listing: `fetch('https://www.verkkokauppa.com/fi/catalog/22a/Puhelimet/products?list=1&page=1')` After this we can inspect what we fetched: `view(response)` If we wanted to download all the phones on offer, with their names and details, how would we proceed? At the very least we would need to find out how to get at the details of each individual phone. Using the browser's inspect tool we found a meta field that lets us target our query at the right information: `response.xpath("//a[contains(@class, 'thumbnail-link thumbnail-link--grid-full-width')]//@href").extract()` What did we get? What can be achieved by using scrapers and crawlers? [We collected from the Google Play](https://scielo.conicyt.cl/pdf/jtaer/v10n2/art02.pdf) platform about 260 million customer reviews over 18 months. We showed that positive reviews on the platform correlate with sales in a statistically significant way; the effect of the ratings appears stronger when the examined time window is longer, and ratings also matter more when the final price of the product is higher. What could we do with this knowledge? Finally, a few good remarks about crawlers and scrapers: is using scrapers and crawlers even allowed? On that question it is worth reading at least [this](https://benbernardblog.com/web-scraping-and-crawling-are-perfectly-legal-right/) text.
Data formats and data types: a data type is the standard form in which a program handles the information stored in a variable. Most systems recognize at least real, numeric and boolean types. These, or whatever other data types the system allows, restrict which operations can be performed on the data.  The following really simple experiment serves as an example.
###Code
userInput=input('Give a number between 0-9: ')
try:
sumValue=1+userInput
except TypeError:
    print('It seems you did not enter a number?')
###Output
Give a number between 0-9: 5,0
It seems you did not enter a number?
###Markdown
["If the prompt argument is present, it is written to standard output without a trailing newline. The function then reads a line from input, converts it to a string (stripping a trailing newline), and returns that. When EOF is read, EOFError is raised."](https://docs.python.org/3/library/functions.htmlinput)
###Code
userInput=input('Give an integer between 0-9: ')
while userInput.isdigit()==False:
    userInput=input('That did not quite work. Please enter an actual integer between 0-9: ')
try:
sumValue=1+int(userInput)
    print('I added one to the number you gave and got '+str(sumValue)+".")
except TypeError:
    print('It seems you did not enter a number?')
###Output
Give an integer between 0-9: 1
I added one to the number you gave and got 2.
###Markdown
Data storage formats: in practice, data can be divided into two forms, text-based and binary file types. A text file, such as ASCII, is readable by a human; a binary file, such as a JPG image, is readable only by a computer. Managing data — data wrangling: data wrangling (also called data munging) is the process of transforming and mapping data, often from a raw form, into a form in which it can actually be used. The end product usually depends on the intended use, i.e. on the analytics process that the wrangling is part of. A well-known project focused on data wrangling is the [Stanford/Berkley Wrangler](http://vis.stanford.edu/wrangler/), whose end result you can get to know in the video below.
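To make the text-versus-binary distinction above concrete, here is a small illustration (not part of the lecture material; the file names are arbitrary). The same three numbers are written once as human-readable text and once as raw bytes. The Wrangler video follows in the next cell.

```python
# Sketch: the same values stored as text and as binary.
import struct

values = [1.5, 2.25, 3.125]

with open("values.txt", "w") as f:      # text file: readable in any editor
    f.write(",".join(str(v) for v in values))

with open("values.bin", "wb") as f:     # binary file: only meaningful to a program
    f.write(struct.pack("3d", *values))

print(open("values.txt").read())         # 1.5,2.25,3.125
print(open("values.bin", "rb").read())   # b'\x00\x00\x00\x00\x00\x00\xf8?...'
```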
###Code
HTML('<iframe src="https://player.vimeo.com/video/19185801" width="640" height="480" frameborder="0" allowfullscreen></iframe>')
###Output
_____no_output_____
###Markdown
Pandas: Pandas is nevertheless an excellent tool for getting up to speed with managing data. Below we try to get going by analysing Kaggle data. Kaggle has a number of tags that help you find interesting practice data for different purposes; Pandas experiments, for example, can be started under the "Data Cleaning" tag. For this purpose I have downloaded the [311 Service Request Pitt](https://www.kaggle.com/yoghurtpatil/311-service-requests-pitt) dataset onto my machine.
###Code
import pandas as pd
df = pd.read_csv("ServreqPitt.csv")
df.head()
# First, check that all request IDs are unique
df['REQUEST_ID'].is_unique
# We can safely assume that REQUEST_ID is a useful, process-relevant index for digging into the data.
# We could make it the index, especially now that we know it is unique.
df.set_index('REQUEST_ID', inplace=True)
df.head()
# Which data types look wrong?
df.dtypes
###Output
_____no_output_____
###Markdown

###Code
# Look more closely at two columns
df[['FIRE_ZONE', 'POLICE_ZONE']].head()
# What did we notice above?
df[['FIRE_ZONE', 'POLICE_ZONE']]=df[['FIRE_ZONE', 'POLICE_ZONE']].astype('category')
# What did we achieve?
df.dtypes
# Could this have been done more neatly?
dfZone=df[['FIRE_ZONE', 'POLICE_ZONE']]
dfZone.head()
# Are the fire and police zones laid out the same way?
dfZone.groupby(['FIRE_ZONE', 'POLICE_ZONE']).size()
# Convert the timestamp information to the proper datetime type
df['TimeStamp'] = pd.to_datetime(df['CREATED_ON'], format='%Y%m%dT%H:%M:%S', errors='raise')
df['TimeStamp'].head()
# Check the size of the DataFrame
df.shape
# Drop the rows that contain NaN values
df = df.dropna(axis=0)
# What has happened to the DataFrame?
df.shape
df.groupby('REQUEST_ORIGIN').count()
###Output
_____no_output_____ |
PsyTrack_Manuscript_Figures.ipynb | ###Markdown
_Extracting the Dynamics of Behavior in Decision-Making Experiments_ Figure Generatorby Nicholas A. Roy $\quad$ _(v1.1, last updated November 23, 2020)_--- This notebook will precisely recreate all figures (Figures 1-8 and Supplementary Figures S1-8) from our manuscript _Extracting the Dynamics of Behavior in Decision-Making Experiments_. All figures will require the `PsyTrack` python package, as well as several other standard Python libraries. Figures requiring data will require that the corresponding dataset be downloaded and pre-processed. The necessary requirements for each figure are listed below, followed by instructions for downloading & preparing each of the three datasets: - Only the `PsyTrack` package is needed to produce the simulated data required for Figures 1, 2, S1, and S2 - The IBL mouse dataset is required (as well as the `ONE Light` Python library) for Figures 3, 4, and S3-6 - The Akrami rat dataset is required for Figures 5, 6, 8, and S7 - The Akrami human subject dataset is required for Figures 7 and S8 A section with preliminary setup code is below, followed by code and instructions to load each dataset. There is then a section for each figure, with subsections for each subfigure. A few things to note: - **ALTERATIONS** | Many subfigures in the paper include some superficial additions done in Adobe Illustrator. Subfigures created purely inside Adobe illustrator (e.g. schematic figures) are noted. - **COMPUTE TIME** | While most individual `PsyTrack` models can be fit quickly, some figure require fitting dozens of models and so can take a relatively long time to compute. Subfigures which take longer than 90 seconds to produce are marked with an approximation of how long they ought to take. - **LOCAL STORAGE** | Many figures save the results of model fits to local storage, so figures can be retrieved and modified without having to refit the model each time. All the temporary files produced by the notebook are saved to the directory specified by the `SPATH` variable in the Preliminary setup section below. All temporary files plus all the subfigures saved should use under 500MB total. Note that if you are using a Colab hosted runtime, then anything saved to Colab local storage will disappear once the runtime expires (Colab has a 12 hour max). There is code to download all figures from Colab at the end of the notebook. - **SUBFIGURE DEPENDENCIES** | Occasionally, subfigures will depend upon the results of an earlier subfigure (usually part of the same figure) — a cell which fails to run may simply need an earlier cell to be run first (these instances should be clearly marked). - **SUBJECT-SPECIFIC DETAILS** | Many analyses run on an example subject should allow for other subjects to be easily swapped in, but some analyses may have subject-specific code that may impede this (i.e. hardcoded dates to extract certain sessions for analysis). - **VERSIONING** | Any additions, fixes, or changes made to this notebook will be noted in the versioning section at the very end of the notebook. --- Preliminary setup and data retrieval Users will need to install the `PsyTrack` package (version 2.0), by running the cell below. We also define a variable `SPATH` which is the directory where all data files and figures produced by the notebook will be saved.Several standard Python packages are used: `numpy`, `scipy`, `matplotlib`, and `pandas`. We import all these libraries before proceeding, as well as setting several parameters in `matplotlib` to standardize the figures produced.
###Code
import os
import re
from IPython.display import clear_output
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
# Install then import PsyTrack
!pip install psytrack==2.0
import psytrack as psy
# Set save path for all figures, decide whether to save permanently
SPATH = "Figures/"
!mkdir -p "{SPATH}"
# Set matplotlib defaults for making files consistent in Illustrator
colors = psy.COLORS
zorder = psy.ZORDER
plt.rcParams['figure.dpi'] = 140
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['savefig.facecolor'] = (1,1,1,0)
plt.rcParams['savefig.bbox'] = "tight"
plt.rcParams['font.size'] = 10
# plt.rcParams['font.family'] = 'sans-serif' # not available in Colab
# plt.rcParams['font.sans-serif'] = 'Helvetica' # not available in Colab
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['xtick.labelsize'] = 10
plt.rcParams['ytick.labelsize'] = 10
plt.rcParams['axes.labelsize'] = 12
clear_output()
###Output
_____no_output_____
###Markdown
--- Download and pre-process IBL mouse data 1) Use the command below to instal the IBL's [ONE Light](https://github.com/int-brain-lab/ibllib/tree/master/oneibl) Python library, download the [IBL mouse behavior dataset](https://doi.org/10.6084/m9.figshare.11636748.v7) _(version 7, uploaded February 7, 2020)_ to our `SPATH` directory as `ibl-behavior-data-Dec2019.zip`, and unzip the file.
###Code
#!pip install ibllib
!wget -nc -O "{SPATH}ibl-behavior-data-Dec2019.zip" "https://ndownloader.figshare.com/files/21623715"
!unzip -d "{SPATH}" -n "{SPATH}ibl-behavior-data-Dec2019.zip"
#clear_output()
###Output
zsh:1: command not found: wget
unzip: cannot find or open Figures/ibl-behavior-data-Dec2019.zip, Figures/ibl-behavior-data-Dec2019.zip.zip or Figures/ibl-behavior-data-Dec2019.zip.ZIP.
###Markdown
2) Use the [ONE Light](https://github.com/int-brain-lab/ibllib/tree/master/oneibl) library to build a table of all the subject and session data contained within the dataset.
###Code
from oneibl.onelight import ONE
ibl_data_path = SPATH + 'ibl-behavioral-data-Dec2019'
current_cwd = os.getcwd()
os.chdir(ibl_data_path)
# Search all sessions that have these dataset types.
required_vars = ['_ibl_trials.choice', '_ibl_trials.contrastLeft',
'_ibl_trials.contrastRight','_ibl_trials.feedbackType']
one = ONE()
eids = one.search(required_vars)
mouseData = pd.DataFrame()
for eid in eids:
lab, _, subject, date, session = eid.split("/")
sess_vars = {
"eid": eid,
"lab": lab,
"subject": subject,
"date": date,
"session": session,
}
mouseData = mouseData.append(sess_vars, sort=True, ignore_index=True)
os.chdir(current_cwd)
mouseData
###Output
_____no_output_____
###Markdown
3) Next, we use the table of session data to process the raw trial data below into a single CSV file, `ibl_processed.csv`, saved to our `SPATH` directory.There are several known anomalies in the raw data: - CSHL_002 codes left contrasts as negative right contrasts on 81 trials (these trials are corrected) - ZM_1084 has `feedbackType` of 0 for 3 trials (these trials are omitted) - DY_009, DY_010, DY_011 each have <5000 trials total (no adjustment) - ZM_1367, ZM_1369, ZM_1371, ZM_1372, and ZM_1743 are shown non-standard contrast values of 0.04 and 0.08 (no adjustment)
###Code
all_vars = ["contrastLeft", "contrastRight", "choice", "feedbackType", "probabilityLeft"]
df = pd.DataFrame()
all_mice = []
for j, s in enumerate(mouseData["subject"].unique()):
print("\rProcessing " + str(j+1) + " of " + str(len(mouseData["subject"].unique())), end="")
mouse = mouseData[mouseData["subject"]==s].sort_values(['date', 'session']).reset_index()
for i, row in mouse.iterrows():
myVars = {}
for v in all_vars:
filename = "_ibl_trials." + v + ".npy"
var_file = os.path.join(ibl_data_path, row.eid, "alf", filename)
myVars[v] = list(np.load(var_file).flatten())
num_trials = len(myVars[v])
myVars['lab'] = [row.lab]*num_trials
myVars['subject'] = [row.subject]*num_trials
myVars['date'] = [row.date]*num_trials
myVars['session'] = [row.session]*num_trials
all_mice += [pd.DataFrame(myVars, columns=myVars.keys())]
df = pd.concat(all_mice, ignore_index=True)
df = df[df['choice'] != 0] # dump mistrials
df = df[df['feedbackType'] != 0] # 3 anomalous trials from ZM_1084, omit
df.loc[np.isnan(df['contrastLeft']), "contrastLeft"] = 0
df.loc[np.isnan(df['contrastRight']), "contrastRight"] = 0
df.loc[df["contrastRight"] < 0, "contrastLeft"] = np.abs(df.loc[df["contrastRight"] < 0, "contrastRight"])
df.loc[df["contrastRight"] < 0, "contrastRight"] = 0 # 81 anomalous trials in CSHL_002, correct
df["answer"] = df["feedbackType"] * df["choice"] # new column to indicate correct answer
df.loc[df["answer"]==1, "answer"] = 0
df.loc[df["answer"]==-1, "answer"] = 1
df.loc[df["feedbackType"]==-1, "feedbackType"] = 0
df.loc[df["choice"]==1, "choice"] = 0
df.loc[df["choice"]==-1, "choice"] = 1
df.to_csv(SPATH+"ibl_processed.csv", index=False)
###Output
_____no_output_____
###Markdown
4) Next we run a few sanity checks on our data, to make sure everything processed correctly.
###Code
print("contrastLeft: ", np.unique(df['contrastLeft'])) # [0, 0.0625, 0.125, 0.25, 0.5, 1.0] and [0.04, 0.08]
print("contrastRight: ", np.unique(df['contrastRight'])) # [0, 0.0625, 0.125, 0.25, 0.5, 1.0] and [0.04, 0.08]
print("choice: ", np.unique(df['choice'])) # [0, 1]
print("feedbackType: ", np.unique(df['feedbackType'])) # [0, 1]
print("answer: ", np.unique(df['answer'])) # [0, 1]
###Output
_____no_output_____
###Markdown
5) Finally, we define a function `getMouse` that extracts the data for a single mouse from our CSV file, and returns it as a PsyTrack compatible `dict`. We will use this function to access IBL mouse data in the figures below. Note the keyword argument and default value $p=5$ which controls the strength of the $\tanh$ transformation on the contrast values. See Figure S4 and the STAR Methods of the accompanying paper for more details.**Note:** Once steps 1-5 have been run once, only step 5 will need to be run on subsequent uses.
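As a quick sanity check on the transformation (this snippet is not part of the original notebook), the standard contrast levels map as follows under the default $p=5$, showing how low contrasts are stretched apart while high contrasts are compressed towards 1:

```python
# How the tanh transformation reshapes the standard IBL contrast levels (p=5).
import numpy as np

p = 5
contrasts = np.array([0, 0.0625, 0.125, 0.25, 0.5, 1.0])
print(np.round(np.tanh(p * contrasts) / np.tanh(p), 3))
# -> [0.    0.303 0.555 0.848 0.987 1.   ]
```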
###Code
ibl_mouse_data_path = SPATH + "ibl_processed.csv"
MOUSE_DF = pd.read_csv(ibl_mouse_data_path)
def getMouse(subject, p=5):
df = MOUSE_DF[MOUSE_DF['subject']==subject] # Restrict data to the subject specified
cL = np.tanh(p*df['contrastLeft'])/np.tanh(p) # tanh transformation of left contrasts
cR = np.tanh(p*df['contrastRight'])/np.tanh(p) # tanh transformation of right contrasts
inputs = dict(cL = np.array(cL)[:, None], cR = np.array(cR)[:, None])
dat = dict(
subject=subject,
lab=np.unique(df["lab"])[0],
contrastLeft=np.array(df['contrastLeft']),
contrastRight=np.array(df['contrastRight']),
date=np.array(df['date']),
dayLength=np.array(df.groupby(['date','session']).size()),
correct=np.array(df['feedbackType']),
answer=np.array(df['answer']),
probL=np.array(df['probabilityLeft']),
inputs = inputs,
y = np.array(df['choice'])
)
return dat
###Output
_____no_output_____
###Markdown
--- Download and pre-process Akrami rat data 1) Download the [Akrami rat behavior dataset](https://doi.org/10.6084/m9.figshare.12213671.v1) _(version 1, uploaded May 18, 2020)_ to the `SPATH` directory as `rat_behavior.csv`.
###Code
!wget -nc -O "{SPATH}rat_behavior.csv" "https://ndownloader.figshare.com/files/22461707"
clear_output()
###Output
_____no_output_____
###Markdown
2) Sessions in the data corresponding to early shaping stages will be omitted, as will all mistrials (see the dataset's README for more info). The `getRat` function will then load a particular rat into a PsyTrack compatible `dict`.`getRat` has two optional parameters: `first` which will return a data set with only the first `first` trials (the default of 20,000 works for all analyses); `cutoff` excludes sessions with fewer than `cutoff` valid trials (default set to 50). We will use this function to access Akrami rat data in the figures below.
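Once the cell below has been run, a minimal usage sketch looks like this (illustrative only; the subject ID is pulled from the data rather than hard-coded):

```python
# Load one rat into the PsyTrack-compatible format and inspect its shape.
example_subject = RAT_DF["subject_id"].unique()[0]
dat = getRat(example_subject, first=20000, cutoff=50)
print(dat["subject"], dat["y"].shape, list(dat["inputs"].keys()))
```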
###Code
akrami_rat_data_path = SPATH + "rat_behavior.csv"
RAT_DF = pd.read_csv(akrami_rat_data_path)
RAT_DF = RAT_DF[RAT_DF["training_stage"] > 2] # Remove trials from early training
RAT_DF = RAT_DF[~np.isnan(RAT_DF["choice"])] # Remove mistrials
def getRat(subject, first=20000, cutoff=50):
df = RAT_DF[RAT_DF['subject_id']==subject] # restrict dataset to single subject
df = df[:first] # restrict to "first" trials of data
# remove sessions with fewer than "cutoff" valid trials
df = df.groupby('session').filter(lambda x: len(x) >= cutoff)
# Normalize the stimuli to standard normal
s_a = (df["s_a"] - np.mean(df["s_a"]))/np.std(df["s_a"])
s_b = (df["s_b"] - np.mean(df["s_b"]))/np.std(df["s_b"])
# Determine which trials do not have a valid previous trial (mistrial or session boundary)
t = np.array(df["trial"])
prior = ((t[1:] - t[:-1]) == 1).astype(int)
prior = np.hstack(([0], prior))
# Calculate previous average tone value
s_avg = (df["s_a"][:-1] + df["s_b"][:-1])/2
s_avg = (s_avg - np.mean(s_avg))/np.std(s_avg)
s_avg = np.hstack(([0], s_avg))
s_avg = s_avg * prior # for trials without a valid previous trial, set to 0
# Calculate previous correct answer
h = (df["correct_side"][:-1] * 2 - 1).astype(int) # map from (0,1) to (-1,1)
h = np.hstack(([0], h))
h = h * prior # for trials without a valid previous trial, set to 0
# Calculate previous choice
c = (df["choice"][:-1] * 2 - 1).astype(int) # map from (0,1) to (-1,1)
c = np.hstack(([0], c))
c = c * prior # for trials without a valid previous trial, set to 0
inputs = dict(s_a = np.array(s_a)[:, None],
s_b = np.array(s_b)[:, None],
s_avg = np.array(s_avg)[:, None],
h = np.array(h)[:, None],
c = np.array(c)[:, None])
dat = dict(
subject = subject,
inputs = inputs,
s_a = np.array(df['s_a']),
s_b = np.array(df['s_b']),
correct = np.array(df['hit']),
answer = np.array(df['correct_side']),
y = np.array(df['choice']),
dayLength=np.array(df.groupby(['session']).size()),
)
return dat
###Output
_____no_output_____
###Markdown
--- Download and pre-process Akrami human subject data 1) Download the [Akrami human subject behavior dataset](https://doi.org/10.6084/m9.figshare.12213671.v1) _(version 1, uploaded May 18, 2020)_. See the dataset's README for more info.
###Code
!wget -nc -O "{SPATH}human_auditory.csv" "https://ndownloader.figshare.com/files/22461695"
clear_output()
###Output
_____no_output_____
###Markdown
2) We define a function `getHuman` that extracts the data for a single human subject from the downloaded CSV file, and returns it in a PsyTrack compatible `dict`. We will use this function to access Akrami human subject data in the figures below.
###Code
akrami_human_data_path = SPATH + "human_auditory.csv"
HUMAN_DF = pd.read_csv(akrami_human_data_path)
def getHuman(subject):
df = HUMAN_DF[HUMAN_DF['subject_id']==subject]
s_a = (df["s_a"] - np.mean(df["s_a"]))/np.std(df["s_a"])
s_b = (df["s_b"] - np.mean(df["s_b"]))/np.std(df["s_b"])
s_avg = (df["s_a"][:-1] + df["s_b"][:-1])/2
s_avg = (s_avg - np.mean(s_avg))/np.std(s_avg)
s_avg = np.hstack(([0], s_avg))
inputs = dict(s_a = np.array(s_a)[:, None],
s_b = np.array(s_b)[:, None],
s_avg = np.array(s_avg)[:, None])
dat = dict(
subject = subject,
inputs = inputs,
s_a = np.array(df['s_a']),
s_b = np.array(df['s_b']),
correct = np.array(df['reward']),
answer = np.array(df['correct_side']),
y = np.array(df['choice'])
)
return dat
###Output
_____no_output_____
###Markdown
Figure 1 | Schematic of Psychometric Weight Model **(A)** IBL task schematic (Illustrator only)**(B)** Example inputs (Illustrator only)**(C)** Schematic weight trajectories using regressors in (B)**(D)** Psychometric curves produced from weights from (C) at different points in training Figure 1c
###Code
# Fig 1b — generate schematic weight trajectories
def sigmoid(lenx, bias, slope):
x = np.arange(lenx)
return 1.0/(1.0 + np.exp(-(x-bias)/slope))
x = np.arange(10000)
bias_w = 0.8*sigmoid(10000, 6000, 1500)[::-1] - 0.08
sL_w = -sigmoid(10000, 5000, 700) + 0.05
sR_w = sigmoid(10000, 6500, 800) - 0.1
gain = 4
w = gain*np.vstack((bias_w,sL_w,sR_w))
# Plotting
plt.figure(figsize=(3.5,1.2))
plt.plot(x, w[0], c=colors['bias'], lw=2)
plt.plot(x, w[1], c=colors['sL'], lw=2)
plt.plot(x, w[2], c=colors['sR'], lw=2)
plt.axhline(0, color="black", linestyle="--", alpha=0.5, zorder=0)
plt.xticks([]); plt.yticks([0])
plt.gca().set_yticklabels([0])
plt.xlim(0,10000); plt.ylim(-1.02*gain,1.02*gain)
# plt.xlabel("Trials"); plt.ylabel("Weights")
# hand pick divider lines to make the Illustrator plot look nice
xs = [1270,4975,8690]
for x in xs:
plt.axvline(x, color="gray", lw=2, alpha=0.0)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
# this makes the plot itself reflect the figsize, excluding the axis labels and ticks
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig1c.pdf")
###Output
_____no_output_____
###Markdown
Figure 1d
###Code
# Fig 1c — generate psychmoetric curves corresponding to weights at various times
def generate_psych(w,x):
xL = x.copy(); xR = x.copy()
xL[xL>0] = 0; xR[xR<0] = 0
xL = np.abs(xL)
wx = w[0] + xL*w[1] + xR*w[2]
pR = 1/(1+np.exp(-wx))
return pR
# Generate psychometric curve for each time point in xs
for i,cut in enumerate(xs):
x = np.arange(-1,1.01,.01)
pR = generate_psych(w[:,cut],x)
x_dot = np.array([-1.0,-0.5,0.0,0.5,1.0])
pR_dot = generate_psych(w[:,cut],x_dot)
plt.figure(figsize=(1.25,1))
plt.plot(x*100, pR*100, color="black", lw=1.5)
plt.plot(x_dot*100, pR_dot*100, color="black", marker='o', lw=0, markersize=4)
# Grid lines
plt.axvline( 0, color="black", linestyle="-", alpha=0.1)
plt.axhline( 50, color="black", linestyle="-", alpha=0.1)
plt.xticks([-100,-50,0,50,100]); plt.yticks([0,50,100])
plt.gca().set_xticklabels([]); plt.gca().set_yticklabels([])
plt.xlim(-110,110); plt.ylim(0,100)
# plt.xlabel("Right - Left Contrast (%)"); plt.ylabel("Prob. Left (%)")
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.savefig(SPATH + "Fig1d_"+str(i)+".pdf")
###Output
_____no_output_____
###Markdown
Figure 2 | Recovering Psychometric Weights from Simulated Data **(A)** $K=4$ simulated weights of different sigma for $N=5000$ trials, with recovery showing 95% credible interval**(B)** Show the recovery for each sigma in (A), with 95% credible interval**(C)** 3 simulated weights as in (A), except with $\sigma_{\text{Day}}$ **(D)** Show the recovery for hyperparameters in (C), as in (B) Figure 2a
###Code
# Fig 2a — generate simulated weights and recover with errorbars
# Simulate
seed = 31 # paper uses 31
num_weights = 4
num_trials = 5000
hyper = {'sigma' : 2**np.array([-4.0,-5.0,-6.0,-7.0]),
'sigInit' : 2**np.array([ 0.0, 0.0, 0.0, 0.0])}
# Compute
gen = psy.generateSim(K=num_weights, N=num_trials, hyper=hyper,
boundary=6.0, iterations=1, seed=seed, savePath=None)
# Recovery
rec = psy.recoverSim(gen)
# Save interim result
np.savez_compressed(SPATH+'fig2a_data.npz', rec=rec, gen=gen)
# Reload data
rec = np.load(SPATH+'fig2a_data.npz', allow_pickle=True)['rec'].item()
gen = np.load(SPATH+'fig2a_data.npz', allow_pickle=True)['gen'].item()
# Plotting
sim_colors = [colors['bias'], colors['s1'], colors['s2'], colors['s_avg']]
fig = plt.figure(figsize=(3.75,1.4))
for i, c in enumerate(sim_colors):
plt.plot(gen['W'][:,i], c=c, lw=0.5, zorder=2*i)
plt.plot(rec['wMode'][i], c=c, lw=1, linestyle='--', alpha=0.5, zorder=2*i+1)
plt.fill_between(np.arange(num_trials),
rec['wMode'][i] - 2 * rec['hess_info']['W_std'][i],
rec['wMode'][i] + 2 * rec['hess_info']['W_std'][i],
facecolor=c, alpha=0.2, zorder=2*i+1)
plt.axhline(0, color="black", linestyle="--", lw=0.5, alpha=0.5, zorder=0)
plt.xticks(1000*np.arange(0,6))
plt.gca().set_xticklabels([0,1000,2000,3000,4000,5000])
plt.yticks(np.arange(-4,5,2))
plt.xlim(0,5000); plt.ylim(-4.3,4.3)
# plt.xlabel("Trials"); plt.ylabel("Weights")
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig2a.pdf")
###Output
_____no_output_____
###Markdown
Figure 2b
###Code
# Reload data
rec = np.load(SPATH+'fig2a_data.npz', allow_pickle=True)['rec'].item()
# Plotting
sim_colors = [colors['bias'], colors['s1'], colors['s2'], colors['s_avg']]
plt.figure(figsize=(1.4,1.4))
true_sigma = np.log2(rec['input']['sigma'])
avg_sigma = np.log2(rec['hyp']['sigma'])
err_sigma = rec['hess_info']['hyp_std']
for i, c in enumerate(sim_colors):
plt.plot([i-0.3, i+0.3], [true_sigma[i]]*2, color="black", linestyle="-", lw=1.2, zorder=0)
plt.errorbar([i], avg_sigma[i], yerr=1.96*err_sigma[i], c=c, lw=1, marker='o', markersize=5)
plt.xticks([0,1,2,3]); plt.yticks(np.arange(-8,-2))
plt.xlim(-0.5,3.5); plt.ylim(-7.5,-3.5)
plt.gca().set_xticklabels([r"$\sigma_1$", r"$\sigma_2$", r"$\sigma_3$", r"$\sigma_4$"])
# plt.ylabel(r"$\log_2(\sigma)$")
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig2b.pdf")
###Output
_____no_output_____
###Markdown
Figure 2c_2 min_
###Code
# Fig 2c — generate simulated weights and recover with errorbars
# Simulate
seed = 102 # paper uses 102
num_weights = 3
num_trials = 5000
hyper = {'sigma' : 2**np.array([-4.5, -5.0,-16.0]),
'sigInit' : 2**np.array([ 0.0, 0.0, 0.0]),
'sigDay' : 2**np.array([ 0.5,-16.0, 1.0])
}
days = [500]*9
# Compute
gen = psy.generateSim(K=num_weights, N=num_trials, hyper=hyper, days=days,
boundary=10.0, iterations=1, seed=seed, savePath=None)
# Recovery
rec = psy.recoverSim(gen)
# Save interim result
np.savez_compressed(SPATH+'fig2c_data.npz', rec=rec, gen=gen)
# Reload data
rec = np.load(SPATH+'fig2c_data.npz', allow_pickle=True)['rec'].item()
gen = np.load(SPATH+'fig2c_data.npz', allow_pickle=True)['gen'].item()
# Plotting
sim_colors = [colors['bias'], colors['s1'], colors['s2']]
fig = plt.figure(figsize=(3.75,1.4))
for i, c in enumerate(sim_colors):
plt.plot(gen['W'][:,i], c=c, lw=0.5, zorder=5-i)
plt.plot(rec['wMode'][i], c=c, lw=1, linestyle='--', alpha=0.5, zorder=5-i)
plt.fill_between(np.arange(num_trials),
rec['wMode'][i] - 2 * rec['hess_info']['W_std'][i],
rec['wMode'][i] + 2 * rec['hess_info']['W_std'][i],
facecolor=c, alpha=0.2, zorder=5-i)
for i in np.cumsum(days):
plt.axvline(i, color="black", lw=0.5, alpha=0.5, zorder=0)
plt.axhline(0, color="black", linestyle="--", lw=0.5, alpha=0.5, zorder=0)
plt.xticks(1000*np.arange(0,6))
plt.gca().set_xticklabels([0,1000,2000,3000,4000,5000])
plt.yticks(np.arange(-4,5,2))
plt.xlim(0,5000); plt.ylim(-4.3,4.3)
# plt.xlabel("Trials"); plt.ylabel("Weights")
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig2c.pdf")
###Output
_____no_output_____
###Markdown
Figure 2d
###Code
# Reload data
rec = np.load(SPATH+'fig2c_data.npz', allow_pickle=True)['rec'].item()
# Plotting
plt.figure(figsize=(1.4,1.4))
true_sigma = np.log2(rec['input']['sigma'])
avg_sigma = np.log2(rec['hyp']['sigma'])
err_sigma = rec['hess_info']['hyp_std'][:3]
for i, c in enumerate(sim_colors):
plt.plot([2*i-0.3, 2*i+0.3], [true_sigma[i]]*2, color="black", linestyle="-", lw=1.2, zorder=0)
plt.errorbar([2*i], avg_sigma[i], yerr=1.96*err_sigma[i], c=c, lw=1, marker='o', markersize=5)
true_sigma = np.log2(rec['input']['sigDay'])
avg_sigma = np.log2(rec['hyp']['sigDay'])
err_sigma = rec['hess_info']['hyp_std'][3:]
for i, c in enumerate(sim_colors):
plt.plot([2*i-0.3+1, 2*i+0.3+1], [true_sigma[i]]*2, color="black", linestyle="-", lw=1.2, zorder=0)
plt.errorbar([2*i+1], avg_sigma[i], yerr=1.96*err_sigma[i], c=c, lw=1, marker='s', markersize=5)
plt.axvspan(2.6,4.4, facecolor="black", edgecolor="none", alpha=0.1)
plt.xticks(np.arange(6))
plt.yticks([-8,-6,-4,-2,0,2])
plt.gca().set_xticklabels([r"$\sigma_1$", r"$_{day}$",
r"$\sigma_2$", r"$_{day}$",
r"$\sigma_3$", r"$_{day}$",])
plt.xlim(-0.5,5.5); plt.ylim(-8.5,2.5)
# plt.ylabel(r"$\log_2(\sigma)$")
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig2d.pdf")
###Output
_____no_output_____
###Markdown
Figure 3 | Visualization of Early Learning in IBL Mice **(A)** A performance curve of an example mouse (`CSHL_003`) on easy trials during early training**(B)** Psychometric weights for the mouse and sessions shown in (A)**(C)** The performance curves of a subset (1 in 8) of the full population of mice on easy trials in early training (first 16 sessions)**(D)** Psychometric weights for all the mice shown in (C), plus average weights calculated from all mice in the population Figure 3a
###Code
from datetime import date, datetime, timedelta
outData = getMouse('CSHL_003', 5)
easy_trials = (outData['contrastLeft'] > 0.45).astype(int) | (outData['contrastRight'] > 0.45).astype(int)
perf = []
for d in np.unique(outData['date']):
date_trials = (outData['date'] == d).astype(int)
inds = (date_trials * easy_trials).astype(bool)
perf += [np.average(outData['correct'][inds])]
dates = np.unique([datetime.strptime(i, "%Y-%m-%d") for i in outData['date']])
dates = np.arange(len(dates)) + 1
# Plotting
fig = plt.figure(figsize=(2.75,0.9))
plt.plot(dates[:16], perf[:16], color="black", linewidth=1.5, zorder=2)
plt.scatter(dates[9], perf[9], c="white", s=30, edgecolors="black", linestyle="--", lw=0.75, zorder=5, alpha=1)
plt.axhline(0.5, color="black", linestyle="--", lw=1, alpha=0.5, zorder=0)
plt.xticks(np.arange(0,16,5))
plt.yticks([0.4,0.6,0.8,1.0])
plt.ylim(0.25,1.0)
plt.xlim(1, 15.5)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig3a.pdf")
###Output
_____no_output_____
###Markdown
Figure 3b
###Code
# Collect data from manually determined training period
new_dat = psy.trim(outData, END=7000)
# Compute
weights = {'bias' : 0, 'cL' : 1, 'cR' : 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : None
}
optList = ['sigma']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat}
# Save interim result
np.savez_compressed(SPATH+'fig3b_data.npz', dat=dat)
dat = np.load(SPATH+'fig3b_data.npz', allow_pickle=True)['dat'].item()
fig = psy.plot_weights(dat['wMode'], dat['weights'], days=dat['new_dat']["dayLength"],
errorbar=dat['W_std'], figsize=(2.75,1.3))
plt.axvline(np.cumsum(dat['new_dat']['dayLength'])[8], c="black", lw=1.5, ls="--", zorder=15)
plt.ylim(-5.3,5.3)
plt.xlim(0, 6950)
plt.yticks([-4,-2,0,2,4])
plt.xlabel(None); plt.ylabel(None)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig3b.pdf")
###Output
_____no_output_____
###Markdown
Figure 3c_2 min_
###Code
from datetime import date, datetime, timedelta
all_dates = []
all_perf = []
for s in np.unique(MOUSE_DF['subject']):
outData = getMouse(s, 5)
easy_trials = (outData['contrastLeft'] > 0.45).astype(int) | (outData['contrastRight'] > 0.45).astype(int)
perf = []
for d in np.unique(outData['date']):
date_trials = (outData['date'] == d).astype(int)
inds = (date_trials * easy_trials).astype(bool)
perf += [np.average(outData['correct'][inds])]
dates = np.unique([datetime.strptime(i, "%Y-%m-%d") for i in outData['date']])
dates = np.arange(len(dates))
all_dates += [dates]
all_perf += [perf]
x = [[] for i in range(25)]
for dates, perf in zip(all_dates, all_perf):
for ind, d in enumerate(dates):
if d < 25:
x[d] += [perf[ind]]
perf_avg = [np.average(i) for i in x]
fig = plt.figure(figsize=(2.75,0.9))
for dates, perf in zip(all_dates[::8], all_perf[::8]):
plt.plot(dates[:25], perf[:25], color="black", linewidth=1, alpha=0.2, zorder=1)
plt.plot(perf_avg[:25], color="black", lw=2.5, alpha=0.8, zorder=6)
plt.axhline(0.5, color="black", linestyle="--", lw=1, alpha=0.5, zorder=0)
plt.xticks(np.arange(0,16,5))
plt.yticks([0.4,0.6,0.8,1.0])
plt.ylim(0.25,1.0)
plt.xlim(1, 15.5)
plt.gca().set_yticklabels([])
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig3c.pdf")
###Output
_____no_output_____
###Markdown
Figure 3d_20 min_
###Code
for i, s in enumerate(MOUSE_DF['subject']):
print("\rProcessing " + str(i+1) + " of " + str(len(MOUSE_DF['subject'].unique())), end="")
outData = getMouse(s, 5)
# Collect data from manually determined training period
new_dat = psy.trim(outData, END=7000)
# Compute
weights = {'bias' : 0, 'cL' : 1, 'cR' : 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : None
}
optList = ['sigma']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList, hess_calc=None)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'hess_info' : hess_info,
'weights' : weights, 'new_dat' : new_dat}
# Save interim result
np.savez_compressed(SPATH+'fig3c_'+s+'_data.npz', dat=dat)
plt.figure(figsize=(2.75,1.3))
w0 = []
w1 = []
for i, s in enumerate(np.unique(MOUSE_DF['subject'])):
dat = np.load(SPATH+'fig3c_'+s+'_data.npz', allow_pickle=True)['dat'].item()
w0 += [np.hstack((dat['wMode'][0][:7000], [np.nan]*(7000 - len(dat['wMode'][0][:7000]))))]
w1 += [np.hstack((dat['wMode'][1][:7000], [np.nan]*(7000 - len(dat['wMode'][1][:7000]))))]
if not i%8:
plt.plot(dat['wMode'][0], color=colors['cL'], lw=1, alpha=0.2, zorder=4)
plt.plot(dat['wMode'][1], color=colors['cR'], lw=1, alpha=0.2, zorder=2)
plt.plot(np.nanmean(w0, axis=0), color=colors['cL'], lw=2.5, alpha=0.8, zorder=6)
plt.plot(np.nanmean(w1, axis=0), color=colors['cR'], lw=2.5, alpha=0.8, zorder=6)
plt.axhline(0, linestyle='--', color="black", lw=1, alpha=0.5, zorder=0)
plt.ylim(-5.3,5.3)
plt.xlim(0, 6950)
plt.yticks([-4,-2,0,2,4])
plt.gca().set_yticklabels([])
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig3d.pdf")
###Output
_____no_output_____
###Markdown
Figure 4 | Adaptation to Bias Blocks in an Example IBL Mouse **(A)** Show performance curve of example mouse on easy trials, highlight different training periods**(B)** Show data for early bias blocks of example mouse**(C)** Show data for late bias blocks of example mouse**(D)** For early bias blocks (B), chunk the bias weight by block, plot how the weight changes from start to end of each block**(E)** Same as (D) but for late bias blocks (C)**(F)** Overlay optimal bias weight on the 2nd session shown in (C) Figure 4a
###Code
from datetime import date, datetime, timedelta
outData = getMouse("CSHL_003", 5)
easy_trials = (outData['contrastLeft'] > 0.45).astype(int) | (outData['contrastRight'] > 0.45).astype(int)
perf = []
for d in np.unique(outData['date']):
date_trials = (outData['date'] == d).astype(int)
inds = (date_trials * easy_trials).astype(bool)
perf += [np.average(outData['correct'][inds])]
dates = [datetime.strptime(i, "%Y-%m-%d") for i in outData['date']]
dates = np.arange(len(dates)) + 1
# Plotting
plt.figure(figsize=(3.5,0.9))
plt.plot(dates[:52], perf[:52], color="black", linewidth=1.5, zorder=2)
plt.axhline(0.5, linestyle='--', color="black", lw=1, alpha=0.5, zorder=1)
plt.yticks([0.4,0.6,0.8,1.0])
plt.ylim(0.25,1)
plt.xlim(1,47)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
#plt.ylabel("Performance\n(on easy trials)")
#plt.xlabel("Weeks of Training")
plt.axvspan(0,17.5, ymax=1,
edgecolor='None', alpha=0.1, facecolor="black", zorder=0)
plt.axvspan(16.5,19.5, linestyle="-", lw=2.5, ymin=0.03, ymax=0.98,
edgecolor='#E32D91', alpha=.8, facecolor="None", zorder=8)
plt.axvspan(43.5,45.5, linestyle="-", lw=2.5, ymin=0.03, ymax=0.98,
edgecolor='#9252AB', alpha=.8, facecolor="None", zorder=9)
plt.subplots_adjust(0,0,1,1)
# plt.savefig(SPATH + "Fig4a.pdf")
###Output
_____no_output_____
###Markdown
Figure 4b
###Code
# Collect data from manually determined training period
outData = getMouse("CSHL_003", 5)
_start = np.where(outData['date'] >= '2019-03-21')[0][0]
_end = np.where(outData['date'] >= '2019-03-23')[0][0]
new_dat = psy.trim(outData, START=_start, END=_end)
# Hardcode random trials where probL != 0.5 before bias blocks begin to 0.5
# (fyi, this is due to anti-biasing in the IBL early training protocol)
new_dat['probL'][:np.where(new_dat['date'] >= '2019-03-22')[0][0]] = 0.5
# Compute
weights = {'bias' : 1, 'cL' : 1, 'cR' : 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : [2**-5]*K
}
optList = ['sigma', 'sigDay']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat}
# Save interim result
np.savez_compressed(SPATH+'fig4b_data.npz', dat=dat)
BIAS_COLORS = {50 : 'None', 20 : psy.COLORS['sR'], 80 : psy.COLORS['sL']}
def addBiasBlocks(fig, pL):
plt.sca(fig.gca())
i = 0
while i < len(pL):
start = i
while i+1 < len(pL) and np.linalg.norm(pL[i] - pL[i+1]) < 0.0001:
i += 1
fc = BIAS_COLORS[int(100 * pL[start])]
plt.axvspan(start, i+1, facecolor=fc, alpha=0.2, edgecolor=None)
i += 1
return fig
dat = np.load(SPATH+'fig4b_data.npz', allow_pickle=True)['dat'].item()
fig = psy.plot_weights(dat['wMode'], dat['weights'], days=dat['new_dat']["dayLength"],
errorbar=dat['W_std'], figsize=(2.75,1.3))
fig = addBiasBlocks(fig, dat['new_dat']['probL'])
plt.xlabel(None); plt.ylabel(None)
plt.gca().set_yticks(np.arange(-6, 7,2))
plt.ylim(-5.3,5.3)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig4b.pdf")
###Output
_____no_output_____
###Markdown
Figure 4c
###Code
# Collect data from manually determined training period
outData = getMouse("CSHL_003", 5)
_start = np.where(outData['date'] >= '2019-04-30')[0][0]
_end = np.where(outData['date'] >= '2019-05-02')[0][0]
new_dat = psy.trim(outData, START=_start, END=_end)
# Compute
weights = {'bias' : 1, 'cL' : 1, 'cR' : 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : [2**-5]*K
}
optList = ['sigma', 'sigDay']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat}
# Save interim result
np.savez_compressed(SPATH+'fig4c_data.npz', dat=dat)
dat = np.load(SPATH+'fig4c_data.npz', allow_pickle=True)['dat'].item()
fig = psy.plot_weights(dat['wMode'], dat['weights'], days=dat['new_dat']["dayLength"],
errorbar=dat['W_std'], figsize=(2.75,1.3))
fig = addBiasBlocks(fig, dat['new_dat']['probL'])
plt.xlabel(None); plt.ylabel(None)
plt.gca().set_yticks(np.arange(-6, 7,2))
plt.gca().set_yticklabels([])
plt.ylim(-5.3,5.3)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig4c.pdf")
###Output
_____no_output_____
###Markdown
Figure 4d
###Code
outData = getMouse("CSHL_003", 5)
# Collect data from manually determined training period
_start = np.where(outData['date'] >= '2019-03-22')[0][0]
_end = np.where(outData['date'] >= '2019-03-26')[0][0]
new_dat = psy.trim(outData, START=_start, END=_end)
# Hardcode random trials where probL != 0.5 before bias begins to 0.5
# (fyi, this is due to anti-biasing in the IBL early training protocol)
new_dat['probL'][:np.where(new_dat['date'] >= '2019-03-22')[0][0]] = 0.5
# Compute
weights = {'bias' : 1, 'cL' : 1, 'cR' : 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : [2**-5]*K
}
optList = ['sigma', 'sigDay']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat}
# Save interim result
np.savez_compressed(SPATH+'fig4d_data.npz', dat=dat)
def bias_diff(dat_load, figsize=(1.5,1.5)):
dat = np.load(dat_load, allow_pickle=True)['dat'].item()
pL = dat['new_dat']['probL']
pL_diff = pL[1:] - pL[:-1]
inds = np.where(pL_diff)[0]
start_inds = [0] + list(inds+1)
start_inds = [i for i in start_inds if (np.isclose(pL[i], 0.2) or np.isclose(pL[i], 0.8))]
end_inds = list(inds) + [len(pL)-1]
end_inds = [i for i in end_inds if (np.isclose(pL[i], 0.2) or np.isclose(pL[i], 0.8))]
fig = plt.figure(figsize=figsize)
for s, e in zip(start_inds, end_inds):
if e-s < 20: continue
block_inds = np.arange(s, e+1)
block = dat['wMode'][0, block_inds] - dat['wMode'][0, s]
if np.isclose(pL[s], 0.2):
plt.plot(block, color=colors['cR'], alpha=0.8, zorder=2, lw=1)
else:
plt.plot(block, color=colors['cL'], alpha=0.8, zorder=4, lw=1)
plt.axhline(0, linestyle='--', color="black", lw=1, alpha=0.5, zorder=0)
plt.ylim(-5.5,5.5)
plt.xlim(0, 75)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
return fig
fig = bias_diff(SPATH+'fig4d_data.npz', figsize=(1.3,1.3));
plt.gca().set_yticks([-4,-2,0,2,4])
plt.savefig(SPATH + "Fig4d.pdf")
###Output
_____no_output_____
###Markdown
Figure 4e
###Code
outData = getMouse("CSHL_003", 5)
# Collect data from manually determined training period
_start = np.where(outData['date'] >= '2019-04-30')[0][0]
_end = np.where(outData['date'] >= '2019-05-03')[0][0]
new_dat = psy.trim(outData, START=_start, END=_end)
# Compute
weights = {'bias' : 1, 'cL' : 1, 'cR' : 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : [2**-5]*K
}
optList = ['sigma', 'sigDay']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat}
# Save interim result
np.savez_compressed(SPATH+'fig4e_data.npz', dat=dat)
fig = bias_diff(SPATH+'fig4e_data.npz', figsize=(1.3,1.3));
plt.gca().set_yticks([-4,-2,0,2,4])
plt.gca().set_yticklabels([])
plt.savefig(SPATH + "Fig4e.pdf")
###Output
_____no_output_____
###Markdown
Figure 4f
###Code
def max_bias(bias, side, wL, wR):
contrasts = np.array([-1., -0.25, -0.125, -0.0625, 0., 0.0625, 0.125, 0.25, 1.])
p=5
transformed_con = np.tanh(p*np.abs(contrasts))/np.tanh(p)
p_biasL = [.8/4.5]*4 + [1/9] + [.2/4.5]*4
p_biasR = [.2/4.5]*4 + [1/9] + [.8/4.5]*4
p_biasM = [1/9]*9
w = [wL]*4 + [0] + [wR]*4
correct = [0]*4 + [0] + [1]*4
pL = 1 - (1/(1+np.exp(-(transformed_con*w + bias))))
pCorrect = np.abs(correct - pL)
if side=="L":
pCorrect[4] = pL[4]*0.8 + (1-pL[4])*0.2
expval = np.sum(p_biasL * pCorrect)
elif side=="R":
pCorrect[4] = pL[4]*0.2 + (1-pL[4])*0.8
expval = np.sum(p_biasR * pCorrect)
elif side=="M":
pCorrect[4] = 0.5
expval = np.sum(p_biasM * pCorrect)
return -expval
from scipy.optimize import minimize
dat = np.load(SPATH+'fig4c_data.npz', allow_pickle=True)['dat'].item()
start = dat['new_dat']['dayLength'][0]
optBias = []
optReward = []
for i in np.arange(start, dat['wMode'].shape[1]):
if dat['new_dat']['probL'][i] < 0.21: side = 'R'
elif dat['new_dat']['probL'][i] > 0.79: side = 'L'
else: side = 'M'
res = minimize(max_bias,[0], args=(side, dat['wMode'][1,i], dat['wMode'][2,i]))
optBias += [res.x]
optReward += [-res.fun]
print("Avg. Reward:", np.mean(optReward))
fig = psy.plot_weights(dat['wMode'], dat['weights'], days=dat['new_dat']["dayLength"],
errorbar=dat['W_std'], figsize=(2.75,1.3))
fig = addBiasBlocks(fig, dat['new_dat']['probL'])
plt.plot(np.arange(start, dat['wMode'].shape[1]), optBias, 'k-', lw=2, zorder=10)
plt.gca().set_yticks(np.arange(-6, 7,2))
plt.gca().set_yticklabels([])
plt.gca().set_xticks([750, 1000, 1250])
plt.xlim(start, None); plt.ylim(-5.3,5.3)
plt.xlabel(None); plt.ylabel(None)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig4f.pdf")
# Actual predicted reward using actual bias weight
from scipy.optimize import minimize
optReward_pred = []
optReward_0bias = []
for i in np.arange(start, dat['wMode'].shape[1]):
if dat['new_dat']['probL'][i] < 0.21: side = 'R'
elif dat['new_dat']['probL'][i] > 0.79: side = 'L'
else: side = 'M'
optReward_pred += [-max_bias(dat['wMode'][0,i], side, dat['wMode'][1,i], dat['wMode'][2,i])]
optReward_0bias += [-max_bias(0.0, side, dat['wMode'][1,i], dat['wMode'][2,i])]
print("Predicted Avg. Reward:", np.mean(optReward_pred))
print("No Bias Avg. Reward:", np.mean(optReward_0bias))
print("Empirical Avg. Reward:", np.mean(dat['new_dat']['correct'][start:]))
###Output
_____no_output_____
###Markdown
Figure 5 | Visualization of Learning in an Example Akrami Rat **(A)** Akrami rat task schematic (Illustrator only) **(B)** Psychometric weights for an example rat (`W080`) **(C)** Compare model predictions to empirical choice behavior under various trial conditions, for a 500 trial window starting at trial 2000 **(D)** As in (C), starting at trial 6500 **(E)** As in (C), starting at trial 11000 Figure 5b_15 min_
###Code
outData = getRat("W080")
new_dat = psy.trim(outData, START=0, END=12500)
weights = {'bias': 1, 's_a': 1, 's_b': 1, 'h': 1, 'c': 1, "s_avg": 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : [2**-4]*K,
}
optList = ['sigma', 'sigDay']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat}
# Save interim result
np.savez_compressed(SPATH+'fig5b_data.npz', dat=dat)
dat = np.load(SPATH+'fig5b_data.npz', allow_pickle=True)['dat'].item()
fig = psy.plot_weights(dat['wMode'], dat['weights'], days=dat['new_dat']["dayLength"],
errorbar=dat['W_std'], figsize=(4.75,1.4))
selected_days = [[2000,2500], [6500,7000], [11000,11500]]
for d in selected_days:
plt.plot(d, [-1.3]*2, lw=2, color="k")
plt.xlabel(None); plt.ylabel(None)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig5b.pdf")
###Output
_____no_output_____
###Markdown
Fig 5c-e_2.5 hours_
###Code
outData = getRat("W080")
new_dat = psy.trim(outData, END=12500)
FOLDS = 10 # number of cross-validation folds
SEED = 42 # controls random divide of trials into FOLDS bins
weights = {'bias': 1, 's_a': 1, 's_b': 1, 'h': 1, 'c': 1, "s_avg": 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : [2**-4]*K,
}
optList = ['sigma', 'sigDay']
_, xval_pL = psy.crossValidate(new_dat, hyper_guess, weights, optList, F=FOLDS, seed=SEED)
np.savez_compressed(SPATH+'fig5c_data.npz', new_dat=new_dat, xval_pL=xval_pL)
from datetime import date, datetime, timedelta
from scipy.stats import sem
outData = np.load(SPATH+'fig5c_data.npz', allow_pickle=True)['new_dat'].item()
xval_pL = np.load(SPATH+'fig5c_data.npz', allow_pickle=True)['xval_pL']
outData['xval_pR'] = 1 - xval_pL
all_hists = []
all_ys = []
all_pRs = []
selected_days = [[2000,2500], [6500,7000], [11000,11500]]
for d in selected_days:
new_dat = psy.trim(outData, START=d[0], END=d[1])
hists = []
ys = []
pRs = []
for h in [-1,1]:
for c in [-1,1]:
for a in [-1,1]:
ind_h = (new_dat['inputs']['h'][:,0] == h)
ind_c = (new_dat['inputs']['c'][:,0] == c)
ind_a = (np.sign(new_dat['s_a'] - new_dat['s_b']) == a)
inds = ind_h * ind_c * ind_a
hists += [[h,c,a]]
ys += [new_dat['y'][inds]]
pRs += [new_dat['xval_pR'][inds]]
all_hists += [hists]
all_ys += [ys]
all_pRs += [pRs]
import matplotlib as mpl
def colorFader(c1,c2,mix=0):
c1=np.array(mpl.colors.to_rgb(c1))
c2=np.array(mpl.colors.to_rgb(c2))
w =np.array(mpl.colors.to_rgb("white"))
if mix <= 0.5:
return mpl.colors.to_hex((1-mix*2)*c1 + mix*2*w)
else:
return mpl.colors.to_hex((1-(mix-0.5)*2)*w + (mix-0.5)*2*c2)
def cF(mix):
return colorFader(colors['s2'],colors['s1'],mix)
diff = 0.19
rad = 0.45
cm = plt.get_cmap('RdBu_r')
for d in range(len(selected_days)):
plt.figure(figsize=(0.75,1.5))
avg = [np.average(i) for i in all_ys[d]]
avg_pR = [np.average(i) for i in all_pRs[d]]
std = [sem(i) for i in all_ys[d]]
std_pR = [sem(i) for i in all_pRs[d]]
for i in range(len(avg)):
h = all_hists[d][i][0]
c = all_hists[d][i][1]
a = all_hists[d][i][2]
x = a/2
y = h + c/2
plt.text(x-diff, y+diff, int(np.round(avg_pR[i]*100)),
ha="center", va="center", fontsize=10, zorder=i+1)
t1 = plt.Polygon([[x-rad,y-rad],[x-rad,y+rad],[x+rad,y+rad]],
facecolor=cF(avg_pR[i]), edgecolor="k", lw=0, zorder=i)
plt.gca().add_patch(t1)
plt.text(x+diff, y-1.5*diff, int(np.round(avg[i]*100)),
ha="center", va="center", fontsize=10, zorder = i+11)
t2 = plt.Polygon([[x-rad,y-rad],[x+rad,y-rad],[x+rad,y+rad]],
facecolor=cF(avg[i]), edgecolor="k", lw=0.5, zorder = i+10)
plt.gca().add_patch(t2)
plt.ylim(-2,2)
plt.xlim(-1,1)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['left'].set_visible(False)
plt.gca().spines['bottom'].set_visible(False)
plt.gca().set_xticks([])
plt.gca().set_yticks([])
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig5cde_" + str(d) + ".pdf")
# Make colorbar
n=500
fig, ax = plt.subplots(figsize=(.2, 1.5))
for x in range(n+1):
ax.axhline(1 - x/n, color=cF(x/n), linewidth=4)
plt.gca().set_yticks([0.005,.25,.5,.75,0.995])
plt.gca().set_xticks([])
plt.gca().set_xticklabels([])
plt.gca().set_yticklabels([])
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['left'].set_visible(False)
plt.gca().spines['bottom'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig5_colorbar.pdf")
###Output
_____no_output_____
###Markdown
Figure 6 | Population Psychometric Weights from Akrami Rats **(A)** Show overlay of population weights (including the average weights) for Tones A + B **(B)** For the Bias weight **(C)** For the Previous Tones weight **(D)** For the Previous (Correct) Answer weight **(E)** For the Previous Choice weight **(F)** Show average hyperparameter recovery ($\sigma$ and $\sigma_\text{day}$) for each weight ($\pm1$ SD) Figure 6a_6 hours_
###Code
all_rats = RAT_DF["subject_id"].unique()
for i, subject in enumerate(all_rats):
print("\rProcessing " + str(i+1) + " of " + str(len(all_rats)), end="")
outData = getRat(subject)
# Collect data from manually determined training period
new_dat = psy.trim(outData, END=20000)
# Compute
weights = {'bias': 1, 's_a': 1, 's_b': 1, 'h': 1, 'c': 1, "s_avg": 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : [2**-4]*K,
}
optList = ['sigma', 'sigDay']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList, hess_calc=None)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'hess_info' : hess_info,
'weights' : weights, 'new_dat' : new_dat}
# Save interim result
np.savez_compressed(SPATH+'fig6a_'+subject+'_data.npz', dat=dat)
all_labels = []
all_w = []
for subject in RAT_DF["subject_id"].unique():
rat = np.load(SPATH+'fig6a_'+subject+'_data.npz', allow_pickle=True)['dat'].item()
labels = []
for j in sorted(rat['weights'].keys()):
labels += [j]*rat['weights'][j]
all_labels += [np.array(labels)]
all_w += [rat['wMode']]
def plot_all(all_labels, all_w, Weights, figsize):
fig = plt.figure(figsize=figsize)
Weights = [Weights] if type(Weights) is str else Weights
avg_len=20000
for i, W in enumerate(Weights):
avg = []
for i in np.arange(0,len(all_w),1):
bias_ind = np.where(all_labels[i] == W)[0][-1]
bias_w = all_w[i][bias_ind]
avg += [list(bias_w[:avg_len]) + [np.nan]*(avg_len - len(bias_w[:avg_len]))]
plt.plot(bias_w, color=colors[W], alpha=0.2, lw=1, zorder=2+i)
plt.plot(np.nanmean(avg, axis=0), color=colors[W], alpha=0.8, lw=2.5, zorder=5+i)
plt.axhline(0, color="black", linestyle="--", lw=1, alpha=0.5, zorder=1)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.xlim(0, 19000)
plt.ylim(-2.5, 2.5)
return fig
plot_all(all_labels, all_w, ["s_a", "s_b"], (1.85, 0.8))
plt.subplots_adjust(0,0,1,1)
plt.gca().set_yticks([-2,0,2])
plt.gca().set_xticklabels([])
plt.savefig(SPATH + "Fig6a.pdf")
###Output
_____no_output_____
###Markdown
Figure 6b
###Code
plot_all(all_labels, all_w, ["bias"], (1.85, 0.8))
plt.gca().set_yticks([-2,0,2])
plt.gca().set_xticklabels([])
plt.gca().set_yticklabels([])
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig6b.pdf")
###Output
_____no_output_____
###Markdown
Figure 6c
###Code
plot_all(all_labels, all_w, ["s_avg"], (1.85, 0.8))
plt.gca().set_yticks([-2,0,2])
plt.gca().set_yticklabels([])
plt.gca().set_xticklabels([])
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig6c.pdf")
###Output
_____no_output_____
###Markdown
Figure 6d
###Code
plot_all(all_labels, all_w, ["h"], (1.85, 0.8))
plt.ylim(-0.25, 2.25)
# plt.gca().set_yticklabels([])
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig6d.pdf")
###Output
_____no_output_____
###Markdown
Figure 6e
###Code
plot_all(all_labels, all_w, ["c"], (1.85, 0.8))
plt.ylim(-0.25, 2.25)
plt.gca().set_yticklabels([])
plt.gca().set_xticklabels([])
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig6e.pdf")
###Output
_____no_output_____
###Markdown
Figure 6f
###Code
all_sigma = []
all_sigDay = []
for subject in RAT_DF["subject_id"].unique():
rat = np.load(SPATH+'fig6a_'+subject+'_data.npz', allow_pickle=True)['dat'].item()
labels = []
for j in sorted(rat['weights'].keys()):
labels += [j]*rat['weights'][j]
all_sigma += [rat['hyp']['sigma']]
all_sigDay += [rat['hyp']['sigDay']]
all_sigma = np.array(all_sigma)
all_sigDay = np.array(all_sigDay)
pos_map = {0: 2, 1: 5, 2: 4, 3: 0, 4: 3, 5: 1}
plt.figure(figsize=(1.55, 0.8))
for i, j in enumerate(labels):
plt.errorbar([pos_map[i]], np.average(np.log2(all_sigma[:,i])),
yerr=np.std(np.log2(all_sigma[:,i])),
color=colors[j], marker="o", ms=4, elinewidth=1.5)
plt.errorbar([pos_map[i]+8], np.average(np.log2(all_sigDay[:,i])),
yerr=np.std(np.log2(all_sigDay[:,i])),
color=colors[j], marker="s", ms=4, elinewidth=1.5)
plt.ylim(-12.5,-2.6)
# plt.xlim(-1,1)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.gca().set_xticks([])
plt.gca().set_yticks([-12, -10, -8, -6, -4])
plt.gca().set_yticklabels([-12, None, -8, None, -4])
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig6f.pdf", transparent=True)
###Output
_____no_output_____
###Markdown
Figure 7 | Population Psychometric Weights from Akrami Human Subjects **(A)** Akrami human subject task schematic (Illustrator only) **(B)** Psychometric weights for an example human subject (`subject_id=6`) **(C)** Show psychometric weights for all human subjects together Figure 7b
###Code
new_dat = getHuman(6)
# Compute
weights = {'bias': 1, 's_a': 1, 's_b': 1, 's_avg': 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : None
}
optList = ['sigma']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat}
# Save interim result
np.savez_compressed(SPATH+'fig7b_data.npz', dat=dat)
dat = np.load(SPATH+'fig7b_data.npz', allow_pickle=True)['dat'].item()
fig = psy.plot_weights(dat['wMode'], dat['weights'], errorbar=dat['W_std'], figsize=(4.75,1))
plt.xlabel(None); plt.ylabel(None)
plt.gca().set_xticks([0,500,1000,1500,2000])
plt.gca().set_yticks(np.arange(-2, 3,2))
plt.xlim(0, 1900); plt.ylim(-3.4, 3.4)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig7b.pdf")
###Output
_____no_output_____
###Markdown
Figure 7c_3 min_
###Code
all_dat = []
all_subjects = HUMAN_DF["subject_id"].unique()
for i, subject in enumerate(all_subjects):
print("\rProcessing " + str(i+1) + " of " + str(len(all_subjects)), end="")
new_dat = getHuman(subject)
# Compute
weights = {'bias': 1, 's_a': 1, 's_b': 1, 's_avg': 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : None
}
optList = ['sigma']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat}
all_dat += [dat]
# Save interim result
np.savez_compressed(SPATH+'fig7c_data.npz', all_dat=all_dat)
all_dat = np.load(SPATH+'fig7c_data.npz', allow_pickle=True)['all_dat']
plt.figure(figsize=(4.75,1))
for dat in all_dat:
weights = dat['weights']
wMode = dat['wMode']
labels = []
for j in sorted(weights.keys()):
labels += [j]*weights[j]
for i, w in enumerate(labels):
plt.plot(wMode[i], lw=1.5, alpha=0.5, linestyle='-', c=colors[w], zorder=zorder[w])
plt.axhline(0, color="black", linestyle="--", lw=1, alpha=0.5, zorder=0)
plt.gca().set_xticks([0,500,1000,1500,2000])
plt.gca().set_yticks(np.arange(-2, 3,2))
plt.xlim(0, 1900); plt.ylim(-3.4, 3.4)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig7c.pdf")
###Output
_____no_output_____
###Markdown
Figure 8 | History Regressors Improve Model Accuracy for an Example Akrami Rat **(A)** Show plot for model w/o using history weights: predicted accuracy on x-axis, and empirical accuracy on y-axis **(B)** Show histogram of predicted accuracy in trials from (A) **(C)** Show plot for model with history weights: predicted accuracy on x-axis, and empirical accuracy on y-axis **(D)** Show histogram of predicted accuracy in trials from (C) Figure 8a_30 min_
###Code
outData = getRat("W080")
new_dat = psy.trim(outData, END=12500)
FOLDS = 10 # number of cross-validation folds
SEED = 42 # controls random divide of trials into FOLDS bins
weights = {'bias': 1, 's_a': 1, 's_b': 1, 'h': 0, 'c': 0, "s_avg": 0}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : [2**-4]*K,
}
optList = ['sigma', 'sigDay']
_, xval_pL = psy.crossValidate(new_dat, hyper_guess, weights, optList, F=FOLDS, seed=SEED)
np.savez_compressed(SPATH+'fig8a_data.npz', new_dat=new_dat, xval_pL=xval_pL)
from scipy.stats import sem
xval_pL = np.load(SPATH+'fig8a_data.npz', allow_pickle=True)['xval_pL']
new_dat = np.load(SPATH+'fig8a_data.npz', allow_pickle=True)['new_dat'].item()
step = 0.02
edges = np.arange(0.5,1.0+step,step)
est_correct = np.abs(xval_pL - 0.5) + 0.5
match = ((-np.sign(xval_pL - 0.5) + 1)/2).astype(int) == new_dat["y"].astype(int)
print("Average Empirical Accuracy:", np.round(np.average(match), 3))
print("Average Predicted Accuracy:", np.round(np.average(est_correct), 3))
choices = []
centers = []
for i in edges[:-1]:
mask = (est_correct >= i) & (est_correct < i+step)
choices += [match[mask]]
centers += [np.average(est_correct[mask])];
avg_correct = np.array([np.average(i) if len(i) > 40 else np.nan for i in choices])
sem_correct = np.array([sem(i) if len(i) > 40 else np.nan for i in choices])
plt.figure(figsize=(2,1.5))
plt.errorbar(centers, avg_correct, yerr=1.96*sem_correct,
alpha=1, color=colors['bias'], linestyle="None", marker="o", markersize=2)
plt.plot(np.average(est_correct), np.average(match), marker="*", markersize=10, alpha=0.75,
markeredgecolor="None", markerfacecolor="black", zorder=10)
plt.plot([0.4,1.1], [0.4,1.1], color="black", linestyle="--", lw=1, alpha=0.5, zorder=0)
plt.xlim(0.5, 1)
plt.ylim(0.5, 1)
plt.xticks([0.5,0.6,0.7,0.8,0.9,1.0])
plt.gca().set_xticklabels([])
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig8a.pdf")
###Output
_____no_output_____
###Markdown
Figure 8b
###Code
plt.figure(figsize=(2,1.5))
plt.hist(est_correct, bins=edges, alpha=1, lw=0.5, color=colors['bias'], edgecolor="black")
plt.xlim(0.5, 1)
plt.ylim(0, 1700)
plt.xticks([0.5,0.6,0.7,0.8,0.9,1.0])
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig8b.pdf")
###Output
_____no_output_____
###Markdown
Figure 8c These subfigures reuse data from Figure 5C-E; please run the cell above that creates the file `fig5c_data.npz` to produce Figures 8C+D.
###Code
from scipy.stats import sem
xval_pL = np.load(SPATH+'fig5c_data.npz', allow_pickle=True)['xval_pL']
new_dat = np.load(SPATH+'fig5c_data.npz', allow_pickle=True)['new_dat'].item()
step = 0.02
edges = np.arange(0.5,1.0+step,step)
est_correct = np.abs(xval_pL - 0.5) + 0.5
match = ((-np.sign(xval_pL - 0.5) + 1)/2).astype(int) == new_dat["y"].astype(int)
print("Average Empirical Accuracy:", np.round(np.average(match), 3))
print("Average Predicted Accuracy:", np.round(np.average(est_correct), 3))
choices = []
centers = []
for i in edges[:-1]:
mask = (est_correct >= i) & (est_correct < i+step)
choices += [match[mask]]
centers += [np.average(est_correct[mask])];
avg_correct = np.array([np.average(i) if len(i) > 40 else np.nan for i in choices])
sem_correct = np.array([sem(i) if len(i) > 40 else np.nan for i in choices])
plt.figure(figsize=(2,1.5))
plt.errorbar(centers, avg_correct, yerr=1.96*sem_correct,
alpha=1, color=colors['h'], linestyle="None", marker="o", markersize=2)
plt.plot(np.average(est_correct), np.average(match), marker="*", markersize=10, alpha=0.75,
markeredgecolor="None", markerfacecolor="black", zorder=10)
plt.plot([0.4,1.1], [0.4,1.1], color="black", linestyle="--", lw=1, alpha=0.5, zorder=0)
plt.xlim(0.5, 1)
plt.ylim(0.5, 1)
plt.xticks([0.5,0.6,0.7,0.8,0.9,1.0])
plt.gca().set_xticklabels([])
plt.gca().set_yticklabels([])
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig8c.pdf")
###Output
_____no_output_____
###Markdown
Figure 8d
###Code
plt.figure(figsize=(2,1.5))
plt.hist(est_correct, bins=edges, alpha=1, lw=0.5, color=colors['h'], edgecolor="black")
plt.xlim(0.5, 1)
plt.ylim(0, 1700)
plt.xticks([0.5,0.6,0.7,0.8,0.9,1.0])
plt.gca().set_yticklabels([])
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "Fig8d.pdf")
###Output
_____no_output_____
###Markdown
--- Supplementary Figures Figure S1 | Compute time and model accuracy **(A)** Show compute time **(B)** Show weight recovery accuracy Figure S1a_5 hours_
###Code
from psytrack.runSim import generateSim, recoverSim
num_weights = [2,4,6]
num_trials = 1000*np.array([1,2,4,8,16])
num_simulations = 20
results = []
for N in num_trials:
for K in num_weights:
for i in range(num_simulations):
print("K =", K, " N =", N, " iter = ", i)
# Simulate data
seed = N+100*K+i
np.random.seed(seed)
hyper = {'sigma': 2**np.random.uniform(-7.5, -3.5, size=K), 'sigInit': 1.0}
dat = generateSim(K=K, N=N, hyper=hyper, boundary=5.0, iterations=1, seed=seed)
# Recover data
try:
rec = recoverSim(dat, hess_calc=None)
except:
print("ERROR!!!")
results += [[N, K, i, np.nan, np.nan]]
continue
# Save all data, mainly duration and mean squared error in weight recovery
mse = np.average((rec['wMode'] - rec['input']['W'].T)**2)
print(" " + str(rec['duration'].seconds) +"s mse =", np.round(mse, 4))
results += [[N, K, i, rec['duration'], mse]]
# Update saved record of all info on each iteration
np.savez(SPATH + "FigS1_dat.npz", results=results)
res = np.load(SPATH + "FigS1_dat.npz", allow_pickle=True)['results']
plt.figure(figsize=(2.5,2.5))
COLORS = [colors['bias'],colors['s1'],colors['s2'],]
adjust = [-0.3, 0, 0.3]
for i, K in enumerate(num_weights):
all_duration = [i[3] for i in res if i[1]==K]
all_duration = np.array([i.total_seconds()/60
if i is not None else np.nan
for i in all_duration]).reshape(-1,num_simulations)
plt.errorbar(num_trials/1000 + adjust[i], np.nanmean(all_duration, axis=1),
yerr=np.nanstd(all_duration, axis=1),
color=COLORS[i], marker="o", markersize=3, lw=1)
plt.xlim(0.25, 16.5)
plt.ylim(0, 8.2)
plt.xticks([1,2,4,8,16])
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS1a.pdf")
###Output
_____no_output_____
###Markdown
Figure S1b
###Code
res = np.load(SPATH + "FigS1_dat.npz", allow_pickle=True)['results']
plt.figure(figsize=(2.5,2.5))
COLORS = [colors['bias'],colors['s1'],colors['s2']]
adjust = [-0.3, 0, 0.3]
for i, K in enumerate(num_weights):
all_mse = [i[4] for i in res if i[1]==K]
all_mse = np.array([i if i is not None else np.nan
for i in all_mse]).reshape(-1,num_simulations)
plt.errorbar(num_trials/1000 + adjust[i], np.nanmean(all_mse, axis=1),
yerr=np.nanstd(all_mse, axis=1),
color=COLORS[i], linestyle="None", marker="o", markersize=3, lw=1)
plt.xlim(0.25, 16.5); plt.ylim(0, 0.152)
plt.xticks([1,2,4,8,16]); plt.yticks([0,0.05,0.1,0.15])
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS1b.pdf")
###Output
_____no_output_____
###Markdown
Figure S2 | Recovering sudden changes in behavior with smooth weight trajectories **(A)** Same as (2C) but without including $\sigma_\text{day}$ in the recovery model **(B)** Same as (2D) but without including $\sigma_\text{day}$ in the recovery model Figure S2a
###Code
# Simulate
seed = 102 # paper uses 102
num_weights = 3
num_trials = 5000
hyper = {'sigma' : 2**np.array([-4.5, -5.0,-16.0]),
'sigInit' : 2**np.array([ 0.0, 0.0, 0.0]),
'sigDay' : 2**np.array([ 0.5,-16.0, 1.0])
}
days = [500]*9
# Compute
gen = psy.generateSim(K=num_weights, N=num_trials, hyper=hyper, days=days,
boundary=10.0, iterations=1, seed=seed, savePath=None)
# Recovery
gen['dayLength'] = None
rec = psy.recoverSim(gen)
# Save interim result
np.savez_compressed(SPATH+'figS2_data.npz', rec=rec, gen=gen)
# Reload data
rec = np.load(SPATH+'figS2_data.npz', allow_pickle=True)['rec'].item()
gen = np.load(SPATH+'figS2_data.npz', allow_pickle=True)['gen'].item()
# Plotting
sim_colors = [colors['bias'], colors['s1'], colors['s2']]
fig = plt.figure(figsize=(3.75,1.4))
for i, c in enumerate(sim_colors):
plt.plot(gen['W'][:,i], c=c, lw=0.5, zorder=5-i)
plt.plot(rec['wMode'][i], c=c, lw=1, linestyle='--', alpha=0.5, zorder=5-i)
plt.fill_between(np.arange(num_trials),
rec['wMode'][i] - 2 * rec['hess_info']['W_std'][i],
rec['wMode'][i] + 2 * rec['hess_info']['W_std'][i],
facecolor=c, alpha=0.2, zorder=5-i)
for i in np.cumsum(days):
plt.axvline(i, color="black", lw=0.5, alpha=0.5, zorder=0)
plt.axhline(0, color="black", linestyle="--", lw=0.5, alpha=0.5, zorder=0)
plt.xticks(1000*np.arange(0,6))
plt.gca().set_xticklabels([0,1000,2000,3000,4000,5000])
plt.yticks(np.arange(-4,5,2))
plt.xlim(0,5000); plt.ylim(-4.3,4.3)
# plt.xlabel("Trials"); plt.ylabel("Weights")
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS2a.pdf")
###Output
_____no_output_____
###Markdown
Figure S2b
###Code
# Reload data
rec = np.load(SPATH+'figS2_data.npz', allow_pickle=True)['rec'].item()
# Plotting
plt.figure(figsize=(1.4,1.4))
true_sigma = np.log2(rec['input']['sigma'])
avg_sigma = np.log2(rec['hyp']['sigma'])
err_sigma = rec['hess_info']['hyp_std'][:3]
for i, c in enumerate(sim_colors):
plt.plot([i-0.3, i+0.3], [true_sigma[i]]*2, color="black", linestyle="-", lw=1.2, zorder=0)
plt.errorbar([i], avg_sigma[i], yerr=1.96*err_sigma[i], c=c, lw=1, marker='o', markersize=5)
plt.axvspan(1.6,2.4, facecolor="black", edgecolor="none", alpha=0.1)
plt.xticks(np.arange(3))
# plt.yticks([-8,-6,-4,-2,0,2])
plt.gca().set_xticklabels([r"$\sigma_1$",
r"$\sigma_2$",
r"$\sigma_3$"])
plt.xlim(-0.5,2.5); plt.ylim(-7.5,-2.5)
# plt.ylabel(r"$\log_2(\sigma)$")
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS2b.pdf")
###Output
_____no_output_____
###Markdown
Figure S3 | Adding weights to early training sessions in IBL mice **(A)** Refit model from Figure 3b, with history regressor weights **(B)** Refit model from Figure 3b, with bias weight Figure S3a
###Code
# Collect data from manually determined training period
outData = getMouse('CSHL_003', 5)
prev_choice = np.hstack(([0], outData['y'][:-1]*2 - 3)).reshape(-1,1)
prev_answer = np.hstack(([0], outData['answer'][:-1]*2 - 3)).reshape(-1,1)
outData['inputs']['c'] = prev_choice
outData['inputs']['h'] = prev_answer
new_dat = psy.trim(outData, END=7000)
# Compute
weights = {'bias' : 0, 'cL' : 1, 'cR' : 1, 'h' : 1, 'c' : 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : None
}
optList = ['sigma']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat}
# Save interim result
np.savez_compressed(SPATH+'figS3a_data.npz', dat=dat)
dat = np.load(SPATH+'figS3a_data.npz', allow_pickle=True)['dat'].item()
fig = psy.plot_weights(dat['wMode'], dat['weights'], days=dat['new_dat']["dayLength"],
errorbar=dat['W_std'], figsize=(2.75,1.3))
plt.axvline(np.cumsum(dat['new_dat']['dayLength'])[8], c="black", lw=1.5, ls="--", zorder=15)
plt.ylim(-5.3,5.3)
plt.xlim(0, 6950)
plt.yticks([-4,-2,0,2,4])
plt.xlabel(None); plt.ylabel(None)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS3a.pdf")
###Output
_____no_output_____
###Markdown
Figure S3b
###Code
# Collect data from manually determined training period
outData = getMouse('CSHL_003', 5)
new_dat = psy.trim(outData, END=7000)
# Compute
weights = {'bias' : 1, 'cL' : 1, 'cR' : 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : None
}
optList = ['sigma']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat}
# Save interim result
np.savez_compressed(SPATH+'figS3b_data.npz', dat=dat)
dat = np.load(SPATH+'figS3b_data.npz', allow_pickle=True)['dat'].item()
fig = psy.plot_weights(dat['wMode'], dat['weights'], days=dat['new_dat']["dayLength"],
errorbar=dat['W_std'], figsize=(2.75,1.3))
plt.axvline(np.cumsum(dat['new_dat']['dayLength'])[8], c="black", lw=1.5, ls="--", zorder=15)
plt.ylim(-5.3,5.3)
plt.xlim(0, 6950)
plt.yticks([-4,-2,0,2,4])
plt.xlabel(None); plt.ylabel(None)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS3b.pdf")
###Output
_____no_output_____
###Markdown
Figure S4 | The impact of the $\tanh$ transformation of IBL contrasts on model weights **(A)** $\tanh$ transformation on IBL contrasts **(B)** Refit model from Figure 3b, without $\tanh$ transformation Figure S4a
###Code
contrasts = [-1, -0.5, -0.25, -0.125, -0.0625, 0, 0.0625, 0.125, 0.25, 0.5, 1.0]
def tanh_transform(c, p):
return np.tanh(p*np.array(c))/np.tanh(p)
COLORS = [colors['s_avg'], colors['c'], colors['h']]
plt.figure(figsize=(2.25, 2.25))
plt.plot(contrasts, contrasts, "ko-", markersize=3, lw=1, label="Original")
for i, j in enumerate([1,3,5]):
plt.plot(contrasts, tanh_transform(contrasts, j),
"o-", markersize=3, lw=1, color=COLORS[i], label=r"$p = $" +str(j))
plt.axhline(0, color="black", linestyle="--", lw=0.5, zorder=0)#, alpha=0.5)
plt.axvline(0, color="black", linestyle="--", lw=0.5, zorder=0)#, alpha=0.5)
plt.legend(fontsize=10)
# plt.xlabel("Original Contrasts"); plt.ylabel("Transformed Contrasts")
plt.xlim(-1.05,1.05); plt.ylim(-1.05,1.05)
plt.xticks(contrasts, va="top", ha="center")
plt.yticks(contrasts, rotation=90, va="center", ha="right", ma="center")
plt.gca().set_xticklabels(["100%\nLeft",None,None,None,None,0,None,None,None,None,"100%\nRight"])
plt.gca().set_yticklabels(["100%\nLeft",None,None,None,None,0,None,None,None,None,"100%\nRight"])
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS4a.pdf")
###Output
_____no_output_____
###Markdown
Figure S4b
###Code
# Collect data from manually determined training period
outData = getMouse("CSHL_003", 0.00001)
_start = np.where(outData['date'] >= '2019-03-21')[0][0]
_end = np.where(outData['date'] >= '2019-03-23')[0][0]
new_dat = psy.trim(outData, START=_start, END=_end)
# Hardcode random trials where probL != 0.5 before bias blocks begin to 0.5
new_dat['probL'][:np.where(new_dat['date'] >= '2019-03-22')[0][0]] = 0.5
# Compute
weights = {'bias' : 1, 'cL' : 1, 'cR' : 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : [2**-5]*K
}
optList = ['sigma', 'sigDay']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat}
# Save interim result
np.savez_compressed(SPATH+'figS4b_data.npz', dat=dat)
dat = np.load(SPATH+'figS4b_data.npz', allow_pickle=True)['dat'].item()
fig = psy.plot_weights(dat['wMode'], dat['weights'], days=dat['new_dat']["dayLength"],
errorbar=dat['W_std'], figsize=(3,1.5))
fig = addBiasBlocks(fig, dat['new_dat']['probL'])
plt.xlabel(None); plt.ylabel(None)
plt.gca().set_yticks(np.arange(-15,16,5))
plt.ylim(-16.3,16.3)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS4b.pdf")
###Output
_____no_output_____
###Markdown
Figure S5 | Validating the model with a comparison to empirical psychometric curves **(A)** Using the mouse from (4), show the weights recovered from Session 10 **(B)** As in (A), for Session 20 (now with a bias weight and bias blocks) **(C)** As in (A), for Session 40 **(D)** Generate a psychometric curve from predictions derived from the model weights (in pink) and a curve calculated from the empirical choice behavior (in black) **(E)** As in (D), for Session 20 **(F)** As in (D), for Session 40 Figure S5a_(3 min)_
###Code
from datetime import date, datetime, timedelta
from scipy.stats import sem
FOLDS = 10 # number of cross-validation folds
SEED = 42 # controls random divide of trials into FOLDS bins
outData = getMouse("CSHL_003", 5)
outData['contrast'] = outData['contrastRight'] - outData['contrastLeft']
all_cs = []
all_ys = []
all_pRs = []
all_days = np.unique(outData['date'])
selected_days = [10,20,40]
for d in selected_days:
_start = np.where(outData['date'] >= all_days[d])[0][0]
_end = np.where(outData['date'] > all_days[d])[0][0] + 1
_end = _start + ((_end-_start)//FOLDS * FOLDS)
new_dat = psy.trim(outData, START=_start, END=_end)
if d < 15:
weights = {'bias' : 0, 'cL' : 1, 'cR' : 1}
else:
weights = {'bias' : 1, 'cL' : 1, 'cR' : 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : None
}
optList = ['sigma']
_, _, wMode, hess_info = psy.hyperOpt(new_dat.copy(), hyper_guess, weights, optList)
_, xval_pL = psy.crossValidate(new_dat.copy(), hyper_guess, weights, optList,
F=FOLDS, seed=SEED)
pR = 1 - xval_pL
cs = []
ys = []
pRs = []
for c in np.unique(new_dat['contrast']):
cs += [c]
inds = (new_dat['contrast'] == c)
ys += [new_dat['y'][inds] - 1]
pRs += [pR[inds]]
all_cs += [cs]
all_ys += [ys]
all_pRs += [pRs]
# Plot weights
fig = psy.plot_weights(wMode, weights, days=None,
errorbar=hess_info['W_std'], figsize=(1.75,1.5))
plt.xlabel(None); plt.ylabel(None)
plt.gca().set_yticks(np.arange(-4,6,2))
plt.ylim(-5.3,5.3)
if d > 15:
fig = addBiasBlocks(fig, new_dat['probL'])
plt.gca().set_yticklabels([])
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS5abc_" + str(d) + ".pdf")
# Save interim result
np.savez_compressed(SPATH+'figS5_data.npz',
all_cs=all_cs, all_ys=all_ys, all_pRs=all_pRs,
selected_days=selected_days)
###Output
_____no_output_____
###Markdown
Figure S5d-f
###Code
all_cs = np.load(SPATH+'figS5_data.npz', allow_pickle=True)['all_cs']
all_ys = np.load(SPATH+'figS5_data.npz', allow_pickle=True)['all_ys']
all_pRs = np.load(SPATH+'figS5_data.npz', allow_pickle=True)['all_pRs']
selected_days = np.load(SPATH+'figS5_data.npz', allow_pickle=True)['selected_days']
# Plotting
diff = 0.01
for d, ind in enumerate(selected_days):
plt.figure(figsize=(1.75,1.5))
avg = [np.average(i) for i in all_ys[d]]
std = [sem(i) for i in all_ys[d]]
plt.errorbar(np.array(all_cs[d])-diff, avg, yerr=std, color="black",
alpha=1.0, ls="-", lw=0.4, marker='_', markersize=3, elinewidth=1.3)
avg_pR = [np.average(i) for i in all_pRs[d]]
std_pR = [sem(i) for i in all_ys[d]]
plt.errorbar(np.array(all_cs[d])+diff, avg_pR, yerr=std_pR, color=colors['emp_perf'],
alpha=1.0, ls="none", marker='_', markersize=3, elinewidth=1.3)
plt.ylim(-0.01,1.01)
plt.xlim(-1-4*diff,1+4*diff)
plt.axvline(0, linestyle='--', color="black", lw=0.5, alpha=0.5, zorder=1)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.gca().set_yticks([0,0.5,1])
if not d:
plt.gca().set_yticklabels([0,None,1])
else:
plt.gca().set_yticklabels([])
plt.gca().set_xticks([-1,-0.5,-0.25,-.125,-.0625,0,0.0625,0.125,0.25,0.5,1])
plt.gca().set_xticklabels([])
# plt.title(d)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS5def_" + str(d) + ".pdf")
###Output
_____no_output_____
###Markdown
Figure S6 | Allowing the bias weight to reset between bias blocks with $\sigma_\text{day}$ Replica of Figure 4, except bias block boundaries are treated as session boundaries and $\sigma_\text{day}$ is fixed to a large value, allowing for a "reset" of the bias weight between bias blocks Figure S6b
###Code
# Collect data from manually determined training period
outData = getMouse("CSHL_003", 5)
_start = np.where(outData['date'] >= '2019-03-21')[0][0]
_end = np.where(outData['date'] >= '2019-03-23')[0][0]
new_dat = psy.trim(outData, START=_start, END=_end)
# Hardcode random trials where probL != 0.5 before bias blocks begin to 0.5
# (fyi, this is due to anti-biasing in the IBL early training protocol)
new_dat['probL'][:np.where(new_dat['date'] >= '2019-03-22')[0][0]] = 0.5
probL_bound = np.where(new_dat['probL'][1:] - new_dat['probL'][:-1] != 0)[0] + 1
old_dayLength = new_dat['dayLength']
new_dat['dayLength'] = np.hstack((probL_bound[:1], np.diff(probL_bound)))
# Compute
weights = {'bias' : 1, 'cL' : 1, 'cR' : 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : [2**5, 2**-5., 2**-5.]
}
optList = ['sigma']#, 'sigDay']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat, 'old_dayLength': old_dayLength}
# Save interim result
np.savez_compressed(SPATH+'figS6b_data.npz', dat=dat)
BIAS_COLORS = {50 : 'None', 20 : psy.COLORS['sR'], 80 : psy.COLORS['sL']}
def addBiasBlocks(fig, pL):
plt.sca(fig.gca())
i = 0
while i < len(pL):
start = i
while i+1 < len(pL) and np.linalg.norm(pL[i] - pL[i+1]) < 0.0001:
i += 1
fc = BIAS_COLORS[int(100 * pL[start])]
plt.axvspan(start, i+1, facecolor=fc, alpha=0.2, edgecolor=None)
i += 1
return fig
dat = np.load(SPATH+'figS6b_data.npz', allow_pickle=True)['dat'].item()
fig = psy.plot_weights(dat['wMode'], dat['weights'], days=dat["old_dayLength"],
errorbar=dat['W_std'], figsize=(2.75,1.3))
fig = addBiasBlocks(fig, dat['new_dat']['probL'])
plt.xlabel(None); plt.ylabel(None)
plt.gca().set_yticks(np.arange(-6, 7,2))
plt.ylim(-5.3,5.3)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS6b.pdf")
###Output
_____no_output_____
###Markdown
Figure S6c
###Code
# Collect data from manually determined training period
outData = getMouse("CSHL_003", 5)
_start = np.where(outData['date'] >= '2019-04-30')[0][0]
_end = np.where(outData['date'] >= '2019-05-02')[0][0]
new_dat = psy.trim(outData, START=_start, END=_end)
# Compute
probL_bound = np.where(new_dat['probL'][1:] - new_dat['probL'][:-1] != 0)[0] + 1
old_dayLength = new_dat['dayLength']
new_dat['dayLength'] = np.hstack((probL_bound[:1], np.diff(probL_bound)))
# Compute
weights = {'bias' : 1, 'cL' : 1, 'cR' : 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : [2**5, 2**-5., 2**-5.]
}
optList = ['sigma']#, 'sigDay']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat, 'old_dayLength': old_dayLength}
# Save interim result
np.savez_compressed(SPATH+'figS6c_data.npz', dat=dat)
dat = np.load(SPATH+'figS6c_data.npz', allow_pickle=True)['dat'].item()
fig = psy.plot_weights(dat['wMode'], dat['weights'], days=dat["old_dayLength"],
errorbar=dat['W_std'], figsize=(2.75,1.3))
fig = addBiasBlocks(fig, dat['new_dat']['probL'])
plt.xlabel(None); plt.ylabel(None)
plt.gca().set_yticks(np.arange(-6, 7,2))
plt.gca().set_yticklabels([])
plt.ylim(-5.3,5.3)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS6c.pdf")
###Output
_____no_output_____
###Markdown
Figure S6d
###Code
outData = getMouse("CSHL_003", 5)
# Collect data from manually determined training period
_start = np.where(outData['date'] >= '2019-03-22')[0][0]
_end = np.where(outData['date'] >= '2019-03-26')[0][0]
new_dat = psy.trim(outData, START=_start, END=_end)
# Hardcode random trials where probL != 0.5 before bias begins to 0.5
# (fyi, this is due to anti-biasing in the IBL early training protocol)
new_dat['probL'][:np.where(new_dat['date'] >= '2019-03-22')[0][0]] = 0.5
probL_bound = np.where(new_dat['probL'][1:] - new_dat['probL'][:-1] != 0)[0] + 1
old_dayLength = new_dat['dayLength']
new_dat['dayLength'] = np.hstack((probL_bound[:1], np.diff(probL_bound)))
# Compute
weights = {'bias' : 1, 'cL' : 1, 'cR' : 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : [2**5, 2**-5., 2**-5.]
}
optList = ['sigma']#, 'sigDay']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat, 'old_dayLength': old_dayLength}
# Save interim result
np.savez_compressed(SPATH+'figS6d_data.npz', dat=dat)
def bias_diff(dat_load, figsize=(1.5,1.5)):
dat = np.load(dat_load, allow_pickle=True)['dat'].item()
pL = dat['new_dat']['probL']
pL_diff = pL[1:] - pL[:-1]
inds = np.where(pL_diff)[0]
start_inds = [0] + list(inds+1)
start_inds = [i for i in start_inds if (np.isclose(pL[i], 0.2) or np.isclose(pL[i], 0.8))]
end_inds = list(inds) + [len(pL)-1]
end_inds = [i for i in end_inds if (np.isclose(pL[i], 0.2) or np.isclose(pL[i], 0.8))]
fig = plt.figure(figsize=figsize)
for s, e in zip(start_inds, end_inds):
if e-s < 20: continue
block_inds = np.arange(s, e+1)
block = dat['wMode'][0, block_inds] - dat['wMode'][0, s]
if np.isclose(pL[s], 0.2):
plt.plot(block, color=colors['cR'], alpha=0.8, zorder=2, lw=1)
else:
plt.plot(block, color=colors['cL'], alpha=0.8, zorder=4, lw=1)
plt.axhline(0, linestyle='--', color="black", lw=1, alpha=0.5, zorder=0)
plt.ylim(-5.5,5.5)
plt.xlim(0, 75)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
return fig
fig = bias_diff(SPATH+'figS6d_data.npz', figsize=(1.3,1.3));
plt.gca().set_yticks([-4,-2,0,2,4])
plt.savefig(SPATH + "FigS6d.pdf")
###Output
_____no_output_____
###Markdown
Figure S6e
###Code
outData = getMouse("CSHL_003", 5)
# Collect data from manually determined training period
_start = np.where(outData['date'] >= '2019-04-30')[0][0]
_end = np.where(outData['date'] >= '2019-05-03')[0][0]
new_dat = psy.trim(outData, START=_start, END=_end)
# Compute
probL_bound = np.where(new_dat['probL'][1:] - new_dat['probL'][:-1] != 0)[0] + 1
old_dayLength = new_dat['dayLength']
new_dat['dayLength'] = np.hstack((probL_bound[:1], np.diff(probL_bound)))
weights = {'bias' : 1, 'cL' : 1, 'cR' : 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : [2**5, 2**-5., 2**-5.]
}
optList = ['sigma']#, 'sigDay']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat, 'old_dayLength': old_dayLength}
# Save interim result
np.savez_compressed(SPATH+'figS6e_data.npz', dat=dat)
fig = bias_diff(SPATH+'figS6e_data.npz', figsize=(1.3,1.3));
plt.gca().set_yticks([-4,-2,0,2,4])
plt.gca().set_yticklabels([])
plt.savefig(SPATH + "FigS6e.pdf")
###Output
_____no_output_____
###Markdown
Figure S6f
###Code
def max_bias(bias, side, wL, wR):
contrasts = np.array([-1., -0.25, -0.125, -0.0625, 0., 0.0625, 0.125, 0.25, 1.])
p=5
transformed_con = np.tanh(p*np.abs(contrasts))/np.tanh(p)
p_biasL = [.8/4.5]*4 + [1/9] + [.2/4.5]*4
p_biasR = [.2/4.5]*4 + [1/9] + [.8/4.5]*4
p_biasM = [1/9]*9
w = [wL]*4 + [0] + [wR]*4
correct = [0]*4 + [0] + [1]*4
pL = 1 - (1/(1+np.exp(-(transformed_con*w + bias))))
pCorrect = np.abs(correct - pL)
if side=="L":
pCorrect[4] = pL[4]*0.8 + (1-pL[4])*0.2
expval = np.sum(p_biasL * pCorrect)
elif side=="R":
pCorrect[4] = pL[4]*0.2 + (1-pL[4])*0.8
expval = np.sum(p_biasR * pCorrect)
elif side=="M":
pCorrect[4] = 0.5
expval = np.sum(p_biasM * pCorrect)
return -expval
from scipy.optimize import minimize
dat = np.load(SPATH+'figS6c_data.npz', allow_pickle=True)['dat'].item()
start = dat['old_dayLength'][0]
optBias = []
optReward = []
for i in np.arange(start, dat['wMode'].shape[1]):
if dat['new_dat']['probL'][i] < 0.21: side = 'R'
elif dat['new_dat']['probL'][i] > 0.79: side = 'L'
else: side = 'M'
res = minimize(max_bias,[0], args=(side, dat['wMode'][1,i], dat['wMode'][2,i]))
optBias += [res.x]
optReward += [-res.fun]
print("Avg. Reward:", np.mean(optReward))
fig = psy.plot_weights(dat['wMode'], dat['weights'], days=dat["old_dayLength"],
errorbar=dat['W_std'], figsize=(2.75,1.3))
fig = addBiasBlocks(fig, dat['new_dat']['probL'])
plt.plot(np.arange(start, dat['wMode'].shape[1]), optBias, 'k-', lw=2, zorder=10)
plt.gca().set_yticks(np.arange(-6, 7,2))
plt.gca().set_yticklabels([])
plt.gca().set_xticks([750, 1000, 1250])
plt.xlim(start, None); plt.ylim(-5.3,5.3)
plt.xlabel(None); plt.ylabel(None)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS6f.pdf")
# Actual predicted reward using actual bias weight
from scipy.optimize import minimize
optReward_pred = []
optReward_0bias = []
for i in np.arange(start, dat['wMode'].shape[1]):
if dat['new_dat']['probL'][i] < 0.21: side = 'R'
elif dat['new_dat']['probL'][i] > 0.79: side = 'L'
else: side = 'M'
optReward_pred += [-max_bias(dat['wMode'][0,i], side, dat['wMode'][1,i], dat['wMode'][2,i])]
optReward_0bias += [-max_bias(0.0, side, dat['wMode'][1,i], dat['wMode'][2,i])]
print("Predicted Avg. Reward:", np.mean(optReward_pred))
print("No Bias Avg. Reward:", np.mean(optReward_0bias))
print("Empirical Avg. Reward:", np.mean(dat['new_dat']['correct'][start:]))
###Output
_____no_output_____
###Markdown
Figure S7 | Example Akrami rat without history regressors Replica of Figure 5, except history regressors are not included in the model. Figure S7b_15 min_
###Code
outData = getRat("W080")
new_dat = psy.trim(outData, START=0, END=12500)
weights = {'bias': 1, 's_a': 1, 's_b': 1, 'h': 0, 'c': 0, "s_avg": 0}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : [2**-4]*K,
}
optList = ['sigma', 'sigDay']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat}
# Save interim result
np.savez_compressed(SPATH+'figS7b_data.npz', dat=dat)
dat = np.load(SPATH+'figS7b_data.npz', allow_pickle=True)['dat'].item()
fig = psy.plot_weights(dat['wMode'], dat['weights'], days=dat['new_dat']["dayLength"],
errorbar=dat['W_std'], figsize=(4.75,1.4))
selected_days = [[2000,2500], [6500,7000], [11000,11500]]
for d in selected_days:
plt.plot(d, [-1.3]*2, lw=2, color="k")
plt.xlabel(None); plt.ylabel(None)
plt.ylim(-1.45,1.45)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS7b.pdf")
###Output
_____no_output_____
###Markdown
Fig S7c-e These subfigures reuse data from Figure 8a; please run the cell above that creates the file `fig8a_data.npz` to produce Figures S7c-e.
###Code
from datetime import date, datetime, timedelta
from scipy.stats import sem
outData = np.load(SPATH+'fig8a_data.npz', allow_pickle=True)['new_dat'].item()
xval_pL = np.load(SPATH+'fig8a_data.npz', allow_pickle=True)['xval_pL']
outData['xval_pR'] = 1 - xval_pL
all_hists = []
all_ys = []
all_pRs = []
selected_days = [[2000,2500], [6500,7000], [11000,11500]]
for d in selected_days:
new_dat = psy.trim(outData, START=d[0], END=d[1])
hists = []
ys = []
pRs = []
for h in [-1,1]:
for c in [-1,1]:
for a in [-1,1]:
ind_h = (new_dat['inputs']['h'][:,0] == h)
ind_c = (new_dat['inputs']['c'][:,0] == c)
ind_a = (np.sign(new_dat['s_a'] - new_dat['s_b']) == a)
inds = ind_h * ind_c * ind_a
hists += [[h,c,a]]
ys += [new_dat['y'][inds]]
pRs += [new_dat['xval_pR'][inds]]
all_hists += [hists]
all_ys += [ys]
all_pRs += [pRs]
import matplotlib as mpl
def colorFader(c1,c2,mix=0):
c1=np.array(mpl.colors.to_rgb(c1))
c2=np.array(mpl.colors.to_rgb(c2))
w =np.array(mpl.colors.to_rgb("white"))
if mix <= 0.5:
return mpl.colors.to_hex((1-mix*2)*c1 + mix*2*w)
else:
return mpl.colors.to_hex((1-(mix-0.5)*2)*w + (mix-0.5)*2*c2)
def cF(mix):
return colorFader(colors['s2'],colors['s1'],mix)
diff = 0.19
rad = 0.45
cm = plt.get_cmap('RdBu_r')
for d in range(len(selected_days)):
plt.figure(figsize=(0.75,1.5))
avg = [np.average(i) for i in all_ys[d]]
avg_pR = [np.average(i) for i in all_pRs[d]]
for i in range(len(avg)):
h = all_hists[d][i][0]
c = all_hists[d][i][1]
a = all_hists[d][i][2]
x = a/2
y = h + c/2
plt.text(x-diff, y+diff, int(np.round(avg_pR[i]*100)),
ha="center", va="center", fontsize=10, zorder=i+1)
t1 = plt.Polygon([[x-rad,y-rad],[x-rad,y+rad],[x+rad,y+rad]],
facecolor=cF(avg_pR[i]), edgecolor="k", lw=0, zorder=i)
plt.gca().add_patch(t1)
plt.text(x+diff, y-1.5*diff, int(np.round(avg[i]*100)),
ha="center", va="center", fontsize=10, zorder = i+11)
t2 = plt.Polygon([[x-rad,y-rad],[x+rad,y-rad],[x+rad,y+rad]],
facecolor=cF(avg[i]), edgecolor="k", lw=0.5, zorder = i+10)
plt.gca().add_patch(t2)
plt.ylim(-2,2)
plt.xlim(-1,1)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['left'].set_visible(False)
plt.gca().spines['bottom'].set_visible(False)
plt.gca().set_xticks([])
plt.gca().set_yticks([])
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS7cde_" + str(d) + ".pdf")
###Output
_____no_output_____
###Markdown
Figure S8 | Modeling the Akrami human subjects with the Previous Choice and Previous Answer weights **(A)** Refit model from Figure 7b, with history regressor weights **(B)** Refit models from Figure 7c, with history regressor weights (showing only those weights) Figure S8a
###Code
new_dat = getHuman(6)
prev_choice = np.hstack(([0], new_dat['y'][:-1]*2 - 1)).reshape(-1,1)
prev_answer = np.hstack(([0], new_dat['answer'][:-1]*2 - 1)).reshape(-1,1)
new_dat['inputs']['c'] = prev_choice
new_dat['inputs']['h'] = prev_answer
# Compute
weights = {'bias': 1, 's_a': 1, 's_b': 1, 's_avg': 1, 'h': 1, 'c': 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : None
}
optList = ['sigma']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat}
# Save interim result
np.savez_compressed(SPATH+'figS8a_data.npz', dat=dat)
dat = np.load(SPATH+'figS8a_data.npz', allow_pickle=True)['dat'].item()
fig = psy.plot_weights(dat['wMode'], dat['weights'], errorbar=dat['W_std'], figsize=(4.75,1.4))
plt.xlabel(None); plt.ylabel(None)
plt.gca().set_xticks([0,500,1000,1500,2000])
plt.gca().set_yticks(np.arange(-2, 3,2))
plt.xlim(0, 1900); plt.ylim(-3.4, 3.4)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS8a.pdf")
###Output
_____no_output_____
###Markdown
Figure S8b_6 min_
###Code
all_dat = []
all_subjects = HUMAN_DF["subject_id"].unique()
for i, subject in enumerate(all_subjects):
print("\rProcessing " + str(i+1) + " of " + str(len(all_subjects)), end="")
new_dat = getHuman(subject)
prev_choice = np.hstack(([0], new_dat['y'][:-1]*2 - 1)).reshape(-1,1)
prev_answer = np.hstack(([0], new_dat['answer'][:-1]*2 - 1)).reshape(-1,1)
new_dat['inputs']['c'] = prev_choice
new_dat['inputs']['h'] = prev_answer
# Compute
weights = {'bias': 1, 's_a': 1, 's_b': 1, 's_avg': 1, 'h': 1, 'c': 1}
K = np.sum([weights[i] for i in weights.keys()])
hyper_guess = {
'sigma' : [2**-5]*K,
'sigInit' : 2**5,
'sigDay' : None
}
optList = ['sigma']
hyp, evd, wMode, hess_info = psy.hyperOpt(new_dat, hyper_guess, weights, optList)
dat = {'hyp' : hyp, 'evd' : evd, 'wMode' : wMode, 'W_std' : hess_info['W_std'],
'weights' : weights, 'new_dat' : new_dat}
all_dat += [dat]
# Save interim result
np.savez_compressed(SPATH+'figS8b_data.npz', all_dat=all_dat)
all_dat = np.load(SPATH+'figS8b_data.npz', allow_pickle=True)['all_dat']
plt.figure(figsize=(4.75,1.4))
for dat in all_dat:
weights = dat['weights']
wMode = dat['wMode']
labels = []
for j in sorted(weights.keys()):
labels += [j]*weights[j]
for i, w in enumerate(labels):
if w in ['h', 'c']:
plt.plot(wMode[i], lw=1.5, alpha=0.5, linestyle='-', c=colors[w], zorder=zorder[w])
plt.axhline(0, color="black", linestyle="--", lw=1, alpha=0.5, zorder=0)
plt.gca().set_xticks([0,500,1000,1500,2000])
plt.gca().set_yticks(np.arange(-2, 3,2))
plt.xlim(0, 1900); plt.ylim(-3.4, 3.4)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.subplots_adjust(0,0,1,1)
plt.savefig(SPATH + "FigS8b.pdf")
###Output
_____no_output_____
###Markdown
--- Notebook Versioning **1.1.0** : (November 23, 2020) update following _Neuron_ reviewer feedback - add/replace Figure 5C-E - add Figure 6F - add Figures S2, S5, S6, & S7 **1.0.0** : (May 21, 2020) original release Download All Figures
###Code
!zip -r "all_figures.zip" . -i "{SPATH}*.pdf"
import time; time.sleep(10)
from google.colab import files
files.download("all_figures.zip")
###Output
_____no_output_____ |
estudo/exercicios-ML/kaggle-project-classification-gender.ipynb | ###Markdown
Naive Bayes Algorithm
###Code
naive_gender = GaussianNB()
naive_gender.fit(x_train, y_train)
previsoes = naive_gender.predict(x_test)
previsoes
accuracy_score(y_teste, previsoes)
###Output
_____no_output_____
###Markdown
Decision Tree Algorithm
###Code
arvore_gender = DecisionTreeClassifier(criterion='entropy', random_state=0)
arvore_gender.fit(x_train, y_train)
previsoes2 = arvore_gender.predict(x_test)
previsoes2
accuracy_score(y_teste, previsoes2)
###Output
_____no_output_____
###Markdown
Random Forest Algorithm
###Code
random_forest_gender = RandomForestClassifier(n_estimators=100, criterion='entropy', random_state=0)
random_forest_gender.fit(x_train, y_train)
previsoes3 = random_forest_gender.predict(x_test)
previsoes3
accuracy_score(y_teste, previsoes3)
###Output
_____no_output_____
###Markdown
KNN Algorithm
###Code
knn_gender = KNeighborsClassifier(n_neighbors=10)
knn_gender.fit(x_train, y_train)
previsoes4 = knn_gender.predict(x_test)
previsoes4
accuracy_score(y_teste, previsoes4)
###Output
_____no_output_____ |
Chapter3_Part1_Data Modeling.ipynb | ###Markdown
Chapter 3: Model Building and Evaluation -- Modeling After studying the previous two chapters, we can work with the data itself (adding, deleting, querying, and filling in records) and carry out the necessary cleaning. Now it is time to use the data we have prepared. What we do in this chapter is put the data to work: the goal of data analysis is to combine our data with the concrete business problem to obtain the results we need. The first step of the analysis is modeling, i.e., building a predictive model (or another kind of model); once we have results from that model, we need to judge whether the model is reliable enough, which is where model evaluation comes in. In this section we study modeling; in the next section we study evaluation. We have the Titanic dataset, and the goal of this analysis is to predict the survival of Titanic passengers.
###Code
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import Image #To work with images (JPEG, PNG) use the Image class
# Some matplotlib settings
plt.rcParams['font.sans-serif'] = ['SimHei'] # use the SimHei font so that Chinese axis labels render correctly
plt.rcParams['axes.unicode_minus'] = False # display the minus sign correctly
plt.rcParams['figure.figsize'] = (10, 6) # set the default figure size
###Output
_____no_output_____
###Markdown
Load the cleaned data we provide (clear_data.csv), and also load the original data (train.csv); describe how they differ. Solution: the biggest difference is that clear_data does not contain the 'Survived' column.
###Code
# Read the training data from the Titanic dataset
train = pd.read_csv('titanic/train.csv')
train.shape
train.info()
# Read the cleaned dataset
data = pd.read_csv('clear_data.csv')
data.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 PassengerId 891 non-null int64
1 Pclass 891 non-null int64
2 Age 891 non-null float64
3 SibSp 891 non-null int64
4 Parch 891 non-null int64
5 Fare 891 non-null float64
6 Sex_female 891 non-null int64
7 Sex_male 891 non-null int64
8 Embarked_C 891 non-null int64
9 Embarked_Q 891 non-null int64
10 Embarked_S 891 non-null int64
dtypes: float64(2), int64(9)
memory usage: 76.7 KB
###Markdown
Model building * After the preceding data-processing steps, we now have data that can be used as model input, i.e., the modeling data; the next step is to choose an appropriate model. * Before selecting a model, we need to know whether the task is ultimately **supervised learning** or **unsupervised learning**. * The choice of model is determined partly by the task itself, and partly by the **sample size** and the **sparsity of the features**. * At the start we usually try a basic model as a baseline, then train other models for comparison, and finally choose the model with the better generalization ability or performance (a comparison sketch follows below).
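As an illustration of that baseline-then-compare workflow, here is a minimal sketch that scores a few candidate classifiers with 5-fold cross-validation; it assumes the `X` and `y` defined in Task 1 below:

```python
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier

# Candidate models: a simple baseline first, then more flexible models to compare against.
candidates = {
    'logistic regression (baseline)': LogisticRegression(max_iter=1000),
    'decision tree': DecisionTreeClassifier(random_state=0),
    'random forest': RandomForestClassifier(n_estimators=100, random_state=0),
}

for name, model in candidates.items():
    scores = cross_val_score(model, X, y, cv=5)  # cross-validated accuracy on the modeling data
    print('{}: {:.3f} +/- {:.3f}'.format(name, scores.mean(), scores.std()))
```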
###Code
# The sklearn estimator selection flowchart (model/algorithm selection cheat-sheet)
Image('sklearn.png')
###Output
_____no_output_____
###Markdown
Task 1: Split the data into training and test sets. Here we use the hold-out method to split the dataset. * Separate the dataset into independent variables (features) and the dependent variable (target). * Split the training and test sets by proportion (common test-set ratios are 30%, 25%, 20%, 15%, and 10%). * Use stratified sampling. * Set a random seed so that the results can be reproduced. [Think about it] * What methods are there for splitting a dataset? * Why use stratified sampling, and what are its benefits? **Hint 1** * We split the dataset so that we can later evaluate the model's generalization ability. * In sklearn the function for splitting a dataset is train_test_split. * To view its documentation, type train_test_split? in a Jupyter notebook cell and press Enter. * The stratification and random-seed options are among its parameters. The arguments required by train_test_split() are taken from clear_data.csv and train.csv.
###Code
from sklearn.model_selection import train_test_split
# Usually we extract X and y first and then split; some steps may need the unsplit data, in which case X and y can be used directly
# X is the cleaned data; y is the 'Survived' column we want to predict
X = data
y = train['Survived']
# If train_size/test_size are not specified, test_size defaults to 0.25
# stratify parameter will preserve the proportion of target as in original dataset, in the train and test datasets as well.
X_train, X_test, y_train, y_test = train_test_split(X,y,stratify=y,random_state=0)
# Check the shapes of the splits
X_train.shape, X_test.shape
###Output
_____no_output_____
###Markdown
[Think about it] * In which situations should we not split the dataset by random sampling? Solution: in some cases we need the randomly selected data to stay consistent across runs, which can be achieved by setting random_state to the same value. Task 2: Create the models * Create a classification model based on a linear model (logistic regression). * Create tree-based classification models (decision tree, random forest; a decision-tree sketch follows below). * Train each of these models and obtain the scores on the training and test sets. * Inspect the model parameters, change their values, and observe how the model changes. **Hint 2** * Logistic regression is a classification model, not a regression model; do not confuse it with `LinearRegression`. * A random forest is an ensemble of decision trees, designed to reduce the overfitting of a single decision tree. * Linear models live in the `sklearn.linear_model` module. * Tree models live in the `sklearn.ensemble` module.
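The task above also names a plain decision tree, which the cells below do not build; a minimal sketch of that model, assuming the `X_train`/`X_test` split created in Task 1, could look like this:

```python
from sklearn.tree import DecisionTreeClassifier

# Decision tree classifier from Task 2, using the split created in Task 1.
dt = DecisionTreeClassifier(criterion='entropy', random_state=0)
dt.fit(X_train, y_train)
print('Training set score: {:.2f}'.format(dt.score(X_train, y_train)))
print('Testing set score: {:.2f}'.format(dt.score(X_test, y_test)))
```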
###Code
# Import LogisticRegression and RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
###Output
_____no_output_____
###Markdown
**Logistic regression model**
###Code
# Logistic regression model, mostly default parameters
lr = LogisticRegression(max_iter=1000)
lr.fit(X_train, y_train)
print('Training set score: {:.2f}'.format(lr.score(X_train, y_train)))
print('Testing set score: {:.2f}'.format(lr.score(X_test, y_test)))
# Logistic regression model with an adjusted parameter
lr2 = LogisticRegression(C=100,max_iter=1000) # change the value of C; this is only a comparison experiment
lr2.fit(X_train, y_train)
print('Training set score: {:.2f}'.format(lr2.score(X_train, y_train)))
print('Testing set score: {:.2f}'.format(lr2.score(X_test, y_test)))
###Output
Training set score: 0.80
Testing set score: 0.78
###Markdown
**Random forest model**
###Code
# Random forest classifier with default parameters
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
print('Training set score:{:.2f}'.format(rfc.score(X_train,y_train)))
print('Testing set score:{:.2f}'.format(rfc.score(X_test, y_test)))
# Random forest classifier with adjusted parameters
rfc2 = RandomForestClassifier(n_estimators=100, max_depth=5)
rfc2.fit(X_train, y_train)
print('Training set score:{:.2f}'.format(rfc2.score(X_train,y_train)))
print('Testing set score:{:.2f}'.format(rfc2.score(X_test, y_test)))
###Output
Training set score:0.87
Testing set score:0.87
###Markdown
【Think about it】* Why can a linear model perform a classification task, and what is the underlying mathematics? Solution: the sigmoid function * For a multi-class problem, how does a linear model classify? Task 3: output the model predictions * Output the predicted class labels * Output the predicted probability of each class **Hint 3** * Supervised models in sklearn generally have a `predict` method that outputs the predicted labels, while `predict_proba` outputs the class probabilities
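To make the "sigmoid" answer above concrete, here is a small illustrative check (not part of the original course tasks): for a binary `LogisticRegression`, the predicted probability of class 1 is just the sigmoid of the linear score returned by `decision_function`.
###Code
# Illustration only (assumes lr was fit above): predict_proba equals the sigmoid (expit) of the linear score
from scipy.special import expit
np.allclose(lr.predict_proba(X_train)[:, 1], expit(lr.decision_function(X_train)))
###Output
_____no_output_____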
###Code
# Predicted labels
pred = lr.predict(X_train)
pred[:10]
# Predicted label probabilities
pred_proba = lr.predict_proba(X_train)
pred_proba[:10]
###Output
_____no_output_____ |
deprecated/Lending Club Modeling.ipynb | ###Markdown
###Code
# import necessary packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import gc
from sklearn.metrics import auc, roc_curve, log_loss
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import shuffle
from imblearn.over_sampling import RandomOverSampler
import h2o
from h2o.frame import H2OFrame
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.grid.grid_search import H2OGridSearch
from keras import models, layers
%matplotlib inline
###Output
//anaconda/lib/python3.5/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
//anaconda/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: compiletime version 3.6 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.5
return f(*args, **kwds)
###Markdown
Lending Club Loan Data Analysis and Modeling Classification is one of the two most common data science problems (the other one is regression). For supervised classification problems, imbalanced data is pretty common yet very challenging. For example, credit card fraud detection, disease classification, network intrusion and so on are classification problems with imbalanced data. In this project, working with the Lending Club loan data, we hope to correctly predict whether or not a loan will default using the historical data. Contents 1. Problem Statement 2. Data Exploration 3. Data Cleaning and Initial Feature Engineering - Feature transformation - Missing values - Feature transformation - Feature normalization - And so on ...... 4. Visualization 5. Further Feature Engineering 6. Machine Learning - Logistic Regression - Random Forest - Deep Learning 7. Conclusions 1. Problem Statement For companies like Lending Club, correctly predicting whether or not a loan will default is very important. In this project, using the historical data, more specifically the Lending Club loan data from 2007 to 2015, we hope to build a machine learning model such that we can predict the chance of default for future loans. As I will show later, this dataset is highly imbalanced and includes a lot of features, which makes this problem more challenging. 2. Data Exploration There are several ways to download the dataset; for example, you can go to Lending Club's [website](https://www.lendingclub.com/info/download-data.action), or you can go to [Kaggle](https://www.kaggle.com/wendykan/lending-club-loan-data). I will use the loan data from 2007 to 2015 as the training set (+ validation set), and use the data from 2016 as the test set.
###Code
# Load the training and test data set
train = pd.read_csv('./data/2007-2015-loan.csv', low_memory=False, encoding='ISO-8859-1')
test = pd.read_csv('./data/2016-loan.csv', low_memory=False, encoding='ISO-8859-1')
# There are 74 features in total
print('Train:\t', train.shape)
print('Test:\t', test.shape)
train.head()
test.head()
# Create a concise table for the data
train_dtype = train.dtypes.values
train_missing = train.isnull().sum().values
train_missing_ratio = train_missing / len(train)
test_dtype = test.dtypes.values
test_missing = test.isnull().sum().values
test_missing_ratio = test_missing / len(test)
# Calculate unique values
train_unique = []
test_unique = []
for name in train.columns:
train_unique.append(train[name].unique().shape[0])
test_unique.append(test[name].unique().shape[0])
# Useful information
tmp_map = {'Columns': train.columns, 'Train dtype': train_dtype, 'Train missing': train_missing,
'Train missing ratio': train_missing_ratio, 'Train unique': train_unique, 'Test dtype': test_dtype,
'Test missing': test_missing, 'Test missing ratio': test_missing_ratio, 'Test unique': test_unique}
columns = ['Columns', 'Train dtype', 'Train missing', 'Train missing ratio', 'Train unique',
'Test dtype', 'Test missing', 'Test missing ratio', 'Test unique']
df = pd.DataFrame(tmp_map, columns=columns)
pd.options.display.max_rows = 75
df
###Output
_____no_output_____
###Markdown
We should notice some differences between the training and test sets, and look into the details. Some major differences are: 1. For the test set, id, member_id, and url are totally missing, which is different from the training set 2. For the training set, open_acc_6m, open_il_6m, open_il_12m, open_il_24m, mths_since_rcnt_il, total_bal_il, il_util, open_rv_12m, open_rv_24m, max_bal_bc, all_util, inq_fi, total_cu_tl, and inq_last_12m are almost entirely missing, which is different from the test set 3. desc, mths_since_last_delinq, mths_since_last_record, mths_since_last_major_derog, annual_inc_joint, dti_joint, and verification_status_joint have a large amount of missing values 4. There are multiple loan statuses, but we are only concerned with whether or not the loan is default 3. Data Cleaning and Initial Feature Engineering I. Transform features `int_rate` and `revol_util` in the test set
###Code
# Check the difference values for int_rate, revol_util
print('Item\t', 'int_rate', '\t', 'revol_util')
print('-' * 40)
print('Train:\t', train['int_rate'][0], '\t\t', train['revol_util'][0])
print('Test:\t', test['int_rate'][0], '\t', test['revol_util'][0])
# Using lambda function
test['int_rate'] = test['int_rate'].apply(lambda x: float(x[:-1]))
index = test[~test['revol_util'].isnull()].index
test.loc[index, 'revol_util'] = test.loc[index, 'revol_util'].apply(lambda x: float(x[:-1]))
test['revol_util'] = test['revol_util'].astype(float)
###Output
_____no_output_____
###Markdown
II. Transform target values `loan_status`
###Code
# Check the target values
print('Train:\n', list(train['loan_status'].unique()))
print('\nTest:\n', list(test['loan_status'].unique()))
# Let's only keep the data that is not currently in process; for more details, refer to
# https://help.lendingclub.com/hc/en-us/articles/215488038-What-do-the-different-Note-statuses-mean-
drop_status = ['Issued']
train = train[~train['loan_status'].isin(drop_status)]
test = test[~test['loan_status'].isin(drop_status)]
# Treat this as a binary classification problem
maps = {'Current': 0, 'Fully Paid': 0, 'Charged Off': 1, 'Default': 1,
'Does not meet the credit policy. Status:Fully Paid': 0,
'Does not meet the credit policy. Status:Charged Off': 1,
'In Grace Period': 0, 'Late (16-30 days)': 1, 'Late (31-120 days)': 1}
train['loan_status'] = train['loan_status'].apply(lambda x: maps[x])
test['loan_status'] = test['loan_status'].apply(lambda x: maps[x])
# Check the imbalance
train_default_ratio = np.round(len(train[train['loan_status'] == 1]) / len(train), 4)
train_nondefault_ratio = np.round(len(train[train['loan_status'] == 0]) / len(train), 4)
test_default_ratio = np.round(len(test[test['loan_status'] == 1]) / len(test), 4)
test_nondefault_ratio = np.round(len(test[test['loan_status'] == 0]) / len(test), 4)
print('Item\t', 'Default', '\t', 'Not Default')
print('-' * 40)
print('Train:\t', train_default_ratio, '\t', train_nondefault_ratio)
print('Test:\t', test_default_ratio, '\t', test_nondefault_ratio)
###Output
Item Default Not Default
----------------------------------------
Train: 0.0696 0.9304
Test: 0.0363 0.9637
###Markdown
It's clear that our dataset is highly imbalanced.
###Code
# Create a concise table for the data
train_dtype = train.dtypes.values
train_missing = train.isnull().sum().values
train_missing_ratio = train_missing / len(train)
test_dtype = test.dtypes.values
test_missing = test.isnull().sum().values
test_missing_ratio = test_missing / len(test)
# Calculate unique values
train_unique = []
test_unique = []
for name in train.columns:
train_unique.append(train[name].unique().shape[0])
test_unique.append(test[name].unique().shape[0])
# Useful information
df = pd.DataFrame({'Columns': train.columns, 'Train dtype': train_dtype, 'Train missing': train_missing,
'Train missing ratio': train_missing_ratio, 'Train unique': train_unique,
'Test dtype': test_dtype, 'Test missing': test_missing,
'Test missing ratio': test_missing_ratio, 'Test unique': test_unique},
columns=['Columns', 'Train dtype', 'Train missing', 'Train missing ratio', 'Train unique',
'Test dtype', 'Test missing', 'Test missing ratio', 'Test unique'])
df
###Output
_____no_output_____
###Markdown
III. Drop useless features
###Code
# Drop the features that have too many missing values
useless = ['id', 'member_id', 'url', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m',
'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m',
'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m', 'desc',
'mths_since_last_delinq', 'mths_since_last_record', 'mths_since_last_major_derog',
'annual_inc_joint', 'dti_joint', 'verification_status_joint']
train = train.drop(labels=useless, axis=1)
test = test.drop(labels=useless, axis=1)
# Drop meaningless features based on understanding
meaningless = ['emp_title', 'issue_d', 'last_pymnt_d', 'next_pymnt_d', 'zip_code', 'title',
'grade', 'earliest_cr_line', 'last_credit_pull_d', 'policy_code']
train = train.drop(labels=meaningless, axis=1)
test = test.drop(labels=meaningless, axis=1)
_ = gc.collect()
print(train.shape, test.shape)
###Output
(878919, 40) (434407, 40)
###Markdown
Now we have successfully reduced the features from 74 to 40. Next, let's focus on more detailed feature engineering. First, let's look at the data again. From the table below, we can see that: * Most features are numerical, but there are several categorical features. * There are still some missing values among the numerical and categorical features.
###Code
# Create a concise table for the data
train_dtype = train.dtypes.values
train_missing = train.isnull().sum().values
train_missing_ratio = train_missing / len(train)
test_dtype = test.dtypes.values
test_missing = test.isnull().sum().values
test_missing_ratio = test_missing / len(test)
# Calculate unique values
train_unique = []
test_unique = []
for name in train.columns:
train_unique.append(train[name].unique().shape[0])
test_unique.append(test[name].unique().shape[0])
# Useful information
df = pd.DataFrame({'Columns': train.columns, 'Train dtype': train_dtype, 'Train missing': train_missing,
'Train missing ratio': train_missing_ratio, 'Train unique': train_unique,
'Test dtype': test_dtype, 'Test missing': test_missing,
'Test missing ratio': test_missing_ratio, 'Test unique': test_unique},
columns=['Columns', 'Train dtype', 'Train missing', 'Train missing ratio', 'Train unique',
'Test dtype', 'Test missing', 'Test missing ratio', 'Test unique'])
df
###Output
_____no_output_____
###Markdown
IV. Feature transformation Transform numerical values into categorical values
###Code
transform_featurs = ['total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'tot_coll_amt',
'collections_12_mths_ex_med', 'acc_now_delinq', 'out_prncp', 'out_prncp_inv']
for name in transform_featurs:
train[name] = (train[name] > 0).astype(str)
test[name] = (test[name] > 0).astype(str)
###Output
_____no_output_____
###Markdown
Transform categorical values into numerical values (discrete)
###Code
# Transform sub_grade
sub_grade_encoder = LabelEncoder()
sub_grade_encoder.fit(train['sub_grade'])
train['sub_grade'] = sub_grade_encoder.transform(train['sub_grade'])
test['sub_grade'] = sub_grade_encoder.transform(test['sub_grade'])
# Transform emp_length (first fill NA value with mode)
mode = train['emp_length'].mode().values[0]
train['emp_length'] = train['emp_length'].fillna(value=mode)
test['emp_length'] = test['emp_length'].fillna(value=mode)
# Manually do encoding due to sort problems
emp_map = {'< 1 year': 0, '1 year': 1, '3 years': 2, '4 years': 3, '5 years': 4, '6 years': 5,
'2 years': 6, '7 years': 7, '8 years': 8, '9 years': 9, '10+ years': 10}
train['emp_length'] = train['emp_length'].apply(lambda x: emp_map[x])
test['emp_length'] = test['emp_length'].apply(lambda x: emp_map[x])
###Output
_____no_output_____
###Markdown
V. Fill missing values* For numerical features, use median* For categorical features, use mode (here, we don't have missing categorical values)
###Code
# Separate numerical and categorical features (16 categorical features, 23 numerical features)
numerical_feature = train.select_dtypes(exclude=['object']).columns.drop('loan_status')
categorical_feature = train.select_dtypes(include=['object']).columns
print('Numerical:\t', len(numerical_feature))
print('Categorical:\t', len(categorical_feature))
# Fill numerical features with median
medians = train[numerical_feature].median(axis=0, skipna=True)
train[numerical_feature] = train[numerical_feature].fillna(value=medians)
test[numerical_feature] = test[numerical_feature].fillna(value=medians)
###Output
_____no_output_____
###Markdown
4. Visualization I. Visualize categorical features
###Code
# For better visualization purpose, we set the y to be log scale
fig, ax = plt.subplots(nrows=6, ncols=3, figsize=(18, 30))
plt.tight_layout(h_pad=3)
for i in range(len(categorical_feature)):
name = categorical_feature[i]
sns.countplot(x=name, hue='loan_status', data=train, ax=ax[i//3][i%3])
ax[i//3][i%3].set_yscale('log')
plt.show()
_ = gc.collect()
###Output
_____no_output_____
###Markdown
II. Visualize numerical features
###Code
# Histogram of numerical values
fig, ax = plt.subplots(nrows=8, ncols=3, figsize=(18, 42))
plt.tight_layout(h_pad=3)
hist_kws={'histtype': 'bar', 'edgecolor':'black', 'alpha': 0.2}
for i in range(len(numerical_feature)):
name = numerical_feature[i]
sns.distplot(train[train['loan_status'] == 0][name], label='Not Default',
hist_kws=hist_kws, ax=ax[i//3][i%3])
sns.distplot(train[train['loan_status'] == 1][name], label='Default',
hist_kws=hist_kws, ax=ax[i//3][i%3])
ax[i//3][i%3].legend()
plt.show()
_ = gc.collect()
# Heatmap of the correlation
corr = train[numerical_feature].corr()
fig, ax = plt.subplots(figsize=(12, 10))
sns.heatmap(corr, ax=ax)
plt.axis('image')
plt.show()
###Output
_____no_output_____
###Markdown
5. Further Feature Engineering From the above heatmap and the categorical variable countplots, we can see that some features are strongly correlated: * loan_amnt, funded_amnt, funded_amnt_inv, installment * int_rate, sub_grade * total_pymnt, total_pymnt_inv, total_rec_prncp * out_prncp, out_prncp_inv * recoveries, collection_recovery_fee We can drop some of them to reduce redundancy
###Code
# Drop redundant features
useless = ['funded_amnt', 'funded_amnt_inv', 'installment', 'sub_grade', 'total_pymnt_inv',
'total_rec_prncp', 'out_prncp_inv', 'collection_recovery_fee']
train = train.drop(useless, axis=1)
test = test.drop(useless, axis=1)
# Separate numerical and categorical features (14 categorical features, 17 numerical features)
numerical_feature = train.select_dtypes(exclude=['object']).columns.drop('loan_status')
categorical_feature = train.select_dtypes(include=['object']).columns
print('Numerical:\t', len(numerical_feature))
print('Categorical:\t', len(categorical_feature))
###Output
Numerical: 17
Categorical: 14
###Markdown
Now we have only 14 categorical features and 17 numerical features. Let's check the correlations again.
###Code
print('Numerical features:', '\n', list(numerical_feature), '\n')
print('Categorical Features:', '\n', list(categorical_feature))
# Check the heatmap again
corr = train[numerical_feature].corr()
fig, ax = plt.subplots(figsize=(12, 10))
sns.heatmap(corr, ax=ax)
plt.axis('image')
plt.show()
###Output
_____no_output_____
###Markdown
6. Machine Learning After the above procedures, we are ready to build the predictive models. In this part, I explored three different models: logistic regression, random forest, and deep learning. I used to use scikit-learn a lot, but there is one problem with scikit-learn: you need to do one-hot encoding manually, which can sometimes dramatically increase the feature space. In this part, for logistic regression and random forest, I use the H2O package, which has better support for categorical features. For the deep learning model, I use Keras with the TensorFlow backend.
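To make the point about feature-space growth concrete, here is a small illustration (not part of the original pipeline, and not needed by the H2O models below): one-hot encoding just the categorical columns already adds a noticeable number of columns.
###Code
# Illustration only: number of categorical columns before and after pd.get_dummies
print('Categorical columns:', len(categorical_feature))
print('Columns after one-hot encoding them:', pd.get_dummies(train[categorical_feature]).shape[1])
###Output
_____no_output_____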
###Code
# Define x and y variables
train = shuffle(train, random_state=42)
x = list(train.columns.drop('loan_status'))
y = 'loan_status'
train_y = train['loan_status'].values
test_y = test['loan_status'].values
# Initialize H2O cluster
h2o.init()
h2o.remove_all()
# Transform to H2O Frame, and make sure the target variable is categorical
h2o_train = H2OFrame(train)
h2o_test = H2OFrame(test)
h2o_train['loan_status'] = h2o_train['loan_status'].asfactor()
h2o_test['loan_status'] = h2o_test['loan_status'].asfactor()
###Output
Checking whether there is an H2O instance running at http://localhost:54321..... not found.
Attempting to start a local H2O server...
Java Version: openjdk version "1.8.0_121"; OpenJDK Runtime Environment (Zulu 8.20.0.5-macosx) (build 1.8.0_121-b15); OpenJDK 64-Bit Server VM (Zulu 8.20.0.5-macosx) (build 25.121-b15, mixed mode)
Starting server from /anaconda/lib/python3.5/site-packages/h2o/backend/bin/h2o.jar
Ice root: /var/folders/gx/b15jqbt1567grsfr7l_znn2h0000gn/T/tmpqdjoac_3
JVM stdout: /var/folders/gx/b15jqbt1567grsfr7l_znn2h0000gn/T/tmpqdjoac_3/h2o_jifu_started_from_python.out
JVM stderr: /var/folders/gx/b15jqbt1567grsfr7l_znn2h0000gn/T/tmpqdjoac_3/h2o_jifu_started_from_python.err
Server is running at http://127.0.0.1:54321
Connecting to H2O server at http://127.0.0.1:54321... successful.
###Markdown
I. Logistic Regression
###Code
# Train logistic regression model with Lasso using grid search
hyper_parameters = {'alpha': [0, 0.2, 0.4, 0.6, 0.8, 1.0],
'lambda': [1e-4, 1e-5, 1e-6, 1e-7, 1e-8]}
# Create GLM model
glm = H2OGeneralizedLinearEstimator(family='binomial', balance_classes=True,
early_stopping=True, custom_metric_func='auc',
keep_cross_validation_predictions=True, nfolds=5)
# Grid search
glm_grid = H2OGridSearch(glm, hyper_parameters, grid_id='GLM')
glm_grid.train(x=x, y=y, training_frame=h2o_train)
# Get the grid search result, sorted by AUC
glm_models = glm_grid.get_grid(sort_by='auc', decreasing=True)
# Choose the best model
best_glm = glm_grid.models[0]
glm_models
# Make predictions
glm_train_pred = best_glm.predict(h2o_train).as_data_frame()['p1'].values
glm_test_pred = best_glm.predict(h2o_test).as_data_frame()['p1'].values
# Build the ROC curve
train_fpr, train_tpr, _ = roc_curve(train_y, glm_train_pred)
test_fpr, test_tpr, _ = roc_curve(test_y, glm_test_pred)
glm_train_auc = np.round(auc(train_fpr, train_tpr), 3)
glm_test_auc = np.round(auc(test_fpr, test_tpr), 3)
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(train_fpr, train_tpr, label='Train AUC: ' + str(glm_train_auc))
ax.plot(test_fpr, test_tpr, label='Test AUC: ' + str(glm_test_auc))
ax.set_xlabel('False Positive Rate', fontsize=12)
ax.set_ylabel('True Positive Rate', fontsize=12)
ax.legend(fontsize=12)
plt.show()
# Log-loss
print('Train:\t', log_loss(train_y, glm_train_pred))
print('Test:\t', log_loss(test_y, glm_test_pred))
###Output
Train: 0.10566966640063173
Test: 0.10066479508447695
###Markdown
II. Random Forest
###Code
# Train random forest model using grid search
hyper_parameters = {'max_depth': [10, 15, 20],
'min_rows': [3, 5]}
# Build random forest model
rf = H2ORandomForestEstimator(balance_classes=True, ntrees=200, stopping_rounds=5, nfolds=5,
stopping_metric='auc', keep_cross_validation_predictions=True)
# Grid search
rf_grid = H2OGridSearch(rf, hyper_parameters, grid_id='DRF')
rf_grid.train(x=x, y=y, training_frame=h2o_train)
# Get the grid search result, sorted by AUC
rf_models = rf_grid.get_grid(sort_by='auc', decreasing=True)
# Choose the best model
best_rf = rf_grid.models[0]
rf_models
# Make prediction
rf_train_pred = best_rf.predict(h2o_train).as_data_frame()['p1'].values
rf_test_pred = best_rf.predict(h2o_test).as_data_frame()['p1'].values
# Build the ROC curve
train_fpr, train_tpr, _ = roc_curve(train_y, rf_train_pred)
test_fpr, test_tpr, _ = roc_curve(test_y, rf_test_pred)
rf_train_auc = np.round(auc(train_fpr, train_tpr), 3)
rf_test_auc = np.round(auc(test_fpr, test_tpr), 3)
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(train_fpr, train_tpr, label='Train AUC: ' + str(rf_train_auc))
ax.plot(test_fpr, test_tpr, label='Test AUC: ' + str(rf_test_auc))
ax.set_xlabel('False Positive Rate', fontsize=12)
ax.set_ylabel('True Positive Rate', fontsize=12)
ax.legend(fontsize=12)
plt.show()
# Log-loss
print('Train:\t', log_loss(train_y, rf_train_pred))
print('Test:\t', log_loss(test_y, rf_test_pred))
# Feature importance
best_rf.varimp_plot(num_of_features=20)
plt.show()
# Shutdown h2o instance
h2o.cluster().shutdown()
###Output
H2O session _sid_b3ad closed.
###Markdown
III. Deep Learning In this part, let's manually build a fully-connected neural network (NN) model to finish the classification task.
###Code
# Feature Normalization
mean = train[numerical_feature].mean()
std = train[numerical_feature].std()
train[numerical_feature] = (train[numerical_feature] - mean) / std
test[numerical_feature] = (test[numerical_feature] - mean) / std
# Dummy encoding
tmp_data = pd.concat([train[categorical_feature], test[categorical_feature]], axis=0)
dummy_data = pd.get_dummies(tmp_data)
train_dummy = dummy_data[:len(train)]
test_dummy = dummy_data[len(train):]
# Get train, test data and target
train_data = pd.concat([train[numerical_feature], train_dummy], axis=1).values
train_target = train['loan_status'].values
test_data = pd.concat([test[numerical_feature], test_dummy], axis=1).values
test_target = test['loan_status'].values
# Define NN model
def build_model(data):
# build a NN with input-layer1-layer2-output structure
model = models.Sequential()
model.add(layers.Dense(128, activation='relu', input_shape=(data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='Adam', loss='binary_crossentropy')
return model
def kfold(data, target, k, num_epochs=100, batch_size=128):
""" function to perform k-fold cross validation """
n = len(data) // k
# Keep recording the training and validation loss
train_loss = np.zeros((k, num_epochs))
val_loss = np.zeros((k, num_epochs))
for i in range(k):
print('Processing cross-validation round #', i + 1)
# Get the train and validation set
val_x = data[i * n: (i + 1) * n]
val_y = target[i * n: (i + 1) * n]
train_x = np.concatenate([data[: i * n], data[(i + 1) * n:]], axis=0)
train_y = np.concatenate([target[: i * n], target[(i + 1) * n:]], axis=0)
# Over sampling training set
ros = RandomOverSampler(ratio='minority', random_state=42)
train_x_resampled, train_y_resampled = ros.fit_sample(train_x, train_y)
# Build and train NN model
model = build_model(data)
history = model.fit(train_x_resampled, train_y_resampled, epochs=num_epochs, verbose=0,
validation_data=(val_x, val_y), batch_size=batch_size)
# Retrieve the training and validation history
train_loss[i] = history.history['loss']
val_loss[i] = history.history['val_loss']
return train_loss, val_loss
# Begin 5-fold cross validation
num_epochs = 100
train_loss, val_loss = kfold(data=train_data, target=train_target,
k=5, num_epochs=100, batch_size=128)
# Visualization
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(range(1, num_epochs + 1), np.mean(train_loss, axis=0), label='Train Loss')
ax.plot(range(1, num_epochs + 1), np.mean(val_loss, axis=0), label='Validation Loss')
ax.set_xlabel('Epochs', fontsize=12)
ax.set_ylabel('Loss', fontsize=12)
ax.legend(fontsize=12)
plt.show()
# Over sampling training set
ros = RandomOverSampler(ratio='minority', random_state=42)
train_x_resampled, train_y_resampled = ros.fit_sample(train_data, train_target)
sum(train_target == 1) / len(train_target)
sum(train_y_resampled == 1) / len(train_y_resampled)
# Re-fit the model with the best number of epochs, chosen from the validation curve above
best_epochs = int(np.argmin(np.mean(val_loss, axis=0))) + 1
nn_model = build_model(train_data)
nn_model.fit(train_x_resampled, train_y_resampled, epochs=best_epochs, verbose=1, batch_size=128)
# Make prediction
nn_train_pred = nn_model.predict(train_data)
nn_test_pred = nn_model.predict(test_data)
# Build the ROC curve
train_fpr, train_tpr, _ = roc_curve(train_y, nn_train_pred)
test_fpr, test_tpr, _ = roc_curve(test_y, nn_test_pred)
nn_train_auc = np.round(auc(train_fpr, train_tpr), 3)
nn_test_auc = np.round(auc(test_fpr, test_tpr), 3)
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(train_fpr, train_tpr, label='Train AUC: ' + str(nn_train_auc))
ax.plot(test_fpr, test_tpr, label='Test AUC: ' + str(nn_test_auc))
ax.set_xlabel('False Positive Rate', fontsize=12)
ax.set_ylabel('True Positive Rate', fontsize=12)
ax.legend(fontsize=12)
plt.show()
# Log-loss
print('Train:\t', log_loss(train_y, nn_train_pred))
print('Test:\t', log_loss(test_y, nn_test_pred))
###Output
_____no_output_____ |
notebooks/half-lives.ipynb | ###Markdown
Radioactive Decay Radioactive decay is exponential Since the probability that a radioisotope decays is random, over time there will be fewer atoms remaining, and therefore fewer atoms that can decay. We can visualize this with a quick demo. Say we start with 100 atoms of a radioisotope, and every second there is a 25% chance that any given atom decays.
###Code
import numpy as np               # used for the particle arrays and random draws below
import matplotlib.pyplot as plt  # used for the plots below
N_0 = 100
prob = 0.25
###Output
_____no_output_____
###Markdown
Let's create an array with an element for each particle, and assign it a 1 (the particle still exists) or 0 (the particle has decayed). The array is initially all ones because no isotopes have decayed.
###Code
particles = np.ones(N_0)
###Output
_____no_output_____
###Markdown
After the 1st Second We can now start observing our simulated particles. Let's also create an array that will contain the fate of each particle after any given second. We will populate this array with newly drawn random values (on the interval 0 to 1) each second, and if the random number is less than 0.25, we will say that the particle has decayed. If the random numbers are drawn uniformly on the interval, there will be a 25% chance that any given number is less than 0.25 and the particle decays.
###Code
fates = np.random.random(int(sum(particles)))
###Output
_____no_output_____
###Markdown
Now we observe the results.
###Code
def observe(particles,fates):
for i in range(len(fates)):
if fates[i] < 0.25:
particles[i] = 0
return particles
particles = observe(particles,fates)
print('There are now {} particles remaining.'.format(int(sum(particles))))
###Output
There are now 79 particles remaining.
###Markdown
We can now repeat this process and plot the results for the first 10 seconds. We can see the exponential trend clearly (a true exponential is shown as a dashed line).
###Code
# Save the previous results, and repeat
remaining = [100,sum(particles)]
for t in range(1,10):
fates = np.random.random(len(particles))
particles = observe(particles,fates)
remaining.append(int(sum(particles)))
# Plot results
fig,ax = plt.subplots(figsize=(16,12))
ax.set_title('Particles remaining over time',fontsize=20)
ax.set_xlabel('Time [s]',fontsize=16)
ax.set_ylabel('Particles Remaining',fontsize=16)
ax.tick_params(labelsize='large')
ax.plot(remaining)
t = np.linspace(0,10,100)
ax.plot(t,100*np.exp(-prob*t),'--')
plt.show()
###Output
_____no_output_____
###Markdown
Comparing radioisotopes over time Each radioactive isotope decays randomly with a distinct probability. The random nature of the decay causes the quantity, $N$, of a radioisotope remaining (from an initial population of $N_0$ particles at time $t_0$) at some later time, $t$, to approximate an exponentially decaying function. The equation is$$ N = N_0 e^{-t/\tau} $$This next demo allows you to input the half-lives of any number of radioisotopes (in seconds) and the initial quantity of those isotopes (in atoms) to visually see this behavior. First, specify the number of isotopes you'd like to consider.
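The code below works with the lifetime $\tau$ rather than the half-life directly; the two are related by requiring that $N(T_{1/2}) = N_0/2$: $$ \frac{N_0}{2} = N_0 e^{-T_{1/2}/\tau} \quad \Rightarrow \quad \tau = \frac{T_{1/2}}{\ln 2} $$ which is why the code converts the entered half-lives with `tau = T_half/np.log(2)`.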
###Code
num_isotopes = int(input('Number of isotopes to consider:'))
###Output
Number of isotopes to consider:3
###Markdown
Now, for each isotope, enter the isotope's half-life and initial quantity at time $t_0$.
###Code
T_half = np.empty(num_isotopes)
N_0 = np.empty((num_isotopes,1))
for isotope in range(num_isotopes):
# Enter half-life and initial quantity of material
T_half[isotope]=int(input('Half-life of isotope {} (seconds):'.format(isotope+1)))
N_0[isotope][0]=int(input('Initial quantity of isotope {} (atoms):'.format(isotope+1)))
# Using the half-life, find an appropriate amount of time over which to plot N
t_max = min(T_half)*5
t = np.linspace(0,t_max,100)
# Change half-life to lifetime
tau = T_half/np.log(2)
# Calculate remaining population for each isotope
N = N_0*np.exp(-np.outer(1/tau,t))
###Output
_____no_output_____
###Markdown
Finally, we plot the quantity of each isotope over time.
###Code
fig,ax = plt.subplots(figsize=(16,12))
ax.set_title('Radioisotope populations over time',fontsize=20)
ax.set_xlabel('Time [s]',fontsize=16)
ax.set_ylabel('Quantity [atoms]',fontsize=16)
ax.tick_params(labelsize='large')
for isotope in range(num_isotopes):
ax.plot(t,N[isotope],label='Isotope {}'.format(isotope+1))
ax.legend(fontsize=14)
plt.show()
###Output
_____no_output_____ |
_site/notebooks/biocoding_2018_pythonlab_03.ipynb | ###Markdown
Review of String work, and moving on to lists Let's start off with a small challenge to refresh our skills from the previous notebook. Below is some broken code/incomplete; complete the challenge by fixing it so that we print generate the double-stranded DNA sequence of the hiv 'nef' gene Fix the broken code in each cell
###Code
# store the hiv genome as a variable
hiv_genome = uggaagggcuaauucacucccaacgaagacaagauauccuugaucuguggaucuaccacacacaaggcuacuucccugauuagcagaacuacacaccagggccagggaucagauauccacugaccuuuggauggugcuacaagcuaguaccaguugagccagagaaguuagaagaagccaacaaaggagagaacaccagcuuguuacacccugugagccugcauggaauggaugacccggagagagaaguguuagaguggagguuugacagccgccuagcauuucaucacauggcccgagagcugcauccggaguacuucaagaacugcugacaucgagcuugcuacaagggacuuuccgcuggggacuuuccagggaggcguggccugggcgggacuggggaguggcgagcccucagauccugcauauaagcagcugcuuuuugccuguacugggucucucugguuagaccagaucugagccugggagcucucuggcuaacuagggaacccacugcuuaagccucaauaaagcuugccuugagugcuucaaguagugugugcccgucuguugugugacucugguaacuagagaucccucagacccuuuuagucaguguggaaaaucucuagcaguggcgcccgaacagggaccugaaagcgaaagggaaaccagaggagcucucucgacgcaggacucggcuugcugaagcgcgcacggcaagaggcgaggggcggcgacuggugaguacgccaaaaauuuugacuagcggaggcuagaaggagagagaugggugcgagagcgucaguauuaagcgggggagaauuagaucgaugggaaaaaauucgguuaaggccagggggaaagaaaaaauauaaauuaaaacauauaguaugggcaagcagggagcuagaacgauucgcaguuaauccuggccuguuagaaacaucagaaggcuguagacaaauacugggacagcuacaaccaucccuucagacaggaucagaagaacuuagaucauuauauaauacaguagcaacccucuauugugugcaucaaaggauagagauaaaagacaccaaggaagcuuuagacaagauagaggaagagcaaaacaaaaguaagaaaaaagcacagcaagcagcagcugacacaggacacagcaaucaggucagccaaaauuacccuauagugcagaacauccaggggcaaaugguacaucaggccauaucaccuagaacuuuaaaugcauggguaaaaguaguagaagagaaggcuuucagcccagaagugauacccauguuuucagcauuaucagaaggagccaccccacaagauuuaaacaccaugcuaaacacaguggggggacaucaagcagccaugcaaauguuaaaagagaccaucaaugaggaagcugcagaaugggauagagugcauccagugcaugcagggccuauugcaccaggccagaugagagaaccaaggggaagugacauagcaggaacuacuaguacccuucaggaacaaauaggauggaugacaaauaauccaccuaucccaguaggagaaauuuauaaaagauggauaauccugggauuaaauaaaauaguaagaauguauagcccuaccagcauucuggacauaagacaaggaccaaaggaacccuuuagagacuauguagaccgguucuauaaaacucuaagagccgagcaagcuucacaggagguaaaaaauuggaugacagaaaccuuguugguccaaaaugcgaacccagauuguaagacuauuuuaaaagcauugggaccagcggcuacacuagaagaaaugaugacagcaugucagggaguaggaggacccggccauaaggcaagaguuuuggcugaagcaaugagccaaguaacaaauucagcuaccauaaugaugcagagaggcaauuuuaggaaccaaagaaagauuguuaaguguuucaauuguggcaaagaagggcacacagccagaaauugcagggccccuaggaaaaagggcuguuggaaauguggaaaggaaggacaccaaaugaaagauuguacugagagacaggcuaauuuuuuagggaagaucuggccuuccuacaagggaaggccagggaauuuucuucagagcagaccagagccaacagccccaccagaagagagcuucaggucugggguagagacaacaacucccccucagaagcaggagccgauagacaaggaacuguauccuuuaacuucccucaggucacucuuuggcaacgaccccucgucacaauaaagauaggggggcaacuaaaggaagcucuauuagauacaggagcagaugauacaguauuagaagaaaugaguuugccaggaagauggaaaccaaaaaugauagggggaauuggagguuuuaucaaaguaagacaguaugaucagauacucauagaaaucuguggacauaaagcuauagguacaguauuaguaggaccuacaccugucaacauaauuggaagaaaucuguugacucagauugguugcacuuuaaauuuucccauuagcccuauugagacuguaccaguaaaauuaaagccaggaauggauggcccaaaaguuaaacaauggccauugacagaagaaaaaauaaaagcauuaguagaaauuuguacagagauggaaaaggaagggaaaauuucaaaaauugggccugaaaauccauacaauacuccaguauuugccauaaagaaaaaagacaguacuaaauggagaaaauuaguagauuucagagaacuuaauaagagaacucaagacuucugggaaguucaauuaggaauaccacaucccgcaggguuaaaaaagaaaaaaucaguaacaguacuggaugugggugaugcauauuuuucaguucccuuagaugaagacuucaggaaguauacugcauuuaccauaccuaguauaaacaaugagacaccagggauuagauaucaguacaaugugcuuccacagggauggaaaggaucaccagcaauauuccaaaguagcaugacaaaaaucuuagagccuuuuagaaaacaaaauccagacauaguuaucuaucaauacauggaugauuuguauguaggaucugacuuagaaauagggcagcauagaacaaaaauagaggagcugagacaacaucuguugagguggggacuuaccacaccagacaaaaaacaucagaaagaaccuccauuccuuuggauggguuaugaacuccauccugauaaauggacaguacagccuauagugcugccagaaaaagacagcuggacugucaaugacauacagaaguuaguggggaaauugaauugggcaagucagauuuacccagggauuaaaguaaggcaauuauguaaacuccuuagaggaaccaaagcacuaacagaaguaauaccacuaacagaagaagcagagcuagaacuggcagaaaacagagagauucuaaaagaaccaguacauggaguguauuaugacccaucaaaagacuuaauagcagaaauacagaag
caggggcaaggccaauggacauaucaaauuuaucaagagccauuuaaaaaucugaaaacaggaaaauaugcaagaaugaggggugcccacacuaaugauguaaaacaauuaacagaggcagugcaaaaaauaaccacagaaagcauaguaauauggggaaagacuccuaaauuuaaacugcccauacaaaaggaaacaugggaaacaugguggacagaguauuggcaagccaccuggauuccugagugggaguuuguuaauaccccucccuuagugaaauuaugguaccaguuagagaaagaacccauaguaggagcagaaaccuucuauguagauggggcagcuaacagggagacuaaauuaggaaaagcaggauauguuacuaauagaggaagacaaaaaguugucacccuaacugacacaacaaaucagaagacugaguuacaagcaauuuaucuagcuuugcaggauucgggauuagaaguaaacauaguaacagacucacaauaugcauuaggaaucauucaagcacaaccagaucaaagugaaucagaguuagucaaucaaauaauagagcaguuaauaaaaaaggaaaaggucuaucuggcauggguaccagcacacaaaggaauuggaggaaaugaacaaguagauaaauuagucagugcuggaaucaggaaaguacuauuuuuagauggaauagauaaggcccaagaugaacaugagaaauaucacaguaauuggagagcaauggcuagugauuuuaaccugccaccuguaguagcaaaagaaauaguagccagcugugauaaaugucagcuaaaaggagaagccaugcauggacaaguagacuguaguccaggaauauggcaacuagauuguacacauuuagaaggaaaaguuauccugguagcaguucauguagccaguggauauauagaagcagaaguuauuccagcagaaacagggcaggaaacagcauauuuucuuuuaaaauuagcaggaagauggccaguaaaaacaauacauacugacaauggcagcaauuucaccggugcuacgguuagggccgccuguuggugggcgggaaucaagcaggaauuuggaauucccuacaauccccaaagucaaggaguaguagaaucuaugaauaaagaauuaaagaaaauuauaggacagguaagagaucaggcugaacaucuuaagacagcaguacaaauggcaguauucauccacaauuuuaaaagaaaaggggggauugggggguacagugcaggggaaagaauaguagacauaauagcaacagacauacaaacuaaagaauuacaaaaacaaauuacaaaaauucaaaauuuucggguuuauuacagggacagcagaaauccacuuuggaaaggaccagcaaagcuccucuggaaaggugaaggggcaguaguaauacaagauaauagugacauaaaaguagugccaagaagaaaagcaaagaucauuagggauuauggaaaacagauggcaggugaugauuguguggcaaguagacaggaugaggauuagaacauggaaaaguuuaguaaaacaccauauguauguuucagggaaagcuaggggaugguuuuauagacaucacuaugaaagcccucauccaagaauaaguucagaaguacacaucccacuaggggaugcuagauugguaauaacaacauauuggggucugcauacaggagaaagagacuggcauuugggucagggagucuccauagaauggaggaaaaagagauauagcacacaaguagacccugaacuagcagaccaacuaauucaucuguauuacuuugacuguuuuucagacucugcuauaagaaaggccuuauuaggacacauaguuagcccuaggugugaauaucaagcaggacauaacaagguaggaucucuacaauacuuggcacuagcagcauuaauaacaccaaaaaagauaaagccaccuuugccuaguguuacgaaacugacagaggauagauggaacaagccccagaagaccaagggccacagagggagccacacaaugaauggacacuagagcuuuuagaggagcuuaagaaugaagcuguuagacauuuuccuaggauuuggcuccauggcuuagggcaacauaucuaugaaacuuauggggauacuugggcaggaguggaagccauaauaagaauucugcaacaacugcuguuuauccauuuucagaauugggugucgacauagcagaauaggcguuacucgacagaggagagcaagaaauggagccaguagauccuagacuagagcccuggaagcauccaggaagucagccuaaaacugcuuguaccaauugcuauuguaaaaaguguugcuuucauugccaaguuuguuucauaacaaaagccuuaggcaucuccuauggcaggaagaagcggagacagcgacgaagagcucaucagaacagucagacucaucaagcuucucuaucaaagcaguaaguaguacauguaacgcaaccuauaccaauaguagcaauaguagcauuaguaguagcaauaauaauagcaauaguugugugguccauaguaaucauagaauauaggaaaauauuaagacaaagaaaaauagacagguuaauugauagacuaauagaaagagcagaagacaguggcaaugagagugaaggagaaauaucagcacuuguggagauggggguggagauggggcaccaugcuccuugggauguugaugaucuguagugcuacagaaaaauugugggucacagucuauuaugggguaccuguguggaaggaagcaaccaccacucuauuuugugcaucagaugcuaaagcauaugauacagagguacauaauguuugggccacacaugccuguguacccacagaccccaacccacaagaaguaguauugguaaaugugacagaaaauuuuaacauguggaaaaaugacaugguagaacagaugcaugaggauauaaucaguuuaugggaucaaagccuaaagccauguguaaaauuaaccccacucuguguuaguuuaaagugcacugauuugaagaaugauacuaauaccaauaguaguagcgggagaaugauaauggagaaaggagagauaaaaaacugcucuuucaauaucagcacaagcauaagagguaaggugcagaaagaauaugcauuuuuuuauaaacuugauauaauaccaauagauaaugauacuaccagcuauaaguugacaaguuguaacaccucagucauuacacaggccuguccaaagguauccuuugagccaauucccauacauuauugugccccggcugguuuugcgauucuaaaauguaauaauaagacguucaauggaacaggaccauguacaaaugucagcacaguacaauguacacauggaauuaggccaguaguaucaacucaacugcuguuaaauggcagucuagcagaagaagagguaguaauuagaucugucaauuucacggacaaugcuaaaaccauaauaguacagcugaacacaucu
guagaaauuaauuguacaagacccaacaacaauacaagaaaaagaauccguauccagagaggaccagggagagcauuuguuacaauaggaaaaauaggaaauaugagacaagcacauuguaacauuaguagagcaaaauggaauaacacuuuaaaacagauagcuagcaaauuaagagaacaauuuggaaauaauaaaacaauaaucuuuaagcaauccucaggaggggacccagaaauuguaacgcacaguuuuaauuguggaggggaauuuuucuacuguaauucaacacaacuguuuaauaguacuugguuuaauaguacuuggaguacugaagggucaaauaacacugaaggaagugacacaaucacccucccaugcagaauaaaacaaauuauaaacauguggcagaaaguaggaaaagcaauguaugccccucccaucaguggacaaauuagauguucaucaaauauuacagggcugcuauuaacaagagauggugguaauagcaacaaugaguccgagaucuucagaccuggaggaggagauaugagggacaauuggagaagugaauuauauaaauauaaaguaguaaaaauugaaccauuaggaguagcacccaccaaggcaaagagaagaguggugcagagagaaaaaagagcagugggaauaggagcuuuguuccuuggguucuugggagcagcaggaagcacuaugggcgcagccucaaugacgcugacgguacaggccagacaauuauugucugguauagugcagcagcagaacaauuugcugagggcuauugaggcgcaacagcaucuguugcaacucacagucuggggcaucaagcagcuccaggcaagaauccuggcuguggaaagauaccuaaaggaucaacagcuccuggggauuugggguugcucuggaaaacucauuugcaccacugcugugccuuggaaugcuaguuggaguaauaaaucucuggaacagauuuggaaucacacgaccuggauggagugggacagagaaauuaacaauuacacaagcuuaauacacuccuuaauugaagaaucgcaaaaccagcaagaaaagaaugaacaagaauuauuggaauuagauaaaugggcaaguuuguggaauugguuuaacauaacaaauuggcugugguauauaaaauuauucauaaugauaguaggaggcuugguagguuuaagaauaguuuuugcuguacuuucuauagugaauagaguuaggcagggauauucaccauuaucguuucagacccaccucccaaccccgaggggacccgacaggcccgaaggaauagaagaagaagguggagagagagacagagacagauccauucgauuagugaacggauccuuggcacuuaucugggacgaucugcggagccugugccucuucagcuaccaccgcuugagagacuuacucuugauuguaacgaggauuguggaacuucugggacgcagggggugggaagcccucaaauauugguggaaucuccuacaguauuggagucaggaacuaaagaauagugcuguuagcuugcucaaugccacagccauagcaguagcugaggggacagauaggguuauagaaguaguacaaggagcuuguagagcuauucgccacauaccuagaagaauaagacagggcuuggaaaggauuuugcuauaagauggguggcaaguggucaaaaaguagugugauuggauggccuacuguaagggaaagaaugagacgagcugagccagcagcagauagggugggagcagcaucucgagaccuggaaaaacauggagcaaucacaaguagcaauacagcagcuaccaaugcugcuugugccuggcuagaagcacaagaggaggaggagguggguuuuccagucacaccucagguaccuuuaagaccaaugacuuacaaggcagcuguagaucuuagccacuuuuuaaaagaaaaggggggacuggaagggcuaauucacucccaaagaagacaagauauccuugaucuguggaucuaccacacacaaggcuacuucccugauuagcagaacuacacaccagggccaggggucagauauccacugaccuuuggauggugcuacaagcuaguaccaguugagccagauaagauagaagaggccaauaaaggagagaacaccagcuuguuacacccugugagccugcaugggauggaugacccggagagagaaguguuagaguggagguuugacagccgccuagcauuucaucacguggcccgagagcugcauccggaguacuucaagaacugcugacaucgagcuugcuacaagggacuuuccgcuggggacuuuccagggaggcguggccugggcgggacuggggaguggcgagcccucagauccugcauauaagcagcugcuuuuugccuguacugggucucucugguuagaccagaucugagccugggagcucucuggcuaacuagggaacccacugcuuaagccucaauaaagcuugccuugagugcuucaaguagugugugcccgucuguugugugacucugguaacuagagaucccucagacccuuuuagucaguguggaaaaucucuagca'
#translate hiv DNA to RNA
hiv_genome = hiv_genome.rep('u', t)
# isolate the nef gene (start:8797, end:9417)
nef_gene = hiv_genome[8797]
# the nef gene as a fasta file using the header 'nef type 1 (HXB2)'
fasta_header = '>nef type 1 (HXB2)'
print(fasta_heade, nef_gene)
#caculate and report the GC content of the nef gene
nef_gc_content = (nef_gene.count('c') + nef_gene.count('g')) / len(nef_gene)
print("The GC content of the nef gene is: ", nef_gc_content * 100, "%")
###Output
_____no_output_____
###Markdown
Introducing lists Now that we have played a bit with strings, it's time to introduce the next variable type. So far, we have worked with several types of variables and data including: * integers * floats * strings The next data type is a list. Lists are just what you would expect, a collection. Lists have a few special properties we'll need to understand; lists are: * ordered * indexed * iterable Let's explore these properties by creating our own list, which in Python is done using the ``[]`` brackets.
###Code
my_list = []
###Output
_____no_output_____
###Markdown
Perhaps it seems nothing much has happened, but you should be able to verify that Python thinks that ``my_list`` is a list; please try:
###Code
type(my_list)
###Output
_____no_output_____
###Markdown
So far, we have created ``[]`` - the empty list, and assigned it the name ``my_list``. We can start adding things to ``my_list`` using the ``.append`` method. For example:
###Code
my_list =[]
# We can add a string
my_list.append('gag')
print(my_list)
# We can add another string
my_list.append('pol')
print(my_list)
# We can yet add another string - please add the string 'env'
# We can also declare lists by naming all its members
my_other_list = ['DNA',
'mRNA',
'Protein',]
print(my_other_list)
###Output
_____no_output_____
###Markdown
A list maintains the order of every element of that list. Lists are indexed (starting at 0) in a way that is similar to strings. |Index|List Element||:----|:-----------||0|'gag'||1|'pol'||2|'env'|
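For example, we can pull individual elements back out of ``my_list`` by index (a quick illustration before the exercise below; it assumes ``my_list`` still holds the gene names appended above):
###Code
# Indexing starts at 0, and negative indices count back from the end of the list
print(my_list[0])
print(my_list[-1])
###Output
_____no_output_____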
###Code
# Print the list of these HIV genes in order given the list below
# The correct order is
# gag, pol, vif, vpr, vpu, env, nef
hiv_gene_names = ['env',
'gag',
'vif',
'pol',
'vpr',
'vpu',
'nef']
###Output
_____no_output_____
###Markdown
Iteration and 'for' loops This topic is important enough to get its own section! Not only are we going to talk about iteration, but we are going to introduce a very important concept in computing - a loop. In a loop, we are able to get the computer to repeat a set of instructions without us having to write out every command. This is at the heart of what makes computers useful - being able to carry out repetitive tasks without our input. Let's look at our first for loop; to start we will use a list of nucleic acids:
###Code
nucleic_acids = ['adenine',
'thymine',
'cytosine',
'guanine',
'uracil']
print(nucleic_acids)
###Output
_____no_output_____
###Markdown
If we wanted to, we could print the items in this list one by one using several print statements
###Code
print(nucleic_acids[0])
print(nucleic_acids[1])
print(nucleic_acids[2])
print(nucleic_acids[3])
print(nucleic_acids[4])
#Alternatively, we can do this using a for loop:
for nucleotide in nucleic_acids:
print(nucleotide)
###Output
_____no_output_____
###Markdown
A for loop has the following structure: for temporary_variable in iterable : (indent)instruction[temporary_variable] Let's break this down a bit... * ``for`` - a for loop must start with a for statement * ``temporary_variable`` - the next character(s) right after the ``for`` are actually the name of a special variable. This variable is a placeholder for the objects that will come next in the loop. * ``in`` - this ``in`` must be included and tells Python what iterable it should execute the for loop on * ``iterable:`` - the iterable is any ordered collection (such as a string or a list). A ``:`` must come after the iterable. * (indent) - the next line of a for loop must always be indented. The best practice is to use 4 spaces (not the tab key) * ``instruction`` - these are the instructions you want Python to execute. If your instructions make use of the variable (they don't have to) you will use ``temporary_variable`` (whatever you have named it)
###Code
# Try the following with for loops
nucleic_acids = ['adenine',
'thymine',
'cytosine',
'guanine',
'uracil']
# Write a for loop that prints the names of the nucleotides
# Write a for loop that prints 'nucleotide!' for each of the nucleotides
# Write a for loop that prints each nucleotide name and its one-letter abbreviation
###Output
_____no_output_____
###Markdown
Conditionals One of the key functionalities in computing is the ability to make comparisons and choices. In Python, we have several ways to use this. In each case, the answer to a conditional statement is a simple binary result: True or False. Run the following cells and also make some changes to see that you understand how Python is evaluating the statement. Evaluate 1 > 0 + 1 ? How about 99 >= 99 ? What about 0 <= 1 ? And try 1 == 1 The conditionals above all use the comparison operators; a more complete list is as follows:|Operator|Description||-------|:----------||``==``|Comparison - True if both operands are equal||``!=``|Not equal - True if both operands are not equal||``<>``|Not equal - True if both operands are not equal||``>``|Greater than - True if left operand is greater than right||``<``|Less than - True if left operand is less than right||``>=``|Greater than or equal to - True if left operand is greater than or equal to right||``<=``|Less than or equal to - True if left operand is less than or equal to right| Random number and conditionals - Heads or Tails Now, let's combine randomness with our conditional operators to make a simple simulation: flipping a coin. Python has a [Module](https://docs.python.org/2/tutorial/modules.html) called [NumPy](http://www.numpy.org/). NumPy contains a number of useful functions including the ability to generate 'random' numbers. Generating a truly random number is a [science in itself](https://www.random.org/randomness/), but the NumPy ``random`` module will be sufficient for our purpose. See how we use this function in the next cell:
###Code
# Using the from xxx import xxx statement, we tell Python we want to use a package that
# is not part of the default set of Python packages
# NumPy happens to be installed already for us, otherwise we would have to download it
from numpy import random
# We create a variable and then use the . notation to get the random number
# in this case, we are requesting a random int that is between 1 and 10
my_random_int = random.randint(1,10)
print('My random int is %d' % my_random_int)
# rerun this cell a few times to see that you get only number 1-9
###Output
_____no_output_____
###Markdown
Print formatting Notice a new feature in the print statement. We haven't used it before, but this string formatting feature allows us to print a variable inside a string: just put ``%d`` in the string where you want an integer to appear, then after closing the string, put a ``%`` sign followed by the variable name. You can also generate floats:
###Code
# returns a float between 0.0 and 1.0)
my_random_float = random.ranf()
print('My random float is %f' % my_random_float)
# You can also control precision of the float
print('My random float is %0.3f to 3 digits' % my_random_float)
print('My random float is %0.9f to 9 digits' % my_random_float)
print('My random float is %0.30f to 30 digits' % my_random_float)
# You can do this multiple times in the same string
print('My random float is %0.3f or %0.9f' % (my_random_float, my_random_float))
###Output
_____no_output_____
###Markdown
if else statements We are now ready to combine the conditions and random number generator to do our first simulation. To do so we will need to make an if else statement:
###Code
if 1 == 1:
print('1 is equal to 1')
###Output
_____no_output_____
###Markdown
The if statement uses the following pattern: if conditional_to_evaluate: (Indent) instruction * ``if`` - if statements begin with an if * ``conditional_to_evaluate`` - this is some conditional statement that Python will evaluate as ``True`` or ``False``. This statement will be followed by a ``:`` * (indent) - the next line of an if statement must always be indented. The best practice is to use 4 spaces (not the tab key) * ``instruction`` - these are the instructions you want Python to execute. The instructions will only be executed if the conditional statement is ``True`` Write a few conditional statements and see what happens when the statement is ``True`` or ``False``
###Code
if 1 == 2:
print('one is now equal to two')
else:
print('one is NOT equal to two')
###Output
_____no_output_____
###Markdown
Remembering that indenting is important, try writing a few if else statements yourself: As powerful as if/else statements can be, we sometimes wish to let Python explore several contingencies. We do this using ``elif`` (else if), which allows us to use another if statement if the preceding if statement is ``False``. Complete the next two cells to see an example:
###Code
# What day is today, enter this as a string below
today =
# Things to do
if today == 'Monday':
print('Walk the dog')
elif today == 'Tuesday':
print('Pick up the laundry')
elif today == 'Wednesday':
print('Go shopping')
elif today == 'Thursday':
print('Call mom')
elif today == 'Friday':
print('Plan for the weekend')
else:
print('It must be the weekend, nothing to do')
###Output
_____no_output_____
###Markdown
To recap: the above if/else statement covered several explicit contingencies (if the day of the week was Monday-Friday) as well as a final contingency if none of the above were ``True`` (the final else statement). Write a statement below using the if/elif/else chain of conditionals. Remember to pay attention to indenting. Putting it all together Using what you have learned so far, write some code to simulate flipping a coin.
###Code
# Use the random number function of NumPy to generate a float
# Use conditionals so that if the float is greater than or equal to 0.5 consider that 'Heads' otherwise 'Tails'
###Output
_____no_output_____
###Markdown
Simulating mutation of the HIV genome Mutations are (at least in part) a random process that drives the change of a genome. Viruses in particular use this to their advantage. Mutations in viruses can allow them to evade their hosts' immune responses, confer drug resistance, or even lead to the acquisition of new functions. According to [Abram et al. 2010](http://www.uv.es/rsanjuan/Abram%20JVirol%2010.pdf) the mutation rate for the HIV-1 genome is about 4.4E-05 or 0.000044 mutations per single cell infection cycle. The most common mutation types are single nucleotide polymorphisms [SNPs](https://en.wikipedia.org/wiki/Single-nucleotide_polymorphism). In our toy simulation we will use Python to simulate the following: * flip a coin weighted to the probability of an HIV-1 mutation (genome size * mutation rate) * choose a random nucleotide in the HIV-1 genome to mutate (using the .randint() method) * flip a weighted coin to choose what type of mutation it should be (using the following information, and assuming the genome size is 9181 nucleotides) Here are some code examples that will help
###Code
# unfair coin
from numpy import random
# Coins have two sides (states) - heads or tails; use these as a list
coin_state = ['Heads','Tails']
# A fair coin would have a 50/50 chance of being heads or tails. Represent these probabilities as
# floats which sum to 1.0
fair_coin_probabilities = [0.5,0.5]
#flip the fair coin using numpy's random.choice method
fair_flip = random.choice(coin_state,p = fair_coin_probabilities)
#print the result
print("My fair coin is %s" %fair_flip)
# An unfair coin could be weighted like this
unfair_coin_probabilities = [0.1,0.9]
# Therefore...
unfair_flip = random.choice(coin_state,p = unfair_coin_probabilities)
print("My unfair coin is %s" %unfair_flip)
###Output
_____no_output_____
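###Markdown
Before writing the simulation, it helps to work out the weight for the coin. Below is a worked example using the numbers given in the text above (the variable names are just suggestions): the weight for the mutation state is the genome size times the mutation rate.
###Code
# Worked example using the values stated above
mutation_rate = 4.4e-5     # mutations per nucleotide per single cell infection cycle
genome_size = 9181         # nucleotides in the HIV-1 genome
p_mutation = genome_size * mutation_rate
print('Weight for the mutation state: %0.3f' % p_mutation)
print('Weight for the no-mutation state: %0.3f' % (1 - p_mutation))
###Output
_____no_output_____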
###Markdown
1. Write a simulation which determines if in one round of replication HIV will mutate or not
###Code
# Set the states (mutation,no_mutation)
# Set the probabilities for each state (hint: they must sum to 1)
# flip the coin (make the choice)
###Output
_____no_output_____
###Markdown
2. Determine how often HIV would mutate in 20 rounds of replication We will use a for loop to repeat the coin flip 20 times. We can use a special function ``range()`` to tell Python how many times to execute the for loop. Use the following coin flipping example to improve your HIV simulation.
###Code
from numpy import random
coin_state = ['Heads','Tails']
fair_coin_probabilities = [0.5,0.5]
for flip in range(1,21):
fair_flip = random.choice(coin_state,p = fair_coin_probabilities)
print(fair_flip)
###Output
_____no_output_____
###Markdown
You can take this even further by saving the result as a list:
###Code
from numpy import random
coin_state = ['Heads','Tails']
fair_coin_probabilities = [0.5,0.5]
# tip: notice how the list is created before the for loop. If you declared
# flip_results = [] in the for loop, it would be reset 20 times
flip_results = []
for flip in range(1,21):
fair_flip = random.choice(coin_state,p = fair_coin_probabilities)
flip_results.append(fair_flip)
###Output
_____no_output_____
###Markdown
Don't forget you can print the result to see the list:
###Code
print(flip_results)
###Output
_____no_output_____
###Markdown
3. If HIV is in the mutation state, determine which nucleotide to mutate Let's use our coin to determine if we should walk the dog on Monday or Tuesday:
###Code
from numpy import random
coin_state = ['Heads','Tails']
fair_coin_probabilities = [0.5,0.5]
flip_results = []
for flip in range(1,21):
fair_flip = random.choice(coin_state,p = fair_coin_probabilities)
flip_results.append(fair_flip)
# Tip - pay attention to the indenting in this for loop that contains an if/else statement
for result in flip_results:
if result == 'Heads':
print("Walk the dog Monday")
elif result == 'Tails':
print("Walk the dog Tuesday")
###Output
_____no_output_____
###Markdown
Besides using the print instruction, you can also place the results into a new list based on the conditional outcome:
###Code
from numpy import random
coin_state = ['Heads','Tails']
fair_coin_probabilities = [0.5,0.5]
flip_results = []
# Initialize some new lists for my conditional outcomes
monday_results = []
tuesday_results = []
for flip in range(1,21):
fair_flip = random.choice(coin_state,p = fair_coin_probabilities)
flip_results.append(fair_flip)
for result in flip_results:
if result == 'Heads':
monday_results.append("Walk the dog Monday")
elif result == 'Tails':
tuesday_results.append("Walk the dog Tuesday")
# We can print how many times we had each type of result stored in our lists
print("My coin said to walk the dog Monday %d times" % len(monday_results))
print("My coin said to walk the dog Tuesday %d times" % len(tuesday_results))
###Output
_____no_output_____
###Markdown
Use the above examples, and your knowledge of how to slice strings, to: * determine which nucleotide in the HIV-1 genome to mutate * flip a coin weighted to the probabilities of mutation given in the 'Class 1: single nt substitution' chart above (in that chart, the number of observed mutations of the nucleotide on the y-axis changing to the one on the x-axis is shown) * use the ``replace()`` function to mutate your HIV-1 genome ** Bonus *** determine and report in which gene your mutations arise (ignore genes less than 200nt) * determine and report if the mutation in any particular gene introduces a stop codon in reading frame one * determine and report if the mutation in any particular gene introduces a stop codon in the actual reading frame of that gene A little more on HIV viral replication
###Code
from IPython.display import HTML
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/RO8MP3wMvqg" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>')
# Other nice animations here: https://www.wehi.edu.au/wehi-tv
###Output
_____no_output_____ |
Template/Template.ipynb | ###Markdown
Title IntroductionYou should introduce the main topics in this tutorial here, along with [links to Topic 1](Topic-1) to the corresponding headings. Topic-1 Background KnowledgePresent the background knowledge here. You should use _italic_ for _proper nouns_ and **bold** for words to **emphasize**. You should use `code` or ```codes``` to include some code syntax demos. Example-1First, give a brief introduction of the example, then show the code in the following code block. For all code presented, you **MUST** follow _PEP-8_. You can enable _autopep8_ in Jupyter's extensions.
###Code
# Example-1
print("Hello world!")
###Output
Hello world!
###Markdown
For all code blocks, you must also execute and show the results. Code blocks may or may not be correlated, but code blocks for different examples **MUST** be unrelated, i.e. you should copy the initialization part even if they share the same initialization. When users run all code blocks **sequentially**, they **MUST** be able to reproduce the results. Example-2Keep every example short and simple. If you have a long one, try to **split** it into several unit examples instead of one long one. ExerciseYou should use the extension _exercise2_ to produce an exercise. For each exercise, state the objective first and then show the sample code. You may follow this animation to add an exercise. For the solution, use a block to describe it in plain language first, then show the code. You are strongly encouraged to include [links](background-knowledge) back to the relevant background knowledge.
###Code
# Sample Solution Here
print("This is the sample solution")
###Output
This is the sample solution
|
04/.ipynb_checkpoints/topic4_linear_models_part5_valid_learning_curves-checkpoint.ipynb | ###Markdown
Open Machine Learning CourseAuthor of the material: Yury Kashnitsky, research programmer at Mail.ru Group and senior lecturer at the Faculty of Computer Science, HSE. The material is distributed under the terms of the [Creative Commons CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/) license. It may be used for any purpose (edit it, correct it, or use it as a basis) except commercial ones, with mandatory attribution of the author. Topic 4. Linear models for classification and regression Part 5. Validation and learning curves
###Code
from __future__ import division, print_function
# suppress all Anaconda warnings
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV, SGDClassifier
from sklearn.model_selection import validation_curve
###Output
_____no_output_____
###Markdown
We have already gotten a feel for model validation, cross-validation and regularization. Now let's consider the main question:**If the quality of the model does not satisfy us, what should we do?**- Make the model more complex, or simplify it?- Add more features?- Or do we simply need more training data?The answers to these questions do not always lie on the surface. In particular, sometimes a more complex model will lead to worse performance, and sometimes adding new observations will not bring noticeable changes. The ability to make the right decision and choose the right way to improve the model is, in fact, what distinguishes a good specialist from a bad one. We will work with the familiar data on customer churn for a telecom operator.
###Code
data = pd.read_csv('../../data/telecom_churn.csv').drop('State', axis=1)
data['International plan'] = data['International plan'].map({'Yes': 1, 'No': 0})
data['Voice mail plan'] = data['Voice mail plan'].map({'Yes': 1, 'No': 0})
y = data['Churn'].astype('int').values
X = data.drop('Churn', axis=1).values
###Output
_____no_output_____
###Markdown
**We will train logistic regression with stochastic gradient descent. For now, let's just say that it is faster this way; later in the course there is a separate article about this topic.**
###Code
alphas = np.logspace(-2, 0, 20)
sgd_logit = SGDClassifier(loss='log', n_jobs=-1, random_state=17)
logit_pipe = Pipeline([('scaler', StandardScaler()), ('poly', PolynomialFeatures(degree=2)),
('sgd_logit', sgd_logit)])
val_train, val_test = validation_curve(logit_pipe, X, y,
'sgd_logit__alpha', alphas, cv=5,
scoring='roc_auc')
###Output
_____no_output_____
###Markdown
**Let's plot validation curves showing how the quality metric (ROC AUC) on the training and validation sets changes with the regularization parameter.**
###Code
def plot_with_err(x, data, **kwargs):
mu, std = data.mean(1), data.std(1)
lines = plt.plot(x, mu, '-', **kwargs)
plt.fill_between(x, mu - std, mu + std, edgecolor='none',
facecolor=lines[0].get_color(), alpha=0.2)
plot_with_err(alphas, val_train, label='training scores')
plot_with_err(alphas, val_test, label='validation scores')
plt.xlabel(r'$\alpha$'); plt.ylabel('ROC AUC')
plt.legend();
###Output
_____no_output_____
###Markdown
The trend is visible right away, and it is very common.1. For simple models, the training and validation errors are close to each other, and both are large. This indicates that the model is **underfitted**: it does not have a sufficient number of parameters.2. For highly complex models, the training and validation errors differ significantly. This can be explained by **overfitting**: when there are too many parameters, or when regularization is insufficient, the algorithm can be "distracted" by the noise in the data and lose track of the main trend. How much data is needed?It is well known that the more data a model uses, the better. But how do we understand, in a specific situation, whether new data will help? Say, is it worth spending \$ N on assessors' labor to double the dataset?Since new data may not be available yet, a reasonable approach is to vary the size of the training set we already have and see how the quality of the solution depends on the amount of data the model was trained on. This is how we obtain **learning curves**.The idea is simple: we plot the error as a function of the number of examples used for training, while the model parameters are fixed in advance.
###Code
from sklearn.model_selection import learning_curve
def plot_learning_curve(degree=2, alpha=0.01):
train_sizes = np.linspace(0.05, 1, 20)
logit_pipe = Pipeline([('scaler', StandardScaler()), ('poly', PolynomialFeatures(degree=degree)),
('sgd_logit', SGDClassifier(n_jobs=-1, random_state=17, alpha=alpha))])
N_train, val_train, val_test = learning_curve(logit_pipe,
X, y, train_sizes=train_sizes, cv=5,
scoring='roc_auc')
plot_with_err(N_train, val_train, label='training scores')
plot_with_err(N_train, val_test, label='validation scores')
plt.xlabel('Training Set Size'); plt.ylabel('AUC')
plt.legend()
###Output
_____no_output_____
###Markdown
Let's see what we get for a linear model. We will set the regularization coefficient to a large value.
###Code
plot_learning_curve(degree=2, alpha=10)
###Output
_____no_output_____
###Markdown
A typical situation: for a small amount of data, the errors on the training set and during cross-validation differ quite a lot, which indicates overfitting. For the same model but with a large amount of data, the errors "converge", which indicates underfitting.If we add more data, the error on the training set will not grow, but on the other hand, the error on the test data will not decrease either. So the errors have "converged", and adding new data will not help. This is actually the most interesting case for business. It is possible that we increase the dataset tenfold, but if we do not change the complexity of the model, this may not help. In other words, the strategy "tune once, then use ten times" may not work. What happens if we change the regularization coefficient?We see a good trend: the curves gradually converge, and if we move further to the right (add more data to the model), we can improve the quality on validation even further.
###Code
plot_learning_curve(degree=2, alpha=0.05)
###Output
_____no_output_____
###Markdown
And what if we make the model even more complex?Overfitting shows up: AUC drops both on the training set and on validation.
###Code
plot_learning_curve(degree=2, alpha=1e-4)
###Output
_____no_output_____ |
NBA Clustering.ipynb | ###Markdown
Number of clusters - 3
###Code
k = 3
clusters = KMeans(k,random_state = 42)
clusters.fit(scaled_df)
data['cluster_id'] = clusters.labels_
data
data.info()
position_map_reverse = {0:'Forward',1:'Guard',2:'Center'}
data['Position'] = data['Position'].map(position_map_reverse)
data[data['cluster_id']==0]
data[data['cluster_id']==1]
data[data['cluster_id']==2]
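# (Sketch, not in the original notebook) one way to interpret the three clusters is to
# compare their average statistics; this assumes `data` keeps its numeric feature columns.
data.groupby('cluster_id').mean(numeric_only=True)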
###Output
_____no_output_____ |
notebooks/nih_read_scans.ipynb | ###Markdown
Loading NIH metadata Raw Metadata
###Code
path = '/mnt/storage/data/nih-chest-xrays/'
raw_data = pd.read_csv(os.path.join(path, 'Data_Entry_2017.csv'))
raw_data.head()
###Output
_____no_output_____
###Markdown
Extract paths and labels
###Code
data = raw_data[['Image Index', 'Finding Labels']].copy()
data.columns = ['image', 'label']
data.image = os.path.join(path, 'images/') + data.image
data = data.sample(frac=1)
data.head()
exists = data.image.map(os.path.exists)
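# (Sketch) `exists` is not used below; one might, for example, keep only rows whose image file
# is actually present on disk. Left commented out so the original flow is unchanged:
# data = data[exists].reset_index(drop=True)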
###Output
_____no_output_____
###Markdown
Produce a binary matrix of labels
###Code
encoded_labels = data.label.str.get_dummies(sep='|').sort_index(axis=1)
encoded_labels.head()
###Output
_____no_output_____
###Markdown
Read data from Dataset Load individual items directly from metadata
###Code
dataset = tf.data.Dataset.from_tensor_slices({
'index': data.index,
'path': data['image'].values,
'label': encoded_labels.values.astype(np.float32)
})
iterator = dataset.make_one_shot_iterator()
item = iterator.get_next()
sess.run(item)
###Output
_____no_output_____
###Markdown
Read and decode the corresponding image files
###Code
def read_file(item):
#item['path'] = tf.Print(item['path'], [item['path']], 'path: ')
item['image'] = tf.read_file(item['path'])
return item
def decode_image(item):
decoded = tf.image.decode_image(item['image'])
item['image'] = tf.image.convert_image_dtype(decoded, tf.float32)
# All images are B&W, but some seem to have the channel replicated,
# to avoid issues we simply select the first channel
item['image'] = tf.expand_dims(item['image'][:, :, 0], axis=-1)
item['image'].set_shape([None, None, 1])
return item
dataset = dataset.map(
lambda item: decode_image(read_file(item)), num_parallel_calls=32)
iterator = dataset.make_one_shot_iterator()
item = iterator.get_next()
sample = sess.run(item)
plt.figure(figsize=(10, 10))
plt.imshow(sample['image'][..., 0])
plt.title(sample['path'].decode() + ': '+ data.label[sample['index']])
plt.show()
###Output
_____no_output_____
###Markdown
How quickly can we run through the data?
###Code
batch_size = 12
dataset = dataset.shuffle(100)
dataset = dataset.batch(batch_size)
dataset = dataset.repeat() # repeat indefinitely (reshuffled each time)
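# (Sketch) if throughput matters, one could also overlap data preparation with consumption
# by prefetching batches; left commented out so the original pipeline is unchanged:
# dataset = dataset.prefetch(1)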
iterator = dataset.make_one_shot_iterator()
batch = iterator.get_next()
for _ in tqdm.trange(len(data) // batch_size, unit='batch', smoothing=1):
sess.run(batch)
###Output
11%|█ | 1039/9343 [00:43<05:45, 24.06batch/s] |
wandb/run-20210517_223148-2vo4hjvi/tmp/code/main.ipynb | ###Markdown
WorkFlow Classes Load the data Test Modelling Modelling **** Classes
###Code
BATCH_SIZE = 250
import os
import cv2
import torch
import numpy as np
def load_data(img_size=112):
data = []
index = -1
labels = {}
for directory in os.listdir('./data/'):
index += 1
labels[f'./data/{directory}/'] = [index,-1]
print(len(labels))
for label in labels:
for file in os.listdir(label):
filepath = label + file
img = cv2.imread(filepath,cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img,(img_size,img_size))
img = img / 255.0
data.append([
np.array(img),
labels[label][0]
])
labels[label][1] += 1
for _ in range(12):
np.random.shuffle(data)
print(len(data))
np.save('./data.npy',data)
return data
import torch
def other_loading_data_proccess(data):
X = []
y = []
print('going through the data..')
for d in data:
X.append(d[0])
y.append(d[1])
print('splitting the data')
VAL_SPLIT = 0.25
VAL_SPLIT = len(X)*VAL_SPLIT
VAL_SPLIT = int(VAL_SPLIT)
X_train = X[:-VAL_SPLIT]
y_train = y[:-VAL_SPLIT]
X_test = X[-VAL_SPLIT:]
y_test = y[-VAL_SPLIT:]
print('turning data to tensors')
X_train = torch.from_numpy(np.array(X_train))
y_train = torch.from_numpy(np.array(y_train))
X_test = torch.from_numpy(np.array(X_test))
y_test = torch.from_numpy(np.array(y_test))
return [X_train,X_test,y_train,y_test]
###Output
_____no_output_____
###Markdown
**** Load the data
###Code
REBUILD_DATA = True
if REBUILD_DATA:
data = load_data()
np.random.shuffle(data)
X_train,X_test,y_train,y_test = other_loading_data_proccess(data)
###Output
36
2515
###Markdown
Test Modelling
###Code
import torch
import torch.nn as nn
import torch.nn.functional as F
class Test_Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 25 * 25, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 36)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 25 * 25)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
device = torch.device('cuda')
model = Test_Model().to(device)
# preds = model(X_test.reshape(-1,1,112,112).float())
# preds[0]
optimizer = torch.optim.SGD(model.parameters(),lr=0.1)
criterion = nn.CrossEntropyLoss()
BATCH_SIZE = 250
EPOCHS = 5
loss_logs = []
from tqdm import tqdm
PROJECT_NAME = "Sign-Language-Recognition"
def test(net,X,y):
correct = 0
total = 0
net.eval()
with torch.no_grad():
for i in range(len(X)):
real_class = torch.argmax(y[i]).to(device)
net_out = net(X[i].view(-1,1,112,112).to(device).float())
net_out = net_out[0]
predictied_class = torch.argmax(net_out)
if predictied_class == real_class:
correct += 1
total += 1
return round(correct/total,3)
import wandb
len(os.listdir('./data/'))
import random
index = random.randint(0,29)
print(index)
wandb.init(project=PROJECT_NAME,name='test')
for _ in tqdm(range(EPOCHS)):
for i in range(0,len(X_train),BATCH_SIZE):
X_batch = X_train[i:i+BATCH_SIZE].view(-1,1,112,112).to(device)
y_batch = y_train[i:i+BATCH_SIZE].to(device)
        model.train()  # test() puts the network into eval mode, so switch back to training mode here
        model.to(device)
        preds = model(X_batch.float())
        loss = criterion(preds, y_batch.long())  # labels are already class-index tensors
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_logs.append(loss.item())  # keep a history of the loss for the plot further down
    wandb.log({'loss':loss.item(),'accuracy':test(model,X_train,y_train)*100,'val_accuracy':test(model,X_test,y_test)*100,'pred':torch.argmax(preds[index]),'real':y_batch[index]})
wandb.finish()
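# (Optional sketch, not in the original notebook) persist the trained weights for later reuse;
# the file name is an assumption.
torch.save(model.state_dict(), './sign_language_cnn.pt')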
import matplotlib.pyplot as plt
import pandas as pd
df = pd.Series(loss_logs)
df.plot.line(figsize=(12,6))
test(model,X_test,y_test)
test(model,X_train,y_train)
preds
correct = 0
total = 0
model.eval()
with torch.no_grad():
for i in range(len(X_test)):
real_class = torch.argmax(y_test[i]).to(device)
net_out = model(X_test[i].view(-1,1,112,112).to(device).float())
net_out = net_out[0]
predictied_class = torch.argmax(net_out)
# print(predictied_class)
if str(predictied_class) == str(real_class):
correct += 1
total += 1
print(round(correct/total,3))
total
for real,pred in zip(y_batch,preds):
print(real)
print(torch.argmax(pred))
print('\n')
###Output
tensor(15, device='cuda:0')
tensor(33, device='cuda:0')
tensor(30, device='cuda:0')
tensor(30, device='cuda:0')
tensor(28, device='cuda:0')
tensor(33, device='cuda:0')
tensor(17, device='cuda:0')
tensor(23, device='cuda:0')
tensor(33, device='cuda:0')
tensor(33, device='cuda:0')
tensor(21, device='cuda:0')
tensor(23, device='cuda:0')
tensor(26, device='cuda:0')
tensor(33, device='cuda:0')
tensor(15, device='cuda:0')
tensor(33, device='cuda:0')
tensor(5, device='cuda:0')
tensor(23, device='cuda:0')
tensor(3, device='cuda:0')
tensor(33, device='cuda:0')
tensor(11, device='cuda:0')
tensor(11, device='cuda:0')
tensor(27, device='cuda:0')
tensor(30, device='cuda:0')
tensor(25, device='cuda:0')
tensor(23, device='cuda:0')
tensor(4, device='cuda:0')
tensor(4, device='cuda:0')
tensor(12, device='cuda:0')
tensor(30, device='cuda:0')
tensor(18, device='cuda:0')
tensor(18, device='cuda:0')
tensor(5, device='cuda:0')
tensor(23, device='cuda:0')
tensor(29, device='cuda:0')
tensor(33, device='cuda:0')
tensor(1, device='cuda:0')
tensor(33, device='cuda:0')
tensor(3, device='cuda:0')
tensor(33, device='cuda:0')
tensor(0, device='cuda:0')
tensor(26, device='cuda:0')
tensor(21, device='cuda:0')
tensor(11, device='cuda:0')
tensor(29, device='cuda:0')
tensor(33, device='cuda:0')
tensor(21, device='cuda:0')
tensor(30, device='cuda:0')
tensor(9, device='cuda:0')
tensor(23, device='cuda:0')
tensor(24, device='cuda:0')
tensor(32, device='cuda:0')
tensor(16, device='cuda:0')
tensor(23, device='cuda:0')
tensor(11, device='cuda:0')
tensor(11, device='cuda:0')
tensor(7, device='cuda:0')
tensor(23, device='cuda:0')
tensor(11, device='cuda:0')
tensor(11, device='cuda:0')
tensor(30, device='cuda:0')
tensor(30, device='cuda:0')
tensor(3, device='cuda:0')
tensor(33, device='cuda:0')
tensor(8, device='cuda:0')
tensor(23, device='cuda:0')
tensor(27, device='cuda:0')
tensor(23, device='cuda:0')
tensor(3, device='cuda:0')
tensor(33, device='cuda:0')
tensor(0, device='cuda:0')
tensor(23, device='cuda:0')
tensor(1, device='cuda:0')
tensor(33, device='cuda:0')
tensor(5, device='cuda:0')
tensor(23, device='cuda:0')
tensor(25, device='cuda:0')
tensor(23, device='cuda:0')
tensor(15, device='cuda:0')
tensor(33, device='cuda:0')
tensor(19, device='cuda:0')
tensor(30, device='cuda:0')
tensor(4, device='cuda:0')
tensor(4, device='cuda:0')
tensor(30, device='cuda:0')
tensor(30, device='cuda:0')
tensor(1, device='cuda:0')
tensor(30, device='cuda:0')
tensor(1, device='cuda:0')
tensor(33, device='cuda:0')
tensor(25, device='cuda:0')
tensor(23, device='cuda:0')
tensor(0, device='cuda:0')
tensor(23, device='cuda:0')
tensor(34, device='cuda:0')
tensor(32, device='cuda:0')
tensor(31, device='cuda:0')
tensor(33, device='cuda:0')
tensor(20, device='cuda:0')
tensor(33, device='cuda:0')
tensor(29, device='cuda:0')
tensor(33, device='cuda:0')
tensor(30, device='cuda:0')
tensor(30, device='cuda:0')
tensor(10, device='cuda:0')
tensor(23, device='cuda:0')
tensor(32, device='cuda:0')
tensor(4, device='cuda:0')
tensor(34, device='cuda:0')
tensor(33, device='cuda:0')
tensor(24, device='cuda:0')
tensor(4, device='cuda:0')
tensor(6, device='cuda:0')
tensor(23, device='cuda:0')
tensor(26, device='cuda:0')
tensor(33, device='cuda:0')
tensor(19, device='cuda:0')
tensor(23, device='cuda:0')
tensor(20, device='cuda:0')
tensor(33, device='cuda:0')
tensor(24, device='cuda:0')
tensor(4, device='cuda:0')
tensor(16, device='cuda:0')
tensor(23, device='cuda:0')
tensor(0, device='cuda:0')
tensor(18, device='cuda:0')
tensor(22, device='cuda:0')
tensor(23, device='cuda:0')
tensor(9, device='cuda:0')
tensor(23, device='cuda:0')
tensor(2, device='cuda:0')
tensor(33, device='cuda:0')
tensor(16, device='cuda:0')
tensor(16, device='cuda:0')
tensor(4, device='cuda:0')
tensor(4, device='cuda:0')
tensor(35, device='cuda:0')
tensor(23, device='cuda:0')
tensor(23, device='cuda:0')
tensor(23, device='cuda:0')
tensor(8, device='cuda:0')
tensor(23, device='cuda:0')
tensor(5, device='cuda:0')
tensor(23, device='cuda:0')
tensor(31, device='cuda:0')
tensor(33, device='cuda:0')
tensor(31, device='cuda:0')
tensor(33, device='cuda:0')
tensor(25, device='cuda:0')
tensor(23, device='cuda:0')
tensor(11, device='cuda:0')
tensor(11, device='cuda:0')
tensor(31, device='cuda:0')
tensor(33, device='cuda:0')
tensor(29, device='cuda:0')
tensor(33, device='cuda:0')
tensor(9, device='cuda:0')
tensor(23, device='cuda:0')
tensor(10, device='cuda:0')
tensor(23, device='cuda:0')
tensor(12, device='cuda:0')
tensor(23, device='cuda:0')
tensor(12, device='cuda:0')
tensor(30, device='cuda:0')
tensor(29, device='cuda:0')
tensor(33, device='cuda:0')
tensor(4, device='cuda:0')
tensor(4, device='cuda:0')
tensor(32, device='cuda:0')
tensor(32, device='cuda:0')
tensor(29, device='cuda:0')
tensor(33, device='cuda:0')
tensor(28, device='cuda:0')
tensor(33, device='cuda:0')
tensor(2, device='cuda:0')
tensor(33, device='cuda:0')
tensor(1, device='cuda:0')
tensor(1, device='cuda:0')
tensor(25, device='cuda:0')
tensor(23, device='cuda:0')
tensor(30, device='cuda:0')
tensor(30, device='cuda:0')
tensor(3, device='cuda:0')
tensor(33, device='cuda:0')
tensor(29, device='cuda:0')
tensor(33, device='cuda:0')
tensor(27, device='cuda:0')
tensor(30, device='cuda:0')
tensor(13, device='cuda:0')
tensor(23, device='cuda:0')
tensor(13, device='cuda:0')
tensor(23, device='cuda:0')
tensor(2, device='cuda:0')
tensor(33, device='cuda:0')
tensor(6, device='cuda:0')
tensor(23, device='cuda:0')
tensor(14, device='cuda:0')
tensor(23, device='cuda:0')
tensor(34, device='cuda:0')
tensor(33, device='cuda:0')
tensor(12, device='cuda:0')
tensor(23, device='cuda:0')
tensor(2, device='cuda:0')
tensor(26, device='cuda:0')
tensor(23, device='cuda:0')
tensor(23, device='cuda:0')
tensor(13, device='cuda:0')
tensor(23, device='cuda:0')
tensor(13, device='cuda:0')
tensor(23, device='cuda:0')
tensor(12, device='cuda:0')
tensor(23, device='cuda:0')
tensor(20, device='cuda:0')
tensor(33, device='cuda:0')
tensor(3, device='cuda:0')
tensor(33, device='cuda:0')
tensor(24, device='cuda:0')
tensor(32, device='cuda:0')
tensor(30, device='cuda:0')
tensor(30, device='cuda:0')
tensor(10, device='cuda:0')
tensor(23, device='cuda:0')
tensor(6, device='cuda:0')
tensor(23, device='cuda:0')
tensor(5, device='cuda:0')
tensor(23, device='cuda:0')
tensor(12, device='cuda:0')
tensor(23, device='cuda:0')
tensor(6, device='cuda:0')
tensor(23, device='cuda:0')
tensor(6, device='cuda:0')
tensor(23, device='cuda:0')
tensor(28, device='cuda:0')
tensor(33, device='cuda:0')
tensor(8, device='cuda:0')
tensor(23, device='cuda:0')
tensor(18, device='cuda:0')
tensor(18, device='cuda:0')
tensor(5, device='cuda:0')
tensor(23, device='cuda:0')
tensor(9, device='cuda:0')
tensor(23, device='cuda:0')
tensor(35, device='cuda:0')
tensor(23, device='cuda:0')
tensor(4, device='cuda:0')
tensor(4, device='cuda:0')
tensor(34, device='cuda:0')
tensor(11, device='cuda:0')
tensor(19, device='cuda:0')
tensor(23, device='cuda:0')
tensor(17, device='cuda:0')
tensor(23, device='cuda:0')
tensor(11, device='cuda:0')
tensor(23, device='cuda:0')
tensor(18, device='cuda:0')
tensor(18, device='cuda:0')
tensor(26, device='cuda:0')
tensor(33, device='cuda:0')
tensor(2, device='cuda:0')
tensor(33, device='cuda:0')
tensor(19, device='cuda:0')
tensor(23, device='cuda:0')
tensor(27, device='cuda:0')
tensor(4, device='cuda:0')
tensor(27, device='cuda:0')
tensor(30, device='cuda:0')
tensor(18, device='cuda:0')
tensor(18, device='cuda:0')
tensor(25, device='cuda:0')
tensor(23, device='cuda:0')
tensor(21, device='cuda:0')
tensor(11, device='cuda:0')
tensor(24, device='cuda:0')
tensor(4, device='cuda:0')
|
html/html_df_loaded.ipynb | ###Markdown
###Code
%%capture
!wget https://people.sc.fsu.edu/~jburkardt/data/csv/biostats.csv -O biostats.csv
import pandas as pd
df = pd.read_csv('biostats.csv')
from IPython.core.display import HTML
df_html = df.to_html()
HTML(df_html)
###Output
_____no_output_____ |
submodules/resource/d2l-zh/mxnet/chapter_preliminaries/ndarray.ipynb | ###Markdown
Data Manipulation:label:`sec_ndarray`In order to perform all kinds of operations on data, we need some way to store and manipulate it. Generally, there are two important things we need to do: (1) acquire data; and (2) process it once it is inside the computer. There is no point in acquiring data without some way to store it. First, we introduce the $n$-dimensional array, also called a *tensor*. Readers who have used the NumPy computing package in Python will find this part familiar. No matter which deep learning framework you use, its *tensor class* (`ndarray` in MXNet, `Tensor` in PyTorch and TensorFlow) resembles NumPy's `ndarray`. However, deep learning frameworks offer some important functionality beyond NumPy's `ndarray`: first, GPU acceleration of computation is well supported, whereas NumPy only supports CPU computation; second, the tensor class supports automatic differentiation. These properties make the tensor class better suited to deep learning. Unless otherwise specified, the tensors mentioned in this book refer to instances of the tensor class. Getting StartedThe goal of this section is to help readers get up and running with some of the basic numerical computing tools that will be used throughout the book. Do not worry if you struggle with some of the mathematical concepts or library functions; later chapters will revisit this material in the context of practical examples. If you already have relevant experience and want to go deeper into the mathematical content, you can skip this section. To begin, we import the `np` (`numpy`) module and the `npx` (`numpy_extension`) module from MXNet. The `np` module includes functions supported by NumPy, while the `npx` module contains a set of extension functions developed to enable deep learning within a NumPy-like environment. When using tensors, we almost always invoke the `set_np` function: this is for compatibility with MXNet's other tensor processing components.
###Code
from mxnet import np, npx
npx.set_np()
###Output
_____no_output_____
###Markdown
[**A tensor represents a (possibly multi-dimensional) array of numerical values.**] A tensor with one axis corresponds to a mathematical *vector*; a tensor with two axes corresponds to a mathematical *matrix*; tensors with more than two axes have no special mathematical name. To start, we can use `arange` to create a row vector `x` containing the first 12 integers starting with 0; they are created as floats by default. Each of the values in a tensor is called an *element* of the tensor. For instance, there are 12 elements in the tensor `x`. Unless otherwise specified, a new tensor will be stored in main memory and designated for CPU-based computation.
###Code
x = np.arange(12)
x
###Output
_____no_output_____
###Markdown
[**We can access a tensor's *shape* (the length along each axis) via its `shape` attribute**](~~and the total number of elements in the tensor~~).
###Code
x.shape
###Output
_____no_output_____
###Markdown
If we just want to know the total number of elements in a tensor, i.e., the product of all of the shape elements, we can inspect its size. Because we are dealing with a vector here, its `shape` is identical to its `size`.
###Code
x.size
###Output
_____no_output_____
###Markdown
[**To change the shape of a tensor without altering either the number of elements or their values, we can invoke the `reshape` function.**] For example, we can transform our tensor `x` from a row vector with shape (12,) into a matrix with shape (3, 4). This new tensor contains the exact same values, but views them as a matrix organized as 3 rows and 4 columns. To reiterate, although the shape has changed, the element values have not. Note that the size of the tensor is unaltered by reshaping.
###Code
X = x.reshape(3, 4)
X
###Output
_____no_output_____
###Markdown
We do not need to change the shape by manually specifying every dimension. That is, if our target shape is (height, width), then once the width is known, the height can be computed automatically; we do not have to do the division ourselves. In the example above, to get a matrix with 3 rows, we specified both that it should have 3 rows and 4 columns. Fortunately, we can invoke this automatic inference of a dimension by placing `-1` for the dimension that should be inferred: we could have used `x.reshape(-1, 4)` or `x.reshape(3, -1)` in place of `x.reshape(3, 4)`. Sometimes, we want to initialize matrices [**with all zeros, all ones, some other constant, or numbers randomly sampled from a specific distribution**]. We can create a tensor of shape (2, 3, 4) with all elements set to 0, as follows:
###Code
np.zeros((2, 3, 4))
###Output
_____no_output_____
###Markdown
Similarly, we can create a tensor of shape `(2,3,4)` with all elements set to 1, as follows:
###Code
np.ones((2, 3, 4))
###Output
_____no_output_____
###Markdown
Sometimes we want to obtain the value of each element in a tensor by randomly sampling from some specific probability distribution. For example, when we construct arrays to serve as parameters in a neural network, we typically initialize their values randomly. The following code creates a tensor of shape (3, 4), where each element is randomly sampled from a standard Gaussian (normal) distribution with mean 0 and standard deviation 1.
###Code
np.random.normal(0, 1, size=(3, 4))
###Output
_____no_output_____
###Markdown
We can also [**specify the exact values for each element in the desired tensor by supplying a Python list (or nested lists) containing the numerical values**]. Here, the outermost list corresponds to axis 0 and the inner lists correspond to axis 1.
###Code
np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
###Output
_____no_output_____
###Markdown
OperatorsOur interest is not limited to reading and writing data. We also want to perform mathematical operations on that data, and among the simplest and most useful of these are the *elementwise* operations. They apply a standard scalar operator to each element of an array. For functions that take two arrays as inputs, elementwise operations apply a binary operator to each pair of corresponding elements from the two arrays. We can create an elementwise function from any function that maps from a scalar to a scalar. In mathematical notation, we denote a *unary* scalar operator (taking one input) by the signature $f: \mathbb{R} \rightarrow \mathbb{R}$, which means that the function maps any real number ($\mathbb{R}$) onto another real number. Likewise, we denote a *binary* scalar operator by the signature $f: \mathbb{R}, \mathbb{R} \rightarrow \mathbb{R}$, meaning that the function takes two inputs and produces one output. Given any two vectors $\mathbf{u}$ and $\mathbf{v}$ of the same shape, and a binary operator $f$, we can produce a vector $\mathbf{c} = F(\mathbf{u},\mathbf{v})$ by setting $c_i \gets f(u_i, v_i)$, where $c_i$, $u_i$, and $v_i$ are the elements of the vectors $\mathbf{c}$, $\mathbf{u}$, and $\mathbf{v}$, respectively. Here, we produce the vector-valued $F: \mathbb{R}^d, \mathbb{R}^d \rightarrow \mathbb{R}^d$ by *lifting* the scalar function to an elementwise vector operation. For any tensors of the same shape, [**the common standard arithmetic operators (`+`, `-`, `*`, `/`, and `**`) have all been lifted to elementwise operations**]. We can call elementwise operations on any two tensors of the same shape. In the following example, we use a comma to form a 5-element tuple, where each element is the result of an elementwise operation.
###Code
x = np.array([1, 2, 4, 8])
y = np.array([2, 2, 2, 2])
x + y, x - y, x * y, x / y, x ** y  # the ** operator performs exponentiation
###Output
_____no_output_____
###Markdown
(**Many more operations can be applied elementwise**), including unary operators like exponentiation.
###Code
np.exp(x)
###Output
_____no_output_____
###Markdown
In addition to elementwise computations, we can also perform linear algebra operations, including vector dot products and matrix multiplication. We will explain the crucial bits of linear algebra in :numref:`sec_linear-algebra`. [**We can also *concatenate* multiple tensors together**], stacking them end-to-end to form a larger tensor. We just need to provide a list of tensors and tell the system along which axis to concatenate. The example below shows what happens when we concatenate two matrices along rows (axis 0, the first element of the shape) versus along columns (axis 1, the second element of the shape). We can see that the first output tensor's axis-0 length ($6$) is the sum of the two input tensors' axis-0 lengths ($3 + 3$), while the second output tensor's axis-1 length ($8$) is the sum of the two input tensors' axis-1 lengths ($4 + 4$).
###Code
X = np.arange(12).reshape(3, 4)
Y = np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
np.concatenate([X, Y], axis=0), np.concatenate([X, Y], axis=1)
###Output
_____no_output_____
###Markdown
Sometimes, we want to [**construct a binary tensor via *logical operators***]. Take `X == Y` as an example: for each position, if `X` and `Y` are equal at that position, the corresponding entry in the new tensor takes a value of 1, meaning that the logical statement `X == Y` is true at that position; otherwise, that position takes a value of 0.
###Code
X == Y
###Output
_____no_output_____
###Markdown
[**Summing all of the elements in a tensor yields a tensor with only one element.**]
###Code
X.sum()
###Output
_____no_output_____
###Markdown
Broadcasting Mechanism:label:`subsec_broadcasting`In the section above, we saw how to perform elementwise operations on two tensors of the same shape. Under certain conditions, [**even when the shapes differ, we can still perform elementwise operations by invoking the *broadcasting mechanism***]. This mechanism works as follows: first, expand one or both arrays by copying elements appropriately so that, after this transformation, the two tensors have the same shape; second, carry out the elementwise operations on the resulting arrays. In most cases, we broadcast along an axis where an array has length 1, as in the following example:
###Code
a = np.arange(3).reshape(3, 1)
b = np.arange(2).reshape(1, 2)
a, b
###Output
_____no_output_____
###Markdown
Since `a` and `b` are $3\times1$ and $1\times2$ matrices respectively, their shapes do not match if we want to add them. We *broadcast* both matrices into a larger $3\times2$ matrix as follows: matrix `a` replicates its columns and matrix `b` replicates its rows, and then they are added elementwise.
###Code
a + b
###Output
_____no_output_____
###Markdown
Indexing and SlicingJust as in any other Python array, elements in a tensor can be accessed by index. As in any Python array, the first element has index 0 and the last element has index -1; ranges can be specified to include the first element and everything before the last. As shown below, [**we can select the last element with `[-1]`, and the second and third elements with `[1:3]`**]:
###Code
X[-1], X[1:3]
###Output
_____no_output_____
###Markdown
[**Beyond reading, we can also write elements of a matrix by specifying indices.**]
###Code
X[1, 2] = 9
X
###Output
_____no_output_____
###Markdown
If we want to [**assign the same value to multiple elements, we simply index all of them and then assign them the value.**] For instance, `[0:2, :]` accesses the first and second rows, where ":" takes all of the elements along axis 1 (the columns). Although we discussed indexing for matrices, this also works for vectors and for tensors with more than 2 dimensions.
###Code
X[0:2, :] = 12
X
###Output
_____no_output_____
###Markdown
Saving Memory[**Running some operations may cause new memory to be allocated for the results**]. For example, if we write `Y = X + Y`, we dereference the tensor that `Y` used to point to and instead point `Y` at the newly allocated memory. In the following example, we demonstrate this with Python's `id()` function, which gives us the exact address of the referenced object in memory. After running `Y = Y + X`, we find that `id(Y)` points to a different location. That is because Python first evaluates `Y + X`, allocating new memory for the result, and then makes `Y` point to this new location in memory.
###Code
before = id(Y)
Y = Y + X
id(Y) == before
###Output
_____no_output_____
###Markdown
This might be undesirable for two reasons. First, we do not want to allocate memory unnecessarily all the time. In machine learning, we might have hundreds of megabytes of parameters and update all of them multiple times per second; typically, we want to perform these updates in place. Second, if we do not update in place, other references will still point to the old memory location, and some of our code might inadvertently reference the old parameters. Fortunately, (**performing in-place operations**) is easy. We can assign the result of an operation to a previously allocated array using slice notation, e.g., `Y[:] = <expression>`. To illustrate this, we first create a new matrix `Z` with the same shape as another matrix `Y`, using `zeros_like` to allocate a block of all $0$ entries.
###Code
Z = np.zeros_like(Y)
print('id(Z):', id(Z))
Z[:] = X + Y
print('id(Z):', id(Z))
###Output
id(Z): 139652460377600
id(Z): 139652460377600
###Markdown
[**If the value of `X` is not reused in subsequent computations, we can also use `X[:] = X + Y` or `X += Y` to reduce the memory overhead of the operation.**]
###Code
before = id(X)
X += Y
id(X) == before
###Output
_____no_output_____
###Markdown
Conversion to Other Python Objects Converting a tensor defined in a deep learning framework [**to a NumPy tensor (`ndarray`)**] is easy, and vice versa. The converted result does not share memory. This minor inconvenience is actually quite important: when you perform operations on the CPU or on GPUs, you do not want to halt computation waiting to see whether Python's NumPy package might want to do something else with the same chunk of memory.
###Code
A = X.asnumpy()
B = np.array(A)
type(A), type(B)
###Output
_____no_output_____
###Markdown
To (**convert a size-1 tensor to a Python scalar**), we can invoke the `item` function or Python's built-in functions.
###Code
a = np.array([3.5])
a, a.item(), float(a), int(a)
###Output
_____no_output_____ |
src/ml/supervised/Linear_Regression.ipynb | ###Markdown
Regression in Python***This is a very quick run-through of some basic statistical concepts, adapted from [Lab 4 in Harvard's CS109](https://github.com/cs109/2015lab4) course. Please feel free to try the original lab if you're feeling ambitious :-) The CS109 git repository also has the solutions if you're stuck.* Linear Regression Models* Prediction using linear regressionLinear regression is used to model and predict continuous outcomes with normal random errors. There are nearly an infinite number of different types of regression models and each regression model is typically defined by the distribution of the prediction errors (called "residuals") of the type of data. Logistic regression is used to model binary outcomes whereas Poisson regression is used to predict counts. In this exercise, we'll see some examples of linear regression as well as Train-test splits.The packages we'll cover are: `statsmodels`, `seaborn`, and `scikit-learn`. While we don't explicitly teach `statsmodels` and `seaborn` in the Springboard workshop, those are great libraries to know.*** ***
###Code
# special IPython command to prepare the notebook for matplotlib and other libraries
%matplotlib inline
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import sklearn
import seaborn as sns
# special matplotlib argument for improved plots
from matplotlib import rcParams
sns.set_style("whitegrid")
sns.set_context("poster")
###Output
_____no_output_____
###Markdown
*** Part 1: Introduction to Linear Regression Purpose of linear regression*** Given a dataset containing predictor variables $X$ and outcome/response variable $Y$, linear regression can be used to: Build a predictive model to predict future values of $\hat{Y}$, using new data $X^*$ where $Y$ is unknown. Model the strength of the relationship between each independent variable $X_i$ and $Y$ Many times, only a subset of independent variables $X_i$ will have a linear relationship with $Y$ Need to figure out which $X_i$ contributes most information to predict $Y$ It is, in many cases, the first-pass prediction algorithm for continuous outcomes. A Brief Mathematical Recap***[Linear Regression](http://en.wikipedia.org/wiki/Linear_regression) is a method to model the relationship between a set of independent variables $X$ (also known as explanatory variables, features, predictors) and a dependent variable $Y$. This method assumes the relationship between each predictor $X$ is **linearly** related to the dependent variable $Y$. The most basic linear regression model contains one independent variable $X$; we'll call this the simple model. $$ Y = \beta_0 + \beta_1 X + \epsilon$$where $\epsilon$ is considered as an unobservable random variable that adds noise to the linear relationship. In linear regression, $\epsilon$ is assumed to be normally distributed with a mean of 0. In other words, what this means is that on average, if we know $Y$, a roughly equal number of predictions $\hat{Y}$ will be above $Y$ and others will be below $Y$. That is, on average, the error is zero. The residuals, $\epsilon$, are also assumed to be "i.i.d.": independently and identically distributed. Independence means that the residuals are not correlated -- the residual from one prediction has no effect on the residual from another prediction. Correlated errors are common in time series analysis and spatial analyses.* $\beta_0$ is the intercept of the linear model and represents the average of $Y$ when all independent variables $X$ are set to 0.* $\beta_1$ is the slope of the line associated with the regression model and represents the average effect of a one-unit increase in $X$ on $Y$.* Back to the simple model. The model in linear regression is the *conditional mean* of $Y$ given the values in $X$, expressed as a linear function. $$ y = f(x) = E(Y | X = x)$$ *Image from http://www.learner.org/courses/againstallodds/about/glossary.html. Note this image uses $\alpha$ and $\beta$ instead of $\beta_0$ and $\beta_1$.** The goal is to estimate the coefficients (e.g. $\beta_0$ and $\beta_1$). We represent the estimates of the coefficients with a "hat" on top of the letter. $$ \hat{\beta}_0, \hat{\beta}_1 $$* Once we estimate the coefficients $\hat{\beta}_0$ and $\hat{\beta}_1$, we can use these to predict new values of $Y$ given new data $X$.$$\hat{y} = \hat{\beta}_0 + \hat{\beta}_1 x_1$$* Multiple linear regression is when you have more than one independent variable and the estimation involves matrices * $X_1$, $X_2$, $X_3$, $\ldots$* How do you estimate the coefficients? 
* There are many ways to fit a linear regression model * The method called **least squares** is the most common methods * We will discuss least squares$$ Y = \beta_0 + \beta_1 X_1 + \ldots + \beta_p X_p + \epsilon$$ Estimating $\hat\beta$: Least squares***[Least squares](http://en.wikipedia.org/wiki/Least_squares) is a method that can estimate the coefficients of a linear model by minimizing the squared residuals: $$ \mathscr{L} = \sum_{i=1}^N \epsilon_i^2 = \sum_{i=1}^N \left( y_i - \hat{y}_i \right)^2 = \sum_{i=1}^N \left(y_i - \left(\beta_0 + \beta_1 x_i\right)\right)^2 $$where $N$ is the number of observations and $\epsilon$ represents a residual or error, ACTUAL - PREDICTED. Estimating the intercept $\hat{\beta_0}$ for the simple linear modelWe want to minimize the squared residuals and solve for $\hat{\beta_0}$ so we take the partial derivative of $\mathscr{L}$ with respect to $\hat{\beta_0}$ $\begin{align}\frac{\partial \mathscr{L}}{\partial \hat{\beta_0}} &= \frac{\partial}{\partial \hat{\beta_0}} \sum_{i=1}^N \epsilon^2 \\&= \frac{\partial}{\partial \hat{\beta_0}} \sum_{i=1}^N \left( y_i - \hat{y}_i \right)^2 \\&= \frac{\partial}{\partial \hat{\beta_0}} \sum_{i=1}^N \left( y_i - \left( \hat{\beta}_0 + \hat{\beta}_1 x_i \right) \right)^2 \\&= -2 \sum_{i=1}^N \left( y_i - \left( \hat{\beta}_0 + \hat{\beta}_1 x_i \right) \right) \hspace{25mm} \mbox{(by chain rule)} \\&= -2 \sum_{i=1}^N (y_i - \hat{\beta}_0 - \hat{\beta}_1 x_i) \\&= -2 \left[ \left( \sum_{i=1}^N y_i \right) - N \hat{\beta_0} - \hat{\beta}_1 \left( \sum_{i=1}^N x_i\right) \right] \\& 2 \left[ N \hat{\beta}_0 + \hat{\beta}_1 \sum_{i=1}^N x_i - \sum_{i=1}^N y_i \right] = 0 \hspace{20mm} \mbox{(Set equal to 0 and solve for $\hat{\beta}_0$)} \\& N \hat{\beta}_0 + \hat{\beta}_1 \sum_{i=1}^N x_i - \sum_{i=1}^N y_i = 0 \\& N \hat{\beta}_0 = \sum_{i=1}^N y_i - \hat{\beta}_1 \sum_{i=1}^N x_i \\& \hat{\beta}_0 = \frac{\sum_{i=1}^N y_i - \hat{\beta}_1 \sum_{i=1}^N x_i}{N} \\& \hat{\beta}_0 = \frac{\sum_{i=1}^N y_i}{N} - \hat{\beta}_1 \frac{\sum_{i=1}^N x_i}{N} \\& \boxed{\hat{\beta}_0 = \bar{y} - \hat{\beta}_1 \bar{x}}\end{align}$ Using this new information, we can compute the estimate for $\hat{\beta}_1$ by taking the partial derivative of $\mathscr{L}$ with respect to $\hat{\beta}_1$. 
$\begin{align}\frac{\partial \mathscr{L}}{\partial \hat{\beta_1}} &= \frac{\partial}{\partial \hat{\beta_1}} \sum_{i=1}^N \epsilon^2 \\&= \frac{\partial}{\partial \hat{\beta_1}} \sum_{i=1}^N \left( y_i - \hat{y}_i \right)^2 \\&= \frac{\partial}{\partial \hat{\beta_1}} \sum_{i=1}^N \left( y_i - \left( \hat{\beta}_0 + \hat{\beta}_1 x_i \right) \right)^2 \\&= 2 \sum_{i=1}^N \left( y_i - \left( \hat{\beta}_0 + \hat{\beta}_1 x_i \right) \right) \left( -x_i \right) \hspace{25mm}\mbox{(by chain rule)} \\&= -2 \sum_{i=1}^N x_i \left( y_i - \hat{\beta}_0 - \hat{\beta}_1 x_i \right) \\&= -2 \sum_{i=1}^N x_i (y_i - \hat{\beta}_0 x_i - \hat{\beta}_1 x_i^2) \\&= -2 \sum_{i=1}^N x_i (y_i - \left( \bar{y} - \hat{\beta}_1 \bar{x} \right) x_i - \hat{\beta}_1 x_i^2) \\&= -2 \sum_{i=1}^N (x_i y_i - \bar{y}x_i + \hat{\beta}_1\bar{x}x_i - \hat{\beta}_1 x_i^2) \\&= -2 \left[ \sum_{i=1}^N x_i y_i - \bar{y} \sum_{i=1}^N x_i + \hat{\beta}_1\bar{x}\sum_{i=1}^N x_i - \hat{\beta}_1 \sum_{i=1}^N x_i^2 \right] \\&= -2 \left[ \hat{\beta}_1 \left\{ \bar{x} \sum_{i=1}^N x_i - \sum_{i=1}^N x_i^2 \right\} + \left\{ \sum_{i=1}^N x_i y_i - \bar{y} \sum_{i=1}^N x_i \right\}\right] \\& 2 \left[ \hat{\beta}_1 \left\{ \sum_{i=1}^N x_i^2 - \bar{x} \sum_{i=1}^N x_i \right\} + \left\{ \bar{y} \sum_{i=1}^N x_i - \sum_{i=1}^N x_i y_i \right\} \right] = 0 \\& \hat{\beta}_1 = \frac{-\left( \bar{y} \sum_{i=1}^N x_i - \sum_{i=1}^N x_i y_i \right)}{\sum_{i=1}^N x_i^2 - \bar{x}\sum_{i=1}^N x_i} \\&= \frac{\sum_{i=1}^N x_i y_i - \bar{y} \sum_{i=1}^N x_i}{\sum_{i=1}^N x_i^2 - \bar{x} \sum_{i=1}^N x_i} \\& \boxed{\hat{\beta}_1 = \frac{\sum_{i=1}^N x_i y_i - \bar{x}\bar{y}n}{\sum_{i=1}^N x_i^2 - n \bar{x}^2}}\end{align}$ The solution can be written in compact matrix notation as$$\hat\beta = (X^T X)^{-1}X^T Y$$ We wanted to show you this in case you remember linear algebra, in order for this solution to exist we need $X^T X$ to be invertible. Of course this requires a few extra assumptions, $X$ must be full rank so that $X^T X$ is invertible, etc. Basically, $X^T X$ is full rank if all rows and columns are linearly independent. This has a loose relationship to variables and observations being independent respective. **This is important for us because this means that having redundant features in our regression models will lead to poorly fitting (and unstable) models.** We'll see an implementation of this in the extra linear regression example. *** Part 2: Exploratory Data Analysis for Linear RelationshipsThe [Boston Housing data set](https://archive.ics.uci.edu/ml/datasets/Housing) contains information about the housing values in suburbs of Boston. This dataset was originally taken from the StatLib library which is maintained at Carnegie Mellon University and is now available on the UCI Machine Learning Repository. Load the Boston Housing data set from `sklearn`***This data set is available in the [sklearn](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.htmlsklearn.datasets.load_boston) python module which is how we will access it today.
###Code
from sklearn.datasets import load_boston
import pandas as pd
boston = load_boston()
boston.keys()
boston.data.shape
# Print column names
print(boston.feature_names)
# Print description of Boston housing data set
print(boston.DESCR)
###Output
.. _boston_dataset:
Boston house prices dataset
---------------------------
**Data Set Characteristics:**
:Number of Instances: 506
:Number of Attributes: 13 numeric/categorical predictive. Median Value (attribute 14) is usually the target.
:Attribute Information (in order):
- CRIM per capita crime rate by town
- ZN proportion of residential land zoned for lots over 25,000 sq.ft.
- INDUS proportion of non-retail business acres per town
- CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- NOX nitric oxides concentration (parts per 10 million)
- RM average number of rooms per dwelling
- AGE proportion of owner-occupied units built prior to 1940
- DIS weighted distances to five Boston employment centres
- RAD index of accessibility to radial highways
- TAX full-value property-tax rate per $10,000
- PTRATIO pupil-teacher ratio by town
- B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
- LSTAT % lower status of the population
- MEDV Median value of owner-occupied homes in $1000's
:Missing Attribute Values: None
:Creator: Harrison, D. and Rubinfeld, D.L.
This is a copy of UCI ML housing dataset.
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/
This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.
The Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic
prices and the demand for clean air', J. Environ. Economics & Management,
vol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics
...', Wiley, 1980. N.B. Various transformations are used in the table on
pages 244-261 of the latter.
The Boston house-price data has been used in many machine learning papers that address regression
problems.
.. topic:: References
- Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.
- Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.
###Markdown
Now let's explore the data set itself.
###Code
bos = pd.DataFrame(boston.data)
bos.head()
###Output
_____no_output_____
###Markdown
There are no column names in the DataFrame. Let's add those.
###Code
bos.columns = boston.feature_names
bos.head()
###Output
_____no_output_____
###Markdown
Now we have a pandas DataFrame called `bos` containing all the data we want to use to predict Boston Housing prices. Let's create a variable called `PRICE` which will contain the prices. This information is contained in the `target` data.
###Code
print(boston.target.shape)
bos['PRICE'] = boston.target
bos.head()
###Output
_____no_output_____
###Markdown
EDA and Summary Statistics***Let's explore this data set. First we use `describe()` to get basic summary statistics for each of the columns.
###Code
bos.describe()
###Output
_____no_output_____
###Markdown
Scatterplots***Let's look at some scatter plots for three variables: 'CRIM' (per capita crime rate), 'RM' (number of rooms) and 'PTRATIO' (pupil-to-teacher ratio in schools).
###Code
plt.scatter(bos.CRIM, bos.PRICE)
plt.xlabel("Per capita crime rate by town (CRIM)")
plt.ylabel("Housing Price")
plt.title("Relationship between CRIM and Price")
###Output
_____no_output_____
###Markdown
Part 2 Checkup Exercise Set IExercise: What kind of relationship do you see? e.g. positive, negative? linear? non-linear? Is there anything else strange or interesting about the data? What about outliers?Exercise: Create scatter plots between *RM* and *PRICE*, and *PTRATIO* and *PRICE*. Label your axes appropriately using human readable labels. Tell a story about what you see.Exercise: What are some other numeric variables of interest? Why do you think they are interesting? Plot scatterplots with these variables and *PRICE* (house price) and tell a story about what you see. your turn: describe relationship- Price spans the entire y-axis when crime rate is at zero, which encompasses a lot of the data points plotted.- The plot shows a very slight negative linear relationship if any.- The upper points lining the left of the graph where crime rate is zero can be considered as outliers.
###Code
# your turn: scatter plot between *RM* and *PRICE*
plt.scatter(bos.RM, bos.PRICE)
plt.xlabel('Average number of rooms per dwelling')
plt.ylabel('Housing price')
plt.title('Relationship between RM and Price')
###Output
_____no_output_____
###Markdown
\* Strong positive linear relationship with a number of outliers at the top price
###Code
# your turn: scatter plot between *PTRATIO* and *PRICE*
plt.scatter(bos.PTRATIO, bos.PRICE)
plt.xlabel('Pupil-teacher ratio by town')
plt.ylabel('Housing price')
plt.title('Relationship between PTRATIO and Price')
###Output
_____no_output_____
###Markdown
\* Very weak negative linear relationship. High spread.
###Code
# your turn: create some other scatter plots
plt.scatter(bos.B, bos.PRICE)
plt.xlabel('Population of blacks in town')
plt.ylabel('Housing price')
plt.title('Relationship between B and Price')
###Output
_____no_output_____
###Markdown
\* Very weak correlation.
###Code
plt.scatter(bos.DIS, bos.PRICE)
plt.xlabel('weighted distances to five Boston employment centres')
plt.ylabel('Housing price')
plt.title('Relationship between DIS and Price')
###Output
_____no_output_____
###Markdown
\* Very weak correlation. A few smaller values in the region where x<2.5. Scatterplots using Seaborn***[Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) is a cool Python plotting library built on top of matplotlib. It provides convenient syntax and shortcuts for many common types of plots, along with better-looking defaults.We can also use [seaborn regplot](https://stanford.edu/~mwaskom/software/seaborn/tutorial/regression.htmlfunctions-to-draw-linear-regression-models) for the scatterplot above. This provides automatic linear regression fits (useful for data exploration later on). Here's one example below.
###Code
sns.regplot(y="PRICE", x="RM", data=bos, fit_reg = True)
###Output
_____no_output_____
###Markdown
Histograms***
###Code
plt.hist(np.log(bos.CRIM))
plt.title("CRIM")
plt.xlabel("Crime rate per capita")
plt.ylabel("Frequencey")
plt.show()
###Output
_____no_output_____
###Markdown
Part 2 Checkup Exercise Set IIExercise: In the above histogram, we took the logarithm of the crime rate per capita. Repeat this histogram without taking the log. What was the purpose of taking the log? What do we gain by making this transformation? What do you now notice about this variable that is not obvious without making the transformation?Exercise: Plot the histogram for *RM* and *PTRATIO* against each other, along with the two variables you picked in the previous section. We are looking for correlations in predictors here.
###Code
#your turn
plt.hist(bos.CRIM, range=(0,30))
plt.title("CRIM")
plt.xlabel("Crime rate per capita")
plt.ylabel("Frequencey")
plt.show()
###Output
_____no_output_____
###Markdown
\* Log helps reduce the skewness of the data, and in turn gives a more normalized distribution. Also helps show percentage change.
###Code
f, axes = plt.subplots(1, 4, figsize=(20,5))
axes[0].hist(bos.RM)
axes[0].set_title('RM')
axes[1].hist(bos.PTRATIO)
axes[1].set_title('PTRATIO')
axes[2].hist(bos.B)
axes[2].set_title('B')
axes[3].hist(bos.DIS)
axes[3].set_title('DIS')
###Output
_____no_output_____
###Markdown
\* Not looking to be much correlation between these variables. Only RM looks to be normally distributed. Part 3: Linear Regression with Boston Housing Data Example***Here, $Y$ = boston housing prices (called "target" data in python, and referred to as the dependent variable or response variable)and$X$ = all the other features (or independent variables, predictors or explanatory variables)which we will use to fit a linear regression model and predict Boston housing prices. We will use the least-squares method to estimate the coefficients. We'll use two ways of fitting a linear regression. We recommend the first but the second is also powerful in its features. Fitting Linear Regression using `statsmodels`***[Statsmodels](http://statsmodels.sourceforge.net/) is a great Python library for a lot of basic and inferential statistics. It also provides basic regression functions using an R-like syntax, so it's commonly used by statisticians. While we don't cover statsmodels officially in the Data Science Intensive workshop, it's a good library to have in your toolbox. Here's a quick example of what you could do with it. The version of least-squares we will use in statsmodels is called *ordinary least-squares (OLS)*. There are many other versions of least-squares such as [partial least squares (PLS)](https://en.wikipedia.org/wiki/Partial_least_squares_regression) and [weighted least squares (WLS)](https://en.wikipedia.org/wiki/Iteratively_reweighted_least_squares).
###Code
# Import regression modules
import statsmodels.api as sm
from statsmodels.formula.api import ols
# statsmodels works nicely with pandas dataframes
# The thing inside the "quotes" is called a formula, a bit on that below
m = ols('PRICE ~ RM',bos).fit()
print(m.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: PRICE R-squared: 0.484
Model: OLS Adj. R-squared: 0.483
Method: Least Squares F-statistic: 471.8
Date: Thu, 08 Aug 2019 Prob (F-statistic): 2.49e-74
Time: 23:18:44 Log-Likelihood: -1673.1
No. Observations: 506 AIC: 3350.
Df Residuals: 504 BIC: 3359.
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
Intercept -34.6706 2.650 -13.084 0.000 -39.877 -29.465
RM 9.1021 0.419 21.722 0.000 8.279 9.925
==============================================================================
Omnibus: 102.585 Durbin-Watson: 0.684
Prob(Omnibus): 0.000 Jarque-Bera (JB): 612.449
Skew: 0.726 Prob(JB): 1.02e-133
Kurtosis: 8.190 Cond. No. 58.4
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
###Markdown
Interpreting coefficientsThere is a ton of information in this output. But we'll concentrate on the coefficient table (middle table). We can interpret the `RM` coefficient (9.1021) by first noticing that the p-value (under `P>|t|`) is so small, basically zero. This means that the number of rooms, `RM`, is a statistically significant predictor of `PRICE`. The regression coefficient for `RM` of 9.1021 means that *on average, each additional room is associated with an increase of $\$9,100$ in house price net of the other variables*. The confidence interval gives us a range of plausible values for this average change, about ($\$8,279, \$9,925$), definitely not chump change. In general, the $\hat{\beta_i}, i > 0$ can be interpreted as the following: "A one unit increase in $x_i$ is associated with, on average, a $\hat{\beta_i}$ increase/decrease in $y$ net of all other variables."On the other hand, the interpretation for the intercept, $\hat{\beta}_0$ is the average of $y$ given that all of the independent variables $x_i$ are 0. `statsmodels` formulas***This formula notation will seem familiar to `R` users, but will take some getting used to for people coming from other languages or who are new to statistics.The formula gives instruction for a general structure for a regression call. For `statsmodels` (`ols` or `logit`) calls you need to have a Pandas dataframe with column names that you will add to your formula. In the below example you need a pandas data frame that includes the columns named (`Outcome`, `X1`,`X2`, ...), but you don't need to build a new dataframe for every regression. Use the same dataframe with all these things in it. The structure is very simple:`Outcome ~ X1`But of course we want to be able to handle more complex models, for example multiple regression is done like this:`Outcome ~ X1 + X2 + X3`In general, a formula for an OLS multiple linear regression is`Y ~ X1 + X2 + ... + Xp`This is the very basic structure but it should be enough to get you through the homework. Things can get much more complex. You can force statsmodels to treat variables as categorical with the `C()` function, call numpy functions to transform data such as `np.log` for extremely-skewed data, or fit a model without an intercept by including `- 1` in the formula. For a quick run-down of further uses see the `statsmodels` [help page](http://statsmodels.sourceforge.net/devel/example_formulas.html). The next cell shows a short, purely illustrative example of such a multiple-regression formula.
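###Code
# (Sketch) a multiple-regression formula, fit the same way as the single-predictor model above.
# The choice of RM, LSTAT and PTRATIO as predictors is purely illustrative.
m_multi = ols('PRICE ~ RM + LSTAT + PTRATIO', bos).fit()
print(m_multi.params)
###Output
_____no_output_____
###Markdown
Let's see how our model actually fit our data. We can see below that there is a ceiling effect; we should probably look into that. Also, for large values of $Y$ we get underpredictions: most predictions are below the 45-degree gridlines. Part 3 Checkup Exercise Set IExercise: Create a scatterplot between the predicted prices, available in `m.fittedvalues` (where `m` is the fitted model) and the original prices. How does the plot look? Do you notice anything interesting or weird in the plot? Comment on what you see.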
###Code
# your turn
plt.scatter(bos.PRICE, m.fittedvalues)
plt.xlabel('Original prices')
plt.ylabel('Fitted prices')
plt.title('Relationship between fitted and original prices')
###Output
_____no_output_____
###Markdown
\* Looks exactly like the scatterplot between RM and Price. Fitting Linear Regression using `sklearn`
###Code
from sklearn.linear_model import LinearRegression
X = bos.drop('PRICE', axis = 1)
# This creates a LinearRegression object
lm = LinearRegression()
###Output
_____no_output_____
###Markdown
What can you do with a LinearRegression object? ***Check out the scikit-learn [docs here](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html). We have listed the main functions here. Most machine learning models in scikit-learn follow this same API of fitting a model with `fit`, making predictions with `predict` and the appropriate scoring function `score` for each model. Main functions | Description--- | --- `lm.fit()` | Fit a linear model`lm.predict()` | Predict Y using the linear model with estimated coefficients`lm.score()` | Returns the coefficient of determination (R^2). *A measure of how well observed outcomes are replicated by the model, as the proportion of total variation of outcomes explained by the model* What output can you get?
###Code
# Look inside lm object
# lm.<tab>
###Output
_____no_output_____
###Markdown
Output | Description--- | --- `lm.coef_` | Estimated coefficients`lm.intercept_` | Estimated intercept Fit a linear model***The `lm.fit()` function estimates the coefficients of the linear regression using least squares.
###Code
# Use all 13 predictors to fit linear regression model
lm.fit(X, bos.PRICE)
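# (Sketch, not part of the original lab) sanity-check sklearn's fit against the closed-form
# least-squares solution from Part 1, beta_hat = (X^T X)^{-1} X^T y, adding an intercept column by hand.
X_design = np.column_stack([np.ones(len(X)), X])
beta_hat = np.linalg.solve(X_design.T @ X_design, X_design.T @ bos.PRICE.values)
print(np.allclose(beta_hat[0], lm.intercept_), np.allclose(beta_hat[1:], lm.coef_))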
###Output
_____no_output_____
###Markdown
Part 3 Checkup Exercise Set IIExercise: How would you change the model to not fit an intercept term? Would you recommend not having an intercept? Why or why not? For more information on why to include or exclude an intercept, look [here](https://stats.idre.ucla.edu/other/mult-pkg/faq/general/faq-what-is-regression-through-the-origin/).Exercise: One of the assumptions of the linear model is that the residuals must be i.i.d. (independently and identically distributed). To satisfy this, is it enough that the residuals are normally distributed? Explain your answer.Exercise: True or false. To use linear regression, $Y$ must be normally distributed. Explain your answer. 1. Standardize the variables so that they all have mean of 0. Whether to have an intercept or not depends on the situation, with rooms and price a positive intercept would make some sense since land is also worth money. 2. A normal distribution would indicate less heteroskedasticity.
###Code
plt.hist(bos.PRICE)
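# (Sketch) for the intercept question above, two equivalent ways one might drop the intercept:
#   scikit-learn:  LinearRegression(fit_intercept=False)
#   statsmodels:   ols('PRICE ~ RM - 1', bos).fit()   # "- 1" removes the intercept from the formula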
###Output
_____no_output_____
###Markdown
3. False, as long as the standard errors are normally distributed, or if the dependent variable is "conditionally" normal, and by the law of large numbers, a linear regression will fit. Estimated intercept and coefficientsLet's look at the estimated coefficients from the linear model using `1m.intercept_` and `lm.coef_`. After we have fit our linear regression model using the least squares method, we want to see what are the estimates of our coefficients $\beta_0$, $\beta_1$, ..., $\beta_{13}$: $$ \hat{\beta}_0, \hat{\beta}_1, \ldots, \hat{\beta}_{13} $$
###Code
print('Estimated intercept coefficient: {}'.format(lm.intercept_))
print('Number of coefficients: {}'.format(len(lm.coef_)))
# The coefficients
pd.DataFrame({'features': X.columns, 'estimatedCoefficients': lm.coef_})[['features', 'estimatedCoefficients']]
###Output
_____no_output_____
###Markdown
Predict Prices We can calculate the predicted prices ($\hat{Y}_i$) using `lm.predict`. $$ \hat{Y}_i = \hat{\beta}_0 + \hat{\beta}_1 X_1 + \ldots \hat{\beta}_{13} X_{13} $$
###Code
# first five predicted prices
lm.predict(X)[0:5]
###Output
_____no_output_____
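###Markdown
As a quick sanity check on the formula above, the output of `lm.predict` should match the explicit linear combination of the estimated coefficients. A minimal sketch, reusing `lm` and `X` from above:
###Code
import numpy as np

# Manual fitted values: X @ beta_hat + intercept
manual_pred = X.values.dot(lm.coef_) + lm.intercept_
print(np.allclose(lm.predict(X), manual_pred))  # should print True
###Output
_____no_output_____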
###Markdown
Part 3 Checkup Exercise Set IIIExercise: Histogram: Plot a histogram of all the predicted prices. Write a story about what you see. Describe the shape, center and spread of the distribution. Are there any outliers? What might be the reason for them? Should we do anything special with them?Exercise: Scatterplot: Let's plot the true prices compared to the predicted prices to see they disagree (we did this with `statsmodels` before).Exercise: We have looked at fitting a linear model in both `statsmodels` and `scikit-learn`. What are the advantages and disadvantages of each based on your exploration? Based on the information provided by both packages, what advantage does `statsmodels` provide?
###Code
# your turn
y_pred = lm.predict(X)
plt.hist(y_pred)
###Output
_____no_output_____
###Markdown
\* Roughly normal distribution centered at around 20-25. No extreme outliers among the predictions, likely because regression predictions are pulled toward the mean of the observed prices.
###Code
plt.scatter(bos.PRICE, y_pred)
plt.xlabel('Actual price')
plt.ylabel('Predicted price')
plt.title('Relationship between actual and predicted price')
plt.show()
###Output
_____no_output_____
###Markdown
\* Tighter spread with the full 13-predictor `sklearn` model, and fewer outliers.- The `statsmodels` version gives access to summary statistics, which is a great plus. Evaluating the Model: Sum-of-SquaresThe partitioning of the sum-of-squares shows the variance in the predictions explained by the model and the variance that is attributed to error.$$TSS = ESS + RSS$$ Residual Sum-of-Squares (aka $RSS$)The residual sum-of-squares is one of the basic ways of quantifying how much error exists in the fitted model. We will revisit this in a bit.$$ RSS = \sum_{i=1}^N r_i^2 = \sum_{i=1}^N \left(y_i - \left(\beta_0 + \beta_1 x_i\right)\right)^2 $$
###Code
print(np.sum((bos.PRICE - lm.predict(X)) ** 2))
###Output
11078.784577954977
###Markdown
Explained Sum-of-Squares (aka $ESS$)The explained sum-of-squares measures the variance explained by the regression model.$$ESS = \sum_{i=1}^N \left( \hat{y}_i - \bar{y} \right)^2 = \sum_{i=1}^N \left( \left( \hat{\beta}_0 + \hat{\beta}_1 x_i \right) - \bar{y} \right)^2$$
###Code
print(np.sum((lm.predict(X) - np.mean(bos.PRICE)) ** 2))
###Output
_____no_output_____
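###Markdown
As a quick check of the partition above, the total sum-of-squares $TSS = \sum_i (y_i - \bar{y})^2$ should equal $ESS + RSS$ (up to floating-point error) for an OLS fit with an intercept. A minimal sketch, reusing `lm`, `X`, and `bos` from above:
###Code
# Verify TSS = ESS + RSS for the fitted model
tss = np.sum((bos.PRICE - np.mean(bos.PRICE)) ** 2)
ess = np.sum((lm.predict(X) - np.mean(bos.PRICE)) ** 2)
rss = np.sum((bos.PRICE - lm.predict(X)) ** 2)
print(tss, ess + rss)  # the two numbers should agree
###Output
_____no_output_____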
###Markdown
Evaluating the Model: The Coefficient of Determination ($R^2$)The coefficient of determination, $R^2$, tells us the percentage of the variance in the response variable $Y$ that can be explained by the linear regression model.$$ R^2 = \frac{ESS}{TSS} $$The $R^2$ value is one of the most common metrics that people use in describing the quality of a model, but it is important to note that *$R^2$ increases artificially as a side-effect of increasing the number of independent variables.* While $R^2$ is reported in almost all statistical packages, another metric called the *adjusted $R^2$* is also provided as it takes into account the number of variables in the model, and can sometimes even be used for non-linear regression models!$$R_{adj}^2 = 1 - \left( 1 - R^2 \right) \frac{N - 1}{N - K - 1} = R^2 - \left( 1 - R^2 \right) \frac{K}{N - K - 1} = 1 - \frac{\frac{RSS}{DF_R}}{\frac{TSS}{DF_T}}$$where $N$ is the number of observations, $K$ is the number of variables, $DF_R = N - K - 1$ is the degrees of freedom associated with the residual error and $DF_T = N - 1$ is the degrees of freedom of the total error. Evaluating the Model: Mean Squared Error and the $F$-Statistic***The mean squared errors are just the *averages* of the sum-of-squares errors over their respective degrees of freedom.$$MSE = \frac{RSS}{N-K-1}$$$$MSR = \frac{ESS}{K}$$**Remember:** Notation may vary across resources, particularly in the use of $R$ and $E$ in $RSS/ESS$ and $MSR/MSE$. In some resources, E = explained and R = residual. In other resources, E = error and R = regression (explained). **This is a very important distinction that requires looking at the formula to determine which naming scheme is being used.**Given the MSR and MSE, we can now determine whether or not the entire model we just fit is even statistically significant. We use an $F$-test for this. The null hypothesis is that all of the $\beta$ coefficients are zero, that is, none of them have any effect on $Y$. The alternative is that *at least one* $\beta$ coefficient is nonzero, but it doesn't tell us which one in a multiple regression:$$H_0: \beta_i = 0, \mbox{for all $i$} \\H_A: \beta_i \neq 0, \mbox{for some $i$}$$ $$F = \frac{MSR}{MSE} = \left( \frac{R^2}{1 - R^2} \right) \left( \frac{N - K - 1}{K} \right)$$ Once we compute the $F$-statistic, we can use the $F$-distribution with $K$ and $N - K - 1$ degrees of freedom to get a p-value.**Warning!** The $F$-statistic mentioned in this section is NOT the same as the F1-measure or F1-value discussed in Unit 7. Part 3 Checkup Exercise Set IVLet's look at the relationship between `PTRATIO` and housing price.Exercise: Try fitting a linear regression model using only the 'PTRATIO' (pupil-teacher ratio by town) and interpret the intercept and the coefficients.Exercise: Calculate (or extract) the $R^2$ value. What does it tell you?Exercise: Compute the $F$-statistic. What does it tell you?Exercise: Take a close look at the $F$-statistic and the $t$-statistic for the regression coefficient. What relationship do you notice? Note that this relationship only applies in *simple* linear regression models.
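Before the exercise, a hedged sketch making these formulas concrete for the full 13-predictor `sklearn` fit from earlier (`lm`, `X`, `bos`):
###Code
# R^2 from sklearn's score, adjusted R^2, and the overall F-statistic
N, K = X.shape
r2 = lm.score(X, bos.PRICE)
adj_r2 = 1 - (1 - r2) * (N - 1) / (N - K - 1)
F = (r2 / (1 - r2)) * ((N - K - 1) / K)
print('R^2 = {:.3f}, adjusted R^2 = {:.3f}, F = {:.1f}'.format(r2, adj_r2, F))
###Output
_____no_output_____
###Markdown
Now, on to the exercise: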
###Code
# your turn
pt = ols('PRICE ~ PTRATIO',bos).fit()
print(pt.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: PRICE R-squared: 0.258
Model: OLS Adj. R-squared: 0.256
Method: Least Squares F-statistic: 175.1
Date: Sun, 11 Aug 2019 Prob (F-statistic): 1.61e-34
Time: 01:35:49 Log-Likelihood: -1764.8
No. Observations: 506 AIC: 3534.
Df Residuals: 504 BIC: 3542.
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
Intercept 62.3446 3.029 20.581 0.000 56.393 68.296
PTRATIO -2.1572 0.163 -13.233 0.000 -2.477 -1.837
==============================================================================
Omnibus: 92.924 Durbin-Watson: 0.725
Prob(Omnibus): 0.000 Jarque-Bera (JB): 191.444
Skew: 1.001 Prob(JB): 2.68e-42
Kurtosis: 5.252 Cond. No. 160.
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
###Markdown
\* PTRATIO and PRICE have a negative relationship based on the coefficient, and the intercept is positive, which makes sense for housing prices since there are other factors at play. \* The R-squared tells us only about a quarter of the variance is explained by our single-variable model with PTRATIO. \* The p-value for the F-statistic is very low, telling us that PTRATIO and Y do in fact have a relationship and the coefficient is not zero. \* The F-statistic is essentially the square of the PTRATIO t-statistic ($175.1 \approx (-13.233)^2$), which is exactly the relationship expected in a simple linear regression. Part 3 Checkup Exercise Set VFit a linear regression model using three independent variables 'CRIM' (per capita crime rate by town) 'RM' (average number of rooms per dwelling) 'PTRATIO' (pupil-teacher ratio by town)Exercise: Compute or extract the $F$-statistic. What does it tell you about the model?Exercise: Compute or extract the $R^2$ statistic. What does it tell you about the model?Exercise: Which variables in the model are significant in predicting house price? Write a story that interprets the coefficients.
###Code
crp = ols('PRICE ~ CRIM + RM + PTRATIO',bos).fit()
print(crp.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: PRICE R-squared: 0.594
Model: OLS Adj. R-squared: 0.592
Method: Least Squares F-statistic: 245.2
Date: Sun, 11 Aug 2019 Prob (F-statistic): 6.15e-98
Time: 01:41:55 Log-Likelihood: -1612.0
No. Observations: 506 AIC: 3232.
Df Residuals: 502 BIC: 3249.
Df Model: 3
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
Intercept -3.3707 4.034 -0.836 0.404 -11.296 4.555
CRIM -0.2050 0.032 -6.399 0.000 -0.268 -0.142
RM 7.3804 0.402 18.382 0.000 6.592 8.169
PTRATIO -1.0695 0.133 -8.051 0.000 -1.331 -0.809
==============================================================================
Omnibus: 234.656 Durbin-Watson: 0.830
Prob(Omnibus): 0.000 Jarque-Bera (JB): 2020.689
Skew: 1.815 Prob(JB): 0.00
Kurtosis: 12.092 Cond. No. 311.
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
###Markdown
\* The F-statistic shows that the coefficients in this model of PRICE are not all zero, i.e. the predictors jointly have an effect on price. \* The adjusted R-squared shows that the three variables explain about 60% of the total variance, which is substantial but leaves much room for improvement. \* RM is the most significant variable, as is also reflected in its t-statistic. Part 4: Comparing Models During modeling, there will be times when we want to compare models to see which one is more predictive or fits the data better. There are many ways to compare models, but we will focus on two. The $F$-Statistic RevisitedThe $F$-statistic can also be used to compare two *nested* models, that is, two models trained on the same dataset where one of the models contains a *subset* of the variables of the other model. The *full* model contains $K$ variables and the *reduced* model contains a subset of these $K$ variables. This allows us to add additional variables to a base model and then test if adding the variables helped the model fit.$$F = \frac{\left( \frac{RSS_{reduced} - RSS_{full}}{DF_{reduced} - DF_{full}} \right)}{\left( \frac{RSS_{full}}{DF_{full}} \right)}$$where $DF_x = N - K_x - 1$ where $K_x$ is the number of variables in model $x$. Akaike Information Criterion (AIC)Another statistic for comparing two models is AIC, which is based on the likelihood function and takes into account the number of variables in the model.$$AIC = 2 K - 2 \log_e{L}$$where $L$ is the likelihood of the model. AIC is meaningless in the absolute sense, and is only meaningful when compared to AIC values from other models. Lower values of AIC indicate better fitting models.`statsmodels` provides the AIC in its output. Part 4 Checkup ExercisesExercise: Find another variable (or two) to add to the model we built in Part 3. Compute the $F$-test comparing the two models as well as the AIC. Which model is better?
###Code
crrp = ols('PRICE ~ CRIM + RM + RAD + PTRATIO',bos).fit()
print(crrp.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: PRICE R-squared: 0.596
Model: OLS Adj. R-squared: 0.592
Method: Least Squares F-statistic: 184.5
Date: Sun, 11 Aug 2019 Prob (F-statistic): 4.59e-97
Time: 21:40:44 Log-Likelihood: -1611.1
No. Observations: 506 AIC: 3232.
Df Residuals: 501 BIC: 3253.
Df Model: 4
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
Intercept -4.3061 4.096 -1.051 0.294 -12.354 3.741
CRIM -0.1758 0.039 -4.482 0.000 -0.253 -0.099
RM 7.3940 0.401 18.421 0.000 6.605 8.183
RAD -0.0536 0.042 -1.288 0.198 -0.135 0.028
PTRATIO -1.0015 0.143 -7.009 0.000 -1.282 -0.721
==============================================================================
Omnibus: 253.630 Durbin-Watson: 0.815
Prob(Omnibus): 0.000 Jarque-Bera (JB): 2375.954
Skew: 1.974 Prob(JB): 0.00
Kurtosis: 12.855 Cond. No. 361.
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
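###Markdown
To make the comparison concrete, here is a hedged sketch of the nested-model $F$-test and AIC comparison, computed directly from the two fitted results `crp` (reduced) and `crrp` (full) defined above; `scipy` is assumed to be available.
###Code
from scipy.stats import f as f_dist

# Nested F-test: reduced model (CRIM + RM + PTRATIO) vs. full model (+ RAD)
rss_reduced, rss_full = crp.ssr, crrp.ssr           # residual sums of squares
df_reduced, df_full = crp.df_resid, crrp.df_resid   # N - K - 1 for each model

F = ((rss_reduced - rss_full) / (df_reduced - df_full)) / (rss_full / df_full)
p_value = f_dist.sf(F, df_reduced - df_full, df_full)
print('Nested F = {:.3f}, p-value = {:.3f}'.format(F, p_value))
print('AIC reduced = {:.1f}, AIC full = {:.1f}'.format(crp.aic, crrp.aic))
###Output
_____no_output_____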
###Markdown
\* Added the index of accessibility to radial highways as an extra variable. The new variable has a p-value of around 0.2, the F-statistic of the model is lower than before, and the AIC is essentially unchanged, all indicating that the new variable should not be included as a predictor of housing price. Part 5: Evaluating the Model via Model Assumptions and Other Issues***Linear regression makes several assumptions. It is always best to check that these assumptions are valid after fitting a linear regression model. **Linearity**. The dependent variable $Y$ is a linear combination of the regression coefficients and the independent variables $X$. This can be verified with a scatterplot of each $X$ vs. $Y$ and plotting correlations among $X$. Nonlinearity can sometimes be resolved by [transforming](https://onlinecourses.science.psu.edu/stat501/node/318) one or more independent variables, the dependent variable, or both. In other cases, a [generalized linear model](https://en.wikipedia.org/wiki/Generalized_linear_model) or a [nonlinear model](https://en.wikipedia.org/wiki/Nonlinear_regression) may be warranted. **Constant standard deviation**. The SD of the dependent variable $Y$ should be constant for different values of X. We can check this by plotting each $X$ against $Y$ and verifying that there is no "funnel" shape showing data points fanning out as $X$ increases or decreases. Some techniques for dealing with non-constant variance include weighted least squares (WLS), [robust standard errors](https://en.wikipedia.org/wiki/Heteroscedasticity-consistent_standard_errors), or variance stabilizing transformations. **Normal distribution for errors**. The $\epsilon$ terms we discussed at the beginning are assumed to be normally distributed. This can be verified with a fitted values vs. residuals plot and verifying that there is no pattern, and with a quantile plot. $$ \epsilon_i \sim N(0, \sigma^2)$$Sometimes the distributions of responses $Y$ may not be normally distributed at any given value of $X$. e.g. skewed positively or negatively. **Independent errors**. The observations are assumed to be obtained independently. e.g. observations across time may be correlated. There are some other issues that are important to investigate with linear regression models. **Correlated Predictors:** Care should be taken to make sure that the independent variables in a regression model are not too highly correlated. Correlated predictors typically do not majorly affect prediction, but do inflate standard errors of coefficients making interpretation unreliable. Common solutions are dropping the least important variables involved in the correlations, using regularization, or, when many predictors are highly correlated, considering a dimension reduction technique such as principal component analysis (PCA). **Influential Points:** Data points that have undue influence on the regression model. These points can be high leverage points or outliers. Such points are typically removed and the regression model rerun. Part 5 Checkup ExercisesTake the reduced model from Part 3 to answer the following exercises. Take a look at [this blog post](http://mpastell.com/2013/04/19/python_regression/) for more information on using statsmodels to construct these plots. Exercise: Construct a fitted values versus residuals plot. What does the plot tell you? Are there any violations of the model assumptions?Exercise: Construct a quantile plot of the residuals. 
What does the plot tell you?Exercise: What are some advantages and disadvantages of the fitted vs. residual and quantile plot compared to each other?Exercise: Identify any outliers (if any) in your model and write a story describing what these outliers might represent.Exercise: Construct a leverage plot and identify high leverage points in the model. Write a story explaining possible reasons for the high leverage points.Exercise: Remove the outliers and high leverage points from your model and run the regression again. How do the results change?
###Code
# Your turn.
#m = ols('PRICE ~ RM',bos).fit()
#print(m.summary())
plt.scatter(m.resid, m.fittedvalues, alpha=0.5)
plt.xlabel('Residuals')
plt.ylabel('Fitted values')
###Output
_____no_output_____
###Markdown
\* Residuals look to be normally distributed with 0 at the mean.
###Code
sm.qqplot(m.resid, stats.t, fit=True, line='45')
###Output
_____no_output_____
###Markdown
\* 2. The more closely the points follow the line, the closer the residuals are to a normal distribution. In this case, the residuals look convincingly normal. \* 3. A scatterplot of fitted values against residuals gives a clearer picture of where individual observations fall (and of any pattern or funnel shape). A quantile plot, on the other hand, provides clearer insight into the normality of the residuals.
###Code
# Construct leverage plot
sm.graphics.plot_leverage_resid2(m)
###Output
_____no_output_____ |
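###Markdown
The remaining exercises (identifying outliers and high-leverage points and refitting without them) are not worked out above. Below is a hedged sketch of one common approach using `statsmodels` influence measures on the simple `PRICE ~ RM` model `m`; the cutoffs are rules of thumb, not the only reasonable choice.
###Code
# Influence measures for the fitted OLS model m (a sketch, not a definitive recipe)
influence = m.get_influence()
cooks_d = influence.cooks_distance[0]   # Cook's distance for each observation
leverage = influence.hat_matrix_diag    # leverage (hat) values

n, k = int(m.nobs), int(m.df_model)
outlier_idx = np.where(cooks_d > 4.0 / n)[0]              # rule-of-thumb cutoff for influence
high_lev_idx = np.where(leverage > 2.0 * (k + 1) / n)[0]  # rule-of-thumb cutoff for leverage

# Refit without the flagged rows and compare the coefficients
drop_idx = np.union1d(outlier_idx, high_lev_idx)
m_clean = ols('PRICE ~ RM', bos.drop(bos.index[drop_idx])).fit()
print('Dropped {} rows'.format(len(drop_idx)))
print(m.params)
print(m_clean.params)
###Output
_____no_output_____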
notebooks/rsys2.ipynb | ###Markdown
Recommendation Systems: Collaborative Filtering. Edgar Acuna, May 2020. Database: MovieLens (100k), which contains the ratings given by 943 users to 1682 movies (items). Duration: approx. 22 minutes
###Code
import warnings
warnings.filterwarnings("ignore")
from surprise import Reader, Dataset
import pandas as pd
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# pass in column names for each CSV as the column name is not given in the file and read them using pandas.
# You can check the column names from the readme file
#Reading users file:
u_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
url='http://files.grouplens.org/datasets/movielens/ml-100k/u.user'
users = pd.read_csv(url, sep='|', names=u_cols, encoding="ISO-8859-1")
print(users.shape)
#Reading ratings file:
r_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
url='http://files.grouplens.org/datasets/movielens/ml-100k/u.data'
ratings = pd.read_csv(url, sep='\t', names=r_cols,encoding='latin-1')
ratings.head()
#Showing the ratings given by users to the movies
Ratings = ratings.pivot(index = 'user_id', columns ='movie_id', values = 'rating').fillna(0)
Ratings.head()
#Reading items file where the category of the movie is included
i_cols = ['movie_id', 'movie title' ,'release date','video release date', 'IMDb URL', 'unknown', 'Action', 'Adventure',
'Animation', 'Children\'s', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy',
'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western']
url='http://files.grouplens.org/datasets/movielens/ml-100k/u.item'
items = pd.read_csv(url, sep='|', names=i_cols,
encoding='latin-1')
items.head()
###Output
_____no_output_____
###Markdown
1-Item-based Recommendation system
###Code
data1 = pd.merge(ratings, items.drop_duplicates(['movie_id']), on='movie_id')
data1.head()
#Calculating the mean rating of all the movies
data1.groupby('movie title')['rating'].mean().sort_values(ascending=False).head(10)
# Calculate count rating of all movies
data1.groupby('movie title')['rating'].count().sort_values(ascending=False).head(10)
# Building a dataframe with the mean rating and the number of ratings per movie
ratings1 = pd.DataFrame(data1.groupby('movie title')['rating'].mean())
ratings1['num of ratings'] = pd.DataFrame(data1.groupby('movie title')['rating'].count())
ratings1.head()
moviemat = data1.pivot_table(index ='user_id', columns ='movie title', values ='rating')
moviemat.head()
ratings1.sort_values('num of ratings', ascending = False).head(10)
# Extracting the ratings given by all users to Toy Story
toysstory_user_ratings = moviemat['Toy Story (1995)']
# analysing correlation with similar movies
similar_to_toystory = moviemat.corrwith(toysstory_user_ratings)
corr_toystory = pd.DataFrame(similar_to_toystory, columns =['Correlation'])
corr_toystory.dropna(inplace = True)
corr_toystory.head()
# Similar movies like toy story
corr_toystory.sort_values('Correlation', ascending = False).head(10)
corr_toystory = corr_toystory.join(ratings1['num of ratings'])
corr_toystory.head()
corr_toystory[corr_toystory['num of ratings']>100].sort_values('Correlation', ascending = False).head()
###Output
_____no_output_____
###Markdown
2. Evaluating the peformance of RS by Collaborative Filtering
###Code
#reading the train and the test datasets for ratings
r_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
url='http://files.grouplens.org/datasets/movielens/ml-100k/ua.base'
ratings_train = pd.read_csv(url, sep='\t', names=r_cols, encoding='latin-1')
url='http://files.grouplens.org/datasets/movielens/ml-100k/ua.test'
ratings_test = pd.read_csv(url, sep='\t', names=r_cols, encoding='latin-1')
ratings_train.shape, ratings_test.shape
#Building the user-item matrix
n_users=943
n_items=1682
data_matrix = np.zeros((n_users, n_items))
for line in ratings.itertuples():
data_matrix[line[1]-1, line[2]-1] = line[3]
data_matrix.shape
print(data_matrix)
#Computing the sparsity of the matrix (percentage of non-zero entries )
sparsity = float(len(data_matrix.nonzero()[0]))
sparsity /= (data_matrix.shape[0] * data_matrix.shape[1])
sparsity *= 100
print('Sparsity: {:4.2f}%'.format(sparsity))
###Output
Sparsity: 6.30%
###Markdown
In this dataset, every user has rated at least 20 movies which results in a reasonable sparsity of 6.3%. This means that 6.3% of the user-item ratings have a value. Note that, although we filled in missing ratings as 0, we should not assume these values to truly be zero. More appropriately, they are just empty entries. We will split our data into training and test sets by removing 10 ratings per user from the training set and placing them in the test set.
###Code
#Building the training and test sets
def train_test_split(ratings):
test = np.zeros(ratings.shape)
train = ratings.copy()
for user in range(ratings.shape[0]):
test_ratings = np.random.choice(ratings[user, :].nonzero()[0],
size=10,
replace=False)
train[user, test_ratings] = 0.
test[user, test_ratings] = ratings[user, test_ratings]
# Test and training are truly disjoint
assert(np.all((train * test) == 0))
return train, test
train, test = train_test_split(data_matrix)
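# The function below computes cosine similarity between users (rows) or items
# (columns): sim(a, b) = a.b / (||a|| * ||b||). The small epsilon added to the
# dot products keeps the norms (taken from the diagonal) strictly positive.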
def similarity(ratings, kind='user', epsilon=1e-9):
# epsilon -> small number for handling division-by-zero errors
if kind == 'user':
sim = ratings.dot(ratings.T) + epsilon
elif kind == 'item':
sim = ratings.T.dot(ratings) + epsilon
norms = np.array([np.sqrt(np.diagonal(sim))])
return (sim / norms / norms.T)
#computing the similarity matrices using the cosine metric
#from sklearn.metrics.pairwise import pairwise_distances
user_similarity = similarity(train, kind='user')
item_similarity = similarity(train, kind='item')
print(item_similarity[:4, :4])
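# The function below implements the standard memory-based CF predictions:
#   user-based: pred(u,i) = mean(r_u) + sum_v sim(u,v)*(r_vi - mean(r_v)) / sum_v |sim(u,v)|
#   item-based: pred(u,i) = sum_j sim(i,j)*r_uj / sum_j |sim(i,j)|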
def predict(ratings, similarity, type='user'):
if type == 'user':
mean_user_rating = ratings.mean(axis=1)
#We use np.newaxis so that mean_user_rating has same format as ratings
ratings_diff = (ratings - mean_user_rating[:, np.newaxis])
pred = mean_user_rating[:, np.newaxis] + similarity.dot(ratings_diff) / np.array([np.abs(similarity).sum(axis=1)]).T
elif type == 'item':
pred = ratings.dot(similarity) / np.array([np.abs(similarity).sum(axis=1)])
return pred
user_prediction = predict(train, user_similarity, type='user')
item_prediction = predict(train, item_similarity, type='item')
item_prediction.shape
print(user_prediction[:4, :10])
#Computing the MSE of predictions
from sklearn.metrics import mean_squared_error
def get_mse(pred, actual):
# Ignore nonzero terms.
pred = pred[actual.nonzero()].flatten()
actual = actual[actual.nonzero()].flatten()
return mean_squared_error(pred, actual)
print('User-based CF MSE: ' + str(get_mse(user_prediction, test)))
print('Item-based CF MSE: ' + str(get_mse(item_prediction, test)))
###Output
User-based CF MSE: 8.676460967854107
Item-based CF MSE: 11.515445435774618
###Markdown
3. Top-k collaborative filtering
###Code
def predict_topk(ratings, similarity, kind='user', k=40):
pred = np.zeros(ratings.shape)
if kind == 'user':
for i in range(ratings.shape[0]):
top_k_users = [np.argsort(similarity[:,i])[:-k-1:-1]]
for j in range(ratings.shape[1]):
pred[i, j] = similarity[i, :][top_k_users].dot(ratings[:, j][top_k_users])
pred[i, j] /= np.sum(np.abs(similarity[i, :][top_k_users]))
if kind == 'item':
for j in range(ratings.shape[1]):
top_k_items = [np.argsort(similarity[:,j])[:-k-1:-1]]
for i in range(ratings.shape[0]):
pred[i, j] = similarity[j, :][top_k_items].dot(ratings[i, :][top_k_items].T)
pred[i, j] /= np.sum(np.abs(similarity[j, :][top_k_items]))
return pred
pred = predict_topk(train, user_similarity, kind='user', k=40)
print('Top-k User-based CF MSE: ' + str(get_mse(pred, test)))
pred = predict_topk(train, item_similarity, kind='item', k=40)
print('Top-k Item-based CF MSE: ' + str(get_mse(pred, test)))
k_array = [5, 15, 30, 50, 100, 150]
user_train_mse = []
user_test_mse = []
item_test_mse = []
item_train_mse = []
def get_mse(pred, actual):
pred = pred[actual.nonzero()].flatten()
actual = actual[actual.nonzero()].flatten()
return mean_squared_error(pred, actual)
for k in k_array:
user_pred = predict_topk(train, user_similarity, kind='user', k=k)
item_pred = predict_topk(train, item_similarity, kind='item', k=k)
user_train_mse += [get_mse(user_pred, train)]
user_test_mse += [get_mse(user_pred, test)]
item_train_mse += [get_mse(item_pred, train)]
item_test_mse += [get_mse(item_pred, test)]
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
pal = sns.color_palette("Set2", 2)
plt.figure(figsize=(8, 8))
plt.plot(k_array, user_train_mse, c=pal[0], label='User-based train', alpha=0.5, linewidth=5)
plt.plot(k_array, user_test_mse, c=pal[0], label='User-based test', linewidth=5)
plt.plot(k_array, item_train_mse, c=pal[1], label='Item-based train', alpha=0.5, linewidth=5)
plt.plot(k_array, item_test_mse, c=pal[1], label='Item-based test', linewidth=5)
plt.legend(loc='best', fontsize=20)
plt.xticks(fontsize=16);
plt.yticks(fontsize=16);
plt.xlabel('k', fontsize=30);
plt.ylabel('MSE', fontsize=30);
###Output
_____no_output_____
###Markdown
It looks like k values of about 50 and 15 produce the minimum test error for user- and item-based collaborative filtering, respectively. 3.1 Validation
###Code
# Load in movie data
url='http://files.grouplens.org/datasets/movielens/ml-100k/u.item'
idmovies = pd.read_csv(url, header=None, sep='|', encoding="ISO-8859-1")
idx_to_movie=idmovies.loc[:,4]
idx_to_movie
def top_k_movies(similarity, mapper, movie_idx, k=6):
return [mapper[x] for x in np.argsort(similarity[movie_idx,:])[:-k-1:-1]]
idx = 0 # Toy Story
movies = top_k_movies(item_similarity, idx_to_movie, idx)
movies
idx = 1 # Golden Eye
movies = top_k_movies(item_similarity, idx_to_movie, idx)
movies
###Output
_____no_output_____
###Markdown
3.2 Replacing the cosine similarity measure by the Pearson correlation measure
###Code
from sklearn.metrics import pairwise_distances
# Convert from distance to similarity
item_correlation = 1 - pairwise_distances(train.T, metric='correlation')
item_correlation[np.isnan(item_correlation)] = 0.
idx = 0 # Toy Story
movies = top_k_movies(item_correlation, idx_to_movie, idx)
movies
idx = 1 # Golden Eye
movies = top_k_movies(item_correlation, idx_to_movie, idx)
movies
###Output
_____no_output_____
###Markdown
4. Collaborative Filtering using Matrix Factorization and the module Surprise
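The `SVD` algorithm in `surprise` is the matrix-factorization model popularized during the Netflix Prize: each rating is modeled as $$\hat{r}_{ui} = \mu + b_u + b_i + q_i^{\top} p_u$$ where $\mu$ is the global mean rating, $b_u$ and $b_i$ are user and item biases, and $p_u$, $q_i$ are latent factor vectors; the parameters are learned by minimizing the regularized squared error with stochastic gradient descent.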
###Code
from surprise import SVD
from surprise import Dataset
from surprise.model_selection import cross_validate
# Load the movielens-100k dataset (download it if needed).
data = Dataset.load_builtin('ml-100k')
# Use the famous SVD algorithm.
algo = SVD()
print(data.build_full_trainset())
algo = SVD()
# Run 5-fold cross-validation and print results
cross_validate(algo, data, measures=['RMSE', 'MAE'], cv=5, verbose=True)
output=algo.fit(data.build_full_trainset())
predictions = algo.predict(uid='10',iid='0')
score=predictions.est
print(score)
# Train the algorithm on the trainset, and predict ratings for the testset
# sample random trainset and testset
# test set is made of 25% of the ratings.
from surprise import accuracy
from surprise.model_selection import train_test_split
trainset, testset = train_test_split(data, test_size=.25)
algo.fit(trainset)
predictions = algo.test(testset)
# Then compute RMSE
accuracy.rmse(predictions)
predictions
from collections import defaultdict
from surprise import SVD
from surprise import Dataset
def get_top_n(predictions, n=10):
'''Return the top-N recommendation for each user from a set of predictions.
Args:
predictions(list of Prediction objects): The list of predictions, as
returned by the test method of an algorithm.
n(int): The number of recommendation to output for each user. Default
is 10.
Returns:
A dict where keys are user (raw) ids and values are lists of tuples:
[(raw item id, rating estimation), ...] of size n.
'''
# First map the predictions to each user.
top_n = defaultdict(list)
for uid, iid, true_r, est, _ in predictions:
top_n[uid].append((iid, est))
# Then sort the predictions for each user and retrieve the k highest ones.
for uid, user_ratings in top_n.items():
user_ratings.sort(key=lambda x: x[1], reverse=True)
top_n[uid] = user_ratings[:n]
return top_n
# First train an SVD algorithm on the movielens dataset.
data = Dataset.load_builtin('ml-100k')
trainset = data.build_full_trainset()
algo = SVD()
algo.fit(trainset)
# Than predict ratings for all pairs (u, i) that are NOT in the training set.
testset = trainset.build_anti_testset()
predictions = algo.test(testset)
top_n = get_top_n(predictions, n=10)
print(top_n.items)
# Print the recommended items for each user
for uid, user_ratings in top_n.items():
print(uid, [iid for (iid, _) in user_ratings])
###Output
<built-in method items of collections.defaultdict object at 0x000002590EE6D048>
196 ['169', '357', '178', '134', '483', '513', '474', '64', '480', '100']
186 ['318', '22', '64', '515', '275', '511', '651', '313', '272', '513']
22 ['114', '169', '64', '408', '178', '427', '603', '483', '705', '170']
244 ['134', '483', '127', '251', '98', '223', '190', '515', '603', '427']
166 ['408', '169', '136', '194', '275', '22', '64', '50', '479', '178']
298 ['64', '136', '408', '12', '169', '316', '515', '114', '258', '251']
115 ['179', '180', '134', '483', '484', '285', '474', '513', '251', '276']
253 ['272', '114', '515', '511', '178', '316', '923', '651', '657', '172']
305 ['603', '515', '513', '137', '136', '498', '657', '132', '211', '57']
6 ['654', '603', '651', '114', '661', '923', '1367', '190', '251', '963']
62 ['192', '654', '178', '187', '657', '488', '165', '493', '197', '603']
286 ['427', '496', '735', '302', '124', '647', '59', '963', '603', '1007']
200 ['302', '408', '114', '64', '603', '480', '12', '83', '427', '488']
210 ['64', '178', '318', '408', '603', '513', '199', '480', '285', '316']
224 ['174', '210', '50', '64', '265', '172', '144', '121', '515', '302']
303 ['527', '193', '180', '178', '513', '320', '521', '190', '48', '197']
122 ['408', '169', '12', '285', '276', '89', '302', '251', '647', '114']
194 ['408', '316', '528', '42', '603', '272', '251', '963', '480', '513']
291 ['178', '127', '318', '357', '408', '654', '205', '169', '520', '269']
234 ['408', '57', '114', '275', '302', '83', '522', '272', '59', '169']
119 ['498', '694', '189', '483', '513', '169', '478', '251', '923', '490']
167 ['483', '519', '114', '170', '89', '657', '132', '178', '242', '50']
299 ['493', '187', '430', '9', '223', '654', '519', '611', '316', '124']
308 ['114', '320', '647', '173', '10', '302', '251', '190', '1524', '474']
95 ['408', '603', '318', '114', '487', '427', '480', '611', '529', '963']
38 ['143', '174', '186', '173', '210', '64', '230', '11', '710', '204']
102 ['408', '114', '64', '169', '603', '923', '483', '178', '589', '474']
63 ['12', '272', '493', '169', '515', '513', '923', '657', '320', '127']
160 ['180', '318', '191', '515', '12', '134', '178', '272', '89', '357']
50 ['199', '127', '12', '59', '313', '168', '169', '513', '483', '515']
301 ['169', '512', '408', '480', '189', '478', '285', '524', '659', '313']
225 ['515', '483', '408', '318', '169', '923', '657', '513', '12', '178']
290 ['313', '166', '300', '114', '8', '408', '96', '87', '178', '661']
97 ['127', '318', '114', '187', '483', '511', '302', '134', '87', '515']
157 ['272', '318', '357', '190', '408', '169', '64', '427', '513', '480']
181 ['210', '143', '313', '318', '272', '133', '136', '1019', '114', '174']
278 ['357', '318', '408', '513', '272', '64', '483', '488', '169', '114']
276 ['483', '134', '114', '199', '191', '190', '654', '657', '178', '480']
7 ['408', '124', '251', '59', '493', '302', '1007', '811', '283', '900']
10 ['318', '408', '528', '316', '515', '8', '136', '114', '659', '272']
284 ['603', '408', '169', '316', '178', '23', '134', '251', '173', '483']
201 ['474', '488', '14', '1142', '199', '178', '135', '114', '169', '320']
287 ['603', '480', '272', '408', '127', '318', '187', '513', '511', '474']
246 ['183', '134', '276', '357', '483', '508', '171', '114', '9', '64']
242 ['302', '474', '98', '603', '515', '480', '272', '483', '408', '178']
249 ['474', '127', '654', '511', '178', '514', '515', '134', '316', '657']
99 ['318', '180', '357', '223', '169', '513', '603', '408', '165', '190']
178 ['169', '408', '251', '429', '515', '603', '114', '166', '657', '496']
251 ['483', '603', '169', '187', '272', '318', '316', '357', '98', '127']
81 ['64', '114', '427', '515', '357', '12', '134', '316', '603', '408']
260 ['483', '408', '169', '462', '114', '480', '98', '318', '170', '481']
25 ['64', '12', '96', '313', '318', '316', '272', '519', '302', '166']
59 ['251', '114', '316', '192', '144', '246', '315', '223', '1142', '475']
72 ['474', '165', '511', '114', '408', '272', '178', '316', '513', '514']
87 ['143', '169', '357', '603', '408', '28', '69', '313', '480', '117']
42 ['313', '22', '328', '169', '498', '429', '651', '515', '611', '633']
292 ['427', '474', '178', '318', '12', '493', '316', '192', '963', '172']
20 ['59', '133', '169', '484', '251', '483', '958', '528', '285', '316']
13 ['408', '496', '479', '641', '528', '512', '169', '251', '1372', '499']
138 ['408', '169', '64', '114', '50', '657', '89', '190', '127', '223']
60 ['169', '923', '408', '114', '318', '512', '127', '520', '963', '251']
57 ['408', '169', '114', '127', '963', '313', '316', '483', '172', '515']
223 ['174', '50', '12', '169', '83', '64', '511', '98', '657', '520']
189 ['169', '408', '427', '190', '187', '223', '192', '168', '114', '251']
243 ['408', '474', '603', '483', '315', '647', '134', '178', '12', '1449']
92 ['127', '357', '272', '511', '603', '187', '302', '114', '659', '269']
241 ['318', '114', '474', '64', '272', '408', '178', '12', '127', '174']
254 ['169', '96', '483', '79', '195', '198', '494', '194', '318', '657']
293 ['197', '475', '318', '178', '408', '114', '191', '154', '268', '511']
127 ['169', '316', '320', '657', '408', '96', '12', '921', '302', '1169']
222 ['272', '114', '187', '496', '83', '169', '1194', '317', '1063', '203']
267 ['178', '357', '134', '511', '603', '663', '318', '285', '48', '652']
11 ['178', '276', '114', '50', '302', '408', '963', '174', '64', '144']
8 ['178', '114', '100', '318', '480', '483', '169', '173', '197', '64']
162 ['272', '114', '408', '516', '8', '923', '169', '488', '479', '315']
279 ['512', '1073', '270', '285', '516', '282', '498', '524', '694', '57']
145 ['357', '169', '482', '480', '192', '408', '479', '498', '318', '493']
28 ['408', '169', '178', '64', '318', '172', '316', '275', '272', '302']
135 ['64', '408', '169', '127', '313', '114', '187', '483', '511', '48']
32 ['114', '178', '169', '56', '483', '318', '657', '12', '515', '603']
90 ['251', '513', '408', '320', '963', '655', '638', '487', '1142', '1449']
216 ['474', '127', '197', '316', '484', '272', '187', '661', '185', '14']
250 ['59', '251', '60', '513', '489', '178', '285', '124', '275', '603']
271 ['513', '483', '408', '1039', '251', '519', '223', '59', '228', '1007']
265 ['408', '169', '114', '272', '923', '318', '480', '89', '189', '427']
198 ['483', '134', '114', '285', '12', '657', '178', '199', '313', '603']
168 ['174', '318', '50', '357', '22', '427', '64', '194', '265', '408']
110 ['169', '50', '174', '318', '483', '923', '513', '515', '519', '1449']
58 ['179', '657', '114', '10', '136', '528', '515', '958', '251', '14']
237 ['480', '318', '251', '511', '302', '136', '316', '216', '12', '654']
94 ['137', '178', '124', '408', '276', '511', '48', '515', '269', '169']
128 ['272', '169', '251', '316', '520', '22', '189', '114', '641', '408']
44 ['483', '114', '408', '12', '169', '484', '223', '199', '512', '963']
264 ['427', '480', '174', '483', '408', '178', '169', '963', '114', '165']
41 ['169', '408', '134', '12', '89', '114', '480', '23', '483', '603']
82 ['12', '408', '530', '193', '515', '114', '427', '963', '190', '498']
262 ['474', '178', '408', '493', '603', '694', '483', '127', '480', '589']
174 ['302', '408', '169', '64', '480', '316', '144', '1019', '176', '514']
43 ['83', '427', '205', '483', '603', '178', '22', '923', '239', '963']
84 ['114', '427', '285', '169', '478', '603', '480', '513', '657', '178']
269 ['178', '86', '199', '150', '408', '480', '463', '60', '434', '114']
259 ['191', '318', '127', '515', '480', '474', '483', '114', '174', '641']
85 ['408', '114', '178', '603', '251', '285', '484', '185', '169', '1449']
213 ['480', '408', '169', '114', '178', '427', '134', '306', '251', '921']
121 ['408', '191', '483', '223', '199', '8', '134', '511', '513', '251']
49 ['474', '654', '134', '137', '603', '408', '276', '223', '127', '269']
155 ['169', '302', '50', '483', '174', '64', '172', '12', '408', '89']
68 ['318', '114', '408', '515', '483', '48', '479', '64', '100', '661']
172 ['318', '114', '408', '64', '513', '169', '484', '493', '223', '136']
19 ['408', '169', '603', '12', '45', '483', '178', '657', '114', '316']
268 ['64', '9', '648', '603', '482', '657', '511', '963', '508', '530']
5 ['641', '134', '647', '519', '285', '12', '199', '114', '166', '483']
80 ['114', '408', '318', '313', '603', '178', '169', '98', '173', '186']
66 ['318', '313', '272', '515', '64', '12', '316', '251', '79', '183']
18 ['114', '511', '484', '525', '606', '272', '694', '505', '615', '922']
26 ['64', '114', '98', '12', '483', '408', '357', '178', '169', '173']
130 ['498', '483', '178', '318', '659', '923', '963', '114', '166', '515']
256 ['515', '479', '408', '318', '114', '178', '8', '606', '921', '480']
1 ['483', '408', '318', '657', '705', '474', '484', '663', '510', '512']
56 ['12', '923', '8', '318', '657', '515', '357', '963', '48', '182']
15 ['318', '174', '172', '313', '28', '12', '114', '22', '357', '173']
207 ['427', '272', '498', '496', '603', '114', '178', '487', '480', '604']
232 ['654', '285', '513', '242', '923', '134', '709', '484', '485', '59']
52 ['483', '408', '178', '169', '114', '64', '603', '1137', '12', '480']
161 ['12', '408', '114', '515', '659', '223', '313', '484', '169', '166']
148 ['12', '199', '512', '318', '223', '483', '100', '285', '659', '64']
125 ['237', '419', '313', '651', '423', '429', '385', '215', '1019', '408']
83 ['318', '313', '408', '169', '96', '12', '87', '190', '189', '511']
272 ['64', '180', '318', '603', '114', '169', '408', '480', '190', '192']
151 ['269', '315', '923', '165', '511', '963', '527', '272', '493', '513']
54 ['12', '318', '86', '480', '513', '83', '223', '316', '524', '657']
16 ['483', '484', '169', '50', '313', '165', '316', '513', '408', '133']
91 ['169', '272', '251', '408', '199', '100', '178', '166', '963', '196']
294 ['64', '318', '22', '114', '178', '172', '527', '316', '272', '198']
229 ['606', '134', '169', '191', '408', '357', '320', '603', '178', '483']
36 ['272', '408', '318', '64', '169', '513', '603', '315', '316', '174']
70 ['318', '357', '64', '114', '272', '480', '178', '427', '603', '481']
14 ['169', '513', '611', '251', '178', '318', '480', '478', '661', '707']
295 ['302', '474', '603', '515', '165', '480', '479', '272', '61', '408']
233 ['178', '169', '114', '427', '272', '408', '480', '251', '316', '474']
214 ['178', '515', '474', '189', '657', '86', '48', '480', '923', '528']
192 ['408', '98', '318', '483', '603', '511', '357', '114', '134', '124']
100 ['169', '22', '603', '408', '318', '496', '114', '520', '488', '170']
307 ['127', '14', '318', '134', '199', '508', '530', '480', '12', '498']
297 ['134', '178', '408', '483', '654', '64', '127', '165', '511', '481']
193 ['511', '408', '318', '272', '169', '427', '172', '522', '651', '480']
113 ['427', '313', '318', '114', '512', '272', '511', '963', '187', '484']
275 ['318', '513', '272', '427', '923', '483', '657', '484', '114', '519']
219 ['286', '169', '408', '474', '127', '251', '302', '489', '483', '647']
218 ['169', '408', '127', '474', '480', '513', '519', '178', '64', '285']
123 ['56', '89', '357', '408', '180', '474', '654', '169', '661', '659']
158 ['318', '493', '515', '178', '199', '59', '134', '474', '114', '427']
302 ['114', '318', '64', '272', '169', '515', '474', '251', '496', '178']
23 ['169', '515', '114', '223', '923', '12', '519', '813', '641', '657']
296 ['318', '169', '511', '223', '408', '133', '12', '86', '603', '657']
33 ['169', '408', '483', '316', '528', '170', '515', '603', '657', '12']
154 ['56', '408', '127', '169', '528', '483', '603', '192', '124', '285']
77 ['603', '408', '124', '178', '12', '654', '177', '435', '528', '190']
270 ['474', '246', '515', '427', '165', '480', '498', '23', '750', '272']
187 ['318', '483', '357', '408', '127', '515', '169', '272', '513', '170']
170 ['169', '483', '408', '316', '357', '488', '79', '12', '183', '64']
101 ['64', '318', '483', '174', '480', '603', '79', '173', '408', '22']
184 ['59', '408', '603', '136', '923', '114', '661', '493', '519', '302']
112 ['199', '318', '483', '427', '474', '50', '64', '603', '657', '178']
133 ['169', '408', '603', '357', '657', '12', '98', '480', '318', '178']
215 ['318', '199', '12', '190', '114', '408', '657', '178', '513', '603']
69 ['178', '127', '474', '251', '408', '318', '603', '64', '483', '659']
104 ['172', '174', '169', '318', '513', '173', '178', '408', '199', '520']
240 ['127', '169', '98', '657', '64', '12', '480', '408', '483', '187']
144 ['511', '496', '483', '169', '427', '408', '479', '132', '520', '513']
191 ['169', '408', '603', '127', '64', '483', '479', '12', '528', '223']
61 ['64', '318', '408', '114', '178', '519', '515', '98', '480', '169']
142 ['64', '480', '100', '357', '318', '302', '187', '603', '12', '98']
177 ['474', '603', '357', '483', '316', '185', '251', '647', '480', '180']
203 ['64', '408', '318', '178', '98', '480', '127', '483', '285', '187']
21 ['357', '150', '174', '475', '64', '483', '14', '183', '156', '134']
197 ['318', '64', '429', '194', '427', '28', '98', '963', '191', '204']
134 ['318', '408', '114', '589', '96', '169', '483', '204', '603', '210']
180 ['408', '170', '1194', '136', '923', '603', '45', '134', '519', '483']
236 ['114', '515', '923', '272', '1449', '474', '498', '606', '316', '136']
263 ['474', '178', '313', '191', '169', '603', '285', '427', '165', '513']
109 ['300', '313', '430', '500', '114', '169', '272', '163', '316', '1137']
64 ['223', '192', '134', '483', '114', '169', '408', '357', '272', '479']
114 ['127', '169', '64', '408', '511', '114', '272', '480', '134', '187']
239 ['285', '315', '170', '127', '657', '1367', '30', '131', '922', '615']
117 ['127', '169', '474', '318', '114', '272', '316', '479', '408', '357']
65 ['272', '313', '515', '963', '114', '187', '59', '357', '480', '169']
137 ['194', '603', '512', '498', '135', '272', '127', '318', '520', '189']
257 ['483', '127', '603', '513', '408', '98', '64', '357', '530', '488']
111 ['169', '408', '50', '64', '174', '173', '170', '483', '12', '513']
285 ['169', '483', '178', '657', '515', '318', '488', '963', '606', '189']
96 ['12', '923', '313', '357', '408', '302', '169', '114', '963', '603']
116 ['357', '318', '64', '190', '12', '114', '23', '493', '98', '173']
73 ['223', '483', '168', '654', '134', '9', '408', '192', '185', '114']
221 ['357', '127', '205', '197', '169', '513', '114', '180', '285', '276']
235 ['64', '114', '513', '127', '408', '169', '12', '651', '654', '134']
164 ['174', '408', '318', '430', '169', '12', '114', '647', '22', '195']
281 ['50', '483', '172', '318', '169', '181', '408', '480', '64', '657']
182 ['318', '496', '174', '408', '483', '313', '427', '64', '114', '98']
129 ['408', '513', '427', '357', '515', '169', '603', '483', '127', '923']
45 ['98', '64', '313', '176', '12', '114', '173', '483', '316', '192']
131 ['318', '12', '114', '474', '50', '64', '488', '192', '1449', '513']
230 ['318', '272', '313', '923', '215', '692', '302', '483', '660', '1449']
126 ['408', '484', '169', '285', '174', '483', '513', '23', '114', '318']
231 ['480', '318', '488', '483', '515', '513', '408', '114', '474', '603']
280 ['408', '474', '606', '242', '1019', '1039', '515', '603', '272', '498']
288 ['169', '408', '522', '603', '316', '315', '963', '659', '59', '474']
152 ['194', '603', '515', '165', '480', '174', '479', '408', '178', '318']
217 ['272', '98', '169', '514', '198', '515', '482', '654', '313', '528']
79 ['408', '169', '134', '318', '64', '272', '178', '511', '136', '483']
75 ['64', '198', '512', '318', '169', '603', '12', '357', '168', '187']
245 ['408', '169', '64', '511', '513', '272', '12', '223', '124', '127']
282 ['483', '528', '251', '316', '12', '169', '659', '408', '272', '603']
78 ['50', '172', '174', '302', '169', '483', '181', '178', '318', '603']
118 ['181', '196', '515', '480', '275', '496', '648', '750', '479', '272']
283 ['512', '174', '172', '318', '169', '114', '483', '408', '285', '313']
171 ['483', '408', '199', '50', '89', '114', '484', '169', '318', '515']
107 ['603', '318', '483', '42', '168', '408', '12', '435', '127', '474']
226 ['318', '114', '64', '483', '603', '357', '515', '1449', '190', '50']
306 ['169', '480', '408', '479', '484', '178', '513', '12', '191', '603']
173 ['474', '98', '603', '100', '515', '480', '174', '479', '272', '48']
185 ['12', '64', '483', '98', '408', '1019', '603', '89', '165', '272']
150 ['56', '64', '114', '357', '603', '408', '187', '192', '98', '483']
274 ['408', '64', '205', '483', '169', '114', '12', '480', '127', '427']
188 ['169', '12', '313', '647', '114', '408', '320', '251', '479', '83']
48 ['285', '318', '313', '169', '408', '515', '114', '166', '127', '134']
311 ['313', '114', '166', '657', '603', '11', '316', '272', '169', '963']
165 ['408', '511', '190', '427', '923', '483', '12', '498', '272', '114']
208 ['318', '313', '64', '50', '483', '172', '272', '174', '651', '511']
2 ['64', '483', '408', '474', '511', '357', '318', '603', '493', '169']
205 ['169', '408', '427', '512', '251', '963', '923', '483', '603', '484']
248 ['408', '357', '169', '12', '134', '603', '56', '657', '483', '42']
93 ['56', '408', '427', '169', '515', '474', '603', '318', '114', '178']
159 ['181', '174', '50', '313', '169', '12', '663', '172', '210', '641']
146 ['169', '12', '480', '408', '474', '483', '180', '127', '529', '187']
29 ['318', '357', '408', '483', '64', '169', '603', '515', '190', '114']
156 ['408', '169', '114', '483', '285', '199', '134', '484', '23', '513']
37 ['408', '169', '511', '603', '483', '474', '114', '134', '657', '223']
141 ['114', '169', '408', '480', '64', '148', '143', '87', '210', '174']
195 ['50', '483', '172', '199', '318', '511', '64', '174', '357', '12']
108 ['169', '64', '56', '408', '357', '114', '474', '318', '189', '272']
47 ['178', '474', '187', '513', '127', '603', '59', '169', '493', '318']
255 ['127', '357', '114', '172', '318', '346', '12', '173', '89', '316']
89 ['515', '272', '178', '318', '64', '199', '168', '169', '474', '174']
140 ['483', '174', '408', '169', '199', '318', '64', '480', '178', '100']
190 ['50', '318', '64', '12', '408', '251', '515', '185', '173', '474']
24 ['515', '483', '134', '657', '603', '408', '480', '488', '50', '114']
17 ['318', '483', '603', '12', '178', '408', '199', '169', '302', '272']
313 ['408', '513', '963', '923', '285', '169', '45', '12', '707', '315']
53 ['169', '272', '408', '512', '515', '114', '285', '603', '170', '189']
124 ['169', '408', '114', '285', '515', '302', '513', '483', '272', '657']
149 ['318', '50', '483', '191', '251', '114', '427', '127', '185', '408']
176 ['192', '199', '480', '132', '178', '484', '493', '134', '12', '482']
106 ['50', '127', '174', '483', '603', '114', '169', '83', '272', '190']
312 ['285', '12', '923', '242', '59', '127', '45', '318', '64', '272']
175 ['318', '313', '174', '963', '22', '498', '480', '272', '427', '316']
153 ['318', '192', '98', '514', '603', '519', '606', '100', '180', '408']
220 ['515', '318', '178', '64', '408', '114', '474', '513', '169', '603']
143 ['12', '169', '64', '178', '603', '408', '429', '1449', '96', '661']
199 ['56', '357', '654', '134', '114', '127', '474', '169', '511', '64']
202 ['12', '302', '316', '169', '408', '427', '963', '174', '272', '709']
277 ['318', '483', '114', '187', '134', '272', '427', '357', '64', '169']
206 ['173', '50', '114', '483', '172', '187', '657', '480', '12', '89']
76 ['127', '285', '199', '178', '408', '302', '654', '480', '180', '187']
314 ['50', '313', '333', '181', '512', '195', '174', '210', '748', '98']
136 ['483', '302', '408', '603', '169', '513', '64', '963', '59', '12']
179 ['515', '169', '64', '98', '483', '487', '185', '216', '114', '357']
4 ['98', '603', '515', '427', '480', '496', '174', '479', '272', '172']
304 ['12', '64', '22', '114', '169', '603', '316', '165', '174', '963']
3 ['64', '172', '483', '168', '603', '528', '12', '169', '173', '474']
227 ['408', '64', '511', '603', '187', '134', '479', '251', '272', '483']
252 ['474', '98', '603', '427', '483', '408', '127', '318', '64', '357']
212 ['114', '272', '316', '173', '22', '603', '178', '483', '64', '169']
310 ['474', '100', '169', '408', '603', '12', '64', '45', '511', '165']
35 ['169', '408', '22', '483', '174', '50', '515', '511', '498', '318']
147 ['272', '483', '408', '127', '318', '64', '136', '427', '520', '178']
105 ['169', '408', '12', '64', '127', '603', '318', '483', '50', '427']
34 ['474', '603', '515', '512', '480', '48', '483', '408', '178', '318']
71 ['12', '114', '513', '318', '408', '474', '189', '169', '488', '223']
51 ['313', '318', '169', '408', '174', '100', '251', '12', '513', '484']
204 ['178', '474', '357', '603', '114', '190', '484', '408', '285', '134']
315 ['169', '408', '114', '357', '480', '89', '483', '515', '190', '484']
31 ['169', '100', '408', '285', '318', '134', '483', '474', '114', '223']
316 ['169', '86', '606', '528', '516', '251', '316', '408', '603', '529']
103 ['12', '483', '114', '408', '603', '169', '170', '64', '318', '178']
318 ['511', '169', '483', '408', '22', '178', '923', '963', '515', '606']
30 ['408', '483', '169', '318', '22', '127', '479', '316', '64', '496']
120 ['318', '64', '408', '114', '134', '483', '169', '603', '496', '480']
46 ['408', '318', '169', '427', '496', '530', '199', '178', '480', '603']
289 ['515', '483', '64', '513', '205', '603', '178', '357', '134', '408']
209 ['513', '483', '479', '174', '511', '520', '474', '199', '318', '169']
261 ['302', '603', '181', '515', '427', '480', '174', '272', '79', '172']
88 ['603', '100', '427', '408', '318', '56', '64', '357', '185', '127']
9 ['169', '64', '318', '408', '272', '114', '515', '194', '520', '513']
247 ['408', '515', '318', '603', '12', '174', '427', '483', '511', '169']
321 ['318', '169', '114', '606', '427', '302', '408', '12', '316', '923']
266 ['169', '318', '302', '408', '474', '114', '511', '56', '483', '357']
74 ['169', '64', '963', '318', '483', '496', '196', '83', '479', '657']
238 ['474', '318', '169', '923', '515', '1449', '272', '316', '313', '479']
319 ['169', '408', '318', '64', '98', '272', '515', '12', '603', '100']
323 ['174', '357', '12', '187', '1142', '511', '603', '318', '474', '427']
67 ['98', '194', '480', '318', '357', '199', '169', '12', '50', '174']
211 ['408', '285', '169', '515', '114', '513', '484', '647', '601', '1449']
98 ['515', '169', '318', '511', '285', '923', '251', '483', '408', '114']
12 ['408', '169', '12', '114', '923', '64', '484', '272', '603', '963']
40 ['12', '408', '114', '64', '657', '169', '603', '178', '174', '483']
258 ['114', '694', '923', '515', '318', '169', '285', '87', '483', '1019']
228 ['50', '483', '64', '515', '318', '174', '496', '173', '69', '172']
325 ['427', '169', '12', '251', '14', '603', '428', '285', '223', '479']
320 ['408', '318', '64', '169', '603', '114', '178', '12', '98', '515']
326 ['408', '114', '316', '124', '28', '57', '1449', '656', '641', '709']
327 ['483', '114', '134', '185', '647', '661', '223', '59', '513', '187']
183 ['515', '657', '408', '603', '318', '496', '169', '513', '133', '178']
328 ['923', '1137', '170', '48', '513', '114', '707', '251', '1194', '735']
322 ['408', '357', '169', '180', '170', '285', '480', '511', '657', '191']
330 ['302', '515', '498', '272', '79', '483', '408', '198', '189', '169']
27 ['64', '603', '408', '169', '12', '174', '302', '181', '511', '178']
331 ['169', '251', '272', '172', '185', '12', '919', '408', '137', '89']
332 ['603', '427', '512', '132', '272', '483', '178', '318', '357', '169']
329 ['408', '513', '474', '603', '89', '168', '285', '641', '524', '519']
86 ['12', '64', '50', '318', '427', '480', '603', '114', '408', '483']
139 ['318', '114', '408', '64', '483', '178', '189', '357', '515', '427']
300 ['603', '498', '479', '483', '408', '318', '169', '12', '511', '114']
163 ['12', '483', '178', '174', '50', '603', '657', '408', '169', '22']
333 ['408', '169', '210', '496', '114', '83', '64', '173', '478', '963']
334 ['114', '357', '178', '511', '64', '516', '654', '242', '647', '513']
39 ['178', '318', '483', '50', '479', '603', '114', '484', '480', '12']
324 ['474', '98', '603', '515', '427', '512', '480', '96', '195', '479']
132 ['408', '169', '515', '488', '603', '483', '480', '272', '511', '357']
336 ['64', '127', '318', '187', '12', '272', '483', '98', '191', '1142']
335 ['285', '127', '513', '134', '100', '56', '408', '641', '8', '180']
169 ['64', '169', '357', '114', '318', '178', '1019', '408', '515', '657']
338 ['185', '192', '12', '137', '923', '48', '60', '657', '285', '199']
339 ['408', '493', '318', '223', '48', '169', '114', '513', '285', '520']
309 ['318', '64', '191', '169', '357', '100', '98', '408', '483', '127']
342 ['515', '520', '493', '171', '199', '127', '474', '187', '318', '613']
340 ['22', '64', '318', '313', '178', '302', '496', '483', '408', '82']
317 ['64', '515', '318', '483', '114', '603', '408', '480', '178', '169']
341 ['98', '603', '100', '480', '23', '272', '483', '408', '127', '318']
343 ['192', '648', '647', '251', '479', '480', '484', '603', '493', '285']
344 ['603', '178', '427', '134', '921', '185', '474', '223', '483', '475']
345 ['169', '474', '408', '127', '493', '483', '183', '154', '182', '603']
346 ['169', '191', '603', '408', '193', '194', '59', '483', '258', '641']
347 ['313', '64', '316', '198', '641', '272', '114', '189', '603', '169']
273 ['127', '483', '408', '511', '169', '479', '134', '513', '178', '963']
55 ['114', '169', '64', '313', '483', '408', '272', '127', '316', '12']
349 ['408', '169', '513', '316', '483', '654', '480', '114', '479', '611']
348 ['483', '318', '64', '484', '169', '12', '114', '408', '641', '657']
354 ['302', '474', '408', '654', '514', '114', '488', '12', '1019', '519']
351 ['194', '603', '515', '427', '480', '498', '496', '174', '479', '483']
358 ['12', '657', '50', '320', '306', '484', '603', '96', '178', '189']
352 ['318', '357', '64', '178', '408', '169', '480', '127', '185', '427']
360 ['114', '408', '89', '169', '318', '98', '923', '192', '514', '12']
363 ['318', '124', '513', '272', '483', '59', '64', '654', '478', '510']
355 ['98', '515', '498', '272', '483', '408', '127', '318', '64', '357']
362 ['169', '98', '12', '357', '408', '603', '50', '178', '64', '114']
357 ['265', '95', '603', '181', '515', '512', '480', '498', '496', '174']
356 ['427', '59', '496', '318', '64', '408', '127', '603', '357', '169']
361 ['134', '357', '318', '483', '515', '408', '427', '511', '187', '474']
365 ['114', '641', '223', '12', '318', '515', '513', '173', '483', '357']
350 ['318', '408', '12', '199', '114', '513', '498', '285', '180', '272']
367 ['474', '603', '196', '480', '479', '272', '483', '408', '318', '89']
368 ['173', '114', '408', '272', '64', '174', '12', '169', '483', '511']
371 ['169', '313', '318', '228', '272', '166', '178', '923', '408', '216']
373 ['408', '923', '313', '657', '483', '515', '513', '294', '1019', '1']
370 ['318', '408', '169', '251', '474', '357', '185', '272', '515', '1019']
374 ['22', '169', '313', '272', '316', '300', '923', '601', '315', '660']
372 ['302', '603', '515', '427', '512', '15', '480', '272', '483', '178']
337 ['12', '316', '318', '199', '96', '484', '174', '178', '114', '496']
378 ['963', '483', '357', '427', '603', '511', '170', '251', '178', '169']
366 ['242', '302', '474', '603', '515', '427', '512', '480', '275', '498']
377 ['64', '318', '178', '483', '357', '50', '603', '515', '172', '127']
375 ['114', '408', '318', '427', '285', '50', '657', '513', '178', '923']
359 ['12', '98', '64', '169', '603', '515', '178', '316', '172', '114']
379 ['180', '408', '182', '483', '513', '607', '169', '316', '1007', '484']
380 ['178', '191', '320', '173', '484', '603', '963', '192', '169', '513']
381 ['190', '408', '173', '156', '169', '654', '56', '603', '98', '427']
385 ['179', '923', '475', '515', '45', '753', '509', '124', '178', '936']
382 ['498', '169', '192', '408', '513', '174', '607', '488', '923', '316']
387 ['134', '654', '124', '285', '483', '185', '661', '657', '484', '137']
364 ['64', '318', '22', '169', '515', '408', '603', '187', '114', '923']
369 ['318', '408', '64', '169', '178', '56', '427', '59', '127', '483']
388 ['194', '181', '423', '515', '498', '496', '132', '174', '479', '172']
386 ['483', '318', '480', '272', '313', '64', '498', '657', '488', '357']
389 ['114', '408', '318', '694', '12', '513', '170', '511', '659', '175']
383 ['408', '64', '318', '178', '511', '199', '923', '529', '169', '191']
390 ['318', '174', '64', '483', '136', '178', '208', '651', '963', '735']
393 ['300', '923', '408', '963', '657', '520', '174', '746', '1449', '709']
392 ['484', '603', '12', '483', '474', '480', '432', '479', '427', '408']
376 ['169', '408', '64', '272', '513', '480', '316', '318', '127', '474']
394 ['64', '114', '603', '408', '318', '169', '483', '178', '511', '357']
391 ['513', '515', '178', '408', '185', '169', '607', '165', '512', '156']
398 ['318', '22', '515', '527', '114', '169', '313', '487', '170', '651']
397 ['187', '64', '408', '234', '144', '173', '651', '185', '169', '527']
399 ['408', '1019', '169', '285', '86', '657', '641', '694', '709', '134']
396 ['408', '603', '657', '12', '114', '496', '480', '178', '169', '483']
401 ['923', '22', '963', '1149', '170', '427', '606', '8', '132', '513']
402 ['963', '496', '318', '194', '498', '272', '603', '64', '169', '173']
384 ['483', '318', '172', '515', '50', '178', '513', '251', '169', '59']
395 ['483', '22', '195', '191', '408', '176', '169', '603', '114', '178']
353 ['318', '483', '185', '64', '187', '50', '134', '23', '320', '192']
403 ['169', '408', '12', '313', '320', '657', '174', '114', '483', '272']
405 ['125', '144', '496', '294', '282', '866', '258', '117', '332', '963']
400 ['408', '169', '515', '114', '483', '318', '190', '178', '603', '98']
406 ['223', '242', '530', '408', '856', '512', '656', '525', '177', '178']
407 ['178', '12', '318', '480', '64', '192', '114', '165', '654', '83']
409 ['408', '169', '64', '302', '251', '269', '190', '512', '515', '320']
404 ['169', '408', '174', '318', '114', '483', '657', '12', '50', '480']
413 ['318', '408', '114', '172', '173', '174', '169', '178', '483', '657']
416 ['705', '186', '228', '483', '169', '408', '208', '661', '190', '527']
408 ['923', '408', '318', '483', '603', '114', '427', '480', '606', '515']
410 ['357', '318', '923', '515', '114', '427', '483', '64', '12', '178']
411 ['64', '12', '483', '169', '114', '251', '515', '963', '178', '657']
417 ['318', '427', '408', '22', '480', '615', '114', '603', '313', '204']
412 ['98', '483', '251', '50', '484', '83', '12', '136', '316', '272']
420 ['64', '318', '316', '59', '357', '199', '98', '169', '483', '480']
422 ['357', '192', '427', '205', '134', '483', '169', '318', '480', '603']
425 ['603', '9', '134', '170', '169', '488', '306', '242', '189', '654']
419 ['169', '98', '127', '272', '483', '187', '64', '302', '178', '357']
415 ['408', '169', '12', '23', '603', '302', '511', '178', '513', '173']
423 ['318', '64', '50', '357', '178', '98', '483', '963', '515', '408']
429 ['515', '169', '272', '408', '513', '268', '512', '523', '316', '659']
428 ['603', '100', '427', '165', '480', '174', '483', '408', '178', '318']
427 ['242', '474', '1', '95', '246', '98', '194', '603', '1137', '181']
418 ['50', '408', '169', '318', '174', '357', '172', '79', '134', '192']
424 ['169', '408', '513', '524', '484', '512', '709', '603', '488', '189']
432 ['318', '272', '173', '169', '83', '114', '919', '316', '64', '357']
421 ['64', '169', '408', '318', '512', '480', '316', '189', '178', '320']
435 ['114', '508', '272', '657', '320', '134', '189', '483', '1019', '648']
433 ['169', '14', '408', '603', '483', '114', '963', '127', '529', '272']
426 ['169', '114', '64', '357', '313', '408', '127', '302', '963', '170']
436 ['169', '272', '316', '515', '100', '320', '64', '318', '22', '12']
430 ['408', '483', '169', '357', '190', '479', '513', '114', '223', '498']
434 ['357', '427', '603', '313', '174', '318', '114', '64', '127', '178']
437 ['178', '522', '528', '408', '269', '136', '169', '194', '114', '919']
438 ['174', '408', '178', '64', '169', '313', '186', '194', '603', '657']
431 ['318', '64', '483', '313', '357', '191', '178', '427', '50', '603']
442 ['408', '127', '50', '114', '474', '657', '134', '483', '178', '135']
440 ['98', '603', '480', '483', '408', '318', '64', '12', '185', '513']
445 ['474', '318', '357', '187', '483', '285', '178', '488', '114', '496']
447 ['408', '172', '169', '64', '318', '302', '603', '210', '199', '173']
449 ['408', '114', '89', '190', '474', '124', '98', '513', '318', '483']
450 ['408', '515', '963', '524', '1449', '513', '1194', '641', '1019', '512']
446 ['50', '515', '169', '483', '408', '114', '199', '285', '511', '318']
439 ['657', '318', '127', '64', '272', '313', '515', '511', '483', '513']
451 ['313', '210', '12', '405', '923', '1', '50', '114', '195', '318']
452 ['657', '12', '169', '302', '408', '1039', '478', '656', '133', '28']
454 ['251', '265', '684', '333', '125', '207', '239', '408', '429', '83']
453 ['64', '134', '169', '178', '165', '192', '316', '187', '408', '198']
414 ['408', '127', '318', '64', '357', '134', '50', '511', '169', '251']
455 ['169', '408', '657', '198', '136', '357', '187', '114', '496', '483']
444 ['12', '513', '64', '318', '169', '172', '483', '174', '210', '923']
448 ['285', '483', '134', '50', '318', '172', '408', '174', '478', '651']
457 ['603', '496', '408', '187', '251', '427', '480', '513', '178', '520']
456 ['511', '285', '654', '242', '114', '169', '408', '134', '178', '488']
458 ['89', '661', '480', '175', '488', '525', '510', '493', '59', '641']
462 ['515', '174', '408', '318', '169', '483', '480', '427', '178', '251']
459 ['408', '318', '64', '313', '114', '178', '480', '427', '603', '483']
460 ['169', '483', '513', '408', '480', '474', '603', '199', '511', '318']
461 ['169', '408', '100', '98', '483', '114', '272', '64', '316', '127']
467 ['408', '251', '134', '169', '474', '12', '484', '135', '603', '480']
468 ['48', '483', '194', '520', '480', '114', '488', '169', '496', '197']
466 ['64', '251', '318', '408', '169', '963', '427', '12', '483', '474']
472 ['474', '86', '98', '32', '1137', '515', '512', '498', '648', '514']
465 ['515', '483', '589', '272', '173', '45', '320', '647', '89', '316']
463 ['114', '178', '64', '408', '603', '98', '427', '169', '134', '480']
471 ['316', '178', '318', '169', '515', '272', '285', '114', '96', '479']
474 ['144', '251', '1142', '169', '272', '408', '114', '223', '156', '429']
469 ['272', '178', '318', '169', '114', '480', '408', '657', '427', '251']
464 ['64', '169', '408', '272', '1449', '189', '170', '963', '483', '59']
476 ['64', '12', '190', '318', '98', '169', '114', '316', '408', '313']
478 ['56', '114', '513', '483', '194', '316', '197', '205', '234', '709']
473 ['318', '114', '64', '427', '408', '272', '357', '313', '651', '15']
470 ['169', '474', '170', '318', '174', '12', '64', '316', '513', '657']
480 ['408', '513', '520', '192', '251', '488', '519', '285', '484', '657']
441 ['483', '318', '408', '64', '50', '199', '603', '169', '127', '114']
479 ['191', '98', '12', '432', '166', '302', '186', '251', '178', '429']
484 ['408', '178', '479', '480', '657', '496', '169', '603', '429', '641']
486 ['56', '316', '272', '173', '318', '174', '98', '527', '172', '64']
487 ['408', '657', '169', '7', '124', '603', '484', '659', '228', '190']
482 ['169', '12', '408', '320', '512', '318', '64', '657', '479', '272']
481 ['169', '174', '408', '178', '528', '487', '478', '603', '302', '166']
492 ['114', '515', '316', '302', '12', '603', '357', '191', '169', '963']
493 ['169', '64', '114', '87', '313', '144', '408', '511', '251', '28']
490 ['482', '483', '12', '408', '199', '134', '474', '180', '114', '169']
489 ['408', '318', '198', '169', '22', '114', '511', '12', '56', '156']
483 ['408', '169', '357', '483', '511', '223', '178', '98', '174', '114']
496 ['114', '515', '169', '475', '313', '12', '285', '320', '179', '1240']
494 ['190', '173', '114', '172', '178', '318', '187', '313', '705', '408']
495 ['603', '515', '408', '178', '484', '657', '1194', '519', '12', '604']
477 ['302', '474', '193', '603', '196', '423', '515', '427', '512', '165']
497 ['64', '483', '515', '511', '512', '474', '320', '530', '285', '480']
488 ['195', '316', '313', '12', '169', '114', '272', '257', '657', '408']
498 ['285', '60', '408', '169', '432', '427', '511', '654', '357', '86']
499 ['169', '603', '408', '64', '515', '199', '178', '1019', '654', '134']
491 ['98', '114', '199', '318', '603', '169', '96', '50', '641', '519']
500 ['12', '474', '185', '654', '127', '315', '603', '652', '657', '484']
502 ['357', '603', '318', '56', '483', '408', '657', '488', '98', '64']
503 ['192', '251', '114', '483', '276', '178', '169', '320', '23', '170']
504 ['515', '408', '1449', '169', '923', '12', '272', '114', '1019', '528']
505 ['515', '318', '408', '272', '1449', '136', '603', '611', '606', '169']
506 ['22', '313', '64', '83', '528', '483', '114', '750', '318', '316']
443 ['22', '64', '318', '603', '169', '174', '272', '496', '515', '98']
507 ['474', '265', '86', '486', '144', '1', '246', '98', '194', '603']
514 ['205', '251', '1449', '480', '615', '603', '166', '316', '427', '516']
508 ['178', '408', '483', '654', '127', '302', '170', '275', '603', '137']
511 ['64', '318', '408', '515', '169', '272', '98', '173', '302', '316']
515 ['114', '190', '169', '408', '50', '318', '12', '483', '611', '64']
512 ['603', '357', '192', '12', '169', '427', '657', '513', '483', '480']
513 ['194', '515', '174', '483', '318', '357', '513', '313', '519', '641']
475 ['64', '657', '513', '12', '318', '191', '408', '56', '169', '98']
523 ['302', '603', '143', '480', '496', '174', '272', '483', '318', '64']
518 ['923', '318', '313', '144', '210', '607', '174', '272', '963', '320']
509 ['169', '64', '318', '483', '357', '313', '114', '127', '79', '320']
516 ['318', '408', '272', '114', '64', '603', '190', '12', '483', '178']
510 ['408', '114', '285', '302', '169', '483', '318', '50', '641', '89']
524 ['315', '223', '169', '408', '272', '137', '183', '113', '1019', '1149']
501 ['272', '515', '169', '318', '408', '923', '134', '657', '357', '64']
525 ['318', '50', '114', '169', '178', '483', '316', '134', '195', '272']
521 ['114', '169', '316', '302', '408', '272', '603', '98', '611', '64']
520 ['318', '483', '64', '705', '169', '513', '408', '515', '205', '168']
519 ['302', '474', '265', '144', '234', '246', '98', '193', '194', '603']
528 ['169', '172', '408', '64', '114', '272', '313', '12', '483', '963']
532 ['174', '172', '64', '286', '173', '50', '513', '114', '28', '385']
530 ['169', '923', '166', '318', '315', '1449', '272', '408', '114', '480']
531 ['480', '483', '199', '191', '318', '479', '64', '50', '408', '496']
529 ['64', '136', '408', '318', '169', '22', '98', '265', '189', '114']
517 ['183', '169', '178', '285', '114', '657', '408', '100', '519', '96']
527 ['515', '199', '178', '408', '484', '480', '251', '481', '98', '836']
485 ['50', '357', '178', '603', '169', '134', '480', '483', '408', '172']
533 ['59', '512', '923', '963', '114', '732', '272', '531', '513', '136']
535 ['408', '1019', '12', '169', '589', '251', '89', '183', '694', '430']
536 ['313', '515', '178', '479', '615', '114', '651', '657', '272', '64']
526 ['98', '611', '169', '12', '603', '511', '89', '1142', '479', '8']
537 ['251', '408', '169', '223', '114', '316', '522', '154', '320', '9']
534 ['603', '515', '512', '480', '498', '23', '132', '174', '272', '483']
541 ['313', '223', '272', '318', '169', '114', '180', '89', '515', '484']
538 ['114', '169', '657', '515', '511', '64', '408', '603', '479', '190']
542 ['408', '169', '483', '474', '480', '603', '98', '657', '251', '178']
545 ['12', '313', '189', '408', '64', '302', '480', '169', '611', '615']
539 ['169', '114', '89', '408', '12', '302', '318', '100', '251', '1019']
547 ['169', '408', '483', '134', '223', '285', '199', '114', '318', '657']
543 ['178', '408', '127', '657', '483', '316', '50', '489', '921', '182']
548 ['195', '96', '174', '228', '230', '82', '177', '408', '169', '483']
546 ['132', '178', '285', '114', '529', '603', '520', '316', '429', '483']
522 ['127', '408', '483', '98', '647', '169', '64', '191', '603', '515']
551 ['530', '435', '61', '169', '498', '434', '474', '114', '513', '320']
544 ['64', '357', '318', '127', '513', '408', '169', '285', '98', '56']
553 ['12', '408', '169', '302', '318', '272', '64', '694', '223', '357']
552 ['169', '272', '114', '12', '483', '408', '603', '318', '302', '189']
540 ['169', '357', '483', '657', '603', '923', '511', '427', '318', '12']
554 ['313', '169', '64', '408', '178', '357', '480', '483', '515', '427']
550 ['318', '64', '483', '357', '659', '603', '272', '408', '496', '86']
556 ['357', '98', '483', '114', '185', '197', '515', '316', '199', '180']
559 ['302', '251', '923', '50', '1449', '483', '169', '213', '694', '480']
560 ['169', '173', '408', '114', '223', '64', '56', '357', '251', '172']
561 ['134', '127', '408', '654', '169', '192', '114', '165', '242', '663']
563 ['64', '408', '480', '603', '318', '178', '357', '169', '174', '316']
566 ['408', '169', '474', '60', '185', '183', '603', '61', '654', '515']
557 ['114', '318', '169', '484', '272', '427', '178', '513', '64', '83']
558 ['169', '515', '12', '408', '114', '272', '603', '657', '483', '98']
564 ['169', '12', '64', '318', '408', '515', '657', '251', '603', '316']
565 ['474', '603', '272', '408', '178', '169', '251', '302', '64', '483']
573 ['318', '169', '408', '64', '515', '483', '251', '12', '114', '172']
549 ['64', '318', '603', '357', '12', '98', '483', '169', '69', '174']
567 ['114', '169', '285', '408', '320', '512', '528', '251', '316', '86']
569 ['318', '408', '169', '483', '223', '513', '64', '98', '427', '603']
562 ['178', '272', '474', '647', '607', '513', '199', '615', '522', '963']
576 ['1019', '483', '408', '318', '302', '169', '272', '515', '64', '170']
577 ['408', '524', '661', '520', '1194', '641', '603', '169', '136', '511']
579 ['313', '483', '318', '496', '64', '172', '114', '174', '963', '205']
574 ['64', '318', '178', '515', '357', '42', '98', '408', '169', '50']
555 ['515', '408', '657', '483', '12', '205', '528', '199', '611', '958']
572 ['474', '64', '483', '318', '114', '408', '427', '7', '12', '169']
575 ['169', '408', '480', '56', '64', '223', '134', '513', '484', '12']
584 ['408', '318', '169', '515', '483', '357', '64', '96', '603', '519']
588 ['19', '963', '114', '1137', '923', '1019', '196', '302', '709', '520']
587 ['114', '483', '169', '515', '318', '223', '1194', '923', '134', '100']
568 ['318', '192', '408', '23', '480', '357', '170', '302', '1070', '8']
586 ['12', '513', '641', '512', '114', '285', '189', '64', '168', '529']
585 ['318', '169', '178', '12', '515', '480', '100', '174', '408', '603']
582 ['56', '127', '12', '172', '483', '98', '64', '199', '183', '408']
591 ['408', '134', '169', '178', '483', '474', '513', '318', '694', '98']
581 ['483', '408', '169', '493', '251', '272', '484', '474', '318', '657']
592 ['474', '511', '114', '19', '205', '45', '199', '582', '1461', '661']
580 ['56', '357', '114', '603', '318', '657', '178', '14', '83', '86']
590 ['357', '180', '318', '50', '408', '483', '134', '135', '191', '480']
593 ['64', '408', '114', '483', '169', '258', '657', '479', '228', '12']
583 ['474', '178', '134', '179', '127', '603', '183', '56', '132', '64']
596 ['114', '515', '64', '408', '169', '483', '318', '513', '98', '251']
570 ['114', '483', '515', '318', '169', '187', '12', '50', '408', '127']
599 ['515', '479', '272', '258', '408', '69', '318', '64', '169', '50']
589 ['22', '169', '8', '50', '515', '12', '611', '143', '408', '647']
594 ['408', '114', '12', '513', '98', '64', '285', '923', '519', '320']
597 ['272', '172', '12', '199', '427', '515', '169', '22', '195', '498']
578 ['50', '483', '172', '641', '169', '114', '181', '480', '302', '318']
601 ['169', '408', '114', '134', '199', '180', '529', '603', '657', '648']
602 ['408', '318', '174', '172', '169', '64', '483', '511', '302', '603']
600 ['100', '12', '357', '483', '180', '603', '8', '169', '64', '488']
605 ['251', '474', '480', '134', '316', '197', '511', '199', '169', '272']
603 ['169', '191', '427', '59', '963', '483', '480', '657', '195', '318']
595 ['56', '172', '187', '198', '174', '169', '408', '357', '318', '12']
606 ['318', '408', '114', '357', '496', '480', '603', '275', '169', '515']
608 ['521', '251', '198', '169', '531', '923', '180', '515', '320', '1137']
|
notebooks/tay_donovan_12964300_week3_SVC_avg.ipynb | ###Markdown
**Model**
###Code
experiment_label = 'SVC04_avg'
user_label = 'tay_donovan'
###Output
_____no_output_____
###Markdown
**Aim** Look for a performance improvement in the SVC model by replacing all negative values with the column mean (the 'mean' cleaning strategy used below). **Findings** Findings for this notebook
###Code
#Initial imports
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
import os
import sys
sys.path.append(os.path.abspath('..'))
from src.common_lib import DataReader, NBARawData
from sklearn.svm import SVC
###Output
_____no_output_____
###Markdown
**Data input and cleansing**
###Code
#Load dataset using common function DataReader.read_data()
data_reader = DataReader()
# Load Raw Train Data
df_train = data_reader.read_data(NBARawData.TRAIN)
# Load Test Raw Data
df_test = data_reader.read_data(NBARawData.TEST)
#For train dataframe, remove the identifier columns 'Id' and 'Id_old'
cols_drop = ["Id", "Id_old"]
df_train.drop(cols_drop, axis=1, inplace=True)
df_train.columns = df_train.columns.str.strip()
df_train.describe
#For test dataframe, remove the identifier columns 'Id' and 'Id_old'
df_test.drop(cols_drop, axis=1, inplace=True)
df_test.columns = df_test.columns.str.strip()
df_test.describe
###Output
_____no_output_____
###Markdown
**Negative values in dataset**
###Code
print(df_train.where(df_train < 0).count())
# Negative values do not make sense in this context
#Define negative cleaning function
def clean_negatives(strategy, df):
if strategy=='abs':
df = abs(df)
if strategy=='null':
df[df < 0] = 0
if strategy=='mean':
df[df < 0] = None
df.fillna(df.mean(), inplace=True)
return(df)
#Clean negative numbers
negatives_strategy = 'mean'
df_train = clean_negatives(negatives_strategy, df_train)
df_test = clean_negatives(negatives_strategy, df_test)
###Output
_____no_output_____
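As a quick sanity check (not part of the original notebook), one could re-run the negative-value count from above to confirm the cleaning step worked; with the 'mean' strategy both totals should now be zero:

```python
# hypothetical check: no negative entries should remain after cleaning
print(df_train.where(df_train < 0).count().sum())
print(df_test.where(df_test < 0).count().sum())
```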
###Markdown
**Feature Correlation and Selection**
###Code
#Use Pearson Correlation to determine feature correlation
pearsoncorr = df_train.corr('pearson')
#Create heatmap of pearson correlation factors
fig, ax = plt.subplots(figsize=(10,10))
sb.heatmap(pearsoncorr,
xticklabels=pearsoncorr.columns,
yticklabels=pearsoncorr.columns,
cmap='RdBu_r',
annot=True,
linewidth=0.2)
#Drop correlated features w/ score over 0.9 - retain "MINS", "3P MADE","FTM","REB"
selected_features = data_reader.select_feature_by_correlation(df_train)
###Output
_____no_output_____
###Markdown
**Standard Scaling**
###Code
#Standardise scaling of all feature values
df_train_selected = df_train[selected_features]
#Apply scaler
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
df_cleaned = df_train_selected.copy()
target = df_cleaned.pop('TARGET_5Yrs')
df_train_cleaned = scaler.fit_transform(df_cleaned)
df_train_scaled = pd.DataFrame(df_train_cleaned)
df_train_scaled.columns = df_cleaned.columns
df_train_scaled['TARGET_5Yrs'] = target
# Split the training dataset using common function data_reader.splitdata
X_train, X_val, y_train, y_val = data_reader.split_data(df_train)
#X_train, X_val, y_train, y_val = data_reader.split_data(df_train_scaled)
###Output
_____no_output_____
###Markdown
**Model Selection and Training**
###Code
#Create Optimised Model
optmodel = SVC()
#Use GridSearchCV to optimise parameters
from sklearn.model_selection import GridSearchCV
# defining parameter range
param_grid = {'C': [0.1, 1, 10, 100, 500],
'gamma': [1, 0.1, 0.01, 0.001, 0.0001],
'kernel': ['rbf']}
grid = GridSearchCV(SVC(probability=True), param_grid, refit = True, verbose = 3, scoring="roc_auc", n_jobs=-2)
# fitting the model for grid search
grid.fit(X_train, y_train)
#Print the optimised parameters
print(grid.best_params_)
#Create model with the optimised parameters
model = SVC(C=500, break_ties=False, class_weight='balanced', coef0=0.0,
decision_function_shape='ovr', degree=3,
gamma=0.0001, kernel='rbf', max_iter=-1,
probability=True, random_state=None, shrinking=True,
tol=0.001, verbose=False)
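# Note (not in the original notebook): since the grid search was run with
# refit=True, an equivalent alternative would be to reuse the refit estimator
# directly, e.g. model = grid.best_estimator_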
X_train.describe()
model.fit(X_train, y_train);
#Store model in /models
from joblib import dump
dump(model, '../models/' + experiment_label + '.joblib')
###Output
_____no_output_____
###Markdown
**Model Evaluation**
###Code
#Create predictions for train and validation
y_train_preds = model.predict(X_train)
y_val_preds = model.predict(X_val)
#Evaluate train predictions
#from src.models.aj_metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import plot_roc_curve, plot_precision_recall_curve
from sklearn.metrics import classification_report
sys.path.append(os.path.abspath('..'))
from src.models.aj_metrics import confusion_matrix
y_train_preds
#Training performance results
print("ROC AUC Score:")
print(roc_auc_score(y_train,y_train_preds))
print(classification_report(y_train, y_train_preds))
#Confusion matrix
print(confusion_matrix(y_train, y_train_preds))
#ROC Curve
plot_roc_curve(model,X_train, y_train)
#Precision Recall Curve
plot_precision_recall_curve(model,X_train,y_train)
#Validation performance analysis
print("ROC AUC Score:")
print(roc_auc_score(y_val,y_val_preds))
print("Confusion Matrix:")
print(classification_report(y_val, y_val_preds))
#Confusion matrix
print(confusion_matrix(y_train, y_train_preds))
#ROC Curve
plot_roc_curve(model,X_val, y_val)
#Precision Recall Curve
plot_precision_recall_curve(model,X_train,y_train)
###Output
_____no_output_____
###Markdown
**Test output**
###Code
#Output predictions
X_test = df_test
y_test_preds = model.predict_proba(X_test)[:,1]
y_test_preds
output = pd.DataFrame({'Id': range(0,3799), 'TARGET_5Yrs': [p for p in y_test_preds]})
output.to_csv("../reports/" + user_label + "_submission_" + experiment_label + ".csv", index=False)
###Output
_____no_output_____ |
LA_Eigenvalue_and_Eigenvector_(12_8_21).ipynb | ###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A=np.array([[-12,3],[4,1]])
print(A)
inv_A=np.linalg.inv(A)
print(inv_A)
B=np.array([[0],[0]])
print(B)
X=np.dot(inv_A,B)
print(X)
#X=solve(A,B)
#print(X)
#Example 1
A=np.array([[-6,3],[4,5]])
print(A,"\n")
w,v=np.linalg.eig(A)
print("The eigenvalue/s is/are:", w)
print("The right eigenvectors are:",v)
#x=v.round()
#print(x)
#Example 2
A=np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A,"\n")
s,t=np.linalg.eig(A)
print(s.round())
print(t.round(),"\n")
c=np.dot(A,t.round())
print(c)
f=np.array([[-1],[-1],[-1]])
print(f)
g=np.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
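As an additional check (not in the original notebook), each eigenpair returned by `np.linalg.eig` can be verified against the defining relation $A v = \lambda v$:

```python
# verify A v = lambda v for every eigenpair (columns of t are the eigenvectors)
for i in range(len(s)):
    print(np.allclose(np.dot(A, t[:, i]), s[i] * t[:, i]))
```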
|
Figure_3_crit_init_noisy_relu.ipynb | ###Markdown
Critical initialisation for noisy ReLU networks. In this notebook, we provide code to reproduce Figure 3 in the paper.
###Code
# imports
import os, sys, pickle
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from matplotlib.lines import Line2D
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap
# custom import
from src.numpy_simulation import *
# from src.simulation import *
from src.viz import get_colours
from src.utils import load_experiment
from src.theory import depth
from src.theory import critical_point
# plot settings
import matplotlib as mpl
# mpl.rcParams['figure.figsize'] = [14.0, 6.0]
mpl.rcParams['font.size'] = 100
sns.set_context("paper", font_scale=2)
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['text.usetex'] = True
plt.rcParams['text.latex.unicode'] = True
plt.rcParams['image.cmap'] = 'viridis'
# plt.rcParams.update({'font.size': 50})
# results directory
relative_results_dir = "results"
results_dir = os.path.join(relative_results_dir)
###Output
_____no_output_____
###Markdown
--- Noisy variance map for ReLU with multiplicative noise ---
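For orientation (our paraphrase, not a quote from the paper): for ReLU with multiplicative noise of second moment $\mu_2$, the length map plotted below is linear, $q^l = \tfrac{\sigma_w^2 \mu_2}{2}\, q^{l-1} + \sigma_b^2$, so its slope equals one (the critical case) exactly when $\sigma_w^2 = 2/\mu_2$, which is the boundary drawn in panel (c).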
###Code
experiments = [
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"underflow"},
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"overflow"},
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"underflow"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"overflow"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"crit"}
]
for i, experiment in enumerate(experiments):
dist = experiment['dist']
noise = experiment['noise']
act = experiment['act']
init = experiment['init']
# run simulations for scenario
noisy_signal_prop_simulations(dist, noise, act, init, seed=i)
# Dictionary for data that needs to be extracted
tests = [
{
"distributions": [{"dist": "mult gauss", "std": 0.25}],
"activations": ["relu"],
"inits": ["underflow"]
},
{
"distributions": [{"dist": "mult gauss", "std": 0.25}],
"activations": ["relu"],
"inits": ["overflow"]
},
{
"distributions": [{"dist": "mult gauss", "std": 0.25}],
"activations": ["relu"],
"inits": ["crit"]
}
,
{
"distributions": [{"dist": "bern", "prob_1": 0.6}],
"activations": ["relu"],
"inits": ["underflow"]
},
{
"distributions": [{"dist": "bern", "prob_1": 0.6}],
"activations": ["relu"],
"inits": ["overflow"]
},
{
"distributions": [{"dist": "bern", "prob_1": 0.6}],
"activations": ["relu"],
"inits": ["crit"]
}
]
############################################################################
# q - length / variance plots
############################################################################
nq = 30
qmax = 15
qrange = np.linspace(0, qmax, nq)
widxs = [0]
bidxs = [0]
n_hidden_layers = 16
n_tests = len(tests)
pal = get_colours(10, 7)
test_data = []
for i, test in enumerate(tests):
test_data.append(load_experiment(test, ["q_maps", "single_layer_qmap_sim", "multi_layer_qmap_sim"], "results"))
fig, [ax1, ax2, ax3] = plt.subplots(1, 3, figsize=(16, 5))
# Add unity line
ax1.plot((0, qmax), (0, qmax), '--', color='k', zorder=900, dashes=(12, 24))
ax1.set_xlim(0, qmax)
ax1.set_ylim(0, qmax)
ax1.set_xlabel('Input variance ($q^{l-1})$')
ax1.set_ylabel('Output variance ($q^l$)')
ax1.set_title("Iterative variance map")
ax1.text(2, 10, r'$\sigma^2_w > \frac{2}{\mu_2}$', fontsize=20)
ax1.text(10, 1, r'$\sigma^2_w < \frac{2}{\mu_2}$', fontsize=20)
ax1.text(11, 8.5, r'$\sigma^2_w = \frac{2}{\mu_2}$', fontsize=20)
# axis 2
ax2.set_xlim(0, qmax)
ax2.set_ylim(0, qmax)
ax2.set_xlabel('Layer ($l$)')
ax2.set_ylabel('Variance ($q^{l})$')
ax2.set_title("Dynamics of $q$")
nn = len(test_data)
col_i = 0
bern_label = False
gauss_label = False
for test, attr in zip(test_data, tests):
for dist in attr["distributions"]:
label = ""
if dist['dist'] == "none":
col_i = 0
elif dist['dist'] == "bern":
col_i = 1
label = "dropout"
elif "gauss" in dist['dist']:
col_i = 3
label = "Mult Gauss"
for act in attr["activations"]:
for init in attr["inits"]:
dashes = (None, None)
if "under" in init:
shade_i = 4
elif "crit" in init:
shade_i = 5
dashes = (24, 12) if dist['dist'] == "bern" else (None, None)
else:
shade_i = 6
# extract test data
qmaps = test[dist['dist']][act][init]['q_maps']['qmaps']
single_layer_sims = test[dist['dist']][act][init]['single_layer_qmap_sim']
multi_layer_sims = test[dist['dist']][act][init]['multi_layer_qmap_sim']['qmaps_sim']
multi_layer_theory = test[dist['dist']][act][init]['multi_layer_qmap_sim']['theory']
############################################################################
# left
############################################################################
for w, b in zip(widxs, bidxs):
# plot means of simulation (as dots)
mu = single_layer_sims[w, b].mean(-1).mean(-2)
ax1.plot(qrange, mu, w, b, marker='o', ls='none', markersize=1, alpha=0.9, zorder=0, c=pal[col_i][shade_i])
# add confidence interval around simulation
std = single_layer_sims[w, b].mean(-1).std(-2)
ax1.fill_between(qrange, mu-std, mu+std, alpha=0.4, label='_nolegend_', color=pal[col_i][shade_i])
# theory line
ax1.plot(qrange, qmaps[0, 0, :, 1], c=pal[col_i][shade_i], label=label, dashes=dashes)
# fixed point
############################################################################
# middle
############################################################################
q = 1
xx = np.arange(multi_layer_sims.shape[-2])
for w, b in zip(widxs, bidxs):
# confidence intervals
mu = multi_layer_sims[w, b].mean(axis=-1).mean(axis=0)
std = multi_layer_sims[w, b].mean(axis=-1).std(axis=0)
# plot theory
if "dropout" in label and not bern_label:
bern_label = True
ax2.plot(multi_layer_theory, c=pal[col_i][shade_i], label=label)
elif "Gauss" in label and not gauss_label:
gauss_label = True
ax2.plot(multi_layer_theory, c=pal[col_i][shade_i], label=label)
else:
ax2.plot(multi_layer_theory, c=pal[col_i][shade_i])
# plot the simulation
ax2.fill_between(xx, mu-std, mu+std, alpha=0.2, label='_nolegend_', color=pal[col_i][shade_i])
# dots for mean
ax2.plot(xx, mu, 'o', markersize=4, alpha=0.9, color=pal[col_i][shade_i])
leg = ax2.legend()
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(3.0)
mu21 = np.linspace(1, 2, 100)
sigma1 = 2/mu21
############################################################################
# right
############################################################################
ax3.plot(mu21, sigma1, c="purple", label="Variance critical boundary", linestyle='--')
ax3.fill_between(mu21, 1, sigma1, facecolor='blue', alpha=0.2)
ax3.fill_between(mu21, 2, sigma1, facecolor='red', alpha=0.2)
ax3.text(1.5, 1.6, 'Overflow', fontsize=25)
ax3.text(1.55, 1.5, r'($\sigma^2_w > \frac{2}{\mu_2}$)', fontsize=15)
ax3.text(1.1, 1.2, 'Underflow', fontsize=25)
ax3.text(1.15, 1.1, r'($\sigma^2_w < \frac{2}{\mu_2}$)', fontsize=15)
ax3.text(1.2, 1.7, r'$\sigma^2_w = \frac{2}{\mu_2}$', fontsize=18)
ax3.set_xlim(1, 2)
ax3.set_ylim(1, 2)
ax3.set_xlabel('Weight initialisation ($\sigma^2_w$)')
ax3.set_ylabel('Second moment of noise dist. ($\mu_2$)')
leg = ax3.legend(prop={'size': 15})
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(3.0)
ax3.set_title('Variance propagation dynamics')
fig.text(0.02, 0.95, "(a)", fontsize=20)
fig.text(0.35, 0.95, "(b)", fontsize=20)
fig.text(0.68, 0.95, "(c)", fontsize=20)
plt.gcf().tight_layout()
plt.show()
# plt.savefig("vairance.pdf", dpi=200)
###Output
_____no_output_____ |
book-d2l-en/chapter_computational-performance/auto-parallelism.ipynb | ###Markdown
Automatic Parallelism

MXNet automatically constructs computational graphs at the back end. Using a computational graph, the system is aware of all the computational dependencies, and can selectively execute multiple non-interdependent tasks in parallel to improve computing performance. For instance, the first example in the [“Asynchronous Computing”](async-computation.md) section executes `a = nd.ones((1, 2))` and `b = nd.ones((1, 2))` in turn. There is no dependency between these two steps, so the system can choose to execute them in parallel.

Typically, a single operator will use all the computational resources on all CPUs or on a single GPU. For example, the `dot` operator will use all threads on all CPUs (even if there are multiple CPU processors on a single machine) or a single GPU. If the computational load of each operator is large enough and multiple operators are run in parallel on only the CPU or only a single GPU, then each operator can receive only a portion of the computational resources of that CPU or GPU. Even if these computations can be parallelized, the ultimate increase in computing performance may not be significant. In this section, our discussion of automatic parallel computation mainly focuses on parallel computation using both CPUs and GPUs, as well as the parallelization of computation and communication.

First, import the required packages or modules for the experiment in this section. Note that we need at least one GPU to run the experiment in this section.
###Code
import sys
sys.path.insert(0, '..')
import d2l
import mxnet as mx
from mxnet import nd
###Output
_____no_output_____
###Markdown
Parallel Computation using CPUs and GPUs

First, we will discuss parallel computation using CPUs and GPUs, for example, when computation in a program occurs both on the CPU and a GPU. To begin, define the `run` function so that it performs 10 matrix multiplications.
###Code
def run(x):
return [nd.dot(x, x) for _ in range(10)]
###Output
_____no_output_____
###Markdown
Next, create an NDArray on both the CPU and GPU.
###Code
x_cpu = nd.random.uniform(shape=(2000, 2000))
x_gpu = nd.random.uniform(shape=(6000, 6000), ctx=mx.gpu(0))
###Output
_____no_output_____
###Markdown
Then, use the two NDArrays to run the `run` function on both the CPU and GPU and print the time required.
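The `d2l.Benchmark` helper used below is just a timing context manager. For readers without the `d2l` package, a minimal stand-in with the same interface might look like the following sketch (an approximation, not the library's actual implementation):

```python
import time

class Benchmark:
    # Minimal stand-in for d2l.Benchmark: prints the elapsed wall-clock time
    # for the enclosed block, prefixed with an optional label.
    def __init__(self, prefix=None):
        self.prefix = prefix + ' ' if prefix else ''
    def __enter__(self):
        self.start = time.time()
    def __exit__(self, *args):
        print('%stime: %.4f sec' % (self.prefix, time.time() - self.start))
```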
###Code
run(x_cpu) # Warm-up begins
run(x_gpu)
nd.waitall() # Warm-up ends
with d2l.Benchmark('Run on CPU.'):
run(x_cpu)
nd.waitall()
with d2l.Benchmark('Then run on GPU.'):
run(x_gpu)
nd.waitall()
###Output
Run on CPU. time: 0.6224 sec
###Markdown
We remove `nd.waitall()` between the two computing tasks `run(x_cpu)` and `run(x_gpu)` and hope the system can automatically parallelize these two tasks.
###Code
with d2l.Benchmark('Run on both CPU and GPU in parallel.'):
run(x_cpu)
run(x_gpu)
nd.waitall()
###Output
Run on both CPU and GPU in parallel. time: 1.2206 sec
###Markdown
As we can see, when two computing tasks are executed together, the total execution time is less than the sum of their separate execution times. This means that MXNet can effectively automate parallel computation on CPUs and GPUs.

Parallel Computation of Computing and Communication

In computations that use both the CPU and GPU, we often need to copy data between the CPU and GPU, resulting in data communication. In the example below, we compute on the GPU and then copy the results back to the CPU. We print the GPU computation time and the communication time from the GPU to the CPU.
###Code
def copy_to_cpu(x):
return [y.copyto(mx.cpu()) for y in x]
with d2l.Benchmark('Run on GPU.'):
y = run(x_gpu)
nd.waitall()
with d2l.Benchmark('Then copy to CPU.'):
copy_to_cpu(y)
nd.waitall()
###Output
Run on GPU. time: 1.2176 sec
###Markdown
We remove the `waitall` function between computation and communication and print the total time needed to complete both tasks.
###Code
with d2l.Benchmark('Run and copy in parallel.'):
y = run(x_gpu)
copy_to_cpu(y)
nd.waitall()
###Output
Run and copy in parallel. time: 1.2653 sec
|
Face_Recognition_Workshop.ipynb | ###Markdown
###Code
# My output model for Face Recognition using the inputs provided in the Workshop and knowledge of Machine Learning.
import imutils
import numpy as np
import cv2
from google.colab.patches import cv2_imshow
from IPython.display import display, Javascript
from google.colab.output import eval_js
from base64 import b64decode
def take_photo(filename='photo.jpg', quality=0.8):
js = Javascript('''
async function takePhoto(quality) {
const div = document.createElement('div');
const capture = document.createElement('button');
capture.textContent = 'Capture';
div.appendChild(capture);
const video = document.createElement('video');
video.style.display = 'block';
const stream = await navigator.mediaDevices.getUserMedia({video: true});
document.body.appendChild(div);
div.appendChild(video);
video.srcObject = stream;
await video.play();
// Resize the output to fit the video element.
google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
// Wait for Capture to be clicked.
await new Promise((resolve) => capture.onclick = resolve);
const canvas = document.createElement('canvas');
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
canvas.getContext('2d').drawImage(video, 0, 0);
stream.getVideoTracks()[0].stop();
div.remove();
return canvas.toDataURL('image/jpeg', quality);
}
''')
display(js)
data = eval_js('takePhoto({})'.format(quality))
binary = b64decode(data.split(',')[1])
with open(filename, 'wb') as f:
f.write(binary)
return filename
image_file = take_photo()
#image = cv2.imread(image_file, cv2.IMREAD_UNCHANGED)
image = cv2.imread(image_file)
# resize it to have a maximum width of 400 pixels
image = imutils.resize(image, width=400)
(h, w) = image.shape[:2]
print(w,h)
cv2_imshow(image)
!wget -N https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt
!wget -N https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel
print("[INFO] loading model...")
prototxt = 'deploy.prototxt'
model = 'res10_300x300_ssd_iter_140000.caffemodel'
net = cv2.dnn.readNetFromCaffe(prototxt, model)
# resize it to have a maximum width of 400 pixels
image = imutils.resize(image, width=400)
blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))
print("[INFO] computing object detections...")
net.setInput(blob)
detections = net.forward()
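# Note (added comment): net.forward() returns an array of shape
# (1, 1, num_detections, 7); based on how it is indexed below, index 2 holds
# the detection confidence and indices 3:7 hold the normalized box coordinates.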
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with the prediction
confidence = detections[0, 0, i, 2]
if confidence > 0.5:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
text = "{:.2f}%".format(confidence * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(image, (startX, startY), (endX, endY), (0, 0, 255), 2)
cv2.putText(image, text, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
cv2_imshow(image)
###Output
_____no_output_____ |
homeworkdata/Homework_3_Paolo_Rivas_Legua.ipynb | ###Markdown
Homework assignment 3

These problem sets focus on using the Beautiful Soup library to scrape web pages.

Problem Set 1: Basic scraping

I've made a web page for you to scrape. It's available [here](http://static.decontextualize.com/widgets2016.html). The page concerns the catalog of a famous [widget](http://en.wikipedia.org/wiki/Widget) company. You'll be answering several questions about this web page. In the cell below, I've written some code so that you end up with a variable called `html_str` that contains the HTML source code of the page, and a variable `document` that stores a Beautiful Soup object.
###Code
from bs4 import BeautifulSoup
from urllib.request import urlopen
html_str = urlopen("http://static.decontextualize.com/widgets2016.html").read()
document = BeautifulSoup(html_str, "html.parser")
###Output
_____no_output_____
###Markdown
Now, in the cell below, use Beautiful Soup to write an expression that evaluates to the number of `<h3>` tags contained in `widgets2016.html`.
###Code
h3_tags = document.find_all('h3')
print(type(h3_tags))
print([tag.string for tag in h3_tags])
len([tag.string for tag in h3_tags])
###Output
<class 'bs4.element.ResultSet'>
['Forensic Widgets', 'Wondrous widgets', 'Mood widgets', 'Hallowed widgets']
###Markdown
Now, in the cell below, write an expression or series of statements that displays the telephone number beneath the "Widget Catalog" header.
###Code
telephone_checkup = document.find('a', attrs={'class': 'tel'})
[tag.string for tag in telephone_checkup]
###Output
_____no_output_____
###Markdown
In the cell below, use Beautiful Soup to write some code that prints the names of all the widgets on the page. After your code has executed, `widget_names` should evaluate to a list that looks like this (though not necessarily in this order):

```
Skinner Widget
Widget For Furtiveness
Widget For Strawman
Jittery Widget
Silver Widget
Divided Widget
Manicurist Widget
Infinite Widget
Yellow-Tipped Widget
Unshakable Widget
Self-Knowledge Widget
Widget For Cinema
```
###Code
all_widget = document.find_all('td', attrs={'class': 'wname'})
#USING FOR LOOP
for widget in all_widget:
widget2 = widget.string
print(widget2)
print("##########")
#Using LIST comprehention
raw_widget = [tag.string for tag in all_widget]
raw_widget
###Output
Skinner Widget
Widget For Furtiveness
Widget For Strawman
Jittery Widget
Silver Widget
Divided Widget
Manicurist Widget
Infinite Widget
Yellow-Tipped Widget
Unshakable Widget
Self-Knowledge Widget
Widget For Cinema
##########
###Markdown
Problem set 2: Widget dictionaries

For this problem set, we'll continue to use the HTML page from the previous problem set. In the cell below, I've made an empty list and assigned it to a variable called `widgets`. Write code that populates this list with dictionaries, one dictionary per widget in the source file. The keys of each dictionary should be `partno`, `wname`, `price`, and `quantity`, and the value for each of the keys should be the value for the corresponding column for each row. After executing the cell, your list should look something like this:

```
[{'partno': 'C1-9476', 'price': '$2.70', 'quantity': u'512', 'wname': 'Skinner Widget'},
 {'partno': 'JDJ-32/V', 'price': '$9.36', 'quantity': '967', 'wname': u'Widget For Furtiveness'},
 ...several items omitted...
 {'partno': '5B-941/F', 'price': '$13.26', 'quantity': '919', 'wname': 'Widget For Cinema'}]
```

And this expression:

    widgets[5]['partno']

... should evaluate to:

    LH-74/O
###Code
widgets = []
# your code here
search_table = document.find_all('tr', attrs={'class': 'winfo'})
for new_key in search_table:
diccionaries = {}
partno_tag = new_key.find('td', attrs={'class': 'partno'})
price_tag = new_key.find('td', attrs={'class': 'price'})
quantity_tag = new_key.find('td', attrs={'class': 'quantity'})
widget_tag = new_key.find('td', attrs={'class': 'wname'})
diccionaries['partno'] = partno_tag.string
diccionaries['price'] = price_tag.string
diccionaries['quantity'] = quantity_tag.string
diccionaries['widget'] = widget_tag.string
widgets.append(diccionaries)
widgets
# end your code
#test
widgets[5]['partno']
###Output
_____no_output_____
###Markdown
In the cell below, duplicate your code from the previous question. Modify the code to ensure that the values for `price` and `quantity` in each dictionary are floating-point numbers and integers, respectively. I.e., after executing the cell, your code should display something like this: [{'partno': 'C1-9476', 'price': 2.7, 'quantity': 512, 'widgetname': 'Skinner Widget'}, {'partno': 'JDJ-32/V', 'price': 9.36, 'quantity': 967, 'widgetname': 'Widget For Furtiveness'}, ... some items omitted ... {'partno': '5B-941/F', 'price': 13.26, 'quantity': 919, 'widgetname': 'Widget For Cinema'}](Hint: Use the `float()` and `int()` functions. You may need to use string slices to convert the `price` field to a floating-point number.)
###Code
widgets = []
# your code here
search_table = document.find_all('tr', attrs={'class': 'winfo'})
for new_key in search_table:
diccionaries = {}
partno_tag = new_key.find('td', attrs={'class': 'partno'})
price_tag = new_key.find('td', attrs={'class': 'price'})
quantity_tag = new_key.find('td', attrs={'class': 'quantity'})
widget_tag = new_key.find('td', attrs={'class': 'wname'})
diccionaries['partno'] = partno_tag.string
diccionaries['price'] = float(price_tag.string[1:])
diccionaries['quantity'] = int(quantity_tag.string)
diccionaries['widget'] = widget_tag.string
widgets.append(diccionaries)
widgets
#widgets
# end your code
###Output
_____no_output_____
###Markdown
Great! I hope you're having fun. In the cell below, write an expression or series of statements that uses the `widgets` list created in the cell above to calculate the total number of widgets that the factory has in its warehouse.

Expected output: `7928`
###Code
new_list = []
for items in widgets:
new_list.append(items['quantity'])
sum(new_list)
###Output
_____no_output_____
###Markdown
In the cell below, write some Python code that prints the names of widgets whose price is above $9.30.

Expected output:
```
Widget For Furtiveness
Jittery Widget
Silver Widget
Infinite Widget
Widget For Cinema
```
###Code
for widget in widgets:
if widget['price'] > 9.30:
print(widget['widget'])
###Output
Widget For Furtiveness
Jittery Widget
Silver Widget
Infinite Widget
Widget For Cinema
###Markdown
Problem set 3: Sibling rivalries

In the following problem set, you will yet again be working with the data in `widgets2016.html`. In order to accomplish the tasks in this problem set, you'll need to learn about Beautiful Soup's `.find_next_sibling()` method. Here's some information about that method, cribbed from the notes:

Often, the tags we're looking for don't have a distinguishing characteristic, like a class attribute, that allows us to find them using `.find()` and `.find_all()`, and the tags also aren't in a parent-child relationship. This can be tricky! For example, take the following HTML snippet, (which I've assigned to a string called `example_html`):
###Code
example_html = """
<h2>Camembert</h2>
<p>A soft cheese made in the Camembert region of France.</p>
<h2>Cheddar</h2>
<p>A yellow cheese made in the Cheddar region of... France, probably, idk whatevs.</p>
"""
###Output
_____no_output_____
###Markdown
If our task was to create a dictionary that maps the name of the cheese to the description that follows in the `<p>` tag directly afterward, we'd be out of luck. Fortunately, Beautiful Soup has a `.find_next_sibling()` method, which allows us to search for the next tag that is a sibling of the tag you're calling it on (i.e., the two tags share a parent), that also matches particular criteria. So, for example, to accomplish the task outlined above:
###Code
example_doc = BeautifulSoup(example_html, "html.parser")
cheese_dict = {}
for h2_tag in example_doc.find_all('h2'):
cheese_name = h2_tag.string
cheese_desc_tag = h2_tag.find_next_sibling('p')
cheese_dict[cheese_name] = cheese_desc_tag.string
cheese_dict
###Output
_____no_output_____
###Markdown
With that knowledge in mind, let's go back to our widgets. In the cell below, write code that uses Beautiful Soup, and in particular the `.find_next_sibling()` method, to print the part numbers of the widgets that are in the table *just beneath* the header "Hallowed Widgets."

Expected output:
```
MZ-556/B
QV-730
T1-9731
5B-941/F
```
###Code
for h3_tag in document.find_all('h3'):
if "Hallowed widgets" in h3_tag:
table = h3_tag.find_next_sibling('table', {'class': 'widgetlist'})
partno = table.find_all('td', {'class': 'partno'})
for x in partno:
print(x.string)
###Output
MZ-556/B
QV-730
T1-9731
5B-941/F
###Markdown
Okay, now, the final task. If you can accomplish this, you are truly an expert web scraper. I'll have little web scraper certificates made up and I'll give you one, if you manage to do this thing. And I know you can do it!

In the cell below, I've created a variable `category_counts` and assigned to it an empty dictionary. Write code to populate this dictionary so that its keys are "categories" of widgets (e.g., the contents of the `<h3>` tags on the page: "Forensic Widgets", "Mood widgets", "Hallowed Widgets") and the value for each key is the number of widgets that occur in that category. I.e., after your code has been executed, the dictionary `category_counts` should look like this:

```
{'Forensic Widgets': 3, 'Hallowed widgets': 4, 'Mood widgets': 2, 'Wondrous widgets': 3}
```
###Code
widget_count = {}
doc = document.find_all('h3')
for h3_tag in doc:
widget_name = h3_tag.string
table = h3_tag.find_next_sibling('table', {'class': 'widgetlist'})
partno = table.find_all('td', {'class': 'partno'})
count = len(partno)
widget_count[widget_name] = count
#Display the counts per category
widget_count
###Output
_____no_output_____ |
content/machine_learning_methods/solution_jupyter_notebooks/Dimensionsreduktion.ipynb | ###Markdown
Dimensionality Reduction

In this example we want to reduce the dimensionality of a multi-dimensional data set (64 dimensions) so that we can plot it in 2 dimensions. We use a data set that represents handwritten digits as 8x8 grids. A description of the data set is available at [scikit-learn](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits) and in the [UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits).
###Code
# Load the data set
from sklearn.datasets import load_digits
digits = load_digits()
# We want to perform the reduction with a Principal Component Analysis (PCA),
# so we load the class and create an instance of it. We specify that we
# want to keep only two dimensions ("components") afterwards.
from sklearn.decomposition import PCA
pca = PCA(random_state=1, n_components=2)
# We can perform the transformation with the
# "fit_transform" function.
pca_result = pca.fit_transform(digits.data)
# We get back a matrix with the same number of data points
# but only two columns.
pca_result.shape
# We can now plot the two columns of the
# resulting matrix.
import matplotlib.pyplot as plt
%matplotlib inline
fig, ax = plt.subplots()
plt.scatter(pca_result[:, 0], pca_result[:, 1], c=digits.target, cmap="Set1")
plt.colorbar()
# We can follow the same procedure with the t-SNE method
from sklearn.manifold import TSNE
tsne = TSNE(random_state=1, n_components=2)
tsne_result = tsne.fit_transform(digits.data)
fig, ax = plt.subplots()
plt.scatter(tsne_result[:, 0], tsne_result[:, 1], c=digits.target, cmap="Set1")
plt.colorbar()
###Output
_____no_output_____ |
notebooks/Positivity of Adj_n + kOp_n in ISL(n,Z).ipynb | ###Markdown
Installation The following instructions were prepared using
###Code
versioninfo()
###Output
Julia Version 1.5.2
Commit 539f3ce943 (2020-09-23 23:17 UTC)
Platform Info:
OS: Linux (x86_64-pc-linux-gnu)
CPU: Intel(R) Core(TM) i9-9900X CPU @ 3.50GHz
WORD_SIZE: 64
LIBM: libopenlibm
LLVM: libLLVM-9.0.1 (ORCJIT, skylake-avx512)
Environment:
JULIA_NUM_THREADS = 4
###Markdown
Before exploring the notebook you need to clone the main repository:
```bash
git clone https://github.com/kalmarek/1812.03456.git
```
This notebook should be located in the `1812.03456/notebooks` directory. In the main directory (`1812.03456`) you should run the following code in `julia`'s `REPL` console to instantiate the environment for computations:
```julia
using Pkg
Pkg.activate(".")
Pkg.instantiate()
```
(this needs to be done once per installation). The Jupyter notebook may then be launched from the `REPL` by
```julia
julia> using IJulia
julia> notebook(dir=".")
```
Instantiation should install (among others): the [`SCS` solver][1], the [`JuMP` package][2] for mathematical programming and the `IntervalArithmetic.jl` package from [`ValidatedNumerics.jl`][3]. The environment uses the [`Groups.jl`][7], [`GroupRings.jl`][6] (which are built on the framework of [`AbstractAlgebra.jl`][4]) and [`PropertyT.jl`][8] packages.

[1]: https://github.com/cvxgrp/scs
[2]: https://github.com/JuliaOpt/JuMP.jl
[3]: https://github.com/JuliaIntervals/ValidatedNumerics.jl
[4]: https://github.com/Nemocas/AbstractAlgebra.jl
[5]: https://github.com/Nemocas/Nemo.jl
[6]: https://github.com/kalmarek/GroupRings.jl
[7]: https://github.com/kalmarek/Groups.jl
[8]: https://github.com/kalmarek/PropertyT.jl

The computation

The following programme certifies that
$$\operatorname{Adj}_4 + \operatorname{Op}_4 - 0.82\Delta_4 =\Sigma_i \xi_i^*\xi_i \in \Sigma^2_2\mathbb{R}\operatorname{SL}(4,\mathbb{Z}).$$
With small changes (which we will indicate) it also certifies that
$$\operatorname{Adj}_3 - 0.157999\Delta_3 \in \Sigma^2_2\mathbb{R}\operatorname{SL}(3,\mathbb{Z})$$
and that
$$\operatorname{Adj}_5 +1.5 \mathrm{Op}_5 - 1.5\Delta_5 \in \Sigma^2_2\mathbb{R}\operatorname{SL}(5,\mathbb{Z}).$$
###Code
using Pkg
Pkg.activate("..")
using Dates
now()
using LinearAlgebra
using AbstractAlgebra
using Groups
using GroupRings
using PropertyT
###Output
_____no_output_____
###Markdown
So far we only made the needed packages available in the notebook. In the next cell we define `G` to be the set of all $4\times 4$ matrices over $\mathbb Z$.(For the second computation, set `N=3` below; for the third, set `N=5`)
###Code
N = 4
G = MatrixAlgebra(zz, N)
###Output
_____no_output_____
###Markdown
Generating set

Now we create the elementary matrices $E_{i,j}$. The set of all such matrices and their inverses is denoted by `S`.
###Code
S = PropertyT.generating_set(G)
###Output
_____no_output_____
###Markdown
Group Ring and Laplacians

Now we will generate the ball `E_R` of radius $R=4$ in $\operatorname{SL}(N,\mathbb{Z})$ and use this as a (partial) basis in a group ring (denoted by `RG` below). Such a group ring also needs a multiplication table (`pm`, which is actually a *division table*), which is created as follows: when $x,y$ reside at the `i`-th and `j`-th positions in `E_R`, then `pm[i,j] = k`, where `k` is the position of $x^{-1}y$ in `E_R`.
###Code
halfradius = 2
E_R, sizes = Groups.wlmetric_ball(S, radius=2*halfradius);
E_rdict = GroupRings.reverse_dict(E_R)
pm = GroupRings.create_pm(E_R, E_rdict, sizes[halfradius]; twisted=true);
RG = GroupRing(G, E_R, E_rdict, pm)
@show sizes;
Δ = length(S)*one(RG) - sum(RG(s) for s in S)
###Output
sizes = [25, 433, 6149, 75197]
###Markdown
Orbit Decomposition

Now something happens: in the next cell we split the subspace of $\mathbb{R} \operatorname{SL}(N, \mathbb{Z})$ supported on `E_R` into irreducible representations of the wreath product $\mathbb Z / 2 \mathbb Z \wr \operatorname{Sym}_N$. The action of the wreath product on the elements of the matrix space is by conjugation, i.e. permutation of rows and columns.

We also compute projections on the invariant subspaces to later speed up the optimisation step.
###Code
block_decomposition = let bd = PropertyT.BlockDecomposition(RG, WreathProduct(SymmetricGroup(2), SymmetricGroup(N)))
PropertyT.decimate(bd, false);
end;
###Output
┌ Info: Decomposing basis of RG into orbits of
│ autS = Wreath Product of Full symmetric group over 2 elements by Full symmetric group over 4 elements
└ @ PropertyT /home/kalmar/.julia/packages/PropertyT/vcGsE/src/blockdecomposition.jl:15
###Markdown
Elements Adj and Op

Now we define the elements $\operatorname{Adj}_N$ and $\operatorname{Op}_N$. The functions `Sq`, `Adj`, `Op` returning the appropriate elements are defined in the `src/sqadjop.jl` source file.
###Code
@time AdjN = PropertyT.Adj(RG, N)
@time OpN = PropertyT.Op(RG, N);
###Output
0.733075 seconds (1.04 M allocations: 53.078 MiB)
0.145846 seconds (155.32 k allocations: 7.875 MiB)
###Markdown
Finally we compute the element `elt` of interest:
* if `N=3`: $\operatorname{elt} = \operatorname{Adj}_3$
* if `N=4`: $\operatorname{elt} = \operatorname{Adj}_4 + \operatorname{Op}_4$
* if `N=5`: $\operatorname{elt} = \operatorname{Adj}_5 + 1.5\operatorname{Op}_5.$
###Code
if N == 3
k = 0
elseif N == 4
k = 1
elseif N == 5
k = 1.5
end
elt = AdjN + k*OpN;
elt.coeffs
###Output
_____no_output_____
###Markdown
Optimization Problem

We are ready to define the optimisation problem. Function

> `PropertyT.SOS_problem(x, Δ, orbit_data; upper_bound=UB)`

defines the optimisation problem equivalent to the one of the form
\begin{align}
\text{ maximize : } \quad & \lambda\\
\text{under constraints : }\quad & 0 \leqslant \lambda \leqslant \operatorname{UB},\\
& x - \lambda \Delta = \sum \xi_i^* \xi_i,\\
& \text{the set of $\xi_i$s is invariant under $\mathbb{Z}/2\mathbb{Z} \wr \operatorname{Sym}_N$}.
\end{align}
###Code
# @time SDP_problem, varλ, varP = PropertyT.SOS_problem(elt, Δ, orbit_data)
if N == 3
UB = 0.158
elseif N == 4
UB = 0.82005
elseif N == 5
UB = 1.5005
end
SDP_problem, varP = PropertyT.SOS_problem_primal(elt, Δ, block_decomposition; upper_bound=UB)
using PropertyT.JuMP
using SCS
λ = Ps = warm = nothing
###Output
_____no_output_____
###Markdown
Solving the problemDepending on the actual problem one may need to tweak the parameters given to the solver:
* `eps` sets the requested accuracy
* `max_iters` sets the number of iterations to run before the solver gives up
* `alpha` is a parameter ($\alpha \in (0,2)$) which determines the rate of convergence at the cost of accuracy
* `acceleration_lookback`: if you experience numerical instability in the SCS log, this should be changed to `1` (at the cost of the rate of convergence).

The parameters below should be enough to obtain a decent solution for $\operatorname{SL}(4, \mathbb{Z})$ and $\operatorname{SL}(5, \mathbb{Z})$. For $\operatorname{SL}(3, \mathbb{Z})$ multiple runs of the following cell are required to obtain convergence (i.e. `status = MathOptInterface.OPTIMAL`). Note that when changing `UB` (above) to `0.15`, a much faster convergence can be observed.
###Code
with_SCS = with_optimizer(SCS.Optimizer,
linear_solver=SCS.DirectSolver,
eps=3e-13,
max_iters=10_000,
alpha=1.5,
acceleration_lookback=20,
warm_start=true)
status, warm = PropertyT.solve(SDP_problem, with_SCS, warm);
λ = value(SDP_problem[:λ])
@show(status, λ);
###Output
----------------------------------------------------------------------------
SCS v2.1.2 - Splitting Conic Solver
(c) Brendan O'Donoghue, Stanford University, 2012
----------------------------------------------------------------------------
Lin-sys: sparse-direct, nnz in A = 133004
eps = 3.00e-13, alpha = 1.50, max_iters = 10000, normalize = 1, scale = 1.00
acceleration_lookback = 20, rho_x = 1.00e-03
Variables n = 1388, constraints m = 1946
Cones: primal zero / dual free vars: 1196
linear vars: 1
sd vars: 749, sd blks: 14
Setup time: 6.60e-02s
SCS using variable warm-starting
----------------------------------------------------------------------------
Iter | pri res | dua res | rel gap | pri obj | dua obj | kap/tau | time (s)
----------------------------------------------------------------------------
0| 1.65e+20 1.24e+20 1.00e+00 -3.16e+21 2.48e+19 3.17e+20 2.27e-03
100| 6.67e-05 5.27e-05 2.89e-04 6.71e-05 3.56e-04 4.04e-16 1.39e-01
200| 1.00e-04 7.86e-05 2.84e-04 -2.30e-04 -5.13e-04 1.83e-16 2.75e-01
300| 3.39e-05 2.33e-05 6.98e-06 4.20e-05 3.50e-05 1.80e-16 4.11e-01
400| 1.62e-05 1.15e-05 4.18e-05 -3.66e-05 -7.84e-05 1.44e-15 5.47e-01
500| 8.64e-06 5.80e-06 4.15e-06 8.60e-06 4.45e-06 1.13e-15 6.83e-01
600| 7.87e-06 5.55e-06 7.06e-06 9.34e-06 2.28e-06 5.86e-16 8.20e-01
700| 2.52e-06 1.87e-06 2.71e-06 9.04e-07 -1.80e-06 6.73e-16 9.54e-01
800| 1.64e-06 1.11e-06 2.68e-06 -8.28e-07 1.85e-06 2.27e-16 1.09e+00
900| 1.62e-06 1.07e-06 1.36e-06 4.07e-06 5.43e-06 1.99e-16 1.22e+00
1000| 2.25e-07 1.19e-07 7.07e-09 -6.14e-08 -5.43e-08 3.53e-17 1.34e+00
1100| 2.42e-07 1.27e-07 3.08e-07 4.23e-08 3.50e-07 1.59e-16 1.47e+00
1200| 2.59e-07 1.62e-07 9.41e-08 1.62e-10 -9.40e-08 3.13e-16 1.59e+00
1300| 2.26e-07 1.18e-07 5.09e-09 -2.48e-08 -2.99e-08 2.03e-16 1.71e+00
1400| 3.78e-07 2.85e-07 8.83e-07 2.76e-07 1.16e-06 2.90e-16 1.83e+00
1500| 2.25e-07 1.18e-07 1.69e-08 -5.93e-09 1.10e-08 3.96e-16 1.95e+00
1600| 2.23e-07 1.15e-07 3.42e-08 3.24e-09 -3.10e-08 1.15e-15 2.07e+00
1699| 2.39e-13 1.22e-13 1.49e-13 7.53e-14 -7.40e-14 1.43e-15 2.19e+00
----------------------------------------------------------------------------
Status: Solved
Timing: Solve time: 2.19e+00s
Lin-sys: nnz in L factor: 293352, avg solve time: 6.19e-04s
Cones: avg projection time: 3.03e-04s
Acceleration: avg step time: 2.96e-04s
----------------------------------------------------------------------------
Error metrics:
dist(s, K) = 2.1231e-16, dist(y, K*) = 1.8235e-09, s'y/|s||y| = 1.0298e-13
primal res: |Ax + s - b|_2 / (1 + |b|_2) = 2.3903e-13
dual res: |A'y + c|_2 / (1 + |c|_2) = 1.2178e-13
rel gap: |c'x + b'y| / (1 + |c'x| + |b'y|) = 1.4934e-13
----------------------------------------------------------------------------
c'x = 0.0000, -b'y = -0.0000
============================================================================
status = MathOptInterface.OPTIMAL
λ = 0.8200499999999246
###Markdown
Checking the solutionNow we reconstruct the solution to the original problem over $\mathbb{R} \operatorname{SL}(N,\mathbb{Z})$, which essentially boils down to averaging the obtained solution over the orbits of wreath product action: $$Q=\frac{1}{|\Sigma|}\sum_{\sigma\in\Sigma}\sum_{\pi\in \widehat{\Sigma}} \dim{\pi}\cdot\sigma\left(U_{\pi}^T \sqrt{P_{\pi}} U_{\pi}\right).$$
###Code
Ps = [value.(P) for P in varP]
Qs = real.(sqrt.(Ps));
Q = PropertyT.reconstruct(Qs, block_decomposition)
###Output
_____no_output_____
###Markdown
As explained in the paper, the columns of the square root of the solution matrix provide the coefficients for the $\xi_i$'s in the basis `E_R` of the group ring. Below we compute the residual $$ b = \left(x - \lambda\Delta\right) - \sum \xi_i^*\xi_i.$$ Since we do this in floating-point arithmetic, the result cannot yet be taken as a rigorous certificate.
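Spelling out the convention used here: if $q_i$ denotes the $i$-th column of $Q$, then $$\xi_i = \sum_{g \in E_R} (q_i)_g\, g, \qquad \xi_i^{*} = \sum_{g \in E_R} (q_i)_g\, g^{-1},$$ and `compute_SOS(RG, Q)` in the next cell evaluates $\sum_i \xi_i^{*}\xi_i$ in `RG`.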
###Code
function SOS_residual(x::GroupRingElem, Q::Matrix)
RG = parent(x)
@time sos = PropertyT.compute_SOS(RG, Q);
return x - sos
end
residual = SOS_residual(elt - λ*Δ, Q)
@show norm(residual, 1);
###Output
0.034382 seconds (43.79 k allocations: 5.102 MiB)
norm(residual, 1) = 2.1927155930872447e-9
###Markdown
Checking in interval arithmetic
###Code
using PropertyT.IntervalArithmetic
IntervalArithmetic.setrounding(Interval, :tight)
IntervalArithmetic.setformat(sigfigs=12);
###Output
_____no_output_____
###Markdown
Here we resort to interval arithmetic to provide certified upper and lower bounds on the norm of the residual.* We first change entries of `Q` to narrow intervals* We project columns of `Q` so that $0$ is in the sum of coefficients of each column (i.e. $\xi_i \in I \operatorname{SL}(N,\mathbb{Z})$)* We compute the sum of squares and the $\ell_1$-norm of the residual in the interval arithmetic.The returned `check_columns_augmentation` is a boolean flag to detect if the projection was successful, i.e. if we can guarantee that each column of `Q_aug` can be represented by an element from the augmentation ideal. (If it were not successful, one may project `Q = PropertyT.augIdproj(Q)` in the floating point arithmetic prior to the cell below).The resulting norm of the residual is **guaranteed** to be contained in the resulting interval. E.g. if each entry of `Q` were changed into an honest rational number and all the computations were carried out in rational arithmetic, the rational $\ell_1$-norm will be contained in the interval $\ell_1$-norm.
###Code
Q_aug, check_columns_augmentation = PropertyT.augIdproj(Interval, Q);
@assert check_columns_augmentation
elt_int = elt - @interval(λ)*Δ;
residual_int = SOS_residual(elt_int, Q_aug)
@show norm(residual_int, 1);
certified_λ = @interval(λ) - 2*norm(residual_int,1)
###Output
_____no_output_____
###Markdown
So $\operatorname{elt} - \lambda_0 \Delta \in \Sigma^2 I\operatorname{SL}(N, \mathbb{Z})$, where as $\lambda_0$ we could take the left end of the above interval:
###Code
certified_λ.lo
using Dates
now()
###Output
_____no_output_____ |
model-profiling-2/model_debugging-udacity.ipynb | ###Markdown
SageMaker Model DebuggingHere we will see how we can use Sagemaker Debugging to see our model training performance as well as generate a simple report called the Profiler Report that gives us an overview of our training job.First we will need to install `smdebug`. `pytorch_mnist.py` Click here to see the full script code ``` pythonimport argparseimport torchimport torch.nn as nnimport torch.nn.functional as Fimport torch.optim as optimfrom torchvision import datasets, transforms ==================================== 1. Import SMDebug framework class. ====================================import smdebug.pytorch as smdclass Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) self.conv2 = nn.Conv2d(32, 64, 3, 1) self.dropout1 = nn.Dropout(0.25) self.dropout2 = nn.Dropout(0.5) self.fc1 = nn.Linear(9216, 128) self.fc2 = nn.Linear(128, 10) def forward(self, x): x = self.conv1(x) x = F.relu(x) x = self.conv2(x) x = F.relu(x) x = F.max_pool2d(x, 2) x = self.dropout1(x) x = torch.flatten(x, 1) x = self.fc1(x) x = F.relu(x) x = self.dropout2(x) x = self.fc2(x) output = F.log_softmax(x, dim=1) return outputdef train(model, train_loader, optimizer, epoch, hook): model.train() ================================================= 2. Set the SMDebug hook for the training phase. ================================================= hook.set_mode(smd.modes.TRAIN) for batch_idx, (data, target) in enumerate(train_loader): optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() if batch_idx % 100 == 0: print( "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format( epoch, batch_idx * len(data), len(train_loader.dataset), 100.0 * batch_idx / len(train_loader), loss.item(), ) )def test(model, test_loader, hook): model.eval() =================================================== 3. Set the SMDebug hook for the validation phase. 
=================================================== hook.set_mode(smd.modes.EVAL) test_loss = 0 correct = 0 with torch.no_grad(): for data, target in test_loader: output = model(data) test_loss += F.nll_loss(output, target, reduction="sum").item() sum up batch loss pred = output.argmax(dim=1, keepdim=True) get the index of the max log-probability correct += pred.eq(target.view_as(pred)).sum().item() test_loss /= len(test_loader.dataset) print( "\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format( test_loss, correct, len(test_loader.dataset), 100.0 * correct / len(test_loader.dataset) ) )def main(): Training settings parser = argparse.ArgumentParser() parser.add_argument( "--batch-size", type=int, default=64, metavar="N", help="input batch size for training (default: 64)", ) parser.add_argument( "--test-batch-size", type=int, default=1000, metavar="N", help="input batch size for testing (default: 1000)", ) parser.add_argument( "--epochs", type=int, default=14, metavar="N", help="number of epochs to train (default: 14)", ) parser.add_argument( "--lr", type=float, default=1.0, metavar="LR", help="learning rate (default: 1.0)" ) args = parser.parse_args() train_kwargs = {"batch_size": args.batch_size} test_kwargs = {"batch_size": args.test_batch_size} transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] ) dataset1 = datasets.MNIST("../data", train=True, download=True, transform=transform) dataset2 = datasets.MNIST("../data", train=False, transform=transform) train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs) test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs) model = Net() ====================================================== 4. Register the SMDebug hook to save output tensors. ====================================================== hook = smd.Hook.create_from_json_file() hook.register_hook(model) optimizer = optim.Adadelta(model.parameters(), lr=args.lr) for epoch in range(1, args.epochs + 1): =========================================================== 5. Pass the SMDebug hook to the train and test functions. =========================================================== train(model, train_loader, optimizer, epoch, hook) test(model, test_loader, hook) torch.save(model.state_dict(), "mnist_cnn.pt")if __name__ == "__main__": main()```
###Code
!pip install smdebug
###Output
/opt/conda/lib/python3.7/site-packages/secretstorage/dhcrypto.py:16: CryptographyDeprecationWarning: int_from_bytes is deprecated, use int.from_bytes instead
from cryptography.utils import int_from_bytes
/opt/conda/lib/python3.7/site-packages/secretstorage/util.py:25: CryptographyDeprecationWarning: int_from_bytes is deprecated, use int.from_bytes instead
from cryptography.utils import int_from_bytes
Requirement already satisfied: smdebug in /opt/conda/lib/python3.7/site-packages (1.0.11)
Requirement already satisfied: numpy>=1.16.0 in /opt/conda/lib/python3.7/site-packages (from smdebug) (1.20.3)
Requirement already satisfied: packaging in /opt/conda/lib/python3.7/site-packages (from smdebug) (20.1)
Requirement already satisfied: boto3>=1.10.32 in /opt/conda/lib/python3.7/site-packages (from smdebug) (1.17.74)
Requirement already satisfied: protobuf>=3.6.0 in /opt/conda/lib/python3.7/site-packages (from smdebug) (3.17.0)
Requirement already satisfied: pyinstrument>=3.1.3 in /opt/conda/lib/python3.7/site-packages (from smdebug) (3.4.2)
Requirement already satisfied: botocore<1.21.0,>=1.20.74 in /opt/conda/lib/python3.7/site-packages (from boto3>=1.10.32->smdebug) (1.20.74)
Requirement already satisfied: s3transfer<0.5.0,>=0.4.0 in /opt/conda/lib/python3.7/site-packages (from boto3>=1.10.32->smdebug) (0.4.2)
Requirement already satisfied: jmespath<1.0.0,>=0.7.1 in /opt/conda/lib/python3.7/site-packages (from boto3>=1.10.32->smdebug) (0.10.0)
Requirement already satisfied: python-dateutil<3.0.0,>=2.1 in /opt/conda/lib/python3.7/site-packages (from botocore<1.21.0,>=1.20.74->boto3>=1.10.32->smdebug) (2.8.1)
Requirement already satisfied: urllib3<1.27,>=1.25.4 in /opt/conda/lib/python3.7/site-packages (from botocore<1.21.0,>=1.20.74->boto3>=1.10.32->smdebug) (1.26.4)
Requirement already satisfied: six>=1.9 in /opt/conda/lib/python3.7/site-packages (from protobuf>=3.6.0->smdebug) (1.14.0)
Requirement already satisfied: pyinstrument-cext>=0.2.2 in /opt/conda/lib/python3.7/site-packages (from pyinstrument>=3.1.3->smdebug) (0.2.4)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging->smdebug) (2.4.6)
[33mWARNING: Running pip as root will break packages and permissions. You should install packages reliably by using venv: https://pip.pypa.io/warnings/venv[0m
[33mWARNING: You are using pip version 21.1.1; however, version 21.1.3 is available.
You should consider upgrading via the '/opt/conda/bin/python -m pip install --upgrade pip' command.[0m
###Markdown
Debugger Rule and ConfigsNext we need to import the packages we will need and specify the debugger rules and configs. We will check for overfitting, overtraining, poor weight initialization and vanishing gradients. We will also set a save interval of 100 and 10 for training and testing respectively.
###Code
from sagemaker.pytorch import PyTorch
from sagemaker import get_execution_role
from sagemaker.debugger import (
Rule,
DebuggerHookConfig,
rule_configs,
)
rules = [
Rule.sagemaker(rule_configs.vanishing_gradient()),
Rule.sagemaker(rule_configs.overfit()),
Rule.sagemaker(rule_configs.overtraining()),
Rule.sagemaker(rule_configs.poor_weight_initialization()),
]
hook_config = DebuggerHookConfig(
hook_parameters={"train.save_interval": "100", "eval.save_interval": "10"}
)
###Output
_____no_output_____
###Markdown
Next we will specify the hyperparameters and create our estimator. In our estimator, we will additionally need to specify the debugger rules and configs that we created before.
###Code
hyperparameters = {"epochs": "2", "batch-size": "32", "test-batch-size": "100", "lr": "0.001"}
estimator = PyTorch(
entry_point="scripts/pytorch_mnist.py",
base_job_name="smdebugger-mnist-pytorch",
role=get_execution_role(),
instance_count=1,
instance_type="ml.m5.large",
hyperparameters=hyperparameters,
framework_version="1.8",
py_version="py36",
## Debugger parameters
rules=rules,
debugger_hook_config=hook_config,
)
estimator.fit(wait=True)
job_name = estimator.latest_training_job.name
client = estimator.sagemaker_session.sagemaker_client
description = client.describe_training_job(TrainingJobName=estimator.latest_training_job.name)
###Output
_____no_output_____
###Markdown
Checking Training PerformanceBelow is some boilerplate code to get the training job object using the training job name and display the training metrics that we were tracking as well as some of the training tensors. The plots may not show up in the classroom, but it will show up when you train the model in SageMaker Studio.
###Code
from smdebug.trials import create_trial
from smdebug.core.modes import ModeKeys
trial = create_trial(estimator.latest_job_debugger_artifacts_path())
###Output
[2021-07-20 13:33:33.643 datascience-1-0-ml-t3-medium-1abf3407f667f989be9d86559395:385 INFO utils.py:27] RULE_JOB_STOP_SIGNAL_FILENAME: None
[2021-07-20 13:33:33.659 datascience-1-0-ml-t3-medium-1abf3407f667f989be9d86559395:385 INFO s3_trial.py:42] Loading trial debug-output at path s3://sagemaker-us-east-1-648346130239/smdebugger-mnist-pytorch-2021-07-20-13-25-42-554/debug-output
###Markdown
Fetch tensor names and print their lengths
###Code
trial.tensor_names()
len(trial.tensor("nll_loss_output_0").steps(mode=ModeKeys.TRAIN))
len(trial.tensor("nll_loss_output_0").steps(mode=ModeKeys.EVAL))
###Output
_____no_output_____
###Markdown
Set up functions to plot the output tensors
###Code
def get_data(trial, tname, mode):
tensor = trial.tensor(tname)
steps = tensor.steps(mode=mode)
vals = []
for s in steps:
vals.append(tensor.value(s, mode=mode))
return steps, vals
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot
def plot_tensor(trial, tensor_name):
steps_train, vals_train = get_data(trial, tensor_name, mode=ModeKeys.TRAIN)
print("loaded TRAIN data")
steps_eval, vals_eval = get_data(trial, tensor_name, mode=ModeKeys.EVAL)
print("loaded EVAL data")
fig = plt.figure(figsize=(10, 7))
host = host_subplot(111)
par = host.twiny()
host.set_xlabel("Steps (TRAIN)")
par.set_xlabel("Steps (EVAL)")
host.set_ylabel(tensor_name)
(p1,) = host.plot(steps_train, vals_train, label=tensor_name)
print("completed TRAIN plot")
(p2,) = par.plot(steps_eval, vals_eval, label="val_" + tensor_name)
print("completed EVAL plot")
leg = plt.legend()
host.xaxis.get_label().set_color(p1.get_color())
leg.texts[0].set_color(p1.get_color())
par.xaxis.get_label().set_color(p2.get_color())
leg.texts[1].set_color(p2.get_color())
plt.ylabel(tensor_name)
plt.show()
plot_tensor(trial, "nll_loss_output_0")
###Output
loaded TRAIN data
loaded EVAL data
completed TRAIN plot
completed EVAL plot
###Markdown
Display the Profiler ReportThe profiler report will be saved in an S3 bucket. Below we can see how to get the path of the report, fetch it and display it. The profiler report may not display in the notebook, but you can take a look at it from the ProfilerReport folder.
###Code
rule_output_path = estimator.output_path + estimator.latest_training_job.job_name + "/rule-output"
! aws s3 ls {rule_output_path} --recursive
! aws s3 cp {rule_output_path} ./ --recursive
import os
# get the autogenerated folder name of profiler report
profiler_report_name = [
rule["RuleConfigurationName"]
for rule in estimator.latest_training_job.rule_job_summary()
if "Profiler" in rule["RuleConfigurationName"]
][0]
import IPython
IPython.display.HTML(filename=profiler_report_name + "/profiler-output/profiler-report.html")
###Output
_____no_output_____ |
previous_codes/main_segnet_v4.ipynb | ###Markdown
The whole training and testing pipeline runs on Google Colab.
###Code
import os
from google.colab import drive
drive.mount('/content/drive')
path = "/content/drive/My Drive/segnet/"
os.chdir(path)
os.listdir(path)
import os
print (os.getcwd()) # get current working directory
###Output
/content/drive/My Drive/segnet
###Markdown
0. parameters
###Code
import torch.utils.data as data
import torch
import numpy as np
import h5py
import matplotlib.pyplot as plt
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
from tqdm import tqdm
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
import random
import cv2 as cv
from PIL import Image
import time
device = 'cuda' if torch.cuda.is_available() else 'cpu'
n_epochs = 3 # training epochs
class_num = 34
batch_size = 4
learning_rate = 2e-4
weight_decay = 5e-4
log_interval = 10
random_seed = 42
val_percent = 0.1 # training set : validation set = 9:1
torch.manual_seed(random_seed)
bn_momentum = 0.1 # momentum for batch normalization
cate_weight = [1/34]*34 # weight for each class
dir_pre_train_weights = "vgg16_bn-6c64b313.pth" # pre_train weights downloaded from https://download.pytorch.org/models/vgg16_bn-6c64b313.pth
dir_weights = "./weights"
dir_checkpoint = './checkpoints'
###Output
_____no_output_____
###Markdown
1. Implement a data loader class to handle the downloaded data. (5 points) For more information on the dataset, please refer to the CityScapes dataset.
###Code
color_codes = h5py.File("lab2_test_data.h5", 'r')['color_codes']
# 'rgb' stores the raw images, while 'seg' stores segmentation maps
class DataFromH5File(data.Dataset):
def __init__(self, filepath):
h5File = h5py.File(filepath, 'r')
# self.color_codes = h5File['color_codes']
self.rgb = h5File['rgb']
self.seg = h5File['seg']
def __getitem__(self, idx):
label = torch.from_numpy(self.seg[idx]).float()
data = torch.from_numpy(self.rgb[idx]).float()
data = data/255.0 # normalization
data = data.permute(2,0,1) # change the image channels into (channel, width, height)
return data, label
def __len__(self):
assert self.rgb.shape[0] == self.seg.shape[0], "Wrong data length" # robustness
return self.rgb.shape[0]
# load training data from lab2_train_data.h5
dataset = DataFromH5File("lab2_train_data.h5")
n_val = int(len(dataset) * val_percent)
n_train = len(dataset) - n_val
# split train&val
train, val = data.random_split(dataset, [n_train, n_val])
train_loader = data.DataLoader(dataset=train, batch_size=batch_size, shuffle=True, pin_memory=True, drop_last = True)
val_loader = data.DataLoader(dataset=val, batch_size=batch_size, shuffle=False, pin_memory=True, drop_last = True) # drop_last=True
# load testing data from lab2_test_data.h5
testset = DataFromH5File("lab2_test_data.h5")
test_loader = data.DataLoader(dataset=testset, batch_size=batch_size, shuffle=False, pin_memory=True)
# test the data loader
for step, (x, y) in enumerate(train_loader):
print(x.min(),x.max())
print(y.min(),y.max())
print(step)
break
print(len(train_loader), len(val_loader), len(test_loader)) # 669 74 125 when batch_size==4
###Output
tensor(0.) tensor(1.)
tensor(1.) tensor(33.)
0
669 74 125
###Markdown
2. Define the model. Provide a schematic of your architecture depicting its overall structure and the relevant parameters. (20 points) 2.1 Define the model. We use SegNet, proposed by Badrinarayanan et al. Paper link: https://arxiv.org/pdf/1511.00561.pdf
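The key mechanism SegNet relies on is storing the max-pooling indices in the encoder and reusing them for upsampling in the decoder. A tiny standalone illustration of this (added here for clarity, not part of the original assignment code):

```python
import torch
import torch.nn.functional as F

x = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
pooled, idx = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
print(pooled)    # the 2x2 map of maxima
print(idx)       # flat positions the maxima came from
restored = F.max_unpool2d(pooled, idx, kernel_size=2, stride=2)
print(restored)  # maxima placed back at their original positions, zeros elsewhere
```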
###Code
# encoder
class Encoder(nn.Module):
def __init__(self, input_channels):
super(Encoder, self).__init__()
self.enco1 = nn.Sequential(
nn.Conv2d(input_channels, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64, momentum=bn_momentum),
nn.ReLU()
)
self.enco2 = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128, momentum=bn_momentum),
nn.ReLU()
)
self.enco3 = nn.Sequential(
nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256, momentum=bn_momentum),
nn.ReLU()
)
self.enco4 = nn.Sequential(
nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512, momentum=bn_momentum),
nn.ReLU()
)
self.enco5 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512, momentum=bn_momentum),
nn.ReLU()
)
def forward(self, x):
id = []
x = self.enco1(x)
        x, id1 = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True) # keep the indices of the max values (reused for unpooling in the decoder)
id.append(id1)
x = self.enco2(x)
x, id2 = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
id.append(id2)
x = self.enco3(x)
x, id3 = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
id.append(id3)
x = self.enco4(x)
x, id4 = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
id.append(id4)
x = self.enco5(x)
x, id5 = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
id.append(id5)
return x, id
# encoder + decoder
class SegNet(nn.Module):
def __init__(self, input_channels, output_channels):
super(SegNet, self).__init__()
self.weights_new = self.state_dict()
self.encoder = Encoder(input_channels)
self.deco1 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512, momentum=bn_momentum),
nn.ReLU()
)
self.deco2 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256, momentum=bn_momentum),
nn.ReLU()
)
self.deco3 = nn.Sequential(
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128, momentum=bn_momentum),
nn.ReLU()
)
self.deco4 = nn.Sequential(
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64, momentum=bn_momentum),
nn.ReLU()
)
self.deco5 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64, momentum=bn_momentum),
nn.ReLU(),
nn.Conv2d(64, output_channels, kernel_size=3, stride=1, padding=1),
)
def forward(self, x):
x, id = self.encoder(x)
x = F.max_unpool2d(x, id[4], kernel_size=2, stride=2)
x = self.deco1(x)
x = F.max_unpool2d(x, id[3], kernel_size=2, stride=2)
x = self.deco2(x)
x = F.max_unpool2d(x, id[2], kernel_size=2, stride=2)
x = self.deco3(x)
x = F.max_unpool2d(x, id[1], kernel_size=2, stride=2)
x = self.deco4(x)
x = F.max_unpool2d(x, id[0], kernel_size=2, stride=2)
x = self.deco5(x)
return x
# delete weights of three fc layers
def load_weights(self, weights_path):
weights = torch.load(weights_path)
del weights["classifier.0.weight"]
del weights["classifier.0.bias"]
del weights["classifier.3.weight"]
del weights["classifier.3.bias"]
del weights["classifier.6.weight"]
del weights["classifier.6.bias"]
names = []
for key, value in self.encoder.state_dict().items():
if "num_batches_tracked" in key:
continue
names.append(key)
for name, dict in zip(names, weights.items()):
self.weights_new[name] = dict[1]
self.encoder.load_state_dict(self.weights_new)
###Output
_____no_output_____
###Markdown
2.2 Provide a schematic of your architecture depicting its overall structure and the relevant parameters. $$\rm Figure 1.model architecture$$ $$\rm Note: Badrinarayanan, Vijay et al. “SegNet: A Deep Convolutional Encoder-Decoder Architecture for Image Segmentation.” IEEE transactions on pattern analysis and machine intelligence vol. 39,12 (2017): 2481-2495. doi:10.1109/TPAMI.2016.2644615$$
###Code
from torchsummary import summary
# use this version to avoid bugs
# !pip install torch-summary==1.4.4
model = SegNet(input_channels=3, output_channels=class_num) # RGB images so the input_channels=3
model = model.to(device)
x = torch.ones([batch_size, 3, 128, 256]) # input shape
x = x.to(device)
y = model(x)
print(y.shape) # output shape
summary(model, input_size=(3, 128, 256))
###Output
torch.Size([4, 34, 128, 256])
=================================================================
Layer (type:depth-idx) Param #
=================================================================
├─Encoder: 1-1 --
| └─Sequential: 2-1 --
| | └─Conv2d: 3-1 1,792
| | └─BatchNorm2d: 3-2 128
| | └─ReLU: 3-3 --
| | └─Conv2d: 3-4 36,928
| | └─BatchNorm2d: 3-5 128
| | └─ReLU: 3-6 --
| └─Sequential: 2-2 --
| | └─Conv2d: 3-7 73,856
| | └─BatchNorm2d: 3-8 256
| | └─ReLU: 3-9 --
| | └─Conv2d: 3-10 147,584
| | └─BatchNorm2d: 3-11 256
| | └─ReLU: 3-12 --
| └─Sequential: 2-3 --
| | └─Conv2d: 3-13 295,168
| | └─BatchNorm2d: 3-14 512
| | └─ReLU: 3-15 --
| | └─Conv2d: 3-16 590,080
| | └─BatchNorm2d: 3-17 512
| | └─ReLU: 3-18 --
| | └─Conv2d: 3-19 590,080
| | └─BatchNorm2d: 3-20 512
| | └─ReLU: 3-21 --
| └─Sequential: 2-4 --
| | └─Conv2d: 3-22 1,180,160
| | └─BatchNorm2d: 3-23 1,024
| | └─ReLU: 3-24 --
| | └─Conv2d: 3-25 2,359,808
| | └─BatchNorm2d: 3-26 1,024
| | └─ReLU: 3-27 --
| | └─Conv2d: 3-28 2,359,808
| | └─BatchNorm2d: 3-29 1,024
| | └─ReLU: 3-30 --
| └─Sequential: 2-5 --
| | └─Conv2d: 3-31 2,359,808
| | └─BatchNorm2d: 3-32 1,024
| | └─ReLU: 3-33 --
| | └─Conv2d: 3-34 2,359,808
| | └─BatchNorm2d: 3-35 1,024
| | └─ReLU: 3-36 --
| | └─Conv2d: 3-37 2,359,808
| | └─BatchNorm2d: 3-38 1,024
| | └─ReLU: 3-39 --
├─Sequential: 1-2 --
| └─Conv2d: 2-6 2,359,808
| └─BatchNorm2d: 2-7 1,024
| └─ReLU: 2-8 --
| └─Conv2d: 2-9 2,359,808
| └─BatchNorm2d: 2-10 1,024
| └─ReLU: 2-11 --
| └─Conv2d: 2-12 2,359,808
| └─BatchNorm2d: 2-13 1,024
| └─ReLU: 2-14 --
├─Sequential: 1-3 --
| └─Conv2d: 2-15 2,359,808
| └─BatchNorm2d: 2-16 1,024
| └─ReLU: 2-17 --
| └─Conv2d: 2-18 2,359,808
| └─BatchNorm2d: 2-19 1,024
| └─ReLU: 2-20 --
| └─Conv2d: 2-21 1,179,904
| └─BatchNorm2d: 2-22 512
| └─ReLU: 2-23 --
├─Sequential: 1-4 --
| └─Conv2d: 2-24 590,080
| └─BatchNorm2d: 2-25 512
| └─ReLU: 2-26 --
| └─Conv2d: 2-27 590,080
| └─BatchNorm2d: 2-28 512
| └─ReLU: 2-29 --
| └─Conv2d: 2-30 295,040
| └─BatchNorm2d: 2-31 256
| └─ReLU: 2-32 --
├─Sequential: 1-5 --
| └─Conv2d: 2-33 147,584
| └─BatchNorm2d: 2-34 256
| └─ReLU: 2-35 --
| └─Conv2d: 2-36 73,792
| └─BatchNorm2d: 2-37 128
| └─ReLU: 2-38 --
├─Sequential: 1-6 --
| └─Conv2d: 2-39 36,928
| └─BatchNorm2d: 2-40 128
| └─ReLU: 2-41 --
| └─Conv2d: 2-42 19,618
=================================================================
Total params: 29,462,626
Trainable params: 29,462,626
Non-trainable params: 0
=================================================================
###Markdown
3. Define the loss function and optimizer. (10 points)
###Code
import torch.optim as optim
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
# cross entropy loss
# To cope with the sample imbalance between different categories, we assign different weights to them.
criterion = nn.CrossEntropyLoss(weight=torch.from_numpy(np.array(cate_weight)).float()).cuda()
###Output
_____no_output_____
###Markdown
4. Train the network. (5 points)
###Code
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
model.load_weights(dir_pre_train_weights)
###Output
_____no_output_____
###Markdown
The segmentation challenge is evaluated using the mean Intersection over Union (mIoU) metric. Let $n$ denote the number of classes; then $$mIoU = \frac{TP}{TP + FP + FN}=\frac{1}{n}\sum_{i = 1}^{n}\frac{p_{ii}}{\sum_{j=1}^{n}p_{ij}+\sum_{j=1}^{n}p_{ji}-p_{ii}}$$
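A minimal per-class sketch of this formula (an illustrative addition, independent of the histogram-based implementation in the next cell):

```python
import numpy as np

def per_class_miou(pred, target, n_classes):
    ious = []
    for c in range(n_classes):
        inter = np.logical_and(pred == c, target == c).sum()
        union = np.logical_or(pred == c, target == c).sum()
        if union > 0:
            ious.append(inter / union)
    return np.mean(ious)   # average IoU over the classes present

pred   = np.array([0, 0, 1, 1, 2, 2])
target = np.array([0, 1, 1, 1, 2, 0])
print(per_class_miou(pred, target, 3))   # (1/3 + 2/3 + 1/2) / 3 = 0.5
```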
###Code
def mIoU(pred, target):
mini = 1
intersection = pred * (pred == target)
# histogram
area_inter, _ = np.histogram(intersection, bins=2, range=(mini, class_num))
area_pred, _ = np.histogram(pred, bins=2, range=(mini, class_num))
area_target, _ = np.histogram(target, bins=2, range=(mini, class_num))
area_union = area_pred + area_target - area_inter
# Intersection area should be smaller than Union area
assert (area_inter <= area_union).all(), "Intersection area should be smaller than Union area"
rate = round(max(area_inter) / max(area_union), 4)
return rate
def validate(epoch):
val_pbar = tqdm(val_loader)
for batch_idx, (data, target) in enumerate(val_pbar):
output = model(data.to(device)).to('cpu') # np.histogram requires cpu type tensor
target = target.squeeze().long()
miou = mIoU(output.argmax(dim=1), target) # data.argmax(dim=1) represents the segmentation results
val_pbar.set_description(f"Validation | Epoch: {epoch} | mIoU: {miou.item():.4f}")
# color the img according to the color_codes
# elegant code!
def color(src):
ret = np.zeros((src.shape[0], src.shape[1], 3))
for i in range(class_num):
ret[src==i] = color_codes[i]
return ret
def train(epoch):
model.train()
pbar = tqdm(train_loader)
for batch_idx, (data, target) in enumerate(pbar):
data = data.to(device)
target = target.to(device)
optimizer.zero_grad()
output = model(data)
target = target.squeeze().long()
# print('output shape=',output.shape)
# print('target shape=',target.shape)
loss = criterion(output, target)
loss.backward()
optimizer.step()
pbar.set_description(f"Epoch: {epoch} | Loss: {loss.item():.4f}")
if batch_idx % log_interval == 0:
train_losses.append(loss.item())
train_counter.append((batch_idx*batch_size) + ((epoch-1)*len(train_loader.dataset)))
# save the parameters
torch.save(model.state_dict(), '/content/drive/My Drive/segnet/weights/'+str(epoch)+'_model.pth')
torch.save(optimizer.state_dict(), '/content/drive/My Drive/segnet/weights/'+str(epoch)+'_optimizer.pth')
for epoch in range(1, n_epochs + 1):
train(epoch)
# validation
if epoch % 4 == 0 or epoch == n_epochs-1 or epoch==1:
validate(epoch)
###Output
Epoch: 1 | Loss: 0.9715: 100%|██████████| 669/669 [09:12<00:00, 1.21it/s]
Validation | Epoch: 1 | mIoU: 0.0279: 100%|██████████| 74/74 [00:04<00:00, 18.13it/s]
Epoch: 2 | Loss: 0.8216: 13%|█▎ | 84/669 [01:09<08:05, 1.21it/s]
###Markdown
plots of the loss evolution
###Code
fig = plt.figure()
plt.plot(train_counter, train_losses)
plt.legend(['Train Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')
plt.show()
###Output
_____no_output_____
###Markdown
5. Test the resulting network on examples from an independent test set. Implement and present: (40 points)
a. Predictions for (μ, aleatoric, epistemic). (a minimal MC-dropout sketch is given below)
b. Visualizations for (μ, aleatoric, epistemic) on 5 different input examples.
c. Comment briefly on how the model’s performance could be improved.
d. Please save your code and results for submission.
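As a starting point for parts (a) and (b), here is a minimal MC-dropout sketch (an illustrative addition, not part of the original notebook). It reuses the `model`, `test_loader` and `device` defined above; note that the SegNet defined above contains no `nn.Dropout` layers, so the epistemic term will be close to zero unless dropout layers are added to the architecture.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def enable_dropout(m):
    # keep BatchNorm in eval mode; re-enable only dropout layers
    if isinstance(m, (nn.Dropout, nn.Dropout2d)):
        m.train()

def mc_dropout_predict(model, x, T=10):
    model.eval()
    model.apply(enable_dropout)
    with torch.no_grad():
        probs = torch.stack([F.softmax(model(x), dim=1) for _ in range(T)])  # (T, B, C, H, W)
    mean_probs = probs.mean(dim=0)                                           # mu
    eps = 1e-8
    predictive_entropy = -(mean_probs * (mean_probs + eps).log()).sum(dim=1)
    expected_entropy = -(probs * (probs + eps).log()).sum(dim=2).mean(dim=0)
    aleatoric = expected_entropy                        # data uncertainty
    epistemic = predictive_entropy - expected_entropy   # model uncertainty (mutual information)
    return mean_probs, aleatoric, epistemic

# usage on one test batch
data, _ = next(iter(test_loader))
mu, aleatoric, epistemic = mc_dropout_predict(model, data.to(device))
print(mu.shape, aleatoric.shape, epistemic.shape)
```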
###Code
# visualize the segmentation results of a random batch of test sample
def visualize():
rand_idx = random.randint(0,len(test_loader)-1)
for batch_idx, (data, target) in enumerate(test_loader):
if batch_idx == rand_idx:
data = data.to(device)
output = model(data).to('cpu').argmax(dim=1)
data = data.to('cpu').permute(0,2,3,1)
target = target.squeeze().to('cpu')
for i in range(batch_size):
f, ax = plt.subplots(1, 3, figsize=(10,5))
                ax[0].set_title('Input') # set subplot title
                ax[0].imshow(data[i])
                ax[1].set_title('Output') # set subplot title
                ax[1].imshow(color(output[i])/255.0)
                ax[2].set_title('GT') # set subplot title
ax[2].imshow(color(target[i])/255.0)
plt.show()
break
visualize()
###Output
_____no_output_____ |
GraficarVectores.ipynb | ###Markdown
###Code
import numpy as np
import matplotlib.pyplot as plt

def graficarVectores(vecs, cols, alpha = 1):
plt.figure()
plt.axvline(x = 0, color = "grey", zorder = 0)
plt.axhline(y = 0, color = "grey", zorder = 0)
for i in range(len(vecs)):
x = np.concatenate([[0,0], vecs[i]])
plt.quiver([x[0]],
[x[1]],
[x[2]],
[x[3]],
angles="xy", scale_units="xy", scale=1,
color=cols[i],
alpha = alpha)
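
# Example usage (an illustrative addition; the vectors and colours are arbitrary):
# graficarVectores([np.array([2, 5]), np.array([3, -2])], cols=['red', 'blue'])
# plt.xlim(-1, 6); plt.ylim(-3, 6); plt.show()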
###Output
_____no_output_____ |
dft_unevenly_spaced_data.ipynb | ###Markdown
Linear Antenna Array Analysis
###Code
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
sns.set_style('darkgrid')
frequency = 24e9
wavelength = 3e8 / frequency
k = 2 * np.pi / wavelength
theta = np.arange(-180, 180, 0.05)
###Output
_____no_output_____
###Markdown
Parameters
* ```array_size```: size of the linear array
* ```array_spacing```: space between antenna elements (meter)
* ```steer_angle```: angle of the main beam (degree)

The steering weights and array factor computed in the next cells follow the expressions summarized below.
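For reference (a summary added for clarity; here $x_n$ is `array_geometry[n]`, $\theta_0$ is `steer_angle`, and $c_n$ is the Chebyshev taper from `signal.chebwin`):

$$w_n = c_n\, e^{-jk x_n \sin\theta_0}, \qquad AF(\theta) = \sum_{n} w_n\, e^{jk x_n \sin\theta}, \qquad k = \frac{2\pi}{\lambda}.$$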
###Code
array_size = 64
array_spacing = 0.5 * wavelength
steer_angle = 50
array_geometry = np.arange(0, array_spacing * array_size, array_spacing)
weight = np.exp(-1j * k * array_geometry * np.sin(
steer_angle / 180 * np.pi))*signal.chebwin(array_size, at=60)
for idx, value in enumerate(array_geometry):
    if idx % 3 == 0:  # zero out every third element to emulate missing/uneven spacing
array_geometry[idx]=0
weight[idx]=0
theta_grid, array_geometry_grid = np.meshgrid(theta, array_geometry)
A = np.exp(1j * k * array_geometry_grid * np.sin(theta_grid / 180 * np.pi))
AF = np.matmul(weight, A)
plt.scatter(array_geometry, np.zeros((1, array_size)))
plt.xlim([
np.min(array_geometry) - array_spacing,
np.max(array_geometry) + array_spacing
])
plt.ylim([-1, 1])
plt.xlabel('Array geometry (m)')
pattern_dB = 20 * np.log10(np.abs(AF))
pattern_dB = pattern_dB - np.max(pattern_dB)
plt.plot(theta, pattern_dB)
plt.xlabel(r'Angle ($^\circ$)')
plt.ylabel('Normalized amplitude (dB)')
plt.xlim([-90, 90])
plt.ylim([-70, 0])
# plt.savefig('pattern.svg')
30*6
1/5e6*1e6*256*30-30*6
1/5e6*1e6*256
20/0.1
###Output
_____no_output_____ |
A detailed notebook (implementation of various algorithms for news classification).ipynb | ###Markdown
Content
* Importing Libraries
* Constants
* Preprocessing
  * Normalizing
  * Tokenizing
  * Stemming
  * Lemmatizing
* Feature Engineering
  * Bag of Words
  * FastText Word2Vec
* Model Selection

Importing Libraries
###Code
from __future__ import unicode_literals
import json
import os
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as opt
from functools import reduce
from hazm import *
from pprint import pprint
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn import feature_extraction as fe
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn import linear_model as l
from sklearn import naive_bayes as nb
from sklearn.metrics import confusion_matrix
from copy import deepcopy
# Feature Engineering
from sklearn import feature_extraction
np.array(list(range(10)))[np.random.permutation(10)]
###Output
_____no_output_____
###Markdown
Constants
###Code
# Data root path
data_root = 'data'
# Dataset dataframe column names
keys = None
# News headline tags
valid_tags = None
# News agencies
news_agencies = None
###Output
_____no_output_____
###Markdown
Preprocessing Import Dataset
###Code
with open(os.path.join(data_root, 'out.jsonl'), encoding='utf-8') as json_data:
news = [json.loads(line) for line in json_data]
news = pd.DataFrame(news)
print('Number of Datapoints: {}'.format(len(news)))
keys = list(news.columns)
pd.DataFrame([keys])
###Output
_____no_output_____
###Markdown
Let's look at our data
###Code
news.head(1)
###Output
_____no_output_____
###Markdown
subtitle & rutitr
###Code
print(news.subtitle.sample(5))
print(news.rutitr.sample())
###Output
473
661 عکس زیر دکتر هاشمی قاضیزاده وزیر بهداشت را در...
926
372 طولانی شدن غیبت عبدالله دوم شاه اردن پس از دید...
297 ظاهرا آنها اعلام کردهاند ایران روزانه یک میلی...
Name: subtitle, dtype: object
939
Name: rutitr, dtype: object
###Markdown
As we can see, 'subtitle' and 'rutitr' can be empty, so we will drop them if they do not carry valuable features.
###Code
print("Not null 'subtitle' ",len([i for i in news.subtitle if len(i) != 0]))
print("Not null 'rutitr' ",len([i for i in news.rutitr if len(i) != 0]))
###Output
Not null 'subtitle' 569
Not null 'rutitr' 50
###Markdown
So based on the information we got here, we know that these columns can help us, so we keep them. Drop Useless Columns It is clear that `date`, `newsCode`, `newsLink`, `bodyHtml` and `_id` are useless features, so we remove them from our dataset.
###Code
news = news.drop(['_id','date','newsCode','newsLink','bodyHtml'], axis=1)
news.head(5)
###Output
_____no_output_____
###Markdown
newsPath & newsPathLinks
###Code
newspathlinks_tags = list(set([list(x.keys())[1] for x in news.newsPathLinks]))
newspath_tags = list(set(x.split(' » ')[1] for x in news.newsPath))
print("news path links and news path show the same thing? => ", newspath_tags == newspathlinks_tags)
valid_tags = newspath_tags
pd.DataFrame([valid_tags])
###Output
news path links and news path show the same thing? => True
###Markdown
Note: we can remove `newsPathLinks` because it is an exact duplicate of `newsPath`, and we update the `newsPath` column in the dataframe with just one keyword.
###Code
news = news.drop(['newsPathLinks'], axis=1)
news.head(2)
news.loc[:,'newsPath'] = list(x.split(' » ')[1] for x in news.newsPath)
news.head(3)
###Output
_____no_output_____
###Markdown
NewsAgency
###Code
news_agencies = list(news.NewsAgency.unique())
pd.DataFrame([news_agencies])
###Output
_____no_output_____
###Markdown
tags
###Code
def tag_extractor(tags_dict):
"""
gets a tags dictionary and finds unique tags in collection of values and keys
    :param tags_dict: a dictionary mapping tag names to their tag links
"""
keys = list(set(tags_dict.keys()))
values = { v.split('/')[-1] for v in set(tags_dict.values())}
[values.add(i) for i in keys]
return list(values)
print('not processed tags_dict',news.tags[0])
print('processesd tags_dict',tag_extractor(news.tags[0]))
###Output
not processed tags_dict {'استقلال': '/fa/tag/1/استقلال', 'افتخاری': '/fa/tag/1/افتخاری'}
processesd tags_dict ['استقلال', 'افتخاری']
###Markdown
Now we replace the `tags` column with the values extracted by the `tag_extractor` function.
###Code
news.loc[:, 'tags'] = [tag_extractor(tag) for tag in news.tags]
news.head(5)
###Output
_____no_output_____
###Markdown
Note: If you look at row 0 and row 10, you can see there is a lot of noise in this dataset: we can get two different tags for the same news item. Normalizing
###Code
normalizer = Normalizer()
news['body'] = news['body'].apply(normalizer.normalize)
news['rutitr'] = news['rutitr'].apply(normalizer.normalize)
news['subtitle'] = news['subtitle'].apply(normalizer.normalize)
news['title'] = news['title'].apply(normalizer.normalize)
###Output
_____no_output_____
###Markdown
Tokenizing
###Code
def tokenize(phrase):
sentences = sent_tokenize(phrase)
if len(sentences) > 1:
words = reduce(np.append, [word_tokenize(sentence) for sentence in sentences])
elif len(sentences) == 1:
words = word_tokenize(sentences[0])
else:
words = None
return words
news['body'] = news['body'].apply(tokenize)
news['rutitr'] = news['rutitr'].apply(tokenize)
news['subtitle'] = news['subtitle'].apply(tokenize)
news['title'] = news['title'].apply(tokenize)
###Output
_____no_output_____
###Markdown
Stemming
###Code
stemmer = Stemmer()
stem = lambda s: [stemmer.stem(w) for w in s] if s is not None else None
news['body'] = news['body'].apply(stem)
news['rutitr'] = news['rutitr'].apply(stem)
news['subtitle'] = news['subtitle'].apply(stem)
news['title'] = news['title'].apply(stem)
###Output
_____no_output_____
###Markdown
Lemmatizing
###Code
lemmatizer = Lemmatizer()
lemmatize = lambda s: [lemmatizer.lemmatize(w) for w in s] if s is not None else None
news['body'] = news['body'].apply(lemmatize)
news['rutitr'] = news['rutitr'].apply(lemmatize)
news['subtitle'] = news['subtitle'].apply(lemmatize)
news['title'] = news['title'].apply(lemmatize)
news.head(5)
###Output
_____no_output_____
###Markdown
Filter Words Remove Stopwords
For this step, we use stopwords from this repository. There are several stopword files in it, and we use the `persian` one.
```bash
git clone https://github.com/kharazi/persian-stopwords.git
```
###Code
stopwords_root = 'persian-stopwords'
with open(os.path.join(stopwords_root, 'persian'), encoding='utf-8') as stopwords_file:
stopwords = [re.sub(r'\n','',word) for word in stopwords_file]
pd.DataFrame([stopwords[150:170]])
###Output
_____no_output_____
###Markdown
Now we remove all stopwords from our dataset.
###Code
def filter_words(words_list, stopwords=stopwords):
"""
Gets a list of words and remove stopwords from that list.
:param words_list: a list of words to apply stopwords
:param stopwords: a list of stopwords to remove from words_list
"""
if words_list is None:
return None
filtered_words = [word for word in words_list if word not in stopwords]
return filtered_words
s = news.title[0]
print(s)
s_filtered = filter_words(s, stopwords)
print(s_filtered)
news['body'] = news['body'].apply(filter_words)
news['rutitr'] = news['rutitr'].apply(filter_words)
news['subtitle'] = news['subtitle'].apply(filter_words)
news['title'] = news['title'].apply(filter_words)
print(news.title[0])
news.head(3)
for idx,n in enumerate(news.newsPath):
if n=='دانلود':
print(news.body[idx])
###Output
['IObit', 'Malware', 'Fighter', 'برنامه', 'عال', 'بردن', 'افزار', 'نر', 'افزار', 'جاسوس', 'ابزار', 'تبلیغات', 'مزاح', 'تروجان', 'لاگر', 'ربات', 'کرم', 'بود#باش', 'موتور', 'هسته', 'تعبیه', 'نر', 'افزار', 'IObit', 'Malware', 'Fighter', 'توانست#توان', 'برنامه', 'مخرب', 'اس', 'سیس', 'آسیب', 'رساند#رسان', 'ببرید', 'کوچک', 'مشکل', 'سیس', 'ایجاد', 'سیس', 'شد#شو', 'نر', 'افزار', 'IObit', 'Malware', 'Fighter', 'بردن', 'افزار', 'داشت#دار', 'اسکن', 'هوشمند', 'اسکن', 'اسکن', 'سفارش', ' و', 'مطمئن', 'بود#باش', 'سه', 'حرفه', 'عمق', 'بدافزار', 'سیس', 'نابود', 'شد#شو', 'قابلیت', 'نر', 'افزار', 'IObit', 'Malware', 'Fighter', 'امک', 'رس', 'خودکار', 'نر', 'افزار', 'اینترن', 'امک', 'اسکن', 'سیس', 'سه', 'متد', 'اسکن', 'سیس', 'صور', 'مداو', 'اتوماتیک', 'جلوگیر', 'نفوذ', 'برنامه', 'مخرب', 'سیس', 'ایجاد', 'Real-time', 'Protection', 'قو', 'قدرتمند', 'ابزار', 'Startup', 'Guard', 'Browser', 'Guard', 'Network', 'Guard', 'File', 'Guard', 'Cookie', 'Guard', 'Process', 'Guard', 'USD', 'Disk', 'Guard', 'Malicious', 'Action', 'Guard', 'توانا', 'عمیق', 'فایل', 'مخرب', 'فناور', 'DOG', 'تامین', 'مراقبت', 'عل', 'Anti-malware', 'Anti-spyware', 'Anti-adware', 'Anti-trojan', 'Anti-bots', 'سبک', 'نر', 'افزار', 'سبب', 'موتور', 'جس', 'جو', 'بهینه', 'هسته', 'دارا', 'آخرین', 'فن', 'محاسب', 'ابر', 'تجزیه', 'تحلیل', 'رفتار', 'انواع', 'فایل', 'سازگار', 'انواع', 'ویروس', 'بسته', 'امنیت', 'دانلود', 'نر', 'افزار IObit', 'Malware', 'Fighter']
['Magic', 'Tiles', '–', 'کاشی', 'جادو', '۳ باز', 'محبوب', 'سرگر', 'سبک', 'بازی', 'موزیکال', 'استودیو', 'ساز', 'Amanotes', 'JSC', 'اندروید', 'اس', 'صور', 'رایگ', 'گوگل', 'پل', 'عرضه', 'لحظه', 'ب', 'کاربر', 'اندروید', 'جه', 'دریاف', 'محبوب', 'بازی', 'دسته', 'بند', 'موزیکال', 'شمار', 'رفت#رو', '!', 'ال', 'محبوب', 'نا', 'آشنا', '“Piano', 'Tiles', '۲ ”', 'ساخته_شده_اس', 'امک', 'نواختن', 'موزیک', 'پیانو', 'داد#ده', 'سرع', 'کاشی', 'موجود', 'صفحه', 'نما', 'لمس', 'کرد#کن', 'اقدا', 'نواختن', 'موزیک', 'نمود#نما', 'ساعت', 'مشغول', 'بود#باش', '!', 'کاشی', 'صفحه', 'نما', 'سم', 'پایین', 'حرک', '#هست', 'پایین', 'خارج', 'شد#شو', 'لمس', 'کرد#کن', '!', 'افزا', 'تعداد', 'کاشی', 'خروج', 'سرع', 'خروج', 'کاشی', 'افزا', 'کرد#کن', 'هیج', 'شد#شو', '!', 'کاشی', 'مشابه', 'کاشی', 'موزیک', 'دلربا', 'قرار', 'خواهند_گرف', 'صور', 'طول', 'لمس', 'کرد#کن', '!', 'علاقه', 'مند', 'بازی', 'موزیکال', '#هست', 'شک', 'Magic', 'Tiles', 'طراح', 'ساخ', 'عال', 'نظر', 'جلب', 'نمود#نما', '!', 'دانلود', 'Magic', 'Tiles']
['Top', 'Drives', '–', 'تاپ', 'درایوز باز', 'محبوب', 'سرگر', 'جالب', 'سبک بازی', 'ماشین', 'سواری –', 'مدیر', 'ماشین', 'کارت', 'استودیو', 'بازیساز', 'Hutch', 'Games', 'اندروید', 'اس', 'Top', 'Drives', 'قادرید', 'کار', '', 'ب', '۷۰۰', 'کار', 'برد#بر', 'شناخته', 'ماشین', 'جمله', 'پورشه', 'ب', 'دبلیو', 'غیره', 'انتخاب', 'کرد#کن', 'مجموعه', 'کارت', 'داشت#دار', '!', 'کارت', 'مسابقه', 'اتومبیل', 'ران', 'دنیا', 'شرک', 'کرد#کن', 'بخ', 'برنده', 'بیازمایید', '!', 'حالت', 'امک', 'سرگر', 'داد#ده', 'عنو', 'مثال', 'نفره', 'آنلاین', 'کاربر', 'جهان', 'رقاب', 'کرد#کن', 'صور', 'پیروز', 'اس', 'کار', 'ماشین', 'خاص', 'دس', 'کرد#کن', '!', 'امک', 'مدیر', 'ماشین', 'حت', 'ارتقا', 'امک', 'شرک', 'مسابق', 'رانندگ', 'مسابق', 'شتاب', 'مسابق', 'سرع', 'پیس', 'مسابق', 'رفتن', 'تپه', 'لحاظ', 'شرایط', 'آب', 'هوا', 'سطوح', 'جاده', 'ساخ', 'جالب', 'توانست#توان', 'نظر', 'دوستدار', 'بازی', 'کارت', 'جلب', '!', 'دنبال', 'سرگر', 'پر', 'اوق', 'فراغ', '', '#هست', 'Top', 'Drives', 'امتح', 'کرد#کن', '!', 'نک', 'تکمیل', ' باز', 'آنلاین', 'اینترنت', 'استدو', ' باز', 'هک', 'نمیشود', 'نسخه', 'مود', 'نداردسه', ' جدید', 'آپد', 'همز', 'انتشار', 'صفحه', 'دریاف', 'کرد#کن', ' ', 'دستورالعمل', 'نصب', 'اجرا', '–', 'ابتدا', 'فایل', 'نصب', 'دانلود', 'نصب', 'کرد#کن', '–', 'فایل', 'دیتا', 'دانلود', '', 'فشرده', 'خارج', 'کرد#کن', 'پوشه', 'com', 'hutchgames', 'cccg', 'Android/Obb', 'حافظه', 'دستگاه', 'کپ', 'کرد#کن', '–', 'آنلاین', 'اجرا', 'نمود#نما', 'دانلود', 'بازی Top', 'Drives', 'اندروید', 'دانلود', 'دیتا', 'بازی Top', 'Drives', 'منبع', 'فارسروید']
['DeskCalc تما', 'ویژگی', 'ماشین', 'حساب', 'معمول', 'علاوه', 'کاربر', 'ماشین', 'حساب', 'دیجیتال', 'تحت ویندوز انتظار', 'داشت#دار', 'گنجانده', 'اس', 'ویندوز', 'ماشین', 'حساب', 'کاره', 'انجا', 'محاسب', 'عدد', 'ساده', 'محاسبه', 'فرمول', 'ریاض', 'دشوار', 'کاربا', 'انواع', 'حسابدار', 'مهندس', '…داشته', 'بود#باش', 'ویژگ', '', 'افزودن', 'ورود', 'منطق', 'ذخیره', 'بازگردان', 'محاسب', 'انجا', 'چاپ', 'محاسب', 'جداساز', 'ارقا', 'کار', 'اعشار', 'کار', 'فرمول', 'مهندس', 'تصحیح', 'رس', 'اضافه', 'حذف', 'ارز', 'تهیه', 'خروج', 'اکسل', 'تن', 'برخ', 'امک', 'ماشین', 'حسابنر', 'افزار', 'بود#باش', 'قابلیت', 'کلیدی نر', 'افزار DeskCalc', 'وارد', 'متون', 'ارگونامیک-', 'ویژگی', 'تصحیح', 'رس', 'اضافه', 'حذف', 'ارز', 'کار', 'رق', 'اعشار', 'شناور-', 'مفسر', 'فرمول-', 'توابع', 'محاسبه', 'مال', 'فروش-', 'محاسبه', 'درصد-', 'مبدل', 'ارز-', 'توابع', 'مثلثات', 'سینوس', 'کسینوس', 'تانژان', 'رابط', 'کاربر', 'مشابه', 'نر', 'افزار', 'آفیس-', 'خروج', 'Excel-', 'نما', 'پاسخ', 'خط', 'عنو', 'پنجره-', 'دانلود', 'نر', 'افزار DeskCalc']
['Elite', 'Trials نام بازی ا', 'زیبا', 'سرگر', 'سبک', 'بازی', 'موتور', 'سوار', 'مسابقه', 'استودیو', 'ساز', 'GX', 'Games', 'بود#باش', 'برای اندروید منتشر', 'شده_اس', 'موتور', 'متنوع', 'انتخاب', 'داشت#دار', 'شامل', 'نفره', 'نفره', 'آنلاین', 'بود#باش', 'مهارت', 'موتورسوار', 'داد#ده', 'موانع', 'خطرناک', 'شیشه', 'خورده', 'موانع', 'نوک', 'تیز', 'بمب', 'جر', 'هوا', 'عبور', 'کرد#کن', 'زیبا', 'مهیج', 'ترین بازی', 'موتورسوار', 'لذ', 'ببرید', 'امتیاز', '۴٫۶', 'کسب', 'دارای گرافیک زیبا', 'جذاب', 'همراه', 'پل', 'اعتیادآور', 'بود#باش', 'زیبا', 'افزوده', 'ساعت', 'سرگر', 'دانلود', 'بازی Elite', 'Trials', 'اندروید']
###Markdown
This shows that our 'دانلود' newsPath tag is valid, but there are only a few samples for this tag. Remove ASCII
There are a lot of ASCII characters that we do not need, such as HTML tags.
###Code
def remove_ascii(word_list):
"""
Remove ascii characters from a list of words or a string
:params word_list: a list of string or a string
"""
if type(word_list) is list:
return [re.sub(r'[\x00-\x7F]+',' ',w) for w in word_list]
if type(word_list) is str:
words = word_list.split()
return [re.sub(r'[\x00-\x7F]+',' ',w) for w in words]
###Output
_____no_output_____
###Markdown
Feature Engineering
The inherent unstructured (no neatly formatted data columns!) and noisy nature of textual data makes it harder for machine learning methods to work directly on raw text data. Motivation
The importance of feature engineering is even greater for unstructured, textual data because we need to convert free-flowing text into some numeric representation which can then be understood by machine learning algorithms. Feature Engineering Strategies
We try different methods and compare the results using F1, precision and recall scores obtained from different machine learning models. Import Libraries Bag of Words Model
First of all, we should define the term **vector space**. When we convert unstructured data to numbers such that each dimension of a vector is one feature of the space, we have a vector space. The **bag of words** model is one of the simplest vector space methods: each vector represents a document in the corpus, each dimension corresponds to a word, and the value of that dimension is the frequency of the given word in the document. Creating the trainable Document
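Before applying it to the news corpus, here is a tiny illustration of the bag-of-words representation on two toy English documents (an illustrative addition):

```python
from sklearn.feature_extraction.text import CountVectorizer

toy_docs = ["the cat sat on the mat", "the dog sat"]
cv = CountVectorizer()
X = cv.fit_transform(toy_docs)
print(cv.get_feature_names())   # ['cat', 'dog', 'mat', 'on', 'sat', 'the']
print(X.toarray())
# [[1 0 1 1 1 2]
#  [0 1 0 0 1 1]]
```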
###Code
title_count = 2
body_train = []
for i, w in news.iterrows():
body_train.append([])
if w.body is not None:
body_train[i] += w.body
if w.title is not None:
body_train[i] += w.title * title_count
print(len([w for w in body_train if (w is not None) and len(w) != 0]))
###Output
1000
###Markdown
Adding Unknown Words
###Code
def make_uknown(train_doc, thresh= 2):
vocab = list(set(reduce(lambda x, y: np.append(x, y), train_doc)))
vocab = dict.fromkeys(vocab, 0)
mega_doc = reduce(np.append, train_doc)
for w in mega_doc:
vocab[w] += 1
unknown_list = []
for k, v in vocab.items():
if v < thresh:
unknown_list.append(k)
unknown_list = set(unknown_list)
for i, doc in enumerate(train_doc):
for j, w in enumerate(doc):
if w in unknown_list:
train_doc[i][j] = 'unk_w'
return train_doc
body_train = make_uknown(body_train, 2)
###Output
_____no_output_____
###Markdown
convert a list of string items to a string item
###Code
# convert a list of string items to a string item (detokenization)
# just check it out! I got a weird result!
# body_train = [' '.join([w for w in s]) for s in body_train if s is not None]
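# note (added): the commented-out comprehension above silently drops `None` entries,
# so the number of documents would no longer match the number of labels;
# the loop below keeps an empty string for those entries instead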
body_train_ = []
for idx,s in enumerate(body_train):
string=''
if s is not None:
for w in s:
string+=' '+w
body_train_.append(string)
else:
body_train_.append(string)
# Note: We just apply this method to "body"; after building some models, we try other ones.
def bag_of_words(train):
"""
calculate bag of word vector space of a list of strings
:params train: train data as a list of strings
"""
cv = fe.text.CountVectorizer(ngram_range=(1, 4), min_df=0.005, vocabulary=None,
lowercase=False, analyzer='word') # 4-gram model
bow_train = cv.fit_transform(train)
return bow_train, cv.get_feature_names()
bowt,feature_names = bag_of_words(body_train_)
pd.DataFrame(bowt.todense(),columns=feature_names).head(5)
###Output
_____no_output_____
###Markdown
Bag of Words Vector Space Dimensions
###Code
print('number of examples: {}'.format(bowt.shape[0]))
print('number of features: {}'.format(bowt.shape[1]))
###Output
number of examples: 1000
number of features: 10865
###Markdown
OK, let it go! Labels
Now, for training, we need the training data and the corresponding labels. In this dataset, the labels are `newsPath`. To deal with multiclass labels, we use one-hot encoders to encode classes into categorical values represented by numbers. Here we go...
###Code
y = news['newsPath'].values
def label_encoder(array):
"""
Return corresponding label encoded array
"""
array_label_encoder = LabelEncoder()
encoded = array_label_encoder.fit_transform(array)
return array_label_encoder,encoded
def onehot_encoder(label_encoded_array):
"""
Return onehot encoded version of a label encoded input array
"""
array_onehot_encoder = OneHotEncoder()
onehot_encoded_array = array_onehot_encoder.fit_transform(label_encoded_array)
return array_onehot_encoder, onehot_encoded_array
def one_hot_encoder(labels):
label_set = np.array(list(set(labels)))
en_labels = np.array(list(map(lambda x: x == label_set, labels)))
return label_set, en_labels
pd.DataFrame(label_encoder(y)[1], columns=['newsPath']).head()
###Output
_____no_output_____
###Markdown
Features for Naive Bayes Model
###Code
y_decoder, y_en = one_hot_encoder(y)
x_train_naive,x_test_naive ,y_train_naive ,y_test_naive = train_test_split(body_train , y_en, train_size = 0.8, random_state=85)
###Output
C:\Users\Erfan\Anaconda3\envs\tf\lib\site-packages\sklearn\model_selection\_split.py:2069: FutureWarning: From version 0.21, test_size will always complement train_size unless both are specified.
FutureWarning)
###Markdown
Split Dataset Into Trainset and Testset
###Code
# for Naive Bayes we just need to label encode target labels.
_, y_label_encoded = label_encoder(y)
x_train,x_test,y_train,y_test = train_test_split(bowt , y_label_encoded.reshape(-1,1), train_size = 0.90, random_state=85)
print('x #examples: {}, x #features:{}'.format(bowt.shape[0], bowt.shape[1]))
print('x_train #examples: {}, x_train #features: {}'.format(x_train.shape[0], x_train.shape[1]))
print('x_test #examples: {}, x_test #features: {}'.format(x_test.shape[0], x_test.shape[1]))
print('y_train #examples: {}, y_train #features: {}'.format(y_train.shape[0], y_train.shape[1]))
print('y_test #examples: {}, y_test #features: {}'.format(y_test.shape[0], y_test.shape[1]))
x_train = x_train.todense()
x_test = x_test.todense()
###Output
C:\Program Files\Anaconda3\lib\site-packages\sklearn\model_selection\_split.py:2069: FutureWarning: From version 0.21, test_size will always complement train_size unless both are specified.
FutureWarning)
###Markdown
Model Selection
###Code
def accuracy_on_cm(confusion_matrix):
"""
Calculate accuracy on given confusion matrix
"""
t = np.trace(confusion_matrix)
f = np.sum(confusion_matrix) - t
ac = t/(t+f)
return (t,f,ac)
###Output
_____no_output_____
###Markdown
Naive Bayes
###Code
class NaiveBayes:
def __init__(self, nb_type='multiclass_nb'):
pass
def fit(self, documents, labels, s_value = 0.1):
"""[This function will create a naive bayes model and fit it to the documents and labels]
Arguments:
documents {[unstructured array]} -- [...]
labels {[2_D array]} -- [One hot encoded array]
s_value {[float]} -- [Smoothing Value]
"""
labels = np.array(labels)
documents = np.array(documents)
self.class_probs = np.zeros(labels.shape[1])
# Calculating P(c)
for label in labels:
self.class_probs[np.argmax(label)] += 1
self.class_probs = np.log(self.class_probs/labels.shape[0])
# Creating Vocabulary
self.vocab = list(set(reduce(lambda x, y: np.append(x, y), documents)))
# Calculating P(w|c)
self.mega_probs = [None] * labels.shape[1]
for i in range(labels.shape[1]):
mega_doc = reduce(np.append, documents[np.array(list(map(np.argmax, labels))) == i])
            # include an 'unk_w' entry so unseen words at prediction time fall back to the smoothing value
            self.mega_probs[i] = dict.fromkeys(self.vocab + ['unk_w'], s_value)
words_count = 0
for w in mega_doc:
words_count += 1
self.mega_probs[i][w] += 1
self.mega_probs[i] = {k:np.log(v/((len(self.vocab)*s_value) + words_count)) for k, v in self.mega_probs[i].items()}
def predict(self, documents):
pred_labels = []
for doc in documents:
probs = deepcopy(self.class_probs[:])
for i in range(len(probs)):
for w in doc:
if w in self.vocab:
probs[i] += self.mega_probs[i][w]
else:
probs[i] += self.mega_probs[i]['unk_w']
pred_labels.append(probs)
return pred_labels
def evaluate(self, documents, labels):
pred_labels = self.predict(documents)
print('Accuracy: {}'.format(np.mean(np.argmax(pred_labels, 1) == np.argmax(labels, 1))))
nb = NaiveBayes()
nb.fit(x_train_naive, y_train_naive)
nb.evaluate(x_train_naive, y_train_naive)
nb.evaluate(x_test_naive, y_test_naive)
###Output
Accuracy: 0.665
###Markdown
Gaussian Naive Bayes
###Code
naive_bayes = nb.GaussianNB()
naive_bayes = naive_bayes.fit(x_train, y_train.ravel())
y_train_predict = naive_bayes.predict(x_train)
cm_train = confusion_matrix(y_train,y_train_predict)
t_train,f_train,acc_train = accuracy_on_cm(cm_train)
print('Train status = #{} True, #{} False, %{} Accuracy'.format(t_train,f_train,acc_train*100))
y_test_predict = naive_bayes.predict(x_test)
cm_test = confusion_matrix(y_test,y_test_predict)
t_test,f_test,acc_test = accuracy_on_cm(cm_test)
print('Gaussian Test status = #{} True, #{} False, %{} Accuracy'.format(t_test,f_test,acc_test*100))
###Output
Gaussian Test status = #51 True, #49 False, %51.0 Accuracy
###Markdown
Multinomial Naive Bayes
###Code
naive_bayes = nb.MultinomialNB()
naive_bayes = naive_bayes.fit(x_train, y_train.ravel())
y_train_predict = naive_bayes.predict(x_train)
cm_train = confusion_matrix(y_train,y_train_predict)
t_train,f_train,acc_train = accuracy_on_cm(cm_train)
print('Multinomial Train status = #{} True, #{} False, %{} Accuracy'.format(t_train,f_train,acc_train*100))
y_test_predict = naive_bayes.predict(x_test)
cm_test = confusion_matrix(y_test,y_test_predict)
t_test,f_test,acc_test = accuracy_on_cm(cm_test)
print('Multinomial Test status = #{} True, #{} False, %{} Accuracy'.format(t_test,f_test,acc_test*100))
###Output
Multinomial Test status = #70 True, #30 False, %70.0 Accuracy
###Markdown
Bernoulli Naive Bayes
###Code
naive_bayes = nb.BernoulliNB()
naive_bayes = naive_bayes.fit(x_train, y_train.ravel())
y_train_predict = naive_bayes.predict(x_train)
cm_train = confusion_matrix(y_train,y_train_predict)
t_train,f_train,acc_train = accuracy_on_cm(cm_train)
print('Bernoulli Train status = #{} True, #{} False, %{} Accuracy'.format(t_train,f_train,acc_train*100))
y_test_predict = naive_bayes.predict(x_test)
cm_test = confusion_matrix(y_test,y_test_predict)
t_test,f_test,acc_test = accuracy_on_cm(cm_test)
print('Bernoulli Test status = #{} True, #{} False, %{} Accuracy'.format(t_test,f_test,acc_test*100))
###Output
Bernoulli Train status = #635 True, #265 False, %70.55555555555556 Accuracy
Bernoulli Test status = #47 True, #53 False, %47.0 Accuracy
###Markdown
Complement Naive Bayes
###Code
naive_bayes = nb.ComplementNB()
naive_bayes = naive_bayes.fit(x_train, y_train.ravel())
y_train_predict = naive_bayes.predict(x_train)
cm_train = confusion_matrix(y_train,y_train_predict)
t_train,f_train,acc_train = accuracy_on_cm(cm_train)
print('Complement Train status = #{} True, #{} False, %{} Accuracy'.format(t_train,f_train,acc_train*100))
y_test_predict = naive_bayes.predict(x_test)
cm_test = confusion_matrix(y_test,y_test_predict)
t_test,f_test,acc_test = accuracy_on_cm(cm_test)
print('Complement Test status = #{} True, #{} False, %{} Accuracy'.format(t_test,f_test,acc_test*100))
###Output
Complement Train status = #745 True, #155 False, %82.77777777777777 Accuracy
Complement Test status = #73 True, #27 False, %73.0 Accuracy
###Markdown
Logistic Regression Kernel Newton-CG
###Code
logistic_regression = l.LogisticRegression(random_state=85, solver='newton-cg', multi_class='auto')
logistic_regression = logistic_regression.fit(x_train, y_train.ravel())
y_train_predict = logistic_regression.predict(x_train)
cm_train = confusion_matrix(y_train,y_train_predict)
t_train,f_train,acc_train = accuracy_on_cm(cm_train)
print('Logistic Regression Train status = #{} True, #{} False, %{} Accuracy'.format(t_train,f_train,acc_train*100))
y_test_predict = logistic_regression.predict(x_test)
cm_test = confusion_matrix(y_test,y_test_predict)
t_test,f_test,acc_test = accuracy_on_cm(cm_test)
print('Logistic Regression Test status = #{} True, #{} False, %{} Accuracy'.format(t_test,f_test,acc_test*100))
###Output
Logistic Regression Train status = #865 True, #35 False, %96.11111111111111 Accuracy
Logistic Regression Test status = #62 True, #38 False, %62.0 Accuracy
###Markdown
Scratch Model In this section we implement Logistic Regression from scratch using the numpy library. We will explain our structure in each of the sections provided below:1. Constants 2. Cost Function 3. Gradient Function 4. Learning Parameters using the `fmin_cg` optimizer 5. Prediction 6. Evaluating the Model 7. Hyperparameter Tuning 1. Constants
###Code
m,n = x_train.shape
# define theta as zero
theta = np.zeros(n)
# define hyperparameter λ
lambda_ = 1
###Output
_____no_output_____
###Markdown
2. Cost Function
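For reference, the function below implements the standard regularized logistic-regression cost, with $h_\theta(x) = \mathrm{sigmoid}(x\theta)$: $$ J(\theta) = \frac{1}{m}\sum_{i=1}^{m}\Big[-y^{(i)}\log\big(h_\theta(x^{(i)})\big) - \big(1-y^{(i)}\big)\log\big(1-h_\theta(x^{(i)})\big)\Big] + \frac{\lambda}{2m}\sum_{j \geq 1}\theta_j^2 $$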
###Code
def sigmoid(z):
return 1/(1+np.exp(-z))
def lr_hypothesis(x,theta):
return np.dot(x,theta)
def compute_cost(theta, x, y, lambda_, m, n):
theta = theta.reshape(n,1)
x = x.reshape(m,n)
y = y.reshape(m,1)
infunc1 = -y*(np.log(sigmoid(lr_hypothesis(x,theta)))) - (1-y)*(np.log(1 - sigmoid(lr_hypothesis(x,theta))))
infunc2 = lambda_*np.sum(theta[1:]**2)/(2*m)
j = np.sum(infunc1)/m+infunc2
return j
###Output
_____no_output_____
###Markdown
3. Gradient Function
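Likewise, the gradient computed below is the standard regularized one, where the regularization term applies only to $j \geq 1$: $$ \frac{\partial J}{\partial \theta_j} = \frac{1}{m}\sum_{i=1}^{m}\big(h_\theta(x^{(i)}) - y^{(i)}\big)\,x_j^{(i)} + \frac{\lambda}{m}\theta_j \qquad (j \geq 1) $$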
###Code
# gradient[0] corresponds to the gradient for theta(0)
# gradient[1:] corresponds to the gradient for theta(j), j>0
def compute_gradient(theta, x, y, lambda_, m, n):
    gradient = np.zeros(n).reshape(n,) # shape=(n,)
    theta = theta.reshape(n,1) # shape=(n,1)
    x = x.reshape(m,n)
    y = y.reshape(m,1)
    infunc1 = sigmoid(lr_hypothesis(x,theta))-y # shape=(m,1)
    gradient_ = np.dot(x.T,infunc1)/m # shape=(n,1)
    # np.asarray keeps the reshape valid even when x is a numpy matrix,
    # where a plain reshape(n,) would return shape (1,n) instead of (n,)
    gradient_ = np.asarray(gradient_).reshape(n,)
    #gradient[0] = gradient_[0]
    gradient[1:] = gradient_[1:]+(lambda_*theta[1:,]/m).reshape(n-1,) # regularized gradient for theta(j), j>0
return gradient
theta_test = np.array([-2,-1,1,2])
x_test = np.append(np.ones(5),np.arange(0.1,1.6,0.1)).reshape(5,4, order='F')
y_test = np.array([1,0,1,0,1]).reshape(-1,1)
m,n= x_test.shape
cost_temp = compute_cost(theta=theta_test,x=x_test,y=y_test,lambda_=3,m=5, n=4)
gradient_temp = compute_gradient(theta=theta_test,x=x_test,y=y_test,lambda_=3, m=5, n=4)
print('if lambda = 3 =======>\n cost = {}\n ,\n gradients = \n{}'
.format(cost_temp,gradient_temp))
###Output
if lambda = 3 =======>
cost = 2.5348193961097443
,
gradients =
[ 0. -0.54855841 0.72472227 1.39800296]
###Markdown
4. Learning Parameters Using the `fmin_cg` Optimizer**Scipy's fmin_cg** is an optimization solver that finds the **minimum of an unconstrained** function. For regularized logistic regression, you want to **optimize the cost function J(θ) with parameters θ**. Concretely, you are going to use `fmin_cg` to find the best parameters θ for the regularized logistic regression cost function, given a fixed dataset (of x and y values). You will pass to `fmin_cg` the following inputs:1. The initial values of the parameters we are trying to optimize.2. A function that, when given the training set and a particular θ, computes the regularized logistic regression cost with respect to θ for the dataset (x, y) ======> compute_cost3. A function that, when given the training set and a particular θ, computes the regularized logistic regression gradient with respect to θ for the dataset (x, y) ======> compute_gradient 5. Evaluating the ModelWe need to calculate the **probabilities and related predictions** and then compare the predicted values to the real ones to get the accuracy.
###Code
m,n = x_train.shape
lambda_=0.1
theta = np.zeros(shape=(n,))
# import the SciPy optimization library used below
import scipy.optimize as opt
def one_vs_all(theta,x,y,num_labels,lambda_, m, n):
all_theta = np.zeros(shape=(num_labels,n))
for i in range(0,num_labels):
        # fmin_cg returns the optimized theta vector for class i
        all_theta[i] = opt.fmin_cg(f=compute_cost, fprime=compute_gradient,
                            x0 = theta, args=(x,(y==i)*1, lambda_, m, n))
#optimized = opt.minimize(compute_cost, theta, args=(x,(y==i)*1,lambda_),
# method=None, jac= True)
#all_theta[c] = optimized.X
return all_theta
all_theta = one_vs_all(theta.flatten() ,x_train.flatten() , y_train, len(valid_tags), lambda_,
m=x_train.shape[0], n=x_train.shape[1])
###Output
_____no_output_____
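###Markdown
To complete the scratch model, here is a minimal sketch of the prediction/accuracy step described in section 5. The helper name `predict_one_vs_all` is ours (not part of the original notebook); it simply picks, for every example, the class whose one-vs-all classifier assigns the highest probability.
```python
def predict_one_vs_all(all_theta, x):
    x = np.asarray(x)                        # works for both ndarray and matrix input
    probs = sigmoid(np.dot(x, all_theta.T))  # shape (m, num_labels)
    return np.argmax(probs, axis=1)          # predicted class index per example

y_pred = predict_one_vs_all(all_theta, x_train)
print('train accuracy: {:.4f}'.format(np.mean(y_pred == np.asarray(y_train).ravel())))
```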
###Markdown
Kernel SAGA
###Code
logistic_regression = l.LogisticRegression(random_state=85, solver='saga', multi_class='auto')
logistic_regression = logistic_regression.fit(x_train, y_train.ravel())
y_train_predict = logistic_regression.predict(x_train)
cm_train = confusion_matrix(y_train,y_train_predict)
t_train,f_train,acc_train = accuracy_on_cm(cm_train)
print('Saga Train status = #{} True, #{} False, %{} Accuracy'.format(t_train,f_train,acc_train*100))
y_test_predict = logistic_regression.predict(x_test)
cm_test = confusion_matrix(y_test,y_test_predict)
t_test,f_test,acc_test = accuracy_on_cm(cm_test)
print('Saga Test status = #{} True, #{} False, %{} Accuracy'.format(t_test,f_test,acc_test*100))
###Output
Saga Train status = #649 True, #251 False, %72.11111111111111 Accuracy
Saga Test status = #63 True, #37 False, %63.0 Accuracy
###Markdown
SVM
###Code
svc = svm.SVC(kernel='linear', random_state=85, gamma='auto')
svc = svc.fit(x_train, y_train.ravel())
y_train_predict = svc.predict(x_train)
cm_train = confusion_matrix(y_train,y_train_predict)
t_train,f_train,acc_train = accuracy_on_cm(cm_train)
print('SVM Train status = #{} True, #{} False, %{} Accuracy'.format(t_train,f_train,acc_train*100))
y_test_predict = svc.predict(x_test)
cm_test = confusion_matrix(y_test,y_test_predict)
t_test,f_test,acc_test = accuracy_on_cm(cm_test)
print('SVM Test status = #{} True, #{} False, %{} Accuracy'.format(t_test,f_test,acc_test*100))
###Output
SVM Train status = #866 True, #34 False, %96.22222222222221 Accuracy
SVM Test status = #53 True, #47 False, %53.0 Accuracy
|
Coursera/Machine Learning with Python-IBM/Week-2/Exercise/ML0101EN-Reg-NoneLinearRegression-py-v1.ipynb | ###Markdown
Non Linear Regression Analysis If the data shows a curvy trend, then linear regression will not produce very accurate results when compared to a non-linear regression because, as the name implies, linear regression presumes that the data is linear. Let's learn about non-linear regressions and apply an example in Python. In this notebook, we fit a non-linear model to the datapoints corresponding to China's GDP from 1960 to 2014. Importing required libraries
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Though linear regression is very good at solving many problems, it cannot be used for all datasets. First, recall how linear regression models a dataset: it models a linear relation between a dependent variable y and an independent variable x, with a simple equation of degree 1, for example y = $2x$ + 3.
###Code
x = np.arange(-5.0, 5.0, 0.1)
##You can adjust the slope and intercept to verify the changes in the graph
y = 2*(x) + 3
y_noise = 2 * np.random.normal(size=x.size)
ydata = y + y_noise
#plt.figure(figsize=(8,6))
plt.plot(x, ydata, 'bo')
plt.plot(x,y, 'r')
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
###Output
_____no_output_____
###Markdown
Non-linear regressions are a relationship between independent variables $x$ and a dependent variable $y$ which result in a non-linear function modeled data. Essentially any relationship that is not linear can be termed as non-linear, and is usually represented by the polynomial of $k$ degrees (maximum power of $x$). $$ \ y = a x^3 + b x^2 + c x + d \ $$Non-linear functions can have elements like exponentials, logarithms, fractions, and others. For example: $$ y = \log(x)$$ Or even, more complicated such as :$$ y = \log(a x^3 + b x^2 + c x + d)$$ Let's take a look at a cubic function's graph.
###Code
x = np.arange(-5.0, 5.0, 0.1)
##You can adjust the slope and intercept to verify the changes in the graph
y = 1*(x**3) + 1*(x**2) + 1*x + 3
y_noise = 20 * np.random.normal(size=x.size)
ydata = y + y_noise
plt.plot(x, ydata, 'bo')
plt.plot(x,y, 'r')
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, this function has $x^3$ and $x^2$ as independent variables. Also, the graphic of this function is not a straight line over the 2D plane. So this is a non-linear function. Some other types of non-linear functions are: Quadratic $$ Y = X^2 $$
###Code
x = np.arange(-5.0, 5.0, 0.1)
##You can adjust the slope and intercept to verify the changes in the graph
y = np.power(x,2)
y_noise = 2 * np.random.normal(size=x.size)
ydata = y + y_noise
plt.plot(x, ydata, 'bo')
plt.plot(x,y, 'r')
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
###Output
_____no_output_____
###Markdown
Exponential An exponential function with base c is defined by $$ Y = a + b c^X$$ where b ≠ 0, c > 0, c ≠ 1, and x is any real number. The base, c, is constant and the exponent, x, is a variable.
###Code
X = np.arange(-5.0, 5.0, 0.1)
##You can adjust the slope and intercept to verify the changes in the graph
Y= np.exp(X)
plt.plot(X,Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
###Output
_____no_output_____
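###Markdown
The plot above uses the special case a = 0, b = 1, c = e. As a small illustrative sketch, the general form with example constants (a = 2, b = 3, c = 1.5, chosen arbitrarily) looks like this:
```python
X = np.arange(-5.0, 5.0, 0.1)
# general exponential form Y = a + b*c**X with example constants a=2, b=3, c=1.5
Y = 2 + 3 * np.power(1.5, X)
plt.plot(X, Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```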
###Markdown
LogarithmicThe response $y$ is the result of applying a logarithmic map from the input $x$'s to the output variable $y$. It is one of the simplest forms of __log()__: i.e. $$ y = \log(x)$$Please consider that instead of $x$, we can use $X$, which can be a polynomial representation of the $x$'s. In general form it would be written as \begin{equation}y = \log(X)\end{equation}
###Code
X = np.arange(-5.0, 5.0, 0.1)
Y = np.log(X)
plt.plot(X,Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
###Output
/home/jupyterlab/conda/lib/python3.6/site-packages/ipykernel_launcher.py:3: RuntimeWarning: invalid value encountered in log
This is separate from the ipykernel package so we can avoid doing imports until
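###Markdown
The `RuntimeWarning` above appears because `np.log` is undefined for the non-positive values in this range. One simple way to avoid it (a small illustrative tweak, not part of the original lab) is to restrict the domain to positive inputs:
```python
X = np.arange(0.1, 5.0, 0.1)  # strictly positive inputs, so np.log is defined everywhere
Y = np.log(X)
plt.plot(X, Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```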
###Markdown
Sigmoidal/Logistic $$ Y = a + \frac{b}{1+ c^{(X-d)}}$$
###Code
X = np.arange(-5.0, 5.0, 0.1)
Y = 1-4/(1+np.power(3, X-2))
plt.plot(X,Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
###Output
_____no_output_____
###Markdown
Non-Linear Regression example For an example, we're going to try and fit a non-linear model to the datapoints corresponding to China's GDP from 1960 to 2014. We download a dataset with two columns, the first, a year between 1960 and 2014, the second, China's corresponding annual gross domestic income in US dollars for that year.
###Code
import numpy as np
import pandas as pd
#downloading dataset
!wget -nv -O china_gdp.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/china_gdp.csv
df = pd.read_csv("china_gdp.csv")
df.head(10)
###Output
2019-01-04 18:42:05 URL:https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/china_gdp.csv [1218/1218] -> "china_gdp.csv" [1]
###Markdown
__Did you know?__ When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC) Plotting the Dataset This is what the datapoints look like. It looks like either a logistic or an exponential function. The growth starts off slow, then from 2005 onward the growth is very significant. And finally, it decelerates slightly in the 2010s.
###Code
plt.figure(figsize=(8,5))
x_data, y_data = (df["Year"].values, df["Value"].values)
plt.plot(x_data, y_data, 'ro')
plt.ylabel('GDP')
plt.xlabel('Year')
plt.show()
###Output
_____no_output_____
###Markdown
Choosing a model From an initial look at the plot, we determine that the logistic function could be a good approximation, since it has the property of starting with slow growth, increasing growth in the middle, and then decreasing again at the end; as illustrated below:
###Code
X = np.arange(-5.0, 5.0, 0.1)
Y = 1.0 / (1.0 + np.exp(-X))
plt.plot(X,Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
###Output
_____no_output_____
###Markdown
The formula for the logistic function is the following:$$ \hat{Y} = \frac{1}{1+e^{-\beta_1(X-\beta_2)}}$$$\beta_1$: Controls the curve's steepness,$\beta_2$: Slides the curve on the x-axis. Building The Model Now, let's build our regression model and initialize its parameters.
###Code
def sigmoid(x, Beta_1, Beta_2):
y = 1 / (1 + np.exp(-Beta_1*(x-Beta_2)))
return y
###Output
_____no_output_____
###Markdown
Lets look at a sample sigmoid line that might fit with the data:
###Code
beta_1 = 0.10
beta_2 = 1990.0
#logistic function
Y_pred = sigmoid(x_data, beta_1 , beta_2)
#plot initial prediction against datapoints
plt.plot(x_data, Y_pred*15000000000000.)
plt.plot(x_data, y_data, 'ro')
###Output
_____no_output_____
###Markdown
Our task here is to find the best parameters for our model. Let's first normalize our x and y:
###Code
# Lets normalize our data
xdata =x_data/max(x_data)
ydata =y_data/max(y_data)
###Output
_____no_output_____
###Markdown
How do we find the best parameters for our fit line? We can use __curve_fit__, which uses non-linear least squares to fit our sigmoid function to the data. It finds optimal values for the parameters so that the sum of the squared residuals of sigmoid(xdata, *popt) - ydata is minimized. popt holds our optimized parameters.
###Code
from scipy.optimize import curve_fit
popt, pcov = curve_fit(sigmoid, xdata, ydata)
#print the final parameters
print(" beta_1 = %f, beta_2 = %f" % (popt[0], popt[1]))
###Output
beta_1 = 690.453017, beta_2 = 0.997207
###Markdown
Now we plot our resulting regression model.
###Code
x = np.linspace(1960, 2015, 55)
x = x/max(x)
plt.figure(figsize=(8,5))
y = sigmoid(x, *popt)
plt.plot(xdata, ydata, 'ro', label='data')
plt.plot(x,y, linewidth=3.0, label='fit')
plt.legend(loc='best')
plt.ylabel('GDP')
plt.xlabel('Year')
plt.show()
###Output
_____no_output_____
###Markdown
Practice Can you calculate the accuracy of our model?
###Code
# write your code here
msk = np.random.rand(len(df)) < 0.8
train_x = xdata[msk]
test_x = xdata[~msk]
train_y = ydata[msk]
test_y = ydata[~msk]
# build the model using train set
popt, pcov = curve_fit(sigmoid, train_x, train_y)
# predict using test set
y_hat = sigmoid(test_x, *popt)
# evaluation
print("Mean absolute error: %.2f" % np.mean(np.absolute(y_hat - test_y)))
print("Residual sum of squares (MSE): %.2f" % np.mean((y_hat - test_y) ** 2))
from sklearn.metrics import r2_score
print("R2-score: %.2f" % r2_score(y_hat , test_y) )
###Output
Mean absolute error: 0.03
Residual sum of squares (MSE): 0.00
R2-score: 0.96
|
prog1/implementacoes/tutoriais/dicionarios.ipynb | ###Markdown
DictionariesA dictionary in Python is an unordered collection made of key-value pairs, where the key must be an immutable type (string, tuple or numeric types) and the values can be mutable or immutable. Initializing a Dictionary in PythonIn Python, a dictionary can be initialized in two ways:```python Declaring the dictionary first and only later adding key-value pairsnome_do_dicionario = {}nome_do_dicionario['Chave1'] = 'Valor1'nome_do_dicionario['Chave2'] = 'Valor2'Or Declaring the dictionary already with its pairs nome_do_dicionario = {'Chave1': 'Valor1', 'Chave2': 'Valor2'}```Some examples of declarations can be seen below:
###Code
# Declare the dictionary already containing some key-value pairs
autores_musicas = {'Ze Ramalho': 'Frevo Mulher', 'Chico Cesar': 'Mama Africa'}
# Print the value that the key 'Ze Ramalho' points to
print (autores_musicas['Ze Ramalho'])
# Print the value that the key 'Chico Cesar' points to
print (autores_musicas['Chico Cesar'])
# Add another key-value pair to the autores_musicas dictionary
autores_musicas['Xand Aviao'] = 'Nota Dez'
# Print the value that the key 'Xand Aviao' points to
print (autores_musicas['Xand Aviao'])
###Output
Nota Dez
###Markdown
In the case below, the key receives a list as its value.
###Code
# Add another key-value pair to the autores_musicas dictionary
autores_musicas['Rihanna'] = ['Diamonds', 'Needed Me', 'Kiss It Better']
# Print the value that the key 'Rihanna' points to
print (autores_musicas['Rihanna'])
###Output
['Diamonds', 'Needed Me', 'Kiss It Better']
###Markdown
Since the value is a list, its elements can be accessed by iteration.
###Code
# Store the list of songs contained in the dictionary under the key 'Rihanna'
# in the variable musicas_rihanna
musicas_rihanna = autores_musicas['Rihanna']
# Iterate over the indices of the list
for indice in range(len(musicas_rihanna)):
    # Print the elements of the list stored under the key 'Rihanna'
    print (musicas_rihanna[indice])
###Output
Diamonds
Needed Me
Kiss It Better
###Markdown
Important methods for iterating over dictionariesNatively, Python provides methods for iterating over dictionaries, listed in the table below:Method | What it does-------------|-----------------items() | returns a list of tuples with the keys and the values keys() | returns a list containing only the keysvalues() | returns a list containing only the values Showing it in practice:
###Code
# Declare the glossario_computacao dictionary
glossario_computacao = {'P1': 'Programacao 1', 'LP1': 'Laboratorio de Programacao 1'}
# Print the return of each method for the glossario_computacao dictionary
print ("Método items() =>", list(glossario_computacao.items()))
print ("Método keys() =>", list(glossario_computacao.keys()))
print ("Método values() =>", list(glossario_computacao.values()))
###Output
Método items() => [('P1', 'Programacao 1'), ('LP1', 'Laboratorio de Programacao 1')]
Método keys() => ['P1', 'LP1']
Método values() => ['Programacao 1', 'Laboratorio de Programacao 1']
###Markdown
Now using *for* to iterate over the collections returned by these methods.
###Code
# To iterate over the list of key-value tuples generated by the items() method,
# two control variables are needed
for sigla, significado in glossario_computacao.items():
    print ("Chave:", sigla, "=>", "Valor:", significado)
###Output
Chave: P1 => Valor: Programacao 1
Chave: LP1 => Valor: Laboratorio de Programacao 1
###Markdown
Since the return of the keys method is a __list__, it is possible to iterate in the same way.
###Code
# By element
for sigla in glossario_computacao.keys():
    print (sigla)
# By index
lista_chaves = list(glossario_computacao.keys())
for indice in range(len(lista_chaves)):
    print (lista_chaves[indice])
###Output
P1
LP1
###Markdown
Finally, the *values()* method.
###Code
# By element
for significado in glossario_computacao.values():
    print (significado)
# By index
lista_valores = list(glossario_computacao.values())
for indice in range(len(lista_valores)):
    print (lista_valores[indice])
###Output
Programacao 1
Laboratorio de Programacao 1
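###Markdown
As a small complementary example (illustrative only, reusing the dictionary defined above), a key can also be tested for membership with `in`, and read safely with `get()`, which returns a default value instead of raising a `KeyError`:
```python
# Check whether a key exists before accessing it
print ('P1' in glossario_computacao)
# get() returns the given default when the key is missing
print (glossario_computacao.get('IC', 'not found'))
```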
|
V6/1_movie_lens/3_Movies_SQL_Solutions.ipynb | ###Markdown
Questions for the MovieLens Dataset Setup SQL
###Code
%load_ext sql
%config SqlMagic.autocommit=False # avoiding the error: FAILED: IllegalStateException COMMIT is not supported yet.
%sql hive://hadoop@localhost:10000/movielens
###Output
_____no_output_____
###Markdown
Playground How many movies do we have?
###Code
%time %sql SELECT count(*) FROM movies
###Output
* hive://hadoop@localhost:10000/movielens
Done.
CPU times: user 8.68 ms, sys: 4.67 ms, total: 13.4 ms
Wall time: 22.8 s
###Markdown
How many ratings do we have?
###Code
%time %sql SELECT count(*) FROM ratings
###Output
* hive://hadoop@localhost:10000/movielens
Done.
CPU times: user 5.34 ms, sys: 5.57 ms, total: 10.9 ms
Wall time: 30.2 s
###Markdown
How many users do we have?
###Code
%time %sql SELECT COUNT(DISTINCT(userid)) FROM ratings
###Output
* hive://hadoop@localhost:10000/movielens
Done.
CPU times: user 5.09 ms, sys: 5.37 ms, total: 10.5 ms
Wall time: 29.8 s
###Markdown
Which movie(s) has (have) the most number of genres?
###Code
%time %sql select title, year, genres, size(genres) as num_gen from movies order by num_gen desc limit 2
###Output
* hive://hadoop@localhost:10000/movielens
Done.
CPU times: user 10.3 ms, sys: 0 ns, total: 10.3 ms
Wall time: 26.5 s
###Markdown
Show all movies with terminator in the title
###Code
%time %sql select movieid, title, year from movies where lower(title) like '%terminator%'
###Output
* hive://hadoop@localhost:10000/movielens
Done.
CPU times: user 9.33 ms, sys: 1.27 ms, total: 10.6 ms
Wall time: 1.77 s
###Markdown
How many movies do we have from 1984?
###Code
%time %sql select count(*) from movies where year = 1984
###Output
* hive://hadoop@localhost:10000/movielens
Done.
CPU times: user 10.3 ms, sys: 0 ns, total: 10.3 ms
Wall time: 25.7 s
###Markdown
Show the distribution of movies per year (where year >= 2000), sorted by year
###Code
%time %sql select year, count(title) from movies where year >= 2000 group by year order by year asc
###Output
* hive://hadoop@localhost:10000/movielens
Done.
CPU times: user 11.1 ms, sys: 1.45 ms, total: 12.5 ms
Wall time: 50 s
###Markdown
Movies with the most number of ratings
###Code
%time %sql select title, year, num_rating, median_rating from movie_rating order by num_rating DESC limit 10
###Output
* hive://hadoop@localhost:10000/movielens
Done.
CPU times: user 9.62 ms, sys: 1.08 ms, total: 10.7 ms
Wall time: 27.5 s
###Markdown
Top ten best rated movies (by median) where we have at least 100 ratings for a movie
###Code
%%time
%%sql
select title, year, num_rating, median_rating
from movie_rating
where num_rating > 100
order by median_rating DESC, num_rating DESC
limit 10
###Output
* hive://hadoop@localhost:10000/movielens
Done.
CPU times: user 1.94 ms, sys: 8.78 ms, total: 10.7 ms
Wall time: 24.4 s
###Markdown
Top ten worst rated movies (by median) where we have at least 100 ratings for a movie
###Code
%%time
%%sql
select title, year, num_rating, median_rating
from movie_rating
where
num_rating is not null
and num_rating > 100
order by median_rating ASC, num_rating DESC
limit 10
###Output
* hive://hadoop@localhost:10000/movielens
Done.
CPU times: user 10 ms, sys: 0 ns, total: 10 ms
Wall time: 27.2 s
###Markdown
Which genres were used how often?
###Code
%%time
%%sql
SELECT genre, COUNT(genre) AS cnt FROM (
SELECT EXPLODE(genres) genre FROM movies
)t
GROUP BY genre
ORDER BY cnt DESC
###Output
* hive://hadoop@localhost:10000/movielens
Done.
CPU times: user 12.6 ms, sys: 0 ns, total: 12.6 ms
Wall time: 53.1 s
###Markdown
Naïve Movie Recommender Step 1 - find two movies (the `movieid` you like a lot) --> 4011 == Snatch --> 1270 == Back to the Future
###Code
%time %sql select movieid, title, year from movies where lower(title) like '%snatch%'
%time %sql select movieid, title, year from movies where lower(title) like '%back to the%'
###Output
* hive://hadoop@localhost:10000/movielens
Done.
CPU times: user 9.08 ms, sys: 0 ns, total: 9.08 ms
Wall time: 267 ms
###Markdown
Find people who liked these movies as well and save them into a temporary table
###Code
%%time
%%sql
CREATE TEMPORARY TABLE similar_people as
select distinct(userid) userid
from ratings
where (movieid = 4011 or movieid = 1270) and rating = 5
###Output
* hive://hadoop@localhost:10000/movielens
Done.
CPU times: user 8.23 ms, sys: 423 µs, total: 8.65 ms
Wall time: 33.1 s
###Markdown
Basic checks for `similar_people`
###Code
%time %sql select * from similar_people limit 2
%time %sql select count(*) from similar_people
###Output
* hive://hadoop@localhost:10000/movielens
Done.
CPU times: user 2.33 ms, sys: 5.81 ms, total: 8.14 ms
Wall time: 124 ms
###Markdown
Join `similar_people` with `movies` and `ratings` and get movie recommendations
###Code
%%time
%%sql
SELECT m.title, count(*) as five_star_count from ratings r
INNER JOIN similar_people sp ON r.userid = sp.userid
INNER JOIN movies m ON r.movieid = m.movieid
WHERE rating = 5
GROUP BY m.title
ORDER BY five_star_count DESC
LIMIT 20
###Output
* hive://hadoop@localhost:10000/movielens
Done.
CPU times: user 6.27 ms, sys: 7.87 ms, total: 14.1 ms
Wall time: 1min 3s
|
Studying Materials/Course 3 Classification/Week 5 Boosting Classifiers/module-8-boosting-assignment-2-blank.ipynb | ###Markdown
Boosting a decision stumpThe goal of this notebook is to implement your own boosting module.**Brace yourselves**! This is going to be a fun and challenging assignment.* Use SFrames to do some feature engineering.* Modify the decision trees to incorporate weights.* Implement Adaboost ensembling.* Use your implementation of Adaboost to train a boosted decision stump ensemble.* Evaluate the effect of boosting (adding more decision stumps) on performance of the model.* Explore the robustness of Adaboost to overfitting.Let's get started! Fire up GraphLab Create Make sure you have the latest version of GraphLab Create **(1.8.3 or newer)**. Upgrade by``` pip install graphlab-create --upgrade```See [this page](https://dato.com/download/) for detailed instructions on upgrading.
###Code
import graphlab
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Getting the data ready We will be using the same [LendingClub](https://www.lendingclub.com/) dataset as in the previous assignment.
###Code
loans = graphlab.SFrame('lending-club-data.gl/')
###Output
This non-commercial license of GraphLab Create for academic use is assigned to [email protected] and will expire on August 11, 2018.
###Markdown
Extracting the target and the feature columnsWe will now repeat some of the feature processing steps that we saw in the previous assignment:First, we re-assign the target to have +1 as a safe (good) loan, and -1 as a risky (bad) loan.Next, we select four categorical features: 1. grade of the loan 2. the length of the loan term3. the home ownership status: own, mortgage, rent4. number of years of employment.
###Code
features = ['grade', # grade of the loan
'term', # the term of the loan
'home_ownership', # home ownership status: own, mortgage or rent
'emp_length', # number of years of employment
]
loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1)
loans.remove_column('bad_loans')
target = 'safe_loans'
loans = loans[features + [target]]
###Output
_____no_output_____
###Markdown
Subsample dataset to make sure classes are balanced Just as we did in the previous assignment, we will undersample the larger class (safe loans) in order to balance out our dataset. This means we are throwing away many data points. We use `seed=1` so everyone gets the same results.
###Code
safe_loans_raw = loans[loans[target] == 1]
risky_loans_raw = loans[loans[target] == -1]
# Undersample the safe loans.
percentage = len(risky_loans_raw)/float(len(safe_loans_raw))
risky_loans = risky_loans_raw
safe_loans = safe_loans_raw.sample(percentage, seed=1)
loans_data = risky_loans_raw.append(safe_loans)
print "Percentage of safe loans :", len(safe_loans) / float(len(loans_data))
print "Percentage of risky loans :", len(risky_loans) / float(len(loans_data))
print "Total number of loans in our new dataset :", len(loans_data)
###Output
Percentage of safe loans : 0.502236174422
Percentage of risky loans : 0.497763825578
Total number of loans in our new dataset : 46508
###Markdown
**Note:** There are many approaches for dealing with imbalanced data, including some where we modify the learning algorithm. These approaches are beyond the scope of this course, but some of them are reviewed in this [paper](http://ieeexplore.ieee.org/xpl/login.jsp?tp=&arnumber=5128907&url=http%3A%2F%2Fieeexplore.ieee.org%2Fiel5%2F69%2F5173046%2F05128907.pdf%3Farnumber%3D5128907 ). For this assignment, we use the simplest possible approach, where we subsample the overly represented class to get a more balanced dataset. In general, and especially when the data is highly imbalanced, we recommend using more advanced methods. Transform categorical data into binary features In this assignment, we will work with **binary decision trees**. Since all of our features are currently categorical features, we want to turn them into binary features using 1-hot encoding. We can do so with the following code block (see the first assignments for more details):
###Code
loans_data = risky_loans.append(safe_loans)
for feature in features:
loans_data_one_hot_encoded = loans_data[feature].apply(lambda x: {x: 1})
loans_data_unpacked = loans_data_one_hot_encoded.unpack(column_name_prefix=feature)
# Change None's to 0's
for column in loans_data_unpacked.column_names():
loans_data_unpacked[column] = loans_data_unpacked[column].fillna(0)
loans_data.remove_column(feature)
loans_data.add_columns(loans_data_unpacked)
###Output
_____no_output_____
###Markdown
Let's see what the feature columns look like now:
###Code
features = loans_data.column_names()
features.remove('safe_loans') # Remove the response variable
features
###Output
_____no_output_____
###Markdown
Train-test splitWe split the data into training and test sets with 80% of the data in the training set and 20% of the data in the test set. We use `seed=1` so that everyone gets the same result.
###Code
train_data, test_data = loans_data.random_split(0.8, seed=1)
###Output
_____no_output_____
###Markdown
Weighted decision trees Let's modify our decision tree code from Module 5 to support weighting of individual data points. Weighted error definitionConsider a model with $N$ data points with:* Predictions $\hat{y}_1 ... \hat{y}_n$ * Target $y_1 ... y_n$ * Data point weights $\alpha_1 ... \alpha_n$.Then the **weighted error** is defined by:$$\mathrm{E}(\mathbf{\alpha}, \mathbf{\hat{y}}) = \frac{\sum_{i=1}^{n} \alpha_i \times 1[y_i \neq \hat{y_i}]}{\sum_{i=1}^{n} \alpha_i}$$where $1[y_i \neq \hat{y_i}]$ is an indicator function that is set to $1$ if $y_i \neq \hat{y_i}$. Write a function to compute weight of mistakesWrite a function that calculates the weight of mistakes for making the "weighted-majority" predictions for a dataset. The function accepts two inputs:* `labels_in_node`: Targets $y_1 ... y_n$ * `data_weights`: Data point weights $\alpha_1 ... \alpha_n$We are interested in computing the (total) weight of mistakes, i.e.$$\mathrm{WM}(\mathbf{\alpha}, \mathbf{\hat{y}}) = \sum_{i=1}^{n} \alpha_i \times 1[y_i \neq \hat{y_i}].$$This quantity is analogous to the number of mistakes, except that each mistake now carries different weight. It is related to the weighted error in the following way:$$\mathrm{E}(\mathbf{\alpha}, \mathbf{\hat{y}}) = \frac{\mathrm{WM}(\mathbf{\alpha}, \mathbf{\hat{y}})}{\sum_{i=1}^{n} \alpha_i}$$The function **intermediate_node_weighted_mistakes** should first compute two weights: * $\mathrm{WM}_{-1}$: weight of mistakes when all predictions are $\hat{y}_i = -1$ i.e $\mathrm{WM}(\mathbf{\alpha}, \mathbf{-1}$) * $\mathrm{WM}_{+1}$: weight of mistakes when all predictions are $\hat{y}_i = +1$ i.e $\mbox{WM}(\mathbf{\alpha}, \mathbf{+1}$) where $\mathbf{-1}$ and $\mathbf{+1}$ are vectors where all values are -1 and +1 respectively. After computing $\mathrm{WM}_{-1}$ and $\mathrm{WM}_{+1}$, the function **intermediate_node_weighted_mistakes** should return the lower of the two weights of mistakes, along with the class associated with that weight. We have provided a skeleton for you with `YOUR CODE HERE` to be filled in several places.
###Code
def intermediate_node_weighted_mistakes(labels_in_node, data_weights):
# Sum the weights of all entries with label +1
total_weight_positive = sum(data_weights[labels_in_node == 1])
# Weight of mistakes for predicting all -1's is equal to the sum above
### YOUR CODE HERE
weighted_mistakes_all_negative = total_weight_positive
# Sum the weights of all entries with label -1
### YOUR CODE HERE
total_weight_negative = sum(data_weights[labels_in_node == -1])
# Weight of mistakes for predicting all +1's is equal to the sum above
### YOUR CODE HERE
weighted_mistakes_all_positive = total_weight_negative
# Return the tuple (weight, class_label) representing the lower of the two weights
# class_label should be an integer of value +1 or -1.
# If the two weights are identical, return (weighted_mistakes_all_positive,+1)
### YOUR CODE HERE
if weighted_mistakes_all_negative == weighted_mistakes_all_positive:
return (weighted_mistakes_all_positive, 1)
elif weighted_mistakes_all_negative < weighted_mistakes_all_positive:
return (weighted_mistakes_all_negative, -1)
else:
return (weighted_mistakes_all_positive, 1)
###Output
_____no_output_____
###Markdown
**Checkpoint:** Test your **intermediate_node_weighted_mistakes** function, run the following cell:
###Code
example_labels = graphlab.SArray([-1, -1, 1, 1, 1])
example_data_weights = graphlab.SArray([1., 2., .5, 1., 1.])
if intermediate_node_weighted_mistakes(example_labels, example_data_weights) == (2.5, -1):
print 'Test passed!'
else:
print 'Test failed... try again!'
###Output
Test passed!
###Markdown
Recall that the **classification error** is defined as follows:$$\mbox{classification error} = \frac{\mbox{ mistakes}}{\mbox{ all data points}}$$**Quiz Question:** If we set the weights $\mathbf{\alpha} = 1$ for all data points, how is the weight of mistakes $\mbox{WM}(\mathbf{\alpha}, \mathbf{\hat{y}})$ related to the `classification error`? If the weight of all data points is 1, the weight of mistakes divided by the number of data points will be equal to the classification error. Function to pick best feature to split on We continue modifying our decision tree code from the earlier assignment to incorporate weighting of individual data points. The next step is to pick the best feature to split on.The **best_splitting_feature** function is similar to the one from the earlier assignment with two minor modifications: 1. The function **best_splitting_feature** should now accept an extra parameter `data_weights` to take account of weights of data points. 2. Instead of computing the number of mistakes in the left and right side of the split, we compute the weight of mistakes for both sides, add up the two weights, and divide it by the total weight of the data. Complete the following function. Comments starting with `DIFFERENT HERE` mark the sections where the weighted version differs from the original implementation.
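As a quick sanity check of this relationship (an illustrative snippet reusing the helper defined above): with unit weights, the weight of mistakes divided by the number of data points is exactly the classification error of predicting the returned class everywhere.
```python
example_labels = graphlab.SArray([-1, -1, 1, 1, 1])
unit_weights = graphlab.SArray([1., 1., 1., 1., 1.])
wm, best_class = intermediate_node_weighted_mistakes(example_labels, unit_weights)
print wm / float(len(example_labels))  # 0.4 = 2 mistakes out of 5 when predicting best_class (+1) everywhere
```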
###Code
def best_splitting_feature(data, features, target, data_weights):
# These variables will keep track of the best feature and the corresponding error
best_feature = None
best_error = float('+inf')
num_points = float(len(data))
# Loop through each feature to consider splitting on that feature
for feature in features:
# The left split will have all data points where the feature value is 0
# The right split will have all data points where the feature value is 1
left_split = data[data[feature] == 0]
right_split = data[data[feature] == 1]
# Apply the same filtering to data_weights to create left_data_weights, right_data_weights
## YOUR CODE HERE
left_data_weights = data_weights[data[feature] == 0]
right_data_weights = data_weights[data[feature] == 1]
# DIFFERENT HERE
# Calculate the weight of mistakes for left and right sides
## YOUR CODE HERE
left_weighted_mistakes, left_class = intermediate_node_weighted_mistakes(left_split[target], left_data_weights)
right_weighted_mistakes, right_class = intermediate_node_weighted_mistakes(right_split[target], right_data_weights)
# DIFFERENT HERE
# Compute weighted error by computing
# ( [weight of mistakes (left)] + [weight of mistakes (right)] ) / [total weight of all data points]
## YOUR CODE HERE
error = (left_weighted_mistakes + right_weighted_mistakes) / float(sum(left_data_weights) + sum(right_data_weights))
# If this is the best error we have found so far, store the feature and the error
if error < best_error:
best_feature = feature
best_error = error
# Return the best feature we found
return best_feature
###Output
_____no_output_____
###Markdown
**Checkpoint:** Now, we have another checkpoint to make sure you are on the right track.
###Code
example_data_weights = graphlab.SArray(len(train_data)* [1.5])
if best_splitting_feature(train_data, features, target, example_data_weights) == 'term. 36 months':
print 'Test passed!'
else:
print 'Test failed... try again!'
###Output
Test passed!
###Markdown
**Note**. If you get an exception in the line of "the logical filter has different size than the array", try upgradting your GraphLab Create installation to 1.8.3 or newer. **Very Optional**. Relationship between weighted error and weight of mistakesBy definition, the weighted error is the weight of mistakes divided by the weight of all data points, so$$\mathrm{E}(\mathbf{\alpha}, \mathbf{\hat{y}}) = \frac{\sum_{i=1}^{n} \alpha_i \times 1[y_i \neq \hat{y_i}]}{\sum_{i=1}^{n} \alpha_i} = \frac{\mathrm{WM}(\mathbf{\alpha}, \mathbf{\hat{y}})}{\sum_{i=1}^{n} \alpha_i}.$$In the code above, we obtain $\mathrm{E}(\mathbf{\alpha}, \mathbf{\hat{y}})$ from the two weights of mistakes from both sides, $\mathrm{WM}(\mathbf{\alpha}_{\mathrm{left}}, \mathbf{\hat{y}}_{\mathrm{left}})$ and $\mathrm{WM}(\mathbf{\alpha}_{\mathrm{right}}, \mathbf{\hat{y}}_{\mathrm{right}})$. First, notice that the overall weight of mistakes $\mathrm{WM}(\mathbf{\alpha}, \mathbf{\hat{y}})$ can be broken into two weights of mistakes over either side of the split:$$\mathrm{WM}(\mathbf{\alpha}, \mathbf{\hat{y}})= \sum_{i=1}^{n} \alpha_i \times 1[y_i \neq \hat{y_i}]= \sum_{\mathrm{left}} \alpha_i \times 1[y_i \neq \hat{y_i}] + \sum_{\mathrm{right}} \alpha_i \times 1[y_i \neq \hat{y_i}]\\= \mathrm{WM}(\mathbf{\alpha}_{\mathrm{left}}, \mathbf{\hat{y}}_{\mathrm{left}}) + \mathrm{WM}(\mathbf{\alpha}_{\mathrm{right}}, \mathbf{\hat{y}}_{\mathrm{right}})$$We then divide through by the total weight of all data points to obtain $\mathrm{E}(\mathbf{\alpha}, \mathbf{\hat{y}})$:$$\mathrm{E}(\mathbf{\alpha}, \mathbf{\hat{y}})= \frac{\mathrm{WM}(\mathbf{\alpha}_{\mathrm{left}}, \mathbf{\hat{y}}_{\mathrm{left}}) + \mathrm{WM}(\mathbf{\alpha}_{\mathrm{right}}, \mathbf{\hat{y}}_{\mathrm{right}})}{\sum_{i=1}^{n} \alpha_i}$$ Building the treeWith the above functions implemented correctly, we are now ready to build our decision tree. Recall from the previous assignments that each node in the decision tree is represented as a dictionary which contains the following keys: { 'is_leaf' : True/False. 'prediction' : Prediction at the leaf node. 'left' : (dictionary corresponding to the left tree). 'right' : (dictionary corresponding to the right tree). 'features_remaining' : List of features that are posible splits. } Let us start with a function that creates a leaf node given a set of target values:
###Code
def create_leaf(target_values, data_weights):
# Create a leaf node
leaf = {'splitting_feature' : None,
'is_leaf': True}
# Computed weight of mistakes.
weighted_error, best_class = intermediate_node_weighted_mistakes(target_values, data_weights)
# Store the predicted class (1 or -1) in leaf['prediction']
leaf['prediction'] = best_class
return leaf
###Output
_____no_output_____
###Markdown
We provide a function that learns a weighted decision tree recursively and implements 3 stopping conditions:1. All data points in a node are from the same class.2. No more features to split on.3. Stop growing the tree when the tree depth reaches **max_depth**.
###Code
def weighted_decision_tree_create(data, features, target, data_weights, current_depth = 1, max_depth = 10):
remaining_features = features[:] # Make a copy of the features.
target_values = data[target]
print "--------------------------------------------------------------------"
print "Subtree, depth = %s (%s data points)." % (current_depth, len(target_values))
# Stopping condition 1. Error is 0.
if intermediate_node_weighted_mistakes(target_values, data_weights)[0] <= 1e-15:
print "Stopping condition 1 reached."
return create_leaf(target_values, data_weights)
# Stopping condition 2. No more features.
if remaining_features == []:
print "Stopping condition 2 reached."
return create_leaf(target_values, data_weights)
# Additional stopping condition (limit tree depth)
if current_depth > max_depth:
print "Reached maximum depth. Stopping for now."
return create_leaf(target_values, data_weights)
splitting_feature = best_splitting_feature(data, features, target, data_weights)
remaining_features.remove(splitting_feature)
left_split = data[data[splitting_feature] == 0]
right_split = data[data[splitting_feature] == 1]
left_data_weights = data_weights[data[splitting_feature] == 0]
right_data_weights = data_weights[data[splitting_feature] == 1]
print "Split on feature %s. (%s, %s)" % (\
splitting_feature, len(left_split), len(right_split))
# Create a leaf node if the split is "perfect"
if len(left_split) == len(data):
print "Creating leaf node."
return create_leaf(left_split[target], data_weights)
if len(right_split) == len(data):
print "Creating leaf node."
return create_leaf(right_split[target], data_weights)
# Repeat (recurse) on left and right subtrees
left_tree = weighted_decision_tree_create(
left_split, remaining_features, target, left_data_weights, current_depth + 1, max_depth)
right_tree = weighted_decision_tree_create(
right_split, remaining_features, target, right_data_weights, current_depth + 1, max_depth)
return {'is_leaf' : False,
'prediction' : None,
'splitting_feature': splitting_feature,
'left' : left_tree,
'right' : right_tree}
###Output
_____no_output_____
###Markdown
Here is a recursive function to count the nodes in your tree:
###Code
def count_nodes(tree):
if tree['is_leaf']:
return 1
return 1 + count_nodes(tree['left']) + count_nodes(tree['right'])
###Output
_____no_output_____
###Markdown
Run the following test code to check your implementation. Make sure you get **'Test passed'** before proceeding.
###Code
example_data_weights = graphlab.SArray([1.0 for i in range(len(train_data))])
small_data_decision_tree = weighted_decision_tree_create(train_data, features, target,
example_data_weights, max_depth=2)
if count_nodes(small_data_decision_tree) == 7:
print 'Test passed!'
else:
print 'Test failed... try again!'
print 'Number of nodes found:', count_nodes(small_data_decision_tree)
print 'Number of nodes that should be there: 7'
###Output
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature term. 36 months. (9223, 28001)
--------------------------------------------------------------------
Subtree, depth = 2 (9223 data points).
Split on feature grade.A. (9122, 101)
--------------------------------------------------------------------
Subtree, depth = 3 (9122 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 3 (101 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (28001 data points).
Split on feature grade.D. (23300, 4701)
--------------------------------------------------------------------
Subtree, depth = 3 (23300 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 3 (4701 data points).
Reached maximum depth. Stopping for now.
Test passed!
###Markdown
Let us take a quick look at what the trained tree is like. You should get something that looks like the following```{'is_leaf': False, 'left': {'is_leaf': False, 'left': {'is_leaf': True, 'prediction': -1, 'splitting_feature': None}, 'prediction': None, 'right': {'is_leaf': True, 'prediction': 1, 'splitting_feature': None}, 'splitting_feature': 'grade.A' }, 'prediction': None, 'right': {'is_leaf': False, 'left': {'is_leaf': True, 'prediction': 1, 'splitting_feature': None}, 'prediction': None, 'right': {'is_leaf': True, 'prediction': -1, 'splitting_feature': None}, 'splitting_feature': 'grade.D' }, 'splitting_feature': 'term. 36 months'}```
###Code
small_data_decision_tree
###Output
_____no_output_____
###Markdown
Making predictions with a weighted decision tree We give you a function that classifies one data point. It can also return the probability if you want to play around with that as well.
###Code
def classify(tree, x, annotate = False):
# If the node is a leaf node.
if tree['is_leaf']:
if annotate:
print "At leaf, predicting %s" % tree['prediction']
return tree['prediction']
else:
# Split on feature.
split_feature_value = x[tree['splitting_feature']]
if annotate:
print "Split on %s = %s" % (tree['splitting_feature'], split_feature_value)
if split_feature_value == 0:
return classify(tree['left'], x, annotate)
else:
return classify(tree['right'], x, annotate)
###Output
_____no_output_____
###Markdown
Evaluating the treeNow, we will write a function to evaluate a decision tree by computing the classification error of the tree on the given dataset.Again, recall that the **classification error** is defined as follows:$$\mbox{classification error} = \frac{\mbox{ mistakes}}{\mbox{ all data points}}$$The function called **evaluate_classification_error** takes in as input:1. `tree` (as described above)2. `data` (an SFrame)The function does not change because of adding data point weights.
###Code
def evaluate_classification_error(tree, data):
# Apply the classify(tree, x) to each row in your data
prediction = data.apply(lambda x: classify(tree, x))
# Once you've made the predictions, calculate the classification error
return (prediction != data[target]).sum() / float(len(data))
evaluate_classification_error(small_data_decision_tree, test_data)
###Output
_____no_output_____
###Markdown
Example: Training a weighted decision treeTo build intuition on how weighted data points affect the tree being built, consider the following:Suppose we only care about making good predictions for the **first 10 and last 10 items** in `train_data`, we assign weights:* 1 to the last 10 items * 1 to the first 10 items * and 0 to the rest. Let us fit a weighted decision tree with `max_depth = 2`.
###Code
# Assign weights
example_data_weights = graphlab.SArray([1.] * 10 + [0.]*(len(train_data) - 20) + [1.] * 10)
# Train a weighted decision tree model.
small_data_decision_tree_subset_20 = weighted_decision_tree_create(train_data, features, target,
example_data_weights, max_depth=2)
###Output
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature home_ownership.RENT. (20514, 16710)
--------------------------------------------------------------------
Subtree, depth = 2 (20514 data points).
Split on feature grade.F. (19613, 901)
--------------------------------------------------------------------
Subtree, depth = 3 (19613 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 3 (901 data points).
Stopping condition 1 reached.
--------------------------------------------------------------------
Subtree, depth = 2 (16710 data points).
Split on feature grade.D. (13315, 3395)
--------------------------------------------------------------------
Subtree, depth = 3 (13315 data points).
Stopping condition 1 reached.
--------------------------------------------------------------------
Subtree, depth = 3 (3395 data points).
Stopping condition 1 reached.
###Markdown
Now, we will compute the classification error on the `subset_20`, i.e. the subset of data points whose weight is 1 (namely the first and last 10 data points).
###Code
subset_20 = train_data.head(10).append(train_data.tail(10))
evaluate_classification_error(small_data_decision_tree_subset_20, subset_20)
###Output
_____no_output_____
###Markdown
Now, let us compare the classification error of the model `small_data_decision_tree_subset_20` on the entire training set `train_data`:
###Code
evaluate_classification_error(small_data_decision_tree_subset_20, train_data)
###Output
_____no_output_____
###Markdown
The model `small_data_decision_tree_subset_20` performs **a lot** better on `subset_20` than on `train_data`.So, what does this mean?* The points with higher weights are the ones that are more important during the training process of the weighted decision tree.* The points with zero weights are basically ignored during training.**Quiz Question**: Will you get the same model as `small_data_decision_tree_subset_20` if you trained a decision tree with only the 20 data points with non-zero weights from the set of points in `subset_20`? Yes, because the data points with zero weights are ignored during training, using only the 20 data points with non-zero weights would be equivalent to using the entire training data and setting the weight of all but 20 data points equal to 0.
###Code
decision_tree_subset_20 = weighted_decision_tree_create(train_data, features, target,
example_data_weights, max_depth=6)
evaluate_classification_error(decision_tree_subset_20, subset_20)
evaluate_classification_error(decision_tree_subset_20, train_data)
###Output
_____no_output_____
###Markdown
Implementing your own Adaboost (on decision stumps) Now that we have a weighted decision tree working, it takes only a bit of work to implement Adaboost. For the sake of simplicity, let us stick with **decision tree stumps** by training trees with **`max_depth=1`**. Recall from the lecture the procedure for Adaboost:1\. Start with unweighted data with $\alpha_j = 1$2\. For t = 1,...T: * Learn $f_t(x)$ with data weights $\alpha_j$ * Compute coefficient $\hat{w}_t$: $$\hat{w}_t = \frac{1}{2}\ln{\left(\frac{1- \mbox{E}(\mathbf{\alpha}, \mathbf{\hat{y}})}{\mbox{E}(\mathbf{\alpha}, \mathbf{\hat{y}})}\right)}$$ * Re-compute weights $\alpha_j$: $$\alpha_j \gets \begin{cases} \alpha_j \exp{(-\hat{w}_t)} & \text{ if }f_t(x_j) = y_j\\ \alpha_j \exp{(\hat{w}_t)} & \text{ if }f_t(x_j) \neq y_j \end{cases}$$ * Normalize weights $\alpha_j$: $$\alpha_j \gets \frac{\alpha_j}{\sum_{i=1}^{N}{\alpha_i}} $$ Complete the skeleton for the following code to implement **adaboost_with_tree_stumps**. Fill in the places with `YOUR CODE HERE`.
###Code
from math import log
from math import exp
def adaboost_with_tree_stumps(data, features, target, num_tree_stumps):
# start with unweighted data
alpha = graphlab.SArray([1.]*len(data))
weights = []
tree_stumps = []
target_values = data[target]
for t in xrange(num_tree_stumps):
print '====================================================='
print 'Adaboost Iteration %d' % t
print '====================================================='
# Learn a weighted decision tree stump. Use max_depth=1
tree_stump = weighted_decision_tree_create(data, features, target, data_weights=alpha, max_depth=1)
tree_stumps.append(tree_stump)
# Make predictions
predictions = data.apply(lambda x: classify(tree_stump, x))
# Produce a Boolean array indicating whether
# each data point was correctly classified
is_correct = predictions == target_values
is_wrong = predictions != target_values
# Compute weighted error
# YOUR CODE HERE
weighted_error = sum(alpha * is_wrong) / (sum(alpha * is_correct) + sum(alpha *is_wrong))
# Compute model coefficient using weighted error
# YOUR CODE HERE
weight = 0.5 * log( (1 - weighted_error) / float(weighted_error))
weights.append(weight)
# Adjust weights on data point
adjustment = is_correct.apply(lambda is_correct : exp(-weight) if is_correct else exp(weight))
# Scale alpha by multiplying by adjustment
# Then normalize data points weights
## YOUR CODE HERE
alpha = alpha * adjustment
alpha = alpha / sum(alpha)
return weights, tree_stumps
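# A hedged sanity check of the coefficient formula w_hat = 0.5 * ln((1 - E) / E),
# using a hypothetical weighted error of 0.3 (not a value computed from the data
# above). A stump that beats random guessing (E < 0.5) gets a positive coefficient.
example_error = 0.3
print 0.5 * log((1 - example_error) / example_error)  # roughly 0.42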
###Output
_____no_output_____
###Markdown
Checking your Adaboost codeTrain an ensemble of **two** tree stumps and see which features those stumps split on. We will run the algorithm with the following parameters:* `train_data`* `features`* `target`* `num_tree_stumps = 2`
###Code
stump_weights, tree_stumps = adaboost_with_tree_stumps(train_data, features, target, num_tree_stumps=2)
def print_stump(tree):
split_name = tree['splitting_feature'] # split_name is something like 'term. 36 months'
if split_name is None:
print "(leaf, label: %s)" % tree['prediction']
return None
split_feature, split_value = split_name.split('.')
print ' root'
print ' |---------------|----------------|'
print ' | |'
print ' | |'
print ' | |'
print ' [{0} == 0]{1}[{0} == 1] '.format(split_name, ' '*(27-len(split_name)))
print ' | |'
print ' | |'
print ' | |'
print ' (%s) (%s)' \
% (('leaf, label: ' + str(tree['left']['prediction']) if tree['left']['is_leaf'] else 'subtree'),
('leaf, label: ' + str(tree['right']['prediction']) if tree['right']['is_leaf'] else 'subtree'))
###Output
_____no_output_____
###Markdown
Here is what the first stump looks like:
###Code
print_stump(tree_stumps[0])
###Output
root
|---------------|----------------|
| |
| |
| |
[term. 36 months == 0] [term. 36 months == 1]
| |
| |
| |
(leaf, label: -1) (leaf, label: 1)
###Markdown
Here is what the next stump looks like:
###Code
print_stump(tree_stumps[1])
print stump_weights
###Output
[0.15802933659263743, 0.17682363293627254]
###Markdown
If your Adaboost is correctly implemented, the following things should be true:* `tree_stumps[0]` should split on **term. 36 months** with the prediction -1 on the left and +1 on the right.* `tree_stumps[1]` should split on **grade.A** with the prediction -1 on the left and +1 on the right.* Weights should be approximately `[0.158, 0.177]` **Reminders**- Stump weights ($\mathbf{\hat{w}}$) and data point weights ($\mathbf{\alpha}$) are two different concepts.- Stump weights ($\mathbf{\hat{w}}$) tell you how important each stump is while making predictions with the entire boosted ensemble.- Data point weights ($\mathbf{\alpha}$) tell you how important each data point is while training a decision stump. Training a boosted ensemble of 10 stumps Let us train an ensemble of 10 decision tree stumps with Adaboost. We run the **adaboost_with_tree_stumps** function with the following parameters:* `train_data`* `features`* `target`* `num_tree_stumps = 10`
###Code
stump_weights, tree_stumps = adaboost_with_tree_stumps(train_data, features,
target, num_tree_stumps=10)
###Output
=====================================================
Adaboost Iteration 0
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature term. 36 months. (9223, 28001)
--------------------------------------------------------------------
Subtree, depth = 2 (9223 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (28001 data points).
Reached maximum depth. Stopping for now.
0.421636578551
=====================================================
Adaboost Iteration 1
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.A. (32094, 5130)
--------------------------------------------------------------------
Subtree, depth = 2 (32094 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (5130 data points).
Reached maximum depth. Stopping for now.
0.412498248915
=====================================================
Adaboost Iteration 2
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.D. (30465, 6759)
--------------------------------------------------------------------
Subtree, depth = 2 (30465 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (6759 data points).
Reached maximum depth. Stopping for now.
0.453574664308
=====================================================
Adaboost Iteration 3
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature home_ownership.MORTGAGE. (19846, 17378)
--------------------------------------------------------------------
Subtree, depth = 2 (19846 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (17378 data points).
Reached maximum depth. Stopping for now.
0.463619975981
=====================================================
Adaboost Iteration 4
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.B. (26858, 10366)
--------------------------------------------------------------------
Subtree, depth = 2 (26858 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (10366 data points).
Reached maximum depth. Stopping for now.
0.466518644019
=====================================================
Adaboost Iteration 5
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.E. (33815, 3409)
--------------------------------------------------------------------
Subtree, depth = 2 (33815 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (3409 data points).
Reached maximum depth. Stopping for now.
0.467760207218
=====================================================
Adaboost Iteration 6
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.A. (32094, 5130)
--------------------------------------------------------------------
Subtree, depth = 2 (32094 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (5130 data points).
Reached maximum depth. Stopping for now.
0.472746758716
=====================================================
Adaboost Iteration 7
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.F. (35512, 1712)
--------------------------------------------------------------------
Subtree, depth = 2 (35512 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (1712 data points).
Reached maximum depth. Stopping for now.
0.478258250407
=====================================================
Adaboost Iteration 8
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.A. (32094, 5130)
--------------------------------------------------------------------
Subtree, depth = 2 (32094 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (5130 data points).
Reached maximum depth. Stopping for now.
0.485509702974
=====================================================
Adaboost Iteration 9
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature emp_length.n/a. (35781, 1443)
--------------------------------------------------------------------
Subtree, depth = 2 (35781 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (1443 data points).
Reached maximum depth. Stopping for now.
0.487021661048
###Markdown
Making predictionsRecall from the lecture that in order to make predictions, we use the following formula:$$\hat{y} = sign\left(\sum_{t=1}^T \hat{w}_t f_t(x)\right)$$We need to do the following things:- Compute the predictions $f_t(x)$ using the $t$-th decision tree- Compute $\hat{w}_t f_t(x)$ by multiplying the `stump_weights` with the predictions $f_t(x)$ from the decision trees- Sum the weighted predictions over each stump in the ensemble.Complete the following skeleton for making predictions:
###Code
def predict_adaboost(stump_weights, tree_stumps, data):
scores = graphlab.SArray([0.]*len(data))
for i, tree_stump in enumerate(tree_stumps):
predictions = data.apply(lambda x: classify(tree_stump, x))
# Accumulate predictions on scores array
# YOUR CODE HERE
scores += predictions * stump_weights[i]
return scores.apply(lambda score : +1 if score > 0 else -1)
predictions = predict_adaboost(stump_weights, tree_stumps, test_data)
accuracy = graphlab.evaluation.accuracy(test_data[target], predictions)
print 'Accuracy of 10-component ensemble = %s' % accuracy
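# Hypothetical hand-check of the prediction rule y_hat = sign(sum_t w_hat_t * f_t(x)):
# with two stumps voting +1 and -1 and weights of roughly 0.158 and 0.177, the
# weighted score is negative, so the ensemble would predict -1 (illustration only).
toy_score = 0.158 * (+1) + 0.177 * (-1)
toy_prediction = +1 if toy_score > 0 else -1
print toy_prediction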
###Output
Accuracy of 10-component ensemble = 0.620314519604
###Markdown
Now, let us take a quick look what the `stump_weights` look like at the end of each iteration of the 10-stump ensemble:
###Code
stump_weights
###Output
_____no_output_____
###Markdown
**Quiz Question:** Are the weights monotonically decreasing, monotonically increasing, or neither?**Reminder**: Stump weights ($\mathbf{\hat{w}}$) tell you how important each stump is while making predictions with the entire boosted ensemble. The weights are neither all increasing nor all decreasing.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(range(10), stump_weights);
###Output
_____no_output_____
###Markdown
Performance plotsIn this section, we will try to reproduce some of the performance plots discussed in the lecture. How does accuracy change with adding stumps to the ensemble?We will now train an ensemble with:* `train_data`* `features`* `target`* `num_tree_stumps = 30`Once we are done with this, we will then do the following:* Compute the classification error at the end of each iteration.* Plot a curve of classification error vs iteration.First, let's train the model.
###Code
# this may take a while...
stump_weights, tree_stumps = adaboost_with_tree_stumps(train_data,
features, target, num_tree_stumps=30)
###Output
=====================================================
Adaboost Iteration 0
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature term. 36 months. (9223, 28001)
--------------------------------------------------------------------
Subtree, depth = 2 (9223 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (28001 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 1
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.A. (32094, 5130)
--------------------------------------------------------------------
Subtree, depth = 2 (32094 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (5130 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 2
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.D. (30465, 6759)
--------------------------------------------------------------------
Subtree, depth = 2 (30465 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (6759 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 3
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature home_ownership.MORTGAGE. (19846, 17378)
--------------------------------------------------------------------
Subtree, depth = 2 (19846 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (17378 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 4
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.B. (26858, 10366)
--------------------------------------------------------------------
Subtree, depth = 2 (26858 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (10366 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 5
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.E. (33815, 3409)
--------------------------------------------------------------------
Subtree, depth = 2 (33815 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (3409 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 6
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.A. (32094, 5130)
--------------------------------------------------------------------
Subtree, depth = 2 (32094 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (5130 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 7
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.F. (35512, 1712)
--------------------------------------------------------------------
Subtree, depth = 2 (35512 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (1712 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 8
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.A. (32094, 5130)
--------------------------------------------------------------------
Subtree, depth = 2 (32094 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (5130 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 9
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature emp_length.n/a. (35781, 1443)
--------------------------------------------------------------------
Subtree, depth = 2 (35781 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (1443 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 10
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.D. (30465, 6759)
--------------------------------------------------------------------
Subtree, depth = 2 (30465 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (6759 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 11
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.B. (26858, 10366)
--------------------------------------------------------------------
Subtree, depth = 2 (26858 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (10366 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 12
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature emp_length.n/a. (35781, 1443)
--------------------------------------------------------------------
Subtree, depth = 2 (35781 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (1443 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 13
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature emp_length.4 years. (34593, 2631)
--------------------------------------------------------------------
Subtree, depth = 2 (34593 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (2631 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 14
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature emp_length.n/a. (35781, 1443)
--------------------------------------------------------------------
Subtree, depth = 2 (35781 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (1443 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 15
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.C. (27812, 9412)
--------------------------------------------------------------------
Subtree, depth = 2 (27812 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (9412 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 16
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.A. (32094, 5130)
--------------------------------------------------------------------
Subtree, depth = 2 (32094 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (5130 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 17
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.F. (35512, 1712)
--------------------------------------------------------------------
Subtree, depth = 2 (35512 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (1712 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 18
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature term. 36 months. (9223, 28001)
--------------------------------------------------------------------
Subtree, depth = 2 (9223 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (28001 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 19
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.B. (26858, 10366)
--------------------------------------------------------------------
Subtree, depth = 2 (26858 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (10366 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 20
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature emp_length.n/a. (35781, 1443)
--------------------------------------------------------------------
Subtree, depth = 2 (35781 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (1443 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 21
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.D. (30465, 6759)
--------------------------------------------------------------------
Subtree, depth = 2 (30465 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (6759 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 22
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.F. (35512, 1712)
--------------------------------------------------------------------
Subtree, depth = 2 (35512 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (1712 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 23
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.A. (32094, 5130)
--------------------------------------------------------------------
Subtree, depth = 2 (32094 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (5130 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 24
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature emp_length.n/a. (35781, 1443)
--------------------------------------------------------------------
Subtree, depth = 2 (35781 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (1443 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 25
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature emp_length.2 years. (33652, 3572)
--------------------------------------------------------------------
Subtree, depth = 2 (33652 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (3572 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 26
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.F. (35512, 1712)
--------------------------------------------------------------------
Subtree, depth = 2 (35512 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (1712 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 27
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature home_ownership.OWN. (34149, 3075)
--------------------------------------------------------------------
Subtree, depth = 2 (34149 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (3075 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 28
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature emp_length.n/a. (35781, 1443)
--------------------------------------------------------------------
Subtree, depth = 2 (35781 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (1443 data points).
Reached maximum depth. Stopping for now.
=====================================================
Adaboost Iteration 29
=====================================================
--------------------------------------------------------------------
Subtree, depth = 1 (37224 data points).
Split on feature grade.C. (27812, 9412)
--------------------------------------------------------------------
Subtree, depth = 2 (27812 data points).
Reached maximum depth. Stopping for now.
--------------------------------------------------------------------
Subtree, depth = 2 (9412 data points).
Reached maximum depth. Stopping for now.
###Markdown
Computing training error at the end of each iterationNow, we will compute the classification error on the **train_data** and see how it is reduced as trees are added.
###Code
error_all = []
for n in xrange(1, 31):
predictions = predict_adaboost(stump_weights[:n], tree_stumps[:n], train_data)
error = 1.0 - graphlab.evaluation.accuracy(train_data[target], predictions)
error_all.append(error)
print "Iteration %s, training error = %s" % (n, error_all[n-1])
###Output
Iteration 1, training error = 0.421636578551
Iteration 2, training error = 0.433430045132
Iteration 3, training error = 0.400037610144
Iteration 4, training error = 0.400037610144
Iteration 5, training error = 0.384724908661
Iteration 6, training error = 0.384617451107
Iteration 7, training error = 0.382763808296
Iteration 8, training error = 0.384617451107
Iteration 9, training error = 0.382763808296
Iteration 10, training error = 0.384483129164
Iteration 11, training error = 0.382736943907
Iteration 12, training error = 0.381447453256
Iteration 13, training error = 0.381528046422
Iteration 14, training error = 0.380560928433
Iteration 15, training error = 0.380507199656
Iteration 16, training error = 0.378223726628
Iteration 17, training error = 0.378277455405
Iteration 18, training error = 0.378411777348
Iteration 19, training error = 0.378062540297
Iteration 20, training error = 0.378761014399
Iteration 21, training error = 0.379566946056
Iteration 22, training error = 0.378895336342
Iteration 23, training error = 0.378895336342
Iteration 24, training error = 0.378761014399
Iteration 25, training error = 0.378895336342
Iteration 26, training error = 0.378975929508
Iteration 27, training error = 0.379110251451
Iteration 28, training error = 0.378922200731
Iteration 29, training error = 0.379029658285
Iteration 30, training error = 0.378734150011
###Markdown
Visualizing training error vs number of iterationsWe have provided you with a simple code snippet that plots classification error with the number of iterations.
###Code
plt.rcParams['figure.figsize'] = 7, 5
plt.plot(range(1,31), error_all, '-', linewidth=4.0, label='Training error')
plt.title('Performance of Adaboost ensemble')
plt.xlabel('# of iterations')
plt.ylabel('Classification error')
plt.legend(loc='best', prop={'size':15})
plt.rcParams.update({'font.size': 16})
###Output
_____no_output_____
###Markdown
**Quiz Question**: Which of the following best describes a **general trend in accuracy** as we add more and more components? Answer based on the 30 components learned so far.1. Training error goes down monotonically, i.e. the training error reduces with each iteration but never increases.2. Training error goes down in general, with some ups and downs in the middle.3. Training error goes up in general, with some ups and downs in the middle.4. Training error goes down in the beginning, achieves the best error, and then goes up sharply.5. None of the above Evaluation on the test dataPerforming well on the training data is cheating, so let's make sure it works on the `test_data` as well. Here, we will compute the classification error on the `test_data` at the end of each iteration. The training error goes down in general with some ups and downs.
###Code
test_error_all = []
for n in xrange(1, 31):
predictions = predict_adaboost(stump_weights[:n], tree_stumps[:n], test_data)
error = 1.0 - graphlab.evaluation.accuracy(test_data[target], predictions)
test_error_all.append(error)
print "Iteration %s, test error = %s" % (n, test_error_all[n-1])
###Output
Iteration 1, test error = 0.42330891857
Iteration 2, test error = 0.428479103835
Iteration 3, test error = 0.398104265403
Iteration 4, test error = 0.398104265403
Iteration 5, test error = 0.379900904782
Iteration 6, test error = 0.380008616975
Iteration 7, test error = 0.379254631624
Iteration 8, test error = 0.380008616975
Iteration 9, test error = 0.379254631624
Iteration 10, test error = 0.379685480396
Iteration 11, test error = 0.379254631624
Iteration 12, test error = 0.377962085308
Iteration 13, test error = 0.379254631624
Iteration 14, test error = 0.377854373115
Iteration 15, test error = 0.378500646273
Iteration 16, test error = 0.377854373115
Iteration 17, test error = 0.377962085308
Iteration 18, test error = 0.377854373115
Iteration 19, test error = 0.378177509694
Iteration 20, test error = 0.376884963378
Iteration 21, test error = 0.377531236536
Iteration 22, test error = 0.376777251185
Iteration 23, test error = 0.376777251185
Iteration 24, test error = 0.376884963378
Iteration 25, test error = 0.376777251185
Iteration 26, test error = 0.376561826799
Iteration 27, test error = 0.376454114606
Iteration 28, test error = 0.376992675571
Iteration 29, test error = 0.376777251185
Iteration 30, test error = 0.376777251185
###Markdown
Visualize both the training and test errorsNow, let us plot the training & test error with the number of iterations.
###Code
plt.rcParams['figure.figsize'] = 7, 5
plt.plot(range(1,31), error_all, '-', linewidth=4.0, label='Training error')
plt.plot(range(1,31), test_error_all, '-', linewidth=4.0, label='Test error')
plt.title('Performance of Adaboost ensemble')
plt.xlabel('# of iterations')
plt.ylabel('Classification error')
plt.rcParams.update({'font.size': 16})
plt.legend(loc='best', prop={'size':15})
plt.tight_layout()
###Output
_____no_output_____ |
06 - Time series analysis (Pandas).ipynb | ###Markdown
Time series analysis (Pandas) Nikolay [email protected] This is part of [**Python for Geosciences**](https://github.com/koldunovn/python_for_geosciences) notes. ================ Here I am going to show just some basic [pandas](http://pandas.pydata.org/) stuff for time series analysis, as I think it's the most interesting topic for Earth scientists. If you find this small tutorial useful, I encourage you to watch [this video](http://pyvideo.org/video/1198/time-series-data-analysis-with-pandas), where Wes McKinney gives an extensive introduction to time series data analysis with pandas. On the official website you can find an explanation of what problems pandas solves in general, but I can tell you what problem pandas solves for me. It makes analysis and visualisation of 1D data, especially time series, MUCH faster. Before pandas, working with time series in Python was a pain for me; now it's fun. Ease of use stimulates in-depth exploration of the data: why wouldn't you do some additional analysis if it's just one line of code? I hope you will also find this great tool helpful and useful. So, let's begin. As an example we are going to use time series of [Arctic Oscillation (AO)](http://en.wikipedia.org/wiki/Arctic_oscillation) and [North Atlantic Oscillation (NAO)](http://en.wikipedia.org/wiki/North_Atlantic_oscillation) data sets. Module import First we have to import necessary modules:
###Code
import pandas as pd
import numpy as np
pd.set_option('max_rows',15) # this limits the maximum number of rows displayed
###Output
_____no_output_____
###Markdown
And "switch on" inline graphic for the notebook:
###Code
%matplotlib inline
###Output
_____no_output_____
###Markdown
Pandas is developing very fast, and while we are going to use only basic functionality, some details may still change in newer versions.
###Code
pd.__version__
###Output
_____no_output_____
###Markdown
Loading data Now that we are done with preparations, let's get some data. If you work on Windows, download the monthly AO data [from here](http://www.cpc.ncep.noaa.gov/products/precip/CWlink/daily_ao_index/monthly.ao.index.b50.current.ascii). If you are on a *nix machine, you can do it directly from the IPython notebook using a system call to the wget command:
###Code
!wget http://www.cpc.ncep.noaa.gov/products/precip/CWlink/daily_ao_index/monthly.ao.index.b50.current.ascii
###Output
_____no_output_____
###Markdown
Pandas has very good IO capabilities, but we are not going to use them in this tutorial in order to keep things simple. For now we open the file simply with numpy's loadtxt:
###Code
ao = np.loadtxt('monthly.ao.index.b50.current.ascii')
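# A sketch of an alternative load using pandas' own IO (assumes the file is
# whitespace-separated with no header); not required for the rest of the tutorial.
ao_df = pd.read_csv('monthly.ao.index.b50.current.ascii',
                    delim_whitespace=True, header=None,
                    names=['year', 'month', 'value'])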
###Output
_____no_output_____
###Markdown
Every line in the file consist of three elements: year, month, value:
###Code
ao[0:2]
###Output
_____no_output_____
###Markdown
And here is the shape of our array (note that shape of the file might differ in your case, since data updated monthly):
###Code
ao.shape
###Output
_____no_output_____
###Markdown
Time Series We would like to convert these data into a time series that can be manipulated naturally and easily. The first step is to create the range of dates for our time series. From the file it is clear that the record starts in January 1950 and ends in September 2013 (at the time I am writing this, of course). **You have to adjust the last date according to the values in your file!** The frequency of the data is one month (freq='M').
###Code
dates = pd.date_range('1950-01', '2014-01', freq='M')
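# A sketch of an alternative that avoids hard-coding the end date: derive the
# number of periods from the loaded array itself (assumes `ao` holds full months).
dates_alt = pd.date_range('1950-01', periods=ao.shape[0], freq='M')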
###Output
_____no_output_____
###Markdown
As you see, the syntax is quite simple, and this is one of the reasons why I love Pandas so much :) Another thing to mention is that we specify one month past the last month we want, because the interval is open on the right side. You can check if the range of dates is properly generated:
###Code
dates
dates.shape
###Output
_____no_output_____
###Markdown
Now we are ready to create our first time series. Dates from the *dates* variable will be our index, and AO values will be our, hm... values. We are going to use data only until the end of 2013:
###Code
AO = pd.Series(ao[:768,2], index=dates)
AO
###Output
_____no_output_____
###Markdown
Now we can plot complete time series:
###Code
AO.plot()
###Output
_____no_output_____
###Markdown
or its part:
###Code
AO['1980':'1990'].plot()
###Output
_____no_output_____
###Markdown
or even smaller part:
###Code
AO['1980-05':'1981-03'].plot()
###Output
_____no_output_____
###Markdown
Referencing time periods is done in a very natural way. You can, of course, also get individual values. By number:
###Code
AO[120]
###Output
_____no_output_____
###Markdown
or by index (date in our case):
###Code
AO['1960-01']
###Output
_____no_output_____
###Markdown
And what if we choose only one year?
###Code
AO['1960']
###Output
_____no_output_____
###Markdown
Isn't that great? :) One bonus example :)
###Code
AO[AO > 0]
###Output
_____no_output_____
###Markdown
Data Frame Now let's make life a bit more interesting and download more data. This will be the NAO time series (Windows users can get it [here](http://www.cpc.ncep.noaa.gov/products/precip/CWlink/pna/norm.nao.monthly.b5001.current.ascii)).
###Code
!wget http://www.cpc.ncep.noaa.gov/products/precip/CWlink/pna/norm.nao.monthly.b5001.current.ascii
###Output
_____no_output_____
###Markdown
Create Series the same way as we did for AO:
###Code
nao = np.loadtxt('norm.nao.monthly.b5001.current.ascii')
dates_nao = pd.date_range('1950-01', '2014-01', freq='M')
NAO = pd.Series(nao[:768,2], index=dates_nao)
###Output
_____no_output_____
###Markdown
Time period is the same:
###Code
NAO.index
###Output
_____no_output_____
###Markdown
Now we create a Data Frame that will contain both AO and NAO data. It is sort of like an Excel table, where the first row contains headers for the columns and the first column is an index:
###Code
aonao = pd.DataFrame({'AO' : AO, 'NAO' : NAO})
###Output
_____no_output_____
###Markdown
One can plot the data straight away:
###Code
aonao.plot()
###Output
_____no_output_____
###Markdown
Or have a look at the first several rows:
###Code
aonao.head()
###Output
_____no_output_____
###Markdown
We can reference each column by its name:
###Code
aonao['NAO']
###Output
_____no_output_____
###Markdown
or as an attribute of the Data Frame variable (if the column name is a valid Python name):
###Code
aonao.NAO
###Output
_____no_output_____
###Markdown
We can simply add column to the Data Frame:
###Code
aonao['Diff'] = aonao['AO'] - aonao['NAO']
aonao.head()
###Output
_____no_output_____
###Markdown
And delete it:
###Code
del aonao['Diff']
aonao.tail()
###Output
_____no_output_____
###Markdown
Slicing will also work:
###Code
aonao['1981-01':'1981-03']
###Output
_____no_output_____
###Markdown
even in some crazy combinations:
###Code
import datetime
aonao.ix[(aonao.AO > 0) & (aonao.NAO < 0)
& (aonao.index > datetime.datetime(1980,1,1))
& (aonao.index < datetime.datetime(1989,1,1)),
'NAO'].plot(kind='barh')
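# Note: .ix was removed in newer pandas versions; a roughly equivalent selection
# using .loc with a boolean mask (it reproduces the same plot) would be:
mask = ((aonao.AO > 0) & (aonao.NAO < 0)
        & (aonao.index > datetime.datetime(1980,1,1))
        & (aonao.index < datetime.datetime(1989,1,1)))
aonao.loc[mask, 'NAO'].plot(kind='barh')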
###Output
_____no_output_____
###Markdown
Here we use the special [advanced indexing attribute .ix](http://pandas.pydata.org/pandas-docs/stable/indexing.htmladvanced-indexing-with-labels). We choose all NAO values in the 1980s for months where AO is positive and NAO is negative, and then plot them. Magic :) Statistics Back to simple stuff. We can obtain statistical information over the elements of the Data Frame. The default is column-wise:
###Code
aonao.mean()
aonao.max()
aonao.min()
###Output
_____no_output_____
###Markdown
You can also do it row-wise:
###Code
aonao.mean(1)
###Output
_____no_output_____
###Markdown
Or get everything at once:
###Code
aonao.describe()
###Output
_____no_output_____
###Markdown
By the way getting correlation coefficients for members of the Data Frame is as simple as:
###Code
aonao.corr()
###Output
_____no_output_____
###Markdown
Resampling Pandas provides an easy way to resample data to a different time frequency. The two main parameters for resampling are the time period you resample to and the method that you use. By default the method is mean. The following example calculates the annual ('A') mean:
###Code
AO_mm = AO.resample("A")
AO_mm.plot()
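# Note: in newer pandas the `how=` argument was removed and aggregation is called
# explicitly, e.g. AO.resample("A").mean() (and very recent versions spell the
# annual alias "YE"). Kept as a comment here since this notebook targets the older API.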
###Output
_____no_output_____
###Markdown
median:
###Code
AO_mm = AO.resample("A", how='median')
AO_mm.plot()
###Output
_____no_output_____
###Markdown
You can use your own methods for resampling, for example np.max (in this case we change the resampling frequency to 3 years):
###Code
AO_mm = AO.resample("3A", how=np.max)
AO_mm.plot()
###Output
_____no_output_____
###Markdown
You can specify several functions at once as a list:
###Code
AO_mm = AO.resample("A", how=['mean', np.min, np.max])
#AO_mm['1900':'2020'].plot(subplots=True)
AO_mm['1900':'2020'].plot()
###Output
_____no_output_____ |
Assignments/assignment5/stxx-variant-tgt-wordembed/Untitled.ipynb | ###Markdown
Let's do a test
###Code
e_char = 50
char_pad_token_idx = char2id['<pad>']
charEmbedding = nn.Embedding(95, e_char, char_pad_token_idx)
output.shape  # torch.Size([8, 4, 12]): sent_len, batch, m_word
###Output
_____no_output_____
###Markdown
Getting output, now CNN
###Code
x = charEmbedding(output)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2019-20: Homework 5
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
# Remember to delete the above 'pass' after your implementation
### YOUR CODE HERE for part 1g
def __init__(self, e_char, filters, padding=1, kernel_size=5):
super(CNN, self).__init__()
self.e_char = e_char
        # m_word is not a constructor argument; it is inferred from the input shape in forward()
self.filters = filters
self.padding = padding
self.k = kernel_size
self.conv1d = None
self.maxpool = None
self.conv1d = nn.Conv1d(
in_channels=self.e_char,
out_channels=self.filters,
kernel_size=self.k,
stride=1,
padding=self.padding,
padding_mode='zeros',
bias=True
)
def forward(self, xemb: torch.Tensor):
        # xemb: assumed here to have shape (sent_len, batch, m_word, e_char)
        # x_reshaped: (sent_len, batch, e_char, m_word); note that nn.Conv1d
        # expects 3-D input (N, C, L), so the leading dims would still need flattening
        # W: shape (filters, e_char, k)
m_word = xemb.shape[-2]
x_reshaped = xemb.permute(0, 1, 3, 2)
x_conv = self.conv1d(x_reshaped)
x_conv = F.relu(x_conv)
maxpool = nn.MaxPool1d(kernel_size=m_word + 2 * self.padding - self.k + 1)
x_conv_out = maxpool(x_conv)
return x_conv_out
### END YOUR CODE
cnn = CNN(e_char, filters=10)
x.shape[-2]
y = x.permute(0, 1, 3, 2)
y.shape
xx = torch.randn(2,4,6)
xx.shape
xx = torch.randn(50, 30, 21)
xx.shape
ccc = nn.Conv1d(
in_channels=30,
out_channels=3,
kernel_size=5,
stride=1,
padding=1,
padding_mode='zeros',
bias=True
)
yy = torch.randn(50,3,1)
yy.shape
w_proj = nn.Linear(3, 3, bias=True)
w_proj(yy.squeeze(-1))  # nn.Linear acts on the last dim, so drop the trailing size-1 dimension
maxpool = nn.MaxPool1d(5)
maxpool(xx)
def pad_sents_char(sents, char_pad_token):
""" Pad list of sentences according to the longest sentence in the batch and longest words in all sentences.
@param sents (list[list[list[int]]]): list of sentences, result of `words2charindices()`
from `vocab.py`
@param char_pad_token (int): index of the character-padding token
@returns sents_padded (list[list[list[int]]]): list of sentences where sentences/words shorter
than the max length sentence/word are padded out with the appropriate pad token, such that
each sentence in the batch now has same number of words and each word has an equal
number of characters
Output shape: (batch_size, max_sentence_length, max_word_length)
"""
sents_padded = []
max_word_length = max(len(w) for s in sents for w in s )
max_sent_len = max(len(s) for s in sents)
batch_size = len(sents)
for k in range(batch_size):
sentence = sents[k]
sent_padded = []
for w in sentence:
data = [c for c in w] + [char_pad_token for _ in range(max_word_length-len(w))]
if len(data) > max_word_length:
data = data[:max_word_length]
sent_padded.append(data)
sent_padded = sent_padded[:max_sent_len] + [[char_pad_token]*max_word_length] * max(0, max_sent_len - len(sent_padded))
sents_padded.append(sent_padded)
return sents_padded
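# Hypothetical toy check of pad_sents_char (the character "indices" below are just
# made-up ints, with 0 standing in for the pad token):
toy_sents = [[[1, 2], [3]], [[4, 5, 6]]]
print(pad_sents_char(toy_sents, char_pad_token=0))
# every word should be padded to length 3 and both sentences to 2 words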
# xx = pad_sents_char(src_sents, '<char>')
# xx
char_list = list(
"""ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]""")
char2id = dict() # Converts characters to integers
char2id['∏'] = 0 # <pad> token
char2id['{'] = 1 # start of word token
char2id['}'] = 2 # end of word token
char2id['Û'] = 3 # <unk> token
for i, c in enumerate(char_list):
char2id[c] = len(char2id)
char_pad = char2id['∏']
char_unk = char2id['Û']
start_of_word = char2id["{"]
end_of_word = char2id["}"]
assert start_of_word + 1 == end_of_word
sentences = [['Human', ':', 'What', 'do', 'we', 'want', '?'], ['Computer', ':', 'Natural', 'language', 'processing', '!'], ['Human', ':', 'When', 'do', 'we', 'want', 'it', '?'], ['Computer', ':', 'When', 'do', 'we', 'want', 'what', '?']]
sentence_length = 8
BATCH_SIZE = 4
word_length = 12
vocabEntry = VocabEntry()
output = vocabEntry.to_input_tensor_char(sentences, 'cpu')
from collections import Counter
from docopt import docopt
from itertools import chain
import json
import torch
from typing import List
from utils import read_corpus, pad_sents, pad_sents_char
class VocabEntry(object):
""" Vocabulary Entry, i.e. structure containing either
src or tgt language terms.
"""
def __init__(self, word2id=None):
""" Init VocabEntry Instance.
@param word2id (dict): dictionary mapping words 2 indices
"""
if word2id:
self.word2id = word2id
else:
self.word2id = dict()
self.word2id['<pad>'] = 0 # Pad Token
self.word2id['<s>'] = 1 # Start Token
self.word2id['</s>'] = 2 # End Token
self.word2id['<unk>'] = 3 # Unknown Token
self.unk_id = self.word2id['<unk>']
self.id2word = {v: k for k, v in self.word2id.items()}
## Additions to the A4 code:
self.char_list = list(
"""ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]""")
self.char2id = dict() # Converts characters to integers
self.char2id['∏'] = 0 # <pad> token
self.char2id['{'] = 1 # start of word token
self.char2id['}'] = 2 # end of word token
self.char2id['Û'] = 3 # <unk> token
for i, c in enumerate(self.char_list):
self.char2id[c] = len(self.char2id)
self.char_pad = self.char2id['∏']
self.char_unk = self.char2id['Û']
self.start_of_word = self.char2id["{"]
self.end_of_word = self.char2id["}"]
assert self.start_of_word + 1 == self.end_of_word
self.id2char = {v: k for k, v in self.char2id.items()} # Converts integers to characters
## End additions to the A4 code
def __getitem__(self, word):
""" Retrieve word's index. Return the index for the unk
token if the word is out of vocabulary.
@param word (str): word to look up.
@returns index (int): index of word
"""
return self.word2id.get(word, self.unk_id)
def __contains__(self, word):
""" Check if word is captured by VocabEntry.
@param word (str): word to look up
@returns contains (bool): whether word is contained
"""
return word in self.word2id
def __setitem__(self, key, value):
""" Raise error, if one tries to edit the VocabEntry.
"""
raise ValueError('vocabulary is readonly')
def __len__(self):
""" Compute number of words in VocabEntry.
@returns len (int): number of words in VocabEntry
"""
return len(self.word2id)
def __repr__(self):
""" Representation of VocabEntry to be used
when printing the object.
"""
return 'Vocabulary[size=%d]' % len(self)
def id2word(self, wid):
""" Return mapping of index to word.
@param wid (int): word index
@returns word (str): word corresponding to index
"""
return self.id2word[wid]
def add(self, word):
""" Add word to VocabEntry, if it is previously unseen.
@param word (str): word to add to VocabEntry
@return index (int): index that the word has been assigned
"""
if word not in self:
wid = self.word2id[word] = len(self)
self.id2word[wid] = word
return wid
else:
return self[word]
def words2charindices(self, sents):
""" Convert list of sentences of words into list of list of list of character indices.
@param sents (list[list[str]]): sentence(s) in words
@return word_ids (list[list[list[int]]]): sentence(s) in indices
"""
return [[[self.char2id.get(c, self.char_unk) for c in ("{" + w + "}")] for w in s] for s in sents]
def words2indices(self, sents):
""" Convert list of sentences of words into list of list of indices.
@param sents (list[list[str]]): sentence(s) in words
@return word_ids (list[list[int]]): sentence(s) in indices
"""
return [[self[w] for w in s] for s in sents]
def indices2words(self, word_ids):
""" Convert list of indices into words.
@param word_ids (list[int]): list of word ids
@return sents (list[str]): list of words
"""
return [self.id2word[w_id] for w_id in word_ids]
def to_input_tensor_char(self, sents: List[List[str]], device: torch.device) -> torch.Tensor:
""" Convert list of sentences (words) into tensor with necessary padding for
shorter sentences.
@param sents (List[List[str]]): list of sentences (words)
@param device: device on which to load the tensor, i.e. CPU or GPU
@returns sents_var: tensor of (max_sentence_length, batch_size, max_word_length)
"""
### YOUR CODE HERE for part 1e
### TODO:
### - Use `words2charindices()` from this file, which converts each character to its corresponding index in the
### character-vocabulary.
### - Use `pad_sents_char()` from utils.py, which pads all words to max_word_length of all words in the batch,
### and pads all sentences to max length of all sentences in the batch. Read __init__ to see how to get
### index of character-padding token
### - Connect these two parts to convert the resulting padded sentences to a torch tensor.
### HINT:
### - You may find .contiguous() useful after reshaping. Check the following links for more details:
### https://pytorch.org/docs/stable/tensors.html#torch.Tensor.contiguous
### https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view
char_sents = self.words2charindices(sents)
res = torch.tensor(pad_sents_char(char_sents, char_pad_token=self.char_pad)) #b, max_sent_len, max_word
res = res.permute(1, 0, 2).contiguous() # max_sent_len, b, max_word
return res
### END YOUR CODE
def to_input_tensor(self, sents: List[List[str]], device: torch.device) -> torch.Tensor:
""" Convert list of sentences (words) into tensor with necessary padding for
shorter sentences.
@param sents (List[List[str]]): list of sentences (words)
        @param device: device on which to load the tensor, i.e. CPU or GPU
@returns sents_var: tensor of (max_sentence_length, batch_size)
"""
word_ids = self.words2indices(sents)
sents_t = pad_sents(word_ids, self['<pad>'])
sents_var = torch.tensor(sents_t, dtype=torch.long, device=device)
return torch.t(sents_var)
@staticmethod
def from_corpus(corpus, size, freq_cutoff=2):
""" Given a corpus construct a Vocab Entry.
@param corpus (list[str]): corpus of text produced by read_corpus function
@param size (int): # of words in vocabulary
@param freq_cutoff (int): if word occurs n < freq_cutoff times, drop the word
@returns vocab_entry (VocabEntry): VocabEntry instance produced from provided corpus
"""
vocab_entry = VocabEntry()
word_freq = Counter(chain(*corpus))
valid_words = [w for w, v in word_freq.items() if v >= freq_cutoff]
print('number of word types: {}, number of word types w/ frequency >= {}: {}'
.format(len(word_freq), freq_cutoff, len(valid_words)))
top_k_words = sorted(valid_words, key=lambda w: word_freq[w], reverse=True)[:size]
for word in top_k_words:
vocab_entry.add(word)
return vocab_entry
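# Hedged usage sketch: build a small VocabEntry from a toy tokenized corpus.
# With freq_cutoff=1 every word is kept; the default of 2 would drop singletons.
# Assumes __getitem__ maps out-of-vocabulary words to the <unk> id, as in the standard scaffold.
toy_corpus = [['the', 'cat', 'sat'], ['the', 'dog', 'sat'], ['the', 'cat', 'ran']]
toy_vocab = VocabEntry.from_corpus(toy_corpus, size=10, freq_cutoff=1)
print(toy_vocab['the'], toy_vocab['cat'])                  # most frequent words get the smallest new ids
print(toy_vocab.words2indices([['the', 'cat', 'flew']]))   # unseen 'flew' maps to the <unk> id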
# Standalone copy of VocabEntry.words2charindices for quick experiments; it relies on
# the module-level `char2id` and `char_unk` defined in the next cell.
def words2charindices(sents):
""" Convert list of sentences of words into list of list of list of character indices.
@param sents (list[list[str]]): sentence(s) in words
@return word_ids (list[list[list[int]]]): sentence(s) in indices
"""
return [[[char2id.get(c, char_unk) for c in ("{" + w + "}")] for w in s] for s in sents]
import json
char2id = json.load(open('./sanity_check_en_es_data/char_vocab_sanity_check.json', 'r'))
id2char = {v: k for k, v in char2id.items()}
char_unk = char2id.get('<unk>', 0)  # assumed '<unk>' entry for unknown characters; falls back to 0 if the key differs
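# Hedged example: run the standalone helper on a toy batch of one sentence.
# Words are wrapped in '{' and '}' (start/end-of-word characters); characters missing
# from the sanity-check vocabulary fall back to char_unk defined above.
toy_sents = [["hello", "world"]]
toy_char_ids = words2charindices(toy_sents)
print(toy_char_ids[0][0])   # indices for '{', 'h', 'e', 'l', 'l', 'o', '}'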
input = torch.randn(2).unsqueeze(0)
import torch.nn as nn
import torch.nn.functional as F  # needed for F.relu below (may already be imported in an earlier cell)
x = torch.tensor([[[1., 1., 1., 1.],
[-2, -2, -2., -2.]],
[[2, 2, 1, 1],
[0.5, 0.5, 0, 0]]], dtype=torch.float32)
print("input tensor shape: ", x.size())
x = x.permute(0, 2, 1).contiguous()
print("input tensor shape: ", x.size())
x = torch.randn(2, 6, 50)
xreshaped = x.permute(0, 2, 1)
xreshaped.shape
xreshaped  # shape (batch=2, e_char=50, m_word=6)
b, e_char, m_word = xreshaped.size()
e_char
# conv1d is defined here so this scratch cell runs standalone (it may already exist from an
# earlier cell); out_channels (the number of filters) is an arbitrary choice for the shape check.
conv1d = nn.Conv1d(in_channels=e_char, out_channels=4, kernel_size=5, padding=1)
xconv = conv1d(xreshaped)
xconv.shape
# m_word #6
# k # 5
# padding # 1
# 6-5+2+1 = 4
maxpool = nn.MaxPool1d(kernel_size=m_word+2*1-5+1)
xout = maxpool(xconv)
xout.shape
xccc = F.relu(xconv)
xccc.shape
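# Hedged check of the Conv1d output-length arithmetic used above:
# L_out = floor((m_word + 2*padding - kernel_size) / stride) + 1,
# so with m_word=6, padding=1, kernel_size=5, stride=1 the conv output has length 4,
# and MaxPool1d(kernel_size=4) collapses it to one value per filter.
expected_len = (m_word + 2 * 1 - 5) // 1 + 1
print(expected_len, xconv.shape[-1], xout.shape[-1])   # expected: 4 4 1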
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2019-20: Homework 5
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
# Remember to delete the above 'pass' after your implementation
### YOUR CODE HERE for part 1g
def __init__(self, e_char, filters, padding=1, kernel_size=5):
super(CNN, self).__init__()
self.e_char = e_char
self.filters = filters
self.padding = padding
self.k = kernel_size
# m_word is intentionally not stored: forward() reads the word length from the input shape,
# so the max-pool window is built per batch.
self.conv1d = nn.Conv1d(
in_channels=self.e_char,
out_channels=self.filters,
kernel_size=self.k,
stride=1,
padding=self.padding,
padding_mode='zeros',
bias=True
)
def forward(self, xemb: torch.Tensor):
# xemb #shape (batch, m_word, e_char)
# xreshape #shape (batch, e_char, m_word)
# W #shape (filter, e_char, k)
m_word = xemb.shape[1]
x_reshaped = xemb.permute(0, 2, 1)
x_conv = self.conv1d(x_reshaped)
x_conv = F.relu(x_conv)
maxpool = nn.MaxPool1d(kernel_size=m_word + 2 * self.padding - self.k + 1)
x_conv_out = maxpool(x_conv)
return x_conv_out
### END YOUR CODE
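# Hedged usage sketch for the CNN module above: character embeddings for a batch of words
# go in as (batch, m_word, e_char); one max-pooled value per filter comes out.
# e_char=50 and filters=256 are illustrative choices, not values taken from the handout.
toy_batch = torch.randn(8, 21, 50)          # 8 words, max word length 21, char-embedding size 50
cnn_layer = CNN(e_char=50, filters=256)
print(cnn_layer(toy_batch).shape)           # expected: torch.Size([8, 256, 1])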
import torch
import torch.nn as nn
import torch.nn.functional as F
class Highway(nn.Module):
# Remember to delete the above 'pass' after your implementation
### YOUR CODE HERE for part 1f
def __init__(self, eword_size):
super(Highway, self).__init__()
self.eword_size = eword_size
self.w_proj = nn.Linear(self.eword_size, self.eword_size, bias=True)
# self.b_proj = None
self.w_gate = nn.Linear(self.eword_size, self.eword_size, bias=True)
# self.b_gate = None
self.highway_ReLU = nn.ReLU()
def forward(self, x_conv: torch.Tensor):
x_proj_pre = self.w_proj(x_conv)
x_proj = self.highway_ReLU(x_proj_pre)
x_gate_pre = self.w_gate(x_conv)  # gate is computed from the highway input x_conv (per the handout), not from x_proj
x_gate = torch.sigmoid(x_gate_pre)  # torch.sigmoid: F.sigmoid is deprecated
x_highway = x_gate*x_proj + (1-x_gate)*x_conv
return x_highway
x = torch.randn(6, 4)#.unsqueeze(0)
x.shape
# eword_size = 4
# w_proj = nn.Linear(eword_size, eword_size, bias=True)
# w_gate = nn.Linear(eword_size, eword_size, bias=True)
# highway_ReLU = nn.ReLU()
# x_conv = x
# x_proj_pre = w_proj(x_conv)
# x_proj = highway_ReLU(x_proj_pre)
# x_gate_pre = w_gate(x_proj)
# x_gate = F.sigmoid(x_gate_pre)
# x_proj*x_gate                # these quick checks refer to the commented-out block above
# torch.bmm(x_gate, x_proj)    # note: torch.bmm needs 3-D tensors, so it would fail on these 2-D ones
# x_gate
hh = Highway(eword_size=4)
hh(x)
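# Hedged end-to-end sketch: in the HW5 character-level embedding, the char embeddings of a
# word go through CNN and then Highway to produce the word embedding. Sizes are illustrative.
e_char, e_word, m_word_demo, batch = 50, 256, 21, 8
x_char_emb = torch.randn(batch, m_word_demo, e_char)     # stand-in for a char-embedding lookup
cnn_demo = CNN(e_char=e_char, filters=e_word)
highway_demo = Highway(eword_size=e_word)
x_conv_out = cnn_demo(x_char_emb).squeeze(-1)            # (batch, e_word, 1) -> (batch, e_word)
x_word_emb = highway_demo(x_conv_out)                    # (batch, e_word)
print(x_word_emb.shape)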
# yy = torch.t(yy)#.shape # [b, max_sent_len, max_word]
yy.shape
yy
yy.permute((1,0,2)).contiguous()
ss = src_sents
max_word_length = max(len(w) for s in ss for w in s )
max_sent_len = max(len(s) for s in ss)
xx[1]
###Output
_____no_output_____ |
notebooks/demo_mesh_lpt.ipynb | ###Markdown
`final_field` contains an approximation of the matter density field at present day, obtained by moving particles initially uniformly distributed according to a displacement vector returned by the `flowpm.tfpm.lpt1` function. This displacement vector is obtained by applying a Fourier kernel to the `initial_conditions` field. Now the same thing in mesh tensorflow
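In equations, this is first-order Lagrangian perturbation theory (the Zel'dovich approximation): particles starting at Lagrangian positions q are moved to x(a) = q + D1(a) * psi(q), where the displacement field psi is obtained (up to kernel sign conventions) by applying the inverse-Laplacian and gradient Fourier kernels to the linear density field, and D1(a) is the linear growth factor at scale factor a. The mesh-tensorflow cell below reproduces exactly this computation, with the FFTs and the particle update distributed across devices.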
###Code
# Compute necessary Fourier kernels
kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)
from flowpm.kernels import laplace_kernel, gradient_kernel
lap = tf.cast(laplace_kernel(kvec), tf.complex64)
grad_x = gradient_kernel(kvec, 0)
grad_y = gradient_kernel(kvec, 1)
grad_z = gradient_kernel(kvec, 2)
import flowpm.mesh as mpm
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch_size)
x_dim = mtf.Dimension("nx", nc)
y_dim = mtf.Dimension("ny", nc)
z_dim = mtf.Dimension("nz", nc)
tx_dim = mtf.Dimension("tnx", nc)
ty_dim = mtf.Dimension("tny", nc)
tz_dim = mtf.Dimension("tnz", nc)
rfield = mtf.import_tf_tensor(mesh, initial_conditions , shape=[batch_dim, x_dim, y_dim, z_dim])
mlx = mtf.import_tf_tensor(mesh, grad_x*lap, shape=[x_dim, y_dim, z_dim])
mly = mtf.import_tf_tensor(mesh, grad_y*lap, shape=[x_dim, y_dim, z_dim])
mlz = mtf.import_tf_tensor(mesh, grad_z*lap, shape=[x_dim, y_dim, z_dim])
# Create a list of particles for each slice of the data
mstate = mpm.mtf_indices(mesh, shape=[x_dim, y_dim, z_dim], dtype=tf.float32)
# Repeating pos to match batch size
mstate = mtf.einsum([mtf.ones(mesh, [batch_dim]), mstate], output_shape=[batch_dim] + mstate.shape[:])
# Compute 3d fourier transform of the input field
def fft3d(field):
cube = mtf.slicewise(tf.signal.fft, [mtf.cast(field, tf.complex64)], output_dtype=tf.complex64,
splittable_dims=field.shape[:-1])
cube = mtf.transpose(cube, new_shape=[batch_dim, y_dim, z_dim, x_dim])
cube = mtf.reshape(cube, new_shape=[batch_dim, ty_dim, z_dim, tx_dim])
cube = mtf.slicewise(tf.signal.fft, [cube], output_dtype=tf.complex64,
splittable_dims=cube.shape[:-1])
cube = mtf.transpose(cube, new_shape=[batch_dim, z_dim, tx_dim, ty_dim])
cube = mtf.reshape(cube, new_shape=[batch_dim, z_dim, x_dim, y_dim])
cube = mtf.slicewise(tf.signal.fft, [cube], output_dtype=tf.complex64,
splittable_dims=cube.shape[:-1])
cube = mtf.transpose(cube, new_shape=[batch_dim, x_dim, y_dim, z_dim])
return cube
def ifft3d(field):
cube = mtf.slicewise(tf.signal.ifft, [mtf.cast(field, tf.complex64)], output_dtype=tf.complex64,
splittable_dims=field.shape[:-1])
cube = mtf.transpose(cube, new_shape=[batch_dim, y_dim, z_dim, x_dim])
cube = mtf.reshape(cube, new_shape=[batch_dim, ty_dim, z_dim, tx_dim])
cube = mtf.slicewise(tf.signal.ifft, [cube], output_dtype=tf.complex64,
splittable_dims=cube.shape[:-1])
cube = mtf.transpose(cube, new_shape=[batch_dim, z_dim, tx_dim, ty_dim])
cube = mtf.reshape(cube, new_shape=[batch_dim, z_dim, x_dim, y_dim])
cube = mtf.slicewise(tf.signal.ifft, [cube], output_dtype=tf.complex64,
splittable_dims=cube.shape[:-1])
cube = mtf.transpose(cube, new_shape=[batch_dim, x_dim, y_dim, z_dim])
return mtf.cast(cube, dtype=tf.float32)
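# Hedged sanity check (plain TensorFlow, not mesh): a 3-D FFT is separable, so the
# transpose + 1-D FFT pattern used in fft3d above should agree with tf.signal.fft3d.
# `fft_check` can be evaluated with the tf.Session created further down.
chk = tf.complex(tf.random.normal([1, 8, 8, 8]), tf.random.normal([1, 8, 8, 8]))
chk_step = tf.signal.fft(chk)                        # FFT along z
chk_step = tf.transpose(chk_step, [0, 2, 3, 1])      # -> (batch, y, z, x)
chk_step = tf.signal.fft(chk_step)                   # FFT along x
chk_step = tf.transpose(chk_step, [0, 2, 3, 1])      # -> (batch, z, x, y)
chk_step = tf.signal.fft(chk_step)                   # FFT along y
chk_step = tf.transpose(chk_step, [0, 2, 3, 1])      # back to (batch, x, y, z)
fft_check = tf.reduce_max(tf.abs(chk_step - tf.signal.fft3d(chk)))   # expect a small float32 residual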
# Compute displacement by applying a series of fourier kernels, and taking the inverse fourier transform
lineark = fft3d(rfield)
displacement = [ifft3d(mtf.multiply(lineark,mlx)),
ifft3d(mtf.multiply(lineark,mly)),
ifft3d(mtf.multiply(lineark,mlz))]
displacement = mtf.stack(displacement, dim_name="ndim", axis=4)
# Apply displacement to input particles, scaled by cosmology
mfstate = mstate + pt.D1(a)*displacement
# Paint the particles onto a new field
lpt_field = mtf.slicewise(flowpm.cic_paint,
[mtf.zeros_like(rfield), mfstate],
output_dtype=tf.float32,
output_shape=[batch_dim,x_dim,y_dim, z_dim],
splittable_dims=rfield.shape[:])
devices = ["gpu:0", "gpu:1"]
mesh_shape = [("all", 2)]
layout_rules = [("nx", "all"), ("tny", "all")]
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
mesh_shape, layout_rules, devices)
lowering = mtf.Lowering(graph, {mesh:mesh_impl})
result = lowering.export_to_tf_tensor(lpt_field)
sess = tf.Session()
res_init, res_flowpm, res_mesh = sess.run([initial_conditions, final_field, result])  # new names so the scale factor `a` is not clobbered
figure(figsize=(15,5))
subplot(141)
imshow(res_init[0].sum(axis=2))
title('Initial Conditions')
subplot(142)
imshow(res_flowpm[0].sum(axis=2))
title('FlowPM')
subplot(143)
imshow(res_mesh[0].sum(axis=2))
title('Mesh TensorFlow')
subplot(144)
imshow((res_flowpm[0] - res_mesh[0]).sum(axis=2))
title('Residuals')
###Output
_____no_output_____ |
Scrapping/juypter_main.ipynb | ###Markdown
Google Image Scraper for Jupyter Notebook
###Code
from GoogleImageScrapper import GoogleImageScraper
import os
webdriver_path = os.getcwd()+"\\webdriver\\chromedriver.exe"
image_path = os.getcwd()+"\\photos"
# add new search keys to this list, e.g. ["cat", "t-shirt", "apple", "orange", "pear", "fish"]
search_keys= ["cat","t-shirt"]
number_of_images = 20
headless = False
#min_resolution = (width,height)
min_resolution=(0,0)
#max_resolution = (width,height)
max_resolution=(1920,1080)
for search_key in search_keys:
image_scrapper = GoogleImageScraper(webdriver_path,image_path,search_key,number_of_images,headless,min_resolution,max_resolution)
image_urls = image_scrapper.find_image_urls()
image_scrapper.save_images(image_urls)
###Output
_____no_output_____ |