path | concatenated_notebook
---|---|
model_interactive_race.ipynb | ###Markdown
Boucher et al., 2007 Interactive Race Model
###Code
import numpy
import random
import matplotlib.pyplot as plt
import pandas
%matplotlib inline
params={'mugo':.2,
'mustop':.8,
'threshold':60,
'nondecisiongo':50,
'nondecisionstop':50,
'inhibitionParam':1,
'ssds':[1,50,100,150, 200,250, 300, 350, 400, 450, 500,3000],
'nreps':1000,
'maxtime':1000}
def interactiverace(params):
stopaccumsave = []
meanrtgo = numpy.zeros(len(params['ssds']))
presp = numpy.zeros(len(params['ssds']));
for irep in range(params['nreps']):
for j,ssd in enumerate(params['ssds']):
stopsignaldelay = ssd
goaccumulator = 0
stopaccumulator = 0
rtgo = 0
itime = 0
while itime < params['maxtime'] and rtgo == 0: # single trial
itime = itime + 1
if itime < stopsignaldelay + params['nondecisionstop']:
inhibition = 0
else:
inhibition = params['inhibitionParam']
stopaccumulator = stopaccumulator + params['mustop'] + numpy.random.normal(loc=0, scale=1)
if stopaccumulator <= 0:
stopaccumulator = 0;
stopaccumsave.append(stopaccumulator)
if itime >= params['nondecisiongo']:
goaccumulator = goaccumulator + params['mugo'] - inhibition*stopaccumulator + numpy.random.normal(loc=0, scale=1)
if goaccumulator <= 0:
goaccumulator = 0;
if goaccumulator > params['threshold']:
if rtgo == 0:
rtgo = itime;
meanrtgo[j] += rtgo;
if rtgo > 0:
presp[j] += 1;
for ssd in range(len(params['ssds'])):
if presp[ssd] > 0:
meanrtgo[ssd] = meanrtgo[ssd]/presp[ssd];
presp[ssd] = presp[ssd]/params['nreps'];
return(meanrtgo,presp,stopaccumsave)
meanrtgo,presp,stopaccumsave=interactiverace(params)
print(meanrtgo)
print(presp)
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.plot(params['ssds'][:11],meanrtgo[:11] - meanrtgo[11])
plt.plot([params['ssds'][0],params['ssds'][10]],[0,0],'k:')
plt.xlabel('Stop signal delay')
plt.ylabel('Violation (Stop Failure RT - No-Stop RT)')
plt.subplot(1,2,2)
plt.plot(params['ssds'][:11],presp[:11])
plt.xlabel('Stop signal delay')
plt.ylabel('Probability of responding')
plt.axis([params['ssds'][0],params['ssds'][10],0,1])
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.plot(params['ssds'][:5],meanrtgo[:5] - 350)
plt.plot([params['ssds'][0],params['ssds'][4]],[0,0],'k:')
plt.xlabel('Stop signal delay')
plt.ylabel('Violation (Stop Failure RT - No-Stop RT)')
plt.subplot(1,2,2)
plt.plot(params['ssds'][:5],presp[:5])
plt.xlabel('Stop signal delay')
plt.ylabel('Probability of responding')
plt.axis([params['ssds'][0],params['ssds'][4],0,1])
###Output
_____no_output_____ |
.ipynb_checkpoints/Emails-checkpoint.ipynb | ###Markdown
Emails
###Code
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import pandas as pd
import numpy as np
friends = pd.read_csv('friends.csv')
couples_casual = pd.read_csv('relationshipandcasual.csv')
couples_casual
data = np.array(couples_casual)
###Output
_____no_output_____ |
sm_bert_log_reg.ipynb | ###Markdown
Logistic Regression with small Bert encodings
###Code
import numpy as np
import pandas as pd
# for model:
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.multiclass import OneVsRestClassifier
from sklearn.model_selection import train_test_split
# for scoring:
from sklearn import metrics
from sklearn.metrics import f1_score
# requires toxic-tr
x = np.loadtxt("toxic_bert_matrix_small.out", delimiter=",")
df = pd.read_csv('toxic-train-clean-small.csv')
y = df.iloc[:, 2:8]
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size= 0.2)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
pipe = make_pipeline(OneVsRestClassifier(LogisticRegression(max_iter=500, class_weight='balanced')))
param_grid = {'onevsrestclassifier__estimator__solver': ['liblinear']}
grid = GridSearchCV(pipe, param_grid, cv=15, scoring='roc_auc', verbose=3)
grid3 = grid.fit(X_train, y_train)
grid3.best_score_
predicted_y_test = grid3.predict(X_test)
predicted_y_test[:1]
y_pred_prob = grid3.predict_proba(X_test)
y_pred_prob[:1]
auc_score = metrics.roc_auc_score(y_test, y_pred_prob)
auc_score
f1_score(y_test, predicted_y_test, average='micro')
###Output
_____no_output_____ |
gobgob_ipynb_v0_3.ipynb | ###Markdown
Searching the game "gobu" with Python. Design document: https://docs.google.com/presentation/d/1wmV3fb-fx1qQahOk2ZkiEcQjAm2EgGZrNFMwcBmwSnc/edit?usp=sharing Functions required by State. The state arguments and the id.
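For reference, the 134-bit id used below is simply the state fields flattened and concatenated in a fixed order (this follows directly from the slice offsets in `create_id` and the `id2*` helpers): bit 0 is `is_first`, bit 1 is `is_choise`, bits 2-55 are `choise_board` (6 x 9 = 54 bits), bits 56-67 are `choise_hand` (12 bits), bits 68-121 are `board` (54 bits), and bits 122-133 are `hand` (12 bits), giving 1 + 1 + 54 + 12 + 54 + 12 = 134 bits in total.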
###Code
from typing import List
import itertools
# id is a 134-bit string
# Build an id from the state arguments
def create_id(is_first: bool, is_choise: bool ,choise_board: List[List[bool]],
choise_hand: List[bool] ,board: List[List[bool]], hand: List[bool]):
big_list = [int(is_first)] + [int(is_choise)] \
+ list(itertools.chain.from_iterable(choise_board)) + choise_hand \
+ list(itertools.chain.from_iterable(board)) + hand;
id = "".join(map(str, big_list))
return id;
# Recover the state arguments from an id
def id2is_first(id: str):
return id[0];
def id2is_choise(id: str):
return id[1];
def id2choise_board(id: str):
choise_board = [[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]]
for i, str_bit in enumerate(id[2:2+54]):
choise_board[i//9][i%9] = int(str_bit)
return choise_board;
def id2choise_hand(id: str):
choise_hand = [0,0,0,0,0,0,0,0,0,0,0,0]
for i, str_bit in enumerate(id[56:56+12]):
choise_hand[i] = int(str_bit)
return choise_hand;
def id2board(id: str):
board = [[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]]
for i, str_bit in enumerate(id[68:68+54]):
board[i//9][i%9] = int(str_bit)
return board;
def id2hand(id: str):
hand = [0,0,0,0,0,0,0,0,0,0,0,0]
for i, str_bit in enumerate(id[122:122+12]):
hand[i] = int(str_bit)
return hand;
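# Illustrative round trip (an added example, not from the original notebook):
# encode a state with the first player's L piece on square 0 and read it back from the id.
_example_board = [[0]*9 for _ in range(6)]
_example_board[0][0] = 1
_example_id = create_id(False, False,
                        [[0]*9 for _ in range(6)], [0]*12,
                        _example_board, [0,1,1,1,1,1,1,1,1,1,1,1])
assert len(_example_id) == 134
assert id2board(_example_id)[0][0] == 1
assert id2hand(_example_id)[0] == 0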
###Output
_____no_output_____
###Markdown
Normalization
###Code
# Normalization
def create_normalization_board(board):
board = board # 候補
    # Try each conversion table in turn and keep whichever of board / cand_board is smaller
convert_tables = [[6,3,0,7,4,1,8,5,2],[8,7,6,5,4,3,2,1,0],[2,5,8,1,4,7,0,3,6],
[2,1,0,5,4,3,8,7,6],[6,7,8,3,4,5,0,1,2],[0,3,6,1,4,7,2,5,8],[8,5,2,7,4,1,6,3,0]]
for convert_table in convert_tables:
cand_board = [
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]];
for i in range(6):
for j, num in enumerate(convert_table):
cand_board[i][j] = board[i][num]
            # Compare the current best board with cand_board and keep the smaller one
if board > cand_board:
board = cand_board
return board
def create_normalization_hand(hand):
hand = hand # 候補
    # Simply turn each "1,0" pair into "0,1".
for i in range(6):
if hand[2*i] == 1 and hand[2*i+1] == 0:
hand[2*i] = 0
hand[2*i+1] = 1
return hand
###Output
_____no_output_____
###Markdown
Legal moves
###Code
# Generate the legal actions
import copy
def create_legal_actions(is_first, is_choise, choise_board, choise_hand, board, hand):
if is_choise == True: # stateはchoise、ここではchoiseした駒を置く行動を絞り込む
        # 1. Start by forbidding every action.
actions = [
[[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]],
[0,0,0,0,0,0,0,0,0,0,0,0]]
choise_piece_size = 0 # 0~5
# 2. choiseした駒が手駒なら、
if choise_hand != [0,0,0,0,0,0,0,0,0,0,0,0]:
# 2-1. 選択した駒と同じtypeすべての盤面の行動を許可する
for i, piece in enumerate(choise_hand):
if piece == 1:
choise_piece_size = i//2 # 0~11 -> 0~5
actions[0][choise_piece_size] = [1,1,1,1,1,1,1,1,1]
break;
        # 3. If the chosen piece comes from the board,
else:
            # 3-1. allow every board square for the same piece type as the chosen piece
for i in range(6):
if choise_board[i] != [0,0,0,0,0,0,0,0,0]:
choise_piece_size = i
actions[0][choise_piece_size] = [1,1,1,1,1,1,1,1,1]
break;
            # 3-2. forbid putting it back on the square it originally came from
for place, is_exist in enumerate(choise_board[choise_piece_size]):
if is_exist == 1:
actions[0][choise_piece_size][place] = 0
break;
            # 3-3. If a piece of the same size or larger than the chosen piece is on the board,
            # i.e. if board[type of the same or larger size][place] == 1:
            #     actions[0][type][place] = 0
check_piece_sizes = [[0,3], [0,1,3,4], [0,1,2,3,4,5]]
for check_piece_size in check_piece_sizes[(choise_piece_size) % 3]:
for place, is_exist in enumerate(board[check_piece_size]):
if is_exist == 1:
                        # 3-3-1. that square cannot be used
actions[0][choise_piece_size][place] = 0
else: # is_choise == False: # stateはput、ここではchoiseする駒を選べる行動を絞り込む
        # 1. Allow every piece that is currently placed
actions = [copy.deepcopy(board), copy.deepcopy(hand)]
        # 2. Forbid the current player's pieces (because the turn is about to pass)
if is_first == 1: # 先手の駒を行動不可にする
for i in [0,1,2]:
actions[0][i] = [0,0,0,0,0,0,0,0,0] # 2-1. ボードの行動の禁止
for i in [0,1,2,3,4,5]: # 2-2. 手駒の行動の禁止
actions[1][i] = 0
else: # 後手の駒を行動不可にする
for i in [3,4,5]:
actions[0][i] = [0,0,0,0,0,0,0,0,0] # 2-1. ボードの行動の禁止
for i in [6,7,8,9,10,11]: # 2-2. 手駒の行動の禁止
actions[1][i] = 0
        # 3. Scan the board: where an L piece sits, forbid choosing the M and S there; where an M sits, forbid the S.
piece_sizes = {"M, S": [1,2,4,5],"S": [2,5]}
for place in range(9):
            # 3-1. If an L piece is placed on the board here
if board[0][place] == 1 or board[3][place] == 1:
                # 3-1-1. forbid the M and S pieces on the same square
for piece_size in piece_sizes["M, S"]:
actions[0][piece_size][place] = 0
            # 3-2. If an M piece is placed on the board here
elif board[1][place] == 1 or board[4][place] == 1:
                # 3-2-1. forbid the S pieces on the same square
for piece_size in piece_sizes["S"]:
actions[0][piece_size][place] = 0
return actions
###Output
_____no_output_____
###Markdown
Next states
###Code
# Generate the next states
def create_next_states(is_first, is_choise, choise_board, choise_hand, board, hand, actions):
next_states = []
next_is_choise = int(not is_choise)
next_is_first = int(not is_first) if next_is_choise == 1 else is_first # 次がchoiseなら反転
    # If is_choise == 1 now, actions holds the squares where the chosen piece may be placed.
    # No bit is ever set in actions[1] in that case.
    # Where actions[0][type][place] == 1, set board[type][place] = 1.
if is_choise == 1: # 手番を継続, 次はセット, actionをboard, handに反映する
# print("create_next_states : setなステートを作成する")
next_choise_board = [[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]];
next_choise_hand = [0,0,0,0,0,0,0,0,0,0,0,0]
for piece_type_num, piece_type in enumerate(actions[0]):
for place_num, place in enumerate(piece_type):
# print("place_num = {}, place = {}".format(place_num, place))
if place == 1: # 行動可能ならその箇所を1にする
next_board = copy.deepcopy(board)
next_board[piece_type_num][place_num] = 1
state = State(next_is_first, next_is_choise,
next_choise_board, next_choise_hand,
next_board, copy.deepcopy(hand))
next_states.append(state)
    # If is_choise == 0 now, actions holds the positions of the pieces that may be chosen.
    # Where actions[1][place] == 1, set hand[place] = 0 and choise_hand[place] = 1.
    # Where actions[0][type][place] == 1, set board[type][place] = 0 and choise_board[type][place] = 1.
else: # is_choise == 0: # 手番を交代、次はチョイス, actionをchise_board, chise_handに反映する
print("create_next_states : choiseなステートを作成する")
        # choose a piece from the board
for piece_type_num, piece_type in enumerate(actions[0]):
for place_num, place in enumerate(piece_type):
if place == 1: # 行動可能ならその箇所のchoise_boardを1にして、その箇所のboardを0にする
next_choise_board = [[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]];
next_board = copy.deepcopy(board)
next_choise_board[piece_type_num][place_num] = 1
next_board[piece_type_num][place_num] = 0
state = State(next_is_first, next_is_choise,
next_choise_board, [0,0,0,0,0,0,0,0,0,0,0,0],
next_board, hand)
next_states.append(state)
        # choose a piece from the hand
for place_num, place in enumerate(actions[1]):
if place == 1: # 行動可能ならその箇所のchoise_handを1にして、その箇所のhandを0にする
next_choise_hand = [0,0,0,0,0,0,0,0,0,0,0,0]
next_hand = copy.deepcopy(hand)
next_choise_hand[place_num] = 1
next_hand[place_num] = 0
state = State(next_is_first, next_is_choise,
[[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]],
next_choise_hand, board, next_hand)
next_states.append(state)
return next_states
###Output
_____no_output_____
###Markdown
Result
###Code
# Check whether the game has been decided
def is_win(single_surface):
s = single_surface
    # Check the horizontal, vertical, and both diagonal lines
check_lines = [[0,1,2], [3,4,5], [6,7,8], [0,3,6], [1,4,7], [2,5,8], [0,4,8], [2,4,6]]
for check_line in check_lines:
if s[check_line[0]] and s[check_line[1]] and s[check_line[2]]:
return True;
return False;
# Build a board where only the topmost (visible) piece on each square has its bit set
def create_surface(board):
board_surface = [[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]];
    # Start from the S pieces
board_surface[2] = board[2]
board_surface[5] = board[5]
    # If a smaller piece sits on a square covered by a larger piece, zero out the smaller piece
    # Walk over the squares one by one
for place in range(9):
        # Apply the M pieces
if board[1][place] == 1 or board[4][place] == 1:
if board[1][place] == 1:
board_surface[1][place] = 1
if board[4][place] == 1:
board_surface[4][place] = 1
board_surface[2][place] = 0 # S駒を0にする
board_surface[5][place] = 0
        # Apply the L pieces
if board[0][place] == 1 or board[3][place] == 1:
if board[0][place] == 1:
board_surface[0][place] = 1
if board[3][place] == 1:
board_surface[3][place] = 1
board_surface[1][place] = 0 # M駒を0にする
board_surface[4][place] = 0
board_surface[2][place] = 0 # S駒を0にする
board_surface[5][place] = 0
return board_surface
# Check whether the game is over and who the winner is
def check_result(board):
board_surface = create_surface(board)
is_done = 0 # 決着がついているなら1を返す
winner = 0 # 先手は0, 後手は1
    # Internally merge board_surface[0,1,2] and board_surface[3,4,5] into one surface per player
single_surfaces = [[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]];
for i in range(9):
if board_surface[0][i] == 1 or board_surface[1][i] == 1 or board_surface[2][i] == 1:
single_surfaces[0][i] = 1
elif board_surface[3][i] == 1 or board_surface[4][i] == 1 or board_surface[5][i] == 1:
single_surfaces[1][i] = 1
is_done = is_win(single_surfaces[0]) == 1 or is_win(single_surfaces[1]) == 1
winner = 1 if is_win(single_surfaces[1]) == 1 else 0
return [is_done, winner]
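# Illustrative check (an added example, not from the original notebook):
# the first player's L pieces on squares 0, 1 and 2 form a horizontal line,
# so the position is decided and the winner index is 0 (the first player).
_test_board = [[0]*9 for _ in range(6)]
_test_board[0][0] = _test_board[0][1] = _test_board[0][2] = 1
assert check_result(_test_board) == [True, 0]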
###Output
_____no_output_____
###Markdown
The State class
###Code
# The State class
class State():
def __init__(self, is_first, is_choise, choise_board, choise_hand, board, hand):
self.is_first = is_first
self.is_choise = is_choise
self.choise_board = choise_board
self.choise_hand = choise_hand
self.board = board
self.hand = hand
self.id = create_id(self.is_first, self.is_choise, self.choise_board,
self.choise_hand, self.board, self.hand)
self.normalized_id = create_id(
self.is_first, self.is_choise,
create_normalization_board(self.choise_board), create_normalization_hand(self.choise_hand),
create_normalization_board(self.board), create_normalization_hand(self.hand))
self.legal_actions = create_legal_actions(
self.is_first, self.is_choise, self.choise_board,
self.choise_hand, self.board, self.hand)
self.is_done, self.winner = check_result(self.board)
    # Generate the next states
def next_states(self):
states = create_next_states(
self.is_first, self.is_choise, self.choise_board,
self.choise_hand, self.board, self.hand, self.legal_actions)
return states
###Output
_____no_output_____
###Markdown
Testing the State class
###Code
# Tests
# Initial position (a set state)
is_first = False
is_choise = False
choise_board = [[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]]
choise_hand = [0,0,0,0,0,0,0,0,0,0,0,0]
board = [[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]]
hand = [1,1,1,1,1,1,1,1,1,1,1,1]
legal_actions = [
[[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]],
[1,1,1,1,1,1,0,0,0,0,0,0]]
# # Representative first move (a choice state)
# is_first = True
# is_choise = True
# choise_board = [[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]]
# choise_hand = [1,0,0,0,0,0,0,0,0,0,0,0]
# board = [[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]]
# hand = [0,1,1,1,1,1,1,1,1,1,1,1]
# legal_actions = [
# [[1,1,1,1,1,1,1,1,1],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]],
# [0,0,0,0,0,0,0,0,0,0,0,0]]
# # Representative first move (a set state)
# is_first = True
# is_choise = False
# choise_board = [[1,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]]
# choise_hand = [0,0,0,0,0,0,0,0,0,0,0,0]
# board = [[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]]
# hand = [0,1,1,1,1,1,1,1,1,1,1,1]
# legal_actions = [
# [[1,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]],
# [0,1,1,1,1,1,1,1,1,1,1,1]]
id = create_id(is_first, is_choise,choise_board,choise_hand,board,hand)
state = State(is_first, is_choise,choise_board,choise_hand,board,hand)
# # id tests
# print("id = {}".format(id))
# print("is_first = {}".format(id2is_first(id)))
# print("is_choise = {}".format(id2is_choise(id)))
# print("choise_board = {}".format(id2choise_board(id)))
# print("choise_hand = {}".format(id2choise_hand(id)))
# print("board = {}".format(id2board(id)))
# print("hand = {}".format(id2hand(id)))
# State tests
print("state = {}".format(state))
print("is_first = {}".format(state.is_first))
print("is_choise = {}".format(state.is_choise))
print("choise_board = {}".format(state.choise_board))
print("choise_hand = {}".format(state.choise_hand))
print("board = {}".format(state.board))
print("hand = {}".format(state.hand))
print("legal_actions = {}".format(state.legal_actions))
print("number of next_states = {}".format(len(state.next_states())))
# Normalization tests
# hand: List[bool] = [1,0,1,0,1,1,0,1,0,1,0,1]
# print("hand = {}".format(hand))
# print("nrm_hand = {}".format(create_normalization_hand(hand)))
# board = [
# [1,0,1,0,1,0,1,0,1],[1,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]];
# print("board = {}".format(board))
# print("nrm_board = {}".format(create_normalization_board(board)))
# Initial position (a set state)
is_first: bool = False
is_choise: bool = False
choise_board: List[List[bool]] =[[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]]
choise_hand: List[bool] = [0,0,0,0,0,0,0,0,0,0,0,0]
board: List[List[bool]] =[[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]]
hand: List[bool] = [1,1,1,1,1,1,1,1,1,1,1,1]
# Expected answer
# legal_actions: List[List]= [
# [[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]],
# [1,1,1,1,1,1,0,0,0,0,0,0]]
id = create_id(is_first, is_choise,choise_board,choise_hand,board,hand)
state = State(is_first, is_choise,choise_board,choise_hand,board,hand)
# State tests
print("id = {}".format(id))
print("type(id) = {}".format(type(id)))
print("state = {}".format(state))
print("state.is_first = {}".format(state.is_first))
print("state.is_choise = {}".format(state.is_choise))
print("state.choise_board = {}".format(state.choise_board))
print("state.choise_hand = {}".format(state.choise_hand))
print("state.board = {}".format(state.board))
print("state.hand = {}".format(state.hand))
print("state.legal_actions = {}".format(state.legal_actions))
print("state.next_states = {}".format(state.next_states()))
print("self.is_done = {}".format(state.is_done))
print("self.winner = {}".format(state.winner))
###Output
id = 00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111
type(id) = <class 'str'>
state = <__main__.State object at 0x7f99320395c0>
state.is_first = False
state.is_choise = False
state.choise_board = [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]]
state.choise_hand = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
state.board = [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]]
state.hand = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
state.legal_actions = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]]
create_next_states : choiseなステートを作成する
state.next_states = [<__main__.State object at 0x7f9932039588>, <__main__.State object at 0x7f9932039630>, <__main__.State object at 0x7f9932039668>, <__main__.State object at 0x7f99320396a0>, <__main__.State object at 0x7f99320396d8>, <__main__.State object at 0x7f9932039710>]
self.is_done = False
self.winner = 0
###Markdown
Implementation
###Code
import networkx as nx
from google.colab import files
# modules for reading the CSV files
import pandas as pd
from pandas import DataFrame
import numpy as np
from tabulate import tabulate # module for pretty-printing pandas DataFrames
# uploaded = files.upload()
# for fn in uploaded.keys():
# print("hogehoge")
# G = nx.readwrite.gml.read_gml(fn)
# nx.draw_spring(G, node_size=200, node_color="red", with_labels=True)
### Program that builds the game tree with BFS ###
### Settings start here ###
printFlag = False
### Settings end here ###
### main starts here
# Initialize unsolvedDf and solvedDf
if printFlag:
    print("===")
    print("Program start")
    print("===")
    print()
    print("Initializing data")
cols = ["PREVIOUS_STATES", "STATE", "NEXT_STATES", "RESULT"] # [list of previous states, state, list of next states]
df = pd.DataFrame(index=[], columns=cols)
df.set_index("STATE")
unsolvedDf = df
solvedDf = df
if printFlag:
    print("Data initialized")
    print()
# Add the initial state (*1) to unsolved. Nodes stacked in unsolved have not been visited yet.
if printFlag:
    print("===")
    print("Preparing the BFS")
    print("===")
    print()
    print("Setting the initial state")
# Initial position (a set state)
is_first: bool = False
is_choise: bool = False
choise_board: List[List[bool]] =[[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]]
choise_hand: List[bool] = [0,0,0,0,0,0,0,0,0,0,0,0]
board: List[List[bool]] =[[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]]
hand: List[bool] = [1,1,1,1,1,1,1,1,1,1,1,1]
state = State(is_first, is_choise,choise_board,choise_hand,board,hand)
init_state = state.normalized_id
previous_state = ""
unsolvedDf = unsolvedDf.append(pd.Series([[previous_state], init_state, "unsolved", ""], index=df.columns, name=init_state))
if printFlag:
    print("Initial state has been set") # check
    print("Check [UNSOLVED_DF]:") # check
    print(unsolvedDf) # check
    print() # check
# Repeat the following until unsolved is empty. The BFS starts here.
if printFlag:
    print("===")
    print("Starting the BFS")
    print("===")
    print()
for _ in range(10): # while len(unsolvedDf) > 0: # a bounded for loop is used during development
    # Pop the first node from unsolvedDf.
    if len(unsolvedDf) <= 0:
        break;
    current_node = unsolvedDf.iloc[0] # take the first node (current_node)
    unsolvedDf.drop(unsolvedDf.index[0], inplace=True) # remove the popped node from unsolved
    # Build the state
    id = current_node.STATE
    state = State(id2is_first(id), id2is_choise(id),id2choise_board(id),
                  id2choise_hand(id),id2board(id),id2hand(id))
    # Check the result
    result = state.winner
    # If this position is already decided
    if state.is_done:
        current_node.RESULT = state.winner
        current_node.NEXT_STATES = []
    else: # the position is not decided yet
        # Expand the next nodes (next_nodes) from the head node (current_node).
        next_states = state.next_states() # the expansion result
        next_state_ids = []
        for next_state in next_states:
            next_state_ids.append(next_state.normalized_id)
        current_node.NEXT_STATES = next_state_ids # store the expansion result in current_node's NEXT_STATES
        # For every expanded state, do the following.
        if printFlag:
            print("Expanding the node '{}' popped from unsolvedDf".format(current_node.STATE))
        if len(next_state_ids) <= 0:
            if printFlag:
                print(" expansion result: this node is a leaf")
        for next_state in next_state_ids:
            # If next_node has not been discovered yet (it is in neither unsolved nor solved)
            if (next_state not in unsolvedDf.STATE.values) and (next_state not in solvedDf.STATE.values):
                if next_state == current_node.STATE: # the next node is identical to this node
                    if printFlag:
                        print("expansion result: identical to the current node '{}'".format(next_state))
                    continue;
                else:
                    if printFlag:
                        print(" expansion result: undiscovered node '{}'".format(next_state))
                    # T) mark the node as unvisited # add it to unsolved
                    previous_state = [current_node.STATE]
                    next_node = pd.Series([previous_state, next_state, "unsolved", ""], index=df.columns, name=next_state) # build next_node
                    unsolvedDf = unsolvedDf.append(next_node)
            else: # F) otherwise, the node has already been discovered
                if printFlag:
                    print(" expansion result: already-discovered node '{}'".format(next_state))
                # append this node to the previous_states of the already-registered node
                previous_state = [current_node.STATE]
                if next_state in unsolvedDf.STATE.values: # it is in unsolvedDf
                    if printFlag:
                        print(" it is in unsolved")
                    # append previous_state to unsolvedDf[unsolvedDf.STATE.values == next_state]
                    tmp = unsolvedDf.loc[next_state, "PREVIOUS_STATES"]
                    tmp.append(previous_state[0])
                    unsolvedDf.loc[next_state, "PREVIOUS_STATES"] = tmp
                elif next_state in solvedDf.STATE.values: # it is in solvedDf
                    if printFlag:
                        print(" it is in solved")
                    # append previous_state to solvedDf[solvedDf.STATE.values == next_state]
                    tmp = solvedDf.loc[next_state, "PREVIOUS_STATES"]
                    tmp.append(previous_state[0])
                    solvedDf.loc[next_state, "PREVIOUS_STATES"] = tmp
                else: # a state that slipped through for some reason
                    print(" error")
    # Add the current node (current_node) to solvedDf. Nodes in solvedDf have been visited.
    solvedDf = solvedDf.append(current_node)
if printFlag:
    print()
    print("BFS finished")
    print()
# Check the results
print("===")
print("Result check")
print("===")
print()
print("Check [unsolvedDf]:")
print()
print(tabulate(unsolvedDf, unsolvedDf.columns,tablefmt='github', showindex=True))
print()
print("Check [solvedDf]:")
print()
print(tabulate(solvedDf, solvedDf.columns,tablefmt='github', showindex=True))
print()
### main ends here
###Output
create_next_states : choiseなステートを作成する
create_next_states : choiseなステートを作成する
create_next_states : choiseなステートを作成する
create_next_states : choiseなステートを作成する
create_next_states : choiseなステートを作成する
create_next_states : choiseなステートを作成する
create_next_states : choiseなステートを作成する
create_next_states : choiseなステートを作成する
create_next_states : choiseなステートを作成する
create_next_states : choiseなステートを作成する
===
Result check
===
Check [unsolvedDf]:
| | PREVIOUS_STATES | STATE | NEXT_STATES | RESULT |
|----------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|---------------|----------|
| 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000011101111111 | ['00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000111101111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000111101111111'] | 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000011101111111 | unsolved | |
| 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110101111111 | ['00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000111101111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000111101111111'] | 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110101111111 | unsolved | |
| 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000111100111111 | ['00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000111101111111'] | 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000111100111111 | unsolved | |
| 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000111111111 | ['00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000001111111111', '00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000001111111111'] | 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000111111111 | unsolved | |
| 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000001101111111 | ['00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000001111111111', '00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000001111111111'] | 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000001101111111 | unsolved | |
| 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000111111111 | ['00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010111111111', '00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010111111111'] | 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000111111111 | unsolved | |
| 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010011111111 | ['00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010111111111', '00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010111111111'] | 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010011111111 | unsolved | |
| 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000010101111111 | ['00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010111111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010111111111', '00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010111111111', '00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010111111111'] | 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000010101111111 | unsolved | |
| 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000001101111111 | ['00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000011101111111'] | 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000001101111111 | unsolved | |
| 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010101111111 | ['00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000011101111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000011101111111'] | 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010101111111 | unsolved | |
| 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000011100111111 | ['00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000011101111111'] | 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000011100111111 | unsolved | |
| 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010011111111 | ['00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110011111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110011111111'] | 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010011111111 | unsolved | |
| 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000110001111111 | ['00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110011111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110011111111'] | 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000110001111111 | unsolved | |
| 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010101111111 | ['00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000110101111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000110101111111'] | 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010101111111 | unsolved | |
| 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110001111111 | ['00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000110101111111'] | 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110001111111 | unsolved | |
| 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000110100111111 | ['00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000110101111111'] | 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000110100111111 | unsolved | |
Check [solvedDf]:
| | PREVIOUS_STATES | STATE | NEXT_STATES | RESULT |
|----------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|
| 00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111 | [''] | 00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111 | ['00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000011111111111', '00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000011111111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110111111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110111111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000111101111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000111101111111'] | |
| 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000011111111111 | ['00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111'] | 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000011111111111 | ['00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000001111111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010111111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010111111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000011101111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000011101111111'] | |
| 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110111111111 | ['00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111'] | 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110111111111 | ['00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010111111111', '00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010111111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110011111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000110101111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000110101111111'] | |
| 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000111101111111 | ['00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111'] | 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000111101111111 | ['00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000011101111111', '00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000011101111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110101111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110101111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000111100111111'] | |
| 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000001111111111 | ['00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000011111111111'] | 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000001111111111 | ['00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000111111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000111111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000001101111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000001101111111'] | |
| 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010111111111 | ['00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000011111111111', '00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000011111111111'] | 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010111111111 | ['00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000111111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010011111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000010101111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000010101111111'] | |
| 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000011101111111 | ['00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000011111111111', '00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000011111111111'] | 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000011101111111 | ['00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000001101111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010101111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010101111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000011100111111'] | |
| 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010111111111 | ['00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110111111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110111111111'] | 00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010111111111 | ['00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000111111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010011111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000010101111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000010101111111'] | |
| 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110011111111 | ['00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110111111111'] | 00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110011111111 | ['00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010011111111', '00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010011111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000110001111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000110001111111'] | |
| 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000110101111111 | ['00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110111111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110111111111'] | 00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000110101111111 | ['00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010101111111', '00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010101111111', '00000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000110001111111', '00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000110100111111'] | |
###Markdown
Output
###Code
# Mount Google Drive
from google.colab import drive
drive.mount('/content/drive')
# Output
# Write solvedDf out under the name ox_output
solvedDf.to_csv('/content/drive/My Drive/ox/workspace/ox_output.csv')
# Check ox_output
solvedDf = pd.read_csv(
"/content/drive/My Drive/ox/workspace/ox_output.csv",
    index_col=0, # the first row holds the column names
    encoding="cp932" # handles Windows-specific characters; treat it as boilerplate
)
print(solvedDf)
###Output
_____no_output_____ |
Time Series ANN & LSTM VIX.ipynb | ###Markdown
NN
###Code
# Note: the earlier cells of this notebook (imports and data preparation) are not included in this
# excerpt; the imports below are assumed (Keras and scikit-learn).
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.callbacks import EarlyStopping
from sklearn.metrics import r2_score

nn_model = Sequential()
nn_model.add(Dense(12, input_dim=1, activation='relu'))
nn_model.add(Dense(1))
nn_model.compile(loss='mean_squared_error', optimizer='adam')
early_stop = EarlyStopping(monitor='loss', patience=2, verbose=1)
history = nn_model.fit(X_train, y_train, epochs=100, batch_size=1, verbose=1, callbacks=[early_stop], shuffle=False)
y_pred_test_nn = nn_model.predict(X_test)
y_train_pred_nn = nn_model.predict(X_train)
print("The R2 score on the Train set is:\t{:0.3f}".format(r2_score(y_train, y_train_pred_nn)))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test, y_pred_test_nn)))
###Output
The R2 score on the Train set is: 0.897
The R2 score on the Test set is: 0.789
###Markdown
LSTM
###Code
train_sc_df = pd.DataFrame(train_sc, columns=['Y'], index=train.index)
test_sc_df = pd.DataFrame(test_sc, columns=['Y'], index=test.index)
for s in range(1,2):
train_sc_df['X_{}'.format(s)] = train_sc_df['Y'].shift(s)
test_sc_df['X_{}'.format(s)] = test_sc_df['Y'].shift(s)
X_train = train_sc_df.dropna().drop('Y', axis=1)
y_train = train_sc_df.dropna().drop('X_1', axis=1)
X_test = test_sc_df.dropna().drop('Y', axis=1)
y_test = test_sc_df.dropna().drop('X_1', axis=1)
# .as_matrix() was removed in newer pandas; .values returns the same NumPy arrays
X_train = X_train.values
y_train = y_train.values
X_test = X_test.values
y_test = y_test.values
X_train_lmse = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
X_test_lmse = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)
print('Train shape: ', X_train_lmse.shape)
print('Test shape: ', X_test_lmse.shape)
lstm_model = Sequential()
lstm_model.add(LSTM(7, input_shape=(1, X_train_lmse.shape[1]), activation='relu', kernel_initializer='lecun_uniform', return_sequences=False))
lstm_model.add(Dense(1))
lstm_model.compile(loss='mean_squared_error', optimizer='adam')
early_stop = EarlyStopping(monitor='loss', patience=2, verbose=1)
history_lstm_model = lstm_model.fit(X_train_lmse, y_train, epochs=100, batch_size=1, verbose=1, shuffle=False, callbacks=[early_stop])
y_pred_test_lstm = lstm_model.predict(X_test_lmse)
y_train_pred_lstm = lstm_model.predict(X_train_lmse)
print("The R2 score on the Train set is:\t{:0.3f}".format(r2_score(y_train, y_train_pred_lstm)))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test, y_pred_test_lstm)))
nn_test_mse = nn_model.evaluate(X_test, y_test, batch_size=1)
lstm_test_mse = lstm_model.evaluate(X_test_lmse, y_test, batch_size=1)
print('NN: %f'%nn_test_mse)
print('LSTM: %f'%lstm_test_mse)
nn_y_pred_test = nn_model.predict(X_test)
lstm_y_pred_test = lstm_model.predict(X_test_lmse)
plt.figure(figsize=(10, 6))
plt.plot(y_test, label='True')
plt.plot(y_pred_test_nn, label='NN')
plt.title("ANN's Prediction")
plt.xlabel('Observation')
plt.ylabel('Adj Close Scaled')
plt.legend()
plt.show();
plt.figure(figsize=(10, 6))
plt.plot(y_test, label='True')
plt.plot(y_pred_test_lstm, label='LSTM')
plt.title("LSTM's Prediction")
plt.xlabel('Observation')
plt.ylabel('Adj Close scaled')
plt.legend()
plt.show();
###Output
_____no_output_____ |
notebooks/old/Prorotype.ipynb | ###Markdown
int parse_document(char *line, WORD *words, double *label, long *queryid, long *slackid, double *costfactor, long int *numwords, long int max_words_doc, char **comment){ register long wpos,pos; long wnum; double weight; char featurepair[1000],junk[1000]; (*queryid)=0; (*slackid)=0; (*costfactor)=1; pos=0; (*comment)=NULL; while(line[pos] ) { /* cut off comments */ if((line[pos] == '') && (!(*comment))) { line[pos]=0; (*comment)=&(line[pos+1]); } if(line[pos] == '\n') { /* strip the CR */ line[pos]=0; } pos++; } if(!(*comment)) (*comment)=&(line[pos]); /* printf("Comment: '%s'\n",(*comment)); */ wpos=0; /* check, that line starts with target value or zero, but not with feature pair */ if(sscanf(line,"%s",featurepair) == EOF) return(0); pos=0; while((featurepair[pos] != ':') && featurepair[pos]) pos++; if(featurepair[pos] == ':') { perror ("Line must start with label or 0!!!\n"); printf("LINE: %s\n",line); exit (1); } /* read the target value */ if(sscanf(line,"%lf",label) == EOF) return(0); pos=0; while(space_or_null((int)line[pos])) pos++; while((!space_or_null((int)line[pos])) && line[pos]) pos++; while((pos+=read_word(line+pos,featurepair)) && (featurepair[0]) && (wpos<max_words_doc)) { /* printf("%s\n",featurepair); */ if(sscanf(featurepair,"qid:%ld%s",&wnum,junk)==1) { /* it is the query id */ (*queryid)=(long)wnum; } else if(sscanf(featurepair,"sid:%ld%s",&wnum,junk)==1) { /* it is the slack id */ if(wnum > 0) (*slackid)=(long)wnum; else { perror ("Slack-id must be greater or equal to 1!!!\n"); printf("LINE: %s\n",line); exit (1); } } else if(sscanf(featurepair,"cost:%lf%s",&weight,junk)==1) { /* it is the example-dependent cost factor */ (*costfactor)=(double)weight; } else if(sscanf(featurepair,"%ld:%lf%s",&wnum,&weight,junk)==2) { /* it is a regular feature */ if(wnum<=0) { perror ("Feature numbers must be larger or equal to 1!!!\n"); printf("LINE: %s\n",line); exit (1); } if((wpos>0) && ((words[wpos-1]).wnum >= wnum)) { perror ("Features must be in increasing order!!!\n"); printf("LINE: %s\n",line); exit (1); } (words[wpos]).wnum=wnum; (words[wpos]).weight=(FVAL)weight; wpos++; } else { perror ("Cannot parse feature/value pair!!!\n"); printf("'%s' in LINE: %s\n",featurepair,line); exit (1); } } (words[wpos]).wnum=0; (*numwords)=wpos+1; return(1);} if((wpos>0) && ((words[wpos-1]).wnum >= wnum)) { perror ("Features must be in increasing order!!!\n"); printf("LINE: %s\n",line); exit (1); }
###Code
# Okay, so this is where we're falling down..
# The position is the pointer on the current line for parsing the file..
# and wpos is the word position..
# so the conditional we're failing @ is: if the word position is > 0 and the value (struct, I assume) at some
# memory location (representing the final word)s wnum value is >= wnum, then we exit..
# now.. what's wnum..?
# I'm half tempted to just remove this conditional and recompile, lol
# it looks like wnum is the (temporarily) binary property that we're using to say that this feature is present
# within this data point, it's weird because each word has a wnum value and there appears to be some kind of
# global wnum..
# I believe.. we can just 'naughtily' remove this conditional from the source code and recompile..
# because I really don't see what I'm doing wrong.
# jesus, now features have to start from 1, not zero, kill me lord
training_dict_of_tokens
# okay, so if I re-introduce the conditional into the source code for the SVM, it doesn't run
# I believe it's because the feature values (on the left hand side) are not increaseing from 1 to 14
# so... let's make some changes and see if modifying that increases the accuracy from ~61%
training_set_compart
with codecs.open("./feats.train", "r", "UTF-8") as file:
with codecs.open("./feats.train.ordered", "w", "UTF-8") as write_file:
for line in sorted(file.readlines(), key=lambda line: int(line.split()[0])):
if len(line) > 1:
write_file.write(line)
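# A hedged sketch (added; not part of the original notebook): SVM-light expects each line's
# feature indices to start at 1 and to appear in strictly increasing order. The block above only
# reorders whole lines, so a per-line rewrite along these lines may be what is actually needed.
# Assumes plain "label index:value ..." lines (no qid:/cost: fields); the output name
# "feats.train.fixed" is just an illustrative choice.
import codecs
with codecs.open("./feats.train", "r", "UTF-8") as file:
    with codecs.open("./feats.train.fixed", "w", "UTF-8") as write_file:
        for line in file:
            parts = line.split()
            if not parts:
                continue
            label, pairs = parts[0], parts[1:]
            # parse "index:value" pairs, sort them by index, and shift indices so the smallest is 1
            feats = sorted((int(p.split(":")[0]), p.split(":")[1]) for p in pairs)
            offset = 1 - feats[0][0] if feats and feats[0][0] < 1 else 0
            fixed = " ".join("{}:{}".format(idx + offset, val) for idx, val in feats)
            write_file.write("{} {}\n".format(label, fixed))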
'newest' in training_dict_of_tokens.values()
training_set_compart_pre
###Output
_____no_output_____ |
examples/neural_network_inference/Neural_network_control_flow_power_iteration.ipynb | ###Markdown
In CoreML Neural Network Specification version 4 (which is available from iOS 13 and MacOS 10.15), several "control-flow" layers have been added. CoreML spec is described in the protobuf format and for a list of all supported layer types and documentation, see [here](https://github.com/apple/coremltools/blob/master/mlmodel/format/NeuralNetwork.proto).In this notebook, we build a neural network that uses a few of the new control flow layers. We will write a simple python program to compute the largest eigenvalue of a given matrix and then show how a neural network can be built to replicate that program in an mlmodel.We choose the [power iteration method](https://en.wikipedia.org/wiki/Power_iteration). It is a simple iterative algorithm. Given a square matrix, $A$ of dimensions $n\times n$, it computes the largest eigenvalue (by magnitude) and the corresponding eigenvector (the algorithm can be adapted to compute all the eigenvalues, however we do not implement that here). Here is how the algorithm works. Pick a normalized random vector to start with, $x$, of dimension $n$. Repetitively, multiply it by the matrix and normalize it, i.e., $x\leftarrow Ax$ and $x\leftarrow \frac{x}{\left \| x \right \|}$. Gradually the vector converges to the largest eigenvector. Simple as that! There are a few conditions that the matrix should satisfy for this to happen, but let us not worry about it for this example. For now we will assume that the matrix is real and symmetric, this guarantees the eigenvalues to be real. After we have the normalized eigenvector, the corresponding eigenvalue can be computed by the formula $x^TAx$ Let's code this up in Python using Numpy!
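For quick reference, each iteration performs the update $$x \leftarrow \frac{Ax}{\lVert Ax \rVert},$$ and once $x$ has (approximately) converged the eigenvalue estimate is the Rayleigh quotient $$\lambda \approx x^{T}Ax,$$ which is what the code below implements.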
###Code
import numpy as np
import copy
np.random.seed(8) # try different seeds to play with the number of iterations it takes for convergence!
'''
Use power method to compute the largest eigenvalue of a real symmetric matrix
'''
convergence_tolerance = 1e-6 # decrease/increase to trade off precision
number_of_iterations = 100 # decrease/increase to trade off precision
def power_iteration(matrix, starting_vector):
x = copy.deepcopy(starting_vector)
for i in range(number_of_iterations):
y = np.matmul(A,x)
#normalize
y = y / np.sqrt(np.sum(y**2))
# compute the diff to check for convergence
# we use cosine difference as both vectors are normalized and can get
# rotated by 180 degrees between iterations
diff = 1-abs(np.dot(x,y))
# update x
x = y
print('{}: diff: {}'.format(i, diff))
if diff < convergence_tolerance:
break
x_t = np.transpose(x)
eigen_value = np.matmul(x_t, np.matmul(A,x))
return eigen_value, x
# define the symmetric real matrix for which we need the eigenvalue.
A = np.array([[4,-5], [-5,3]], dtype=np.float)
# a random starting vector
starting_vector = np.random.rand(2)
starting_vector = starting_vector / np.sqrt(np.sum(starting_vector**2)) ## normalize it
eigen_value, eigen_vector = power_iteration(A, starting_vector)
print('Largest eigenvalue: %.4f ' % eigen_value)
print('Corresponding eigenvector: ', eigen_vector)
###Output
0: diff: 6.69187030143e-05
1: diff: 0.00208718410489
2: diff: 0.0614522880272
3: diff: 0.771617699317
4: diff: 0.193129218664
5: diff: 0.0075077446807
6: diff: 0.000241962094403
7: diff: 7.74407193072e-06
8: diff: 2.47796068775e-07
Largest eigenvalue: 8.5249
('Corresponding eigenvector: ', array([-0.74152421, 0.67092611]))
###Markdown
We see that in this case, the algorithm converged, given our specified tolerance, in 9 iterations. To confirm whether the eigenvalue is correct, let's use the "linalg" sub-package of numpy.
###Code
from numpy import linalg as LA
e, v = LA.eig(A)
idx = np.argmax(abs(e))
print('numpy linalg: largest eigenvalue: %.4f ' % e[idx])
print('numpy linalg: first eigenvector: ', v[:,idx])
###Output
numpy linalg: largest eigenvalue: 8.5249
('numpy linalg: first eigenvector: ', array([ 0.74145253, -0.67100532]))
###Markdown
Indeed we see that the eigenvalue matches our power iteration code. The eigenvector is rotated by 180 degrees, but that is fine. Now, let's build an mlmodel to do the same. We use the builder API provided by coremltools to write out the protobuf messages.
###Code
import coremltools
import coremltools.models.datatypes as datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder
input_features = [('matrix', datatypes.Array(*(2,2))),
('starting_vector', datatypes.Array(*(2,)))]
output_features = [('maximum_eigen_value', datatypes.Array(*(1,))),
('eigen_vector', None),
('iteration_count', datatypes.Array(*(1,)))]
builder = NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
# convert the starting_vector which has shape (2,) to shape (2,1)
# so that it can be used by the Batched-MatMul layer
builder.add_expand_dims('expand_dims', 'starting_vector', 'x', axes=[-1])
builder.add_load_constant_nd('iteration_count', 'iteration_count',
constant_value=np.zeros((1,)),
shape=(1,))
# start building the loop
loop_layer = builder.add_loop('loop', max_iterations=number_of_iterations)
# get the builder object for the "body" of the loop
loop_body_builder = NeuralNetworkBuilder(nn_spec=loop_layer.loop.bodyNetwork)
# matrix multiply
# input shapes: (n,n),(n,1)
# output shape: (n,1)
loop_body_builder.add_batched_mat_mul('bmm.1', input_names=['matrix','x'], output_name='y')
# normalize the vector
loop_body_builder.add_reduce_l2('reduce', input_name='y', output_name='norm', axes = [0])
loop_body_builder.add_divide_broadcastable('divide', ['y','norm'], 'y_normalized')
# find difference with previous, which is computed as (1 - abs(cosine diff))
loop_body_builder.add_batched_mat_mul('cosine', ['y_normalized', 'x'], 'cosine_diff', transpose_a=True)
loop_body_builder.add_unary('abs_cosine','cosine_diff','abs_cosine_diff', mode='abs')
loop_body_builder.add_activation('diff', non_linearity='LINEAR',
input_name='abs_cosine_diff',
output_name='diff', params=[-1,1])
# update iteration count
loop_body_builder.add_activation('iteration_count_add', non_linearity='LINEAR',
input_name='iteration_count',
output_name='iteration_count_plus_1', params=[1,1])
loop_body_builder.add_copy('iteration_count_update', 'iteration_count_plus_1', 'iteration_count')
# update 'x'
loop_body_builder.add_copy('update_x', 'y_normalized', 'x')
# add condition to break from the loop, if convergence criterion is met
loop_body_builder.add_less_than('cond', ['diff'], 'cond', alpha=convergence_tolerance)
branch_layer = loop_body_builder.add_branch('branch_layer', 'cond')
builder_ifbranch = NeuralNetworkBuilder(nn_spec=branch_layer.branch.ifBranch)
builder_ifbranch.add_loop_break('break')
# now we are out of the loop, compute the eigenvalue
builder.add_batched_mat_mul('bmm.2', input_names=['matrix','x'], output_name='x_right')
builder.add_batched_mat_mul('bmm.3', input_names=['x','x_right'], output_name='maximum_eigen_value', transpose_a=True)
builder.add_squeeze('squeeze', 'x', 'eigen_vector', squeeze_all=True)
spec = builder.spec
model = coremltools.models.MLModel(spec)
###Output
_____no_output_____
###Markdown
Okay, so now we have the mlmodel spec. Before we call predict on it, let's print it out to check whether everything looks okay. We use the utility called "print_network_spec"
###Code
from coremltools.models.neural_network.printer import print_network_spec
print_network_spec(spec, style='coding')
# call predict on CoreML model
input_dict = {}
input_dict['starting_vector'] = starting_vector
input_dict['matrix'] = A.astype(np.float)
output = model.predict(input_dict)
coreml_eigen_value = output['maximum_eigen_value']
coreml_eigen_vector = output['eigen_vector']
print('CoreML computed eigenvalue: %.4f' % coreml_eigen_value)
print('CoreML computed eigenvector: ', coreml_eigen_vector, coreml_eigen_vector.shape)
print('CoreML iteration count: %d' % output['iteration_count'])
###Output
CoreML computed eigenvalue: 8.5249
('CoreML computed eigenvector: ', array([-0.74152416, 0.67092603]), (2,))
CoreML iteration count: 9
###Markdown
Indeed the output matches our python program. Although we do not do it here, the parameters "convergence_tolerance" and "number_of_iterations" can be made network inputs, so that their values can be modified at runtime. Currently, the input shapes to the Core ML model are fixed, $(2, 2)$ for the matrix and $(2,)$ for the starting vector. However, we can add shape flexibility so that the same mlmodel can be run on different input sizes. There are two ways to specify shape flexibility, either through "ranges" or via a list of "enumerated" shapes. Here we specify the latter.
###Code
from coremltools.models.neural_network import flexible_shape_utils
# (2,2) has already been provided as the default shape for "matrix"
# during initialization of the builder,
# here we add two more shapes that will be allowed at runtime
flexible_shape_utils.add_multiarray_ndshape_enumeration(spec,
feature_name='matrix',
enumerated_shapes=[(3,3), (4,4)])
# (2,) has already been provided as the default shape for "matrix"
# during initialization of the builder,
# here we add two more shapes that will be allowed at runtime
flexible_shape_utils.add_multiarray_ndshape_enumeration(spec,
feature_name='starting_vector',
enumerated_shapes=[(3,), (4,)])
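# As an aside (my addition): the range-based alternative mentioned above would look roughly
# like the lines below. They are commented out because we already attached enumerated shapes
# to these inputs, and the exact helper name/signature may differ between coremltools versions.
# flexible_shape_utils.set_multiarray_ndshape_range(spec, feature_name='matrix',
#                                                   lower_bounds=[2, 2], upper_bounds=[10, 10])
# flexible_shape_utils.set_multiarray_ndshape_range(spec, feature_name='starting_vector',
#                                                   lower_bounds=[2], upper_bounds=[10])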
model = coremltools.models.MLModel(spec)
# lets run the model with a (3,3) matrix
A = np.array([[1, -6, 8], [-6, 1, 5], [8, 5, 1]], dtype=np.float)
starting_vector = np.random.rand(3)
starting_vector = starting_vector / np.sqrt(np.sum(starting_vector**2)) ## normalize it
eigen_value, eigen_vector = power_iteration(A, starting_vector)
print('python code: largest eigenvalue: %.4f ' % eigen_value)
print('python code: corresponding eigenvector: ', eigen_vector)
from numpy import linalg as LA
e, v = LA.eig(A)
idx = np.argmax(abs(e))
print('numpy linalg: largest eigenvalue: %.4f ' % e[idx])
print('numpy linalg: first eigenvector: ', v[:,idx])
input_dict['starting_vector'] = starting_vector
input_dict['matrix'] = A.astype(np.float)
output = model.predict(input_dict)
coreml_eigen_value = output['maximum_eigen_value']
coreml_eigen_vector = output['eigen_vector']
print('CoreML computed eigenvalue: %.4f' % coreml_eigen_value)
print('CoreML computed eigenvector: ', coreml_eigen_vector, coreml_eigen_vector.shape)
print('CoreML iteration count: %d' % output['iteration_count'])
###Output
CoreML computed eigenvalue: -11.7530
('CoreML computed eigenvector: ', array([ 0.61622757, 0.52125645, -0.59038568]), (3,))
CoreML iteration count: 30
|
MaPeCode_Notebooks/21. Arbol regresion.ipynb | ###Markdown
Regression Trees
###Code
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import KFold, cross_val_score
import numpy as np
data = pd.read_csv("../datasets/boston/Boston.csv")
data.head()
data.shape
colnames = data.columns.values.tolist()
predictors = colnames[:13]
target = colnames[13]
X = data[predictors]
Y = data[target]
regtree = DecisionTreeRegressor(min_samples_split=30, min_samples_leaf=10, max_depth=5, random_state=0)
regtree.fit(X,Y)
data["preds"] = regtree.predict(data[predictors])
data[["preds", "medv"]]
###Output
_____no_output_____ |
Interview Preparation Kit/7. Search/Pairs.ipynb | ###Markdown
Pairs
###Code
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the pairs function below.
def pairs(k, arr):
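    # Count how many times each value occurs, then for each element check whether
    # the value shifted by k is present in the dictionary -- a single O(n) pass
    # instead of comparing every pair.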
d= dict()
for i in arr:
d[i] = d.get(i, 0) + 1
res = 0
    for i in range(len(arr)):
val = k + arr[i]
if d.get(val):
res += 1
return res
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nk = input().split()
n = int(nk[0])
k = int(nk[1])
arr = list(map(int, input().rstrip().split()))
result = pairs(k, arr)
fptr.write(str(result) + '\n')
fptr.close()
###Output
_____no_output_____ |
salomon_exp/Finetuning_1.ipynb | ###Markdown
Test Time Augmentation TTA
###Code
# !pip install git+https://github.com/qubvel/ttach
import ttach as tta
transforms = tta.Compose(
[
tta.HorizontalFlip(),
tta.Rotate90(angles=[0, 180]),
# tta.Scale(scales=[1, 2, 4]),
# tta.Multiply(factors=[0.9, 1, 1.1]),
]
)
tta_model = tta.ClassificationTTAWrapper(model, transforms)
loss_val, acc_val = validate(valid_loader, tta_model, criterion, optimizer, 5)
# # #---------------------------------------------
# lr = 2e-4 # 0.001
# criterion = nn.CrossEntropyLoss()
# optimizer = optim.Adam(model.parameters(), lr=lr)
# epoch_num = 5
# best_val_acc = 0.88
# total_loss_val, total_acc_val = [],[]
# for epoch in range(1, epoch_num+1):
# loss_train, acc_train = train(train_loader, tta_model, criterion, optimizer, epoch)
# loss_val, acc_val = validate(valid_loader, tta_model, criterion, optimizer, epoch)
# total_loss_val.append(loss_val)
# total_acc_val.append(acc_val)
# if acc_val > best_val_acc:
# best_val_acc = acc_val
# torch.save(model.state_dict(), model_name+'freeze_'+str(best_val_acc)[:4]+'.ckpt')
# print('*****************************************************')
# print('best record: [epoch %d], [val loss %.5f], [val acc %.5f]' % (epoch, loss_val, acc_val))
# print('*****************************************************')
# # tta_model = tta.ClassificationTTAWrapper(model, tta.aliases.five_crop_transform())
# tta_model
loss_val, acc_val = validate(train_loader, tta_model, criterion, optimizer, epoch)
###Output
_____no_output_____
###Markdown
Pseudo Labelling
###Code
T1 = 100
T2 = 700
af = 3
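# alpha_weight ramps the weight of the pseudo-label loss from 0 up to af between
# steps T1 and T2, so the unlabeled loss only kicks in gradually (the usual
# pseudo-labelling schedule this notebook follows).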
def alpha_weight(epoch):
if epoch < T1:
return 0.0
elif epoch > T2:
return af
else:
return ((epoch-T1) / (T2-T1))*af
best_val_acc
total_loss_train, total_acc_train = [],[]
def semi_superv_train(train_loader, model, criterion, optimizer, unlabeled_loader, valid_loader, epoch):
model.eval()
val_loss = AverageMeter()
val_acc = AverageMeter()
with torch.no_grad():
for i, data in enumerate(unlabeled_loader):
            images, labels = data
N = images.size(0)
images = Variable(images).to(device)
labels = Variable(labels).to(device)
outputs = model(images)
prediction = outputs.max(1, keepdim=True)[1]
val_acc.update(prediction.eq(labels.view_as(prediction)).sum().item()/N)
val_loss.update(criterion(outputs, labels).item())
print('------------------------------------------------------------')
print('[epoch %d], [val loss %.5f], [val acc %.5f]' % (epoch, val_loss.avg, val_acc.avg))
print('------------------------------------------------------------')
return val_loss.avg, val_acc.avg
model.train()
train_loss = AverageMeter()
train_acc = AverageMeter()
curr_iter = (epoch - 1) * len(train_loader)
for i, data in enumerate(unlabeled_loader):
images, labels = data
N = images.size(0)
# print('image shape:',images.size(0), 'label shape',labels.size(0))
images = Variable(images).to(device)
# labels = Variable(labels).to(device)
optimizer.zero_grad()
outputs = model(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
prediction = outputs.max(1, keepdim=True)[1]
train_acc.update(prediction.eq(labels.view_as(prediction)).sum().item()/N)
train_loss.update(loss.item())
curr_iter += 1
if (i + 1) % 100 == 0:
print('[epoch %d], [iter %d / %d], [train loss %.5f], [train acc %.5f]' % (
epoch, i + 1, len(train_loader), train_loss.avg, train_acc.avg))
total_loss_train.append(train_loss.avg)
total_acc_train.append(train_acc.avg)
return train_loss.avg, train_acc.avg
# Concept from : https://github.com/peimengsui/semi_supervised_mnist
from tqdm.notebook import tqdm
acc_scores = []
unlabel = []
pseudo_label = []
alpha_log = []
test_acc_log = []
test_loss_log = []
best_val_acc = 0.87
def semisup_train(train_loader, model, criterion, optimizer, unlabeled_loader, valid_loader, epoch):
# optimizer = torch.optim.SGD(model.parameters(), lr = 0.1)
# EPOCHS = 5
# Instead of using current epoch we use a "step" variable to calculate alpha_weight
# This helps the model converge faster
step = 100
model.train()
# for epoch in tqdm(range(EPOCHS)):
for epoch in range(epoch):
# for batch_idx, x_unlabeled in enumerate(unlabeled_loader):
for i, x_unlabeled in enumerate(unlabeled_loader):
# Forward Pass to get the pseudo labels
x_unlabeled = x_unlabeled[0].to(device)
model.eval()
with torch.no_grad():
output_unlabeled = model(x_unlabeled)
pseudo_labeled = output_unlabeled.max(1, keepdim=True)[1]
model.train()
# Now calculate the unlabeled loss using the pseudo label
output = model(x_unlabeled)
pseudo_labeled = Variable(pseudo_labeled).to(device)
output = Variable(output).to(device)
            unlabeled_loss = alpha_weight(step) * F.nll_loss(output, pseudo_labeled.squeeze(1))
# Backpropogate
optimizer.zero_grad()
unlabeled_loss.backward()
optimizer.step()
# For every 50 batches train one epoch on labeled data
if i % 50 == 0:
# Normal training procedure
for batch_idx, (X_batch, y_batch) in enumerate(train_loader):
X_batch = Variable(X_batch).to(device)
y_batch = Variable(y_batch).to(device)
output = model(X_batch)
labeled_loss = F.nll_loss(output, y_batch)
optimizer.zero_grad()
labeled_loss.backward()
optimizer.step()
# Now we increment step by 1
step += 1
        loss_val, acc_val = validate(valid_loader, model, criterion, optimizer, epoch) # evaluate(model, test_loader)
        print('Epoch: {} : Alpha Weight : {:.5f} | Val Acc : {:.5f} | Val Loss : {:.3f} '.format(epoch, alpha_weight(step), acc_val, loss_val))
        if acc_val > best_val_acc:
best_val_acc = acc_val
torch.save(model.state_dict(), model_name+'freeze_'+str(best_val_acc)[:4]+'.ckpt')
print('*****************************************************')
print('best record: [epoch %d], [val loss %.5f], [val acc %.5f]' % (epoch, loss_val, acc_val))
print('*****************************************************')
# """ LOGGING VALUES """
# alpha_log.append(alpha_weight(step))
# test_acc_log.append(test_acc/100)
# test_loss_log.append(test_loss)
# """ ************** """
model.train()
semisup_train(train_loader, model, criterion, optimizer, unlabeled_loader, valid_loader, 5)
# Concept from : https://github.com/peimengsui/semi_supervised_mnist
from tqdm.notebook import tqdm
acc_scores = []
unlabel = []
pseudo_label = []
alpha_log = []
test_acc_log = []
test_loss_log = []
best_val_acc = 0.87
def semisup_train(model, train_loader, unlabeled_loader, val_loader):
# optimizer = torch.optim.SGD(model.parameters(), lr = 0.1)
EPOCHS = 5
# Instead of using current epoch we use a "step" variable to calculate alpha_weight
# This helps the model converge faster
step = 100
model.train()
# for epoch in tqdm(range(EPOCHS)):
for epoch in range(EPOCHS):
for batch_idx, x_unlabeled in enumerate(unlabeled_loader):
# Forward Pass to get the pseudo labels
x_unlabeled = x_unlabeled[0].to(device)
model.eval()
output_unlabeled = model(x_unlabeled)
_, pseudo_labeled = torch.max(output_unlabeled, 1)
model.train()
""" ONLY FOR VISUALIZATION"""
if (batch_idx < 3) and (epoch % 10 == 0):
unlabel.append(x_unlabeled.cpu())
pseudo_label.append(pseudo_labeled.cpu())
""" ********************** """
# Now calculate the unlabeled loss using the pseudo label
output = model(x_unlabeled)
unlabeled_loss = alpha_weight(step) * criterion(output, pseudo_labeled)
# Backpropogate
optimizer.zero_grad()
unlabeled_loss.backward()
optimizer.step()
# For every 2 batches train one epoch on labeled data
if batch_idx % 2 == 0:
# Normal training procedure
for batch_idx, (X_batch, y_batch) in enumerate(train_loader):
X_batch = X_batch.to(device)
y_batch = y_batch.to(device)
output = model(X_batch)
labeled_loss = criterion(output, y_batch)
optimizer.zero_grad()
labeled_loss.backward()
optimizer.step()
# Now we increment step by 1
step += 1
            loss_val, acc_val = validate(val_loader, model, criterion, optimizer, epoch) # evaluate(model, test_loader)
            print('Epoch: {} : Alpha Weight : {:.5f} | Val Acc : {:.5f} | Val Loss : {:.3f} '.format(epoch, alpha_weight(step), acc_val, loss_val))
            if acc_val > best_val_acc:
                best_val_acc = acc_val
torch.save(model.state_dict(), model_name+'freeze_'+str(best_val_acc)[:4]+'.ckpt')
print('*****************************************************')
print('best record: [epoch %d], [val loss %.5f], [val acc %.5f]' % (epoch, loss_val, acc_val))
print('*****************************************************')
""" LOGGING VALUES """
alpha_log.append(alpha_weight(step))
            test_acc_log.append(acc_val)
            test_loss_log.append(loss_val)
""" ************** """
model.train()
semisup_train(model, train_loader, unlabeled_loader, valid_loader)
total_loss_train, total_acc_train = [],[]
def train(train_loader, model, criterion, optimizer, unlabeled_loader, valid_loader, epoch):
model.train()
train_loss = AverageMeter()
train_acc = AverageMeter()
curr_iter = (epoch - 1) * len(train_loader)
for i, data in enumerate(train_loader):
images, labels = data
N = images.size(0)
# print('image shape:',images.size(0), 'label shape',labels.size(0))
images = Variable(images).to(device)
labels = Variable(labels).to(device)
optimizer.zero_grad()
outputs = model(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
prediction = outputs.max(1, keepdim=True)[1]
train_acc.update(prediction.eq(labels.view_as(prediction)).sum().item()/N)
train_loss.update(loss.item())
curr_iter += 1
if (i + 1) % 100 == 0:
print('[epoch %d], [iter %d / %d], [train loss %.5f], [train acc %.5f]' % (
epoch, i + 1, len(train_loader), train_loss.avg, train_acc.avg))
total_loss_train.append(train_loss.avg)
total_acc_train.append(train_acc.avg)
return train_loss.avg, train_acc.avg
for _,label in
###Output
_____no_output_____
###Markdown
Submission
###Code
class_names = {0:'cbsd', 1: 'cgm', 2: 'cbb', 3: 'healthy', 4: 'cmd'}
def process_image(image_dir):
# Process a PIL image for use in a PyTorch model
# tensor.numpy().transpose(1, 2, 0)
image = Image.open(image_dir)
preprocess = transforms.Compose([ transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=mean,std=std)])
image = preprocess(image)
# Convert 2D image to 1D vector
image = np.expand_dims(image, 0)
image = torch.from_numpy(image)
inputs = image.to(device)
return inputs
# Using our model to predict the label
def predict(image, model):
# Pass the image through our model
output = model(image)
# Reverse the log function in our output
output = torch.exp(output)
# Get the top predicted class, and the output percentage for
# that class
probs, classes = output.topk(1, dim=1)
return probs.item(), classes.item()
test_directory = "./data/test/test/0"
predictions, test_image_fileName = [], []
try:
test_images = listdir(test_directory)
for images in test_images:
test_image_fileName.append(images)
image = process_image(f'{test_directory}/{images}')
top_prob, top_class = predict(image, model)
predictions.append(class_names[top_class])
except Exception as e:
print(e)
print("[INFO] Creating pandas dataframe")
submission_data = {"Category":predictions,"Id":test_image_fileName,}
submission_data_frame = pd.DataFrame(submission_data)
submission_data_frame.head()
submission_data_frame.to_csv('submission'+model_name+'_freeze_86_flip.csv', header=True, index=False)
###Output
_____no_output_____ |
experiments/spamClassifier.ipynb | ###Markdown
Only reading in one email for now
###Code
enroncsv = "../experiments/data/enron.csv"
metadataHeaders = '../experiments/data/metadataHeaders.csv'
spam = '../experiments/data/spam.csv'
spamSubjects = '../experiments/data/spamWords.csv'
import pandas as pd
columns = pd.read_csv(metadataHeaders, sep=',').columns.tolist()
df = pd.read_csv(enroncsv, names=columns, sep='|', low_memory=False)
print("-- DONE --")
removableColumns = pd.read_csv('data/removableColumns.csv', sep=',').columns.tolist()
df.drop(removableColumns, axis=1)
print('-- DONE --')
listOfEmailsForBagOfWords = df.loc[df['Directory'].str.contains('inbox')]
print('-- DONE --')
wordsPerEmail = df[pd.notnull(df['Subject'])]
nonSpamSubjects = wordsPerEmail['Subject']
wordsPerEmail = nonSpamSubjects.str.split(' ')
len(wordsPerEmail)
import string
printable = set(string.printable)
def isEnglish(s):
for x in s:
if x not in printable:
return False
return True
def addToBagOfWords(dictionary, arr):
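    # Normalize each token: lowercase it, skip tokens containing characters such as
    # '=', '/', '\', '_', '-', ':', '@' or '#', collapse anything containing '$' to '$',
    # strip remaining punctuation, and count how often each surviving word occurs.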
for word in arr:
word = word.lower()
if '=' in word:
continue
if '/' in word:
continue
if '\\' in word:
continue
if '_' in word:
continue
if '-' in word:
continue
if ':' in word:
continue
if '@' in word:
continue
if '#' in word:
continue
if '$' in word:
word = '$'
word = word.replace('.', '')
word = word.replace(')', '')
word = word.replace('(', '')
word = word.replace('&', '')
word = word.replace('\'', '')
word = word.replace('\"', '')
word = word.replace(',', '')
word = word.replace('[', '')
word = word.replace(']', '')
word = word.replace('{', '')
word = word.replace('}', '')
word = word.replace(';', '')
if word == '':
continue
if word in dictionary:
dictionary[word] = dictionary[word] + 1
else:
dictionary[word] = 1
return dictionary
def addToDictionary(dictionary, arr):
for index, subject in arr.iteritems():
dictionary = addToBagOfWords(dictionary, subject)
return dictionary
dictionary = {}
dictionary = addToDictionary(dictionary, wordsPerEmail)
len(dictionary)
spamSubjects = pd.read_csv(spam, names=['Subject'], sep=',')
wordsPerEmail = spamSubjects['Subject'].str.split(' ')
dictionary = addToDictionary(dictionary, wordsPerEmail)
len(dictionary)
dictionaryList = []
for key in dictionary:
dictionaryList.append([key, dictionary[key]])
dictionaryList = pd.DataFrame(dictionaryList, columns=["Word", "Occurances"])
len(dictionaryList)
###Output
_____no_output_____
###Markdown
We will only use words that occurred more than 30 times in our bag of words as other words would almost never match up anyway
###Code
dictionaryList = dictionaryList.loc[dictionaryList['Occurances'] > 30].reset_index(drop=True)
len(dictionaryList)
def addBagToInputOutput(inputItems, outputItems, arr):
for index, subject in inputItems.iteritems():
posBagOfWords = {}
posBagOfWords = addToBagOfWords(posBagOfWords, subject)
listOfIn = []
for key in posBagOfWords:
listOfIn.append(key)
inputBag = dictionaryList['Word'].isin(listOfIn).values.tolist()
arr.append([np.array(inputBag), np.array(outputItems)])
return arr
###Output
_____no_output_____
###Markdown
Create training data
###Code
import matplotlib.pyplot as plt
import numpy as np
training = []
NON_SPAM_SAMPLES = 5000
SPAM_SAMPLES = 5000
trainingPos = df[pd.notnull(df['Subject'])]
trainingPos = trainingPos.sample(NON_SPAM_SAMPLES)
trainingPos = trainingPos['Subject']
trainingPosSplit = trainingPos.str.split(' ')
training = addBagToInputOutput(trainingPosSplit, [0], training)
trainingNeg = spamSubjects.sample(SPAM_SAMPLES)
trainingNeg = trainingNeg['Subject']
trainingNegSplit = trainingNeg.str.split(' ')
training = addBagToInputOutput(trainingNegSplit, [1], training)
training = np.array(training)
np.random.shuffle(training)
len(training)
###Output
_____no_output_____
###Markdown
Let's set up a neural network with the following neurons on each layer: input - 5558 + bias, hidden layer 1 - 300 + bias, hidden layer 2 - 30 + bias, output - 1. The learning rate is set to 0.001. The network weights are bound to (-2, 2).
###Code
from mlpy.numberGenerator.bounds import Bounds
from mlpy.neuralNetwork.feedForwardNeuralNetwork import NeuralNetwork
from mlpy.neuralNetwork.structure.layer import Layer
l_rate = 0.001
bounds = Bounds(-2, 2)
inputLayer = Layer(bounds, size = len(training[0][0]), prev = None, l_rate = l_rate, bias = True, label = "Input layer")
hiddenLayer = Layer(bounds, size = 300, prev = inputLayer, l_rate = l_rate, bias = True, label = "Hidden layer")
hiddenLayer2 = Layer(bounds, size = 30, prev = hiddenLayer, l_rate = l_rate, bias = True, label = "Hidden layer 2")
outputLayer = Layer(bounds, size = len(training[0][1]), prev = hiddenLayer2, l_rate = l_rate, bias = False, label = "Output layer")
fnn = NeuralNetwork()
fnn.appendLayer(inputLayer)
fnn.appendLayer(hiddenLayer)
fnn.appendLayer(hiddenLayer2)
fnn.appendLayer(outputLayer)
group_training = np.array([input[0] for input in training])
group_target = np.array([output[1] for output in training])
errors = []
###Output
_____no_output_____
###Markdown
We will run training over 8000 iterations and output the mean error every 50 iterations.
###Code
ITERATIONS = 8000
print("Starting..")
for i in range(ITERATIONS):
mod = i % len(training)
in_out = training[mod]
result = fnn.fire(group_training)
error = fnn.backPropagation(group_target)
if i % 50 == 0:
print(str(np.round(i/ITERATIONS*100)) + '%\t', error.mean())
print("-- DONE --")
###Output
Starting..
0.0% -0.12995725726515203
1.0% -0.21724487662987976
1.0% -0.18667304135646745
2.0% -0.154810128458398
2.0% -0.13319424540570068
3.0% -0.11482310680891214
4.0% -0.10200137026296527
4.0% -0.09131738005633179
5.0% -0.08177446592544309
6.0% -0.07356384744998444
6.0% -0.06472169480361051
7.0% -0.056788084025721296
8.0% -0.05390726930064169
8.0% -0.05214460293985674
9.0% -0.04926056579967013
9.0% -0.046819271107435896
10.0% -0.04360912417521026
11.0% -0.03953216966816548
11.0% -0.034343089128355986
12.0% -0.028134008620796118
12.0% -0.022479422292637174
13.0% -0.018752070351201765
14.0% -0.014828378816415114
14.0% -0.009150373463211663
15.0% -0.004839548813960415
16.0% -0.003110097369714201
16.0% -0.002767333887769694
17.0% -0.002777686060159787
18.0% -0.002862039866603216
18.0% -0.0029086166534290085
19.0% -0.0028686689112518357
19.0% -0.0027967610791959138
20.0% -0.0027035285623649334
21.0% -0.002669235427740258
21.0% -0.002716969116087496
22.0% -0.0028705231707788813
22.0% -0.002823455610468195
23.0% -0.00279034224904821
24.0% -0.0027777263149955226
24.0% -0.0028138742234375537
25.0% -0.0029494982141474997
26.0% -0.0031635396069228045
26.0% -0.003272504248318063
27.0% -0.003248178515255792
28.0% -0.0031899507723966466
28.0% -0.0031347994504770846
29.0% -0.003166854035949554
29.0% -0.0032252218254010044
30.0% -0.0032827878026197013
31.0% -0.0033388730774651125
31.0% -0.0033859723955635035
32.0% -0.003427195709314436
32.0% -0.0034454593846064752
33.0% -0.0034172965857931573
34.0% -0.0034839070461295477
34.0% -0.003567664395593532
35.0% -0.003655396827364685
36.0% -0.003755592805128952
36.0% -0.0038682923938287346
37.0% -0.004000206091561236
38.0% -0.004108928511617719
38.0% -0.004126024113292501
39.0% -0.004124974148298702
39.0% -0.004154021838255315
40.0% -0.004200461355393985
41.0% -0.004194888264219212
41.0% -0.004183185783848195
42.0% -0.004222115502592257
42.0% -0.004254361909348518
43.0% -0.00425940449945379
44.0% -0.004264702526397921
44.0% -0.004403401831741533
45.0% -0.004425979505792481
46.0% -0.004375594626558757
46.0% -0.0043368406266972625
47.0% -0.0044111837348234095
48.0% -0.004491819342457434
48.0% -0.00455920602990423
49.0% -0.004568583439620395
49.0% -0.004598076987781548
50.0% -0.004643328483229581
51.0% -0.004715507254854109
51.0% -0.004841374948858609
52.0% -0.005006680064098336
52.0% -0.004983604023061282
53.0% -0.004926676299501488
54.0% -0.004888368021476618
54.0% -0.004860570328943323
55.0% -0.004830731052195825
56.0% -0.004787079716947177
56.0% -0.004725144749739807
57.0% -0.0046994414852798315
57.0% -0.004723233766614949
58.0% -0.004760997306724776
59.0% -0.004780624818512833
59.0% -0.004763968059501063
60.0% -0.00474393543866398
61.0% -0.00472669053701639
61.0% -0.004705515517139774
62.0% -0.004669793253409783
62.0% -0.004609309090833115
63.0% -0.004593819537091224
64.0% -0.004620532813222186
64.0% -0.004629592268684322
65.0% -0.004629476225383548
66.0% -0.004641419707323327
66.0% -0.004670627190246738
67.0% -0.0047106929917137864
68.0% -0.004759278118927859
68.0% -0.004779553317507377
69.0% -0.004731523953287008
69.0% -0.004640584401409143
70.0% -0.00469507743304905
71.0% -0.00471212985843715
71.0% -0.004720097736077684
72.0% -0.0047357845182810256
72.0% -0.004765637129127012
73.0% -0.00480940188287139
74.0% -0.00486223679469922
74.0% -0.004922963888478532
75.0% -0.0049731251866059
76.0% -0.00496719984727518
76.0% -0.0021382446188515417
77.0% -0.0021957828314472716
78.0% -0.00214754414728875
78.0% -0.002352515657152761
79.0% -0.002424745678361645
79.0% -0.0024741641370942353
80.0% -0.002493361287073093
81.0% -0.00249084647627102
81.0% -0.0024691809287224887
82.0% -0.002433859666036953
82.0% -0.002394277055050933
83.0% -0.0023568908865243417
84.0% -0.0023218844189309175
84.0% -0.0022858766720460643
85.0% -0.0022428357992942223
86.0% -0.002182751549752209
86.0% -0.002087199367170067
87.0% -0.0019202433456739208
88.0% -0.001660165640411973
88.0% -0.0015930724997442064
89.0% -0.0018462253297201444
89.0% -0.002018390478420803
90.0% -0.0020868873699028062
91.0% -0.0020957746496549114
91.0% -0.0020601682142609573
92.0% -0.0019670977929095806
92.0% -0.0017917800432042344
93.0% -0.0017850568119192076
94.0% -0.0019547558108213153
94.0% -0.002030454694460856
95.0% -0.002059503171589354
96.0% -0.002073873914388648
96.0% -0.002086006084592534
97.0% -0.002099461303314075
98.0% -0.0021142203179992216
98.0% -0.0021268926907029146
99.0% -0.00212100866762783
99.0% -0.002112834030166159
-- DONE --
###Markdown
Create testing data
###Code
testing = []
TEST_NON_SPAM_SAMPLES = 5000
TEST_SPAM_SAMPLES = 5000
trainingPos = df[pd.notnull(df['Subject'])]
trainingPos = trainingPos.sample(TEST_NON_SPAM_SAMPLES)
trainingPos = trainingPos['Subject']
trainingPosSplit = trainingPos.str.split(' ')
testing = addBagToInputOutput(trainingPosSplit, [0], testing)
trainingNeg = spamSubjects.sample(TEST_SPAM_SAMPLES)
trainingNeg = trainingNeg['Subject']
trainingNegSplit = trainingNeg.str.split(' ')
testing = addBagToInputOutput(trainingNegSplit, [1], testing)
testing = np.array(testing)
np.random.shuffle(testing)
len(testing)
###Output
_____no_output_____
###Markdown
Testing the model: we will take the test data and use it as a measure of performance. The following will be output: 1. The Classification Accuracy 2. The Non-spam Classification Accuracy 3. The Spam Classification Accuracy
###Code
correct = 0
spamCorrect = 0
nonSpamCorrect = 0
for i in range(len(testing)):
in_out = testing[i]
result = fnn.fire(np.array([in_out[0]]))
target = in_out[1][0]
result = np.round(result[0][0])
if result == target:
correct += 1
if target == 1:
spamCorrect += 1
else:
nonSpamCorrect += 1
print("Classification accuracy: ", correct / len(testing))
print("Non spam classification accuracy: ", nonSpamCorrect / TEST_NON_SPAM_SAMPLES)
print("Spam classification accuracy: ", spamCorrect / TEST_SPAM_SAMPLES)
###Output
Classification accuracy: 0.8731
Non spam classification accuracy: 0.8638
Spam classification accuracy: 0.8824
###Markdown
Profiling: Let's apply the classifier to a subset of the data to see what it classifies as spam
###Code
dfClassify = df.drop(['Filename', 'Person', 'Directory', 'Message-ID', 'Content-Transfer-Encoding', 'Content-Type', 'Date', 'X-FileName', 'X-To', 'X-bcc', 'Cc', 'X-cc', 'X-Folder', 'X-Origin', 'Time', 'Bcc', 'X-From', 'Attendees', 'Re', 'Mime-Version'], axis=1)
dfClassify = dfClassify[pd.notnull(df['Subject'])]
dfClassify = dfClassify.sample(len(dfClassify))
def classify(arr):
classifySplit = arr.split(' ')
posBagOfWords = {}
posBagOfWords = addToBagOfWords(posBagOfWords, classifySplit)
listOfIn = []
for key in posBagOfWords:
listOfIn.append(key)
inputBag = dictionaryList['Word'].isin(listOfIn).values.tolist()
result = fnn.fire(np.array([np.array(inputBag)]))[0][0]
return result
dfClassify['Classification'] = dfClassify['Subject'].apply(classify)
dfClassify.sort_values(['Classification'], ascending=[0])
dfClassify[dfClassify['From'] == "[email protected]"]
###Output
_____no_output_____
###Markdown
Let's group by the sender and see which mailing address sent the most spam
###Code
groupedClassify = dfClassify[dfClassify['Classification'] > 0.90]
groupedClassify = groupedClassify.loc[groupedClassify['From'].str.contains('enron.com', regex=True) == False]
groupedClassify = groupedClassify.groupby(['From']).agg(['count', 'mean'])
groupedClassify = groupedClassify.reset_index()
groupedClassify.sort_values([('Classification', 'count')], ascending=[0]).head()
###Output
_____no_output_____ |
exercises/exercism.org/Basics/Guidos_Gorgeous_Lasagna.ipynb | ###Markdown
TaskSource: https://exercism.org/tracks/python/exercises/guidos-gorgeous-lasagna/You will write code that helps you cook a gorgeous lasagna from your favorite cookbook.You have five tasks, all related to cooking your recipe. Define the expected bake time in minutesDefine an EXPECTED_BAKE_TIME constant that states how many minutes the lasagna should bake in the oven. According to the cookbook, the lasagna should be in the oven for 40 minutes: Calculate the remaining bake time in minutesImplement the bake_time_remaining() function, which takes the actual minutes the lasagna has been in the oven as an argument and returns how many minutes the lasagna still needs to bake based on EXPECTED_BAKE_TIME. Calculate the preparation time in minutesImplement the preparation_time_in_minutes() function, which takes the number of layers you want to add to the lasagna as an argument and returns how many minutes you would spend preparing them. Assume each layer takes 2 minutes to prepare. Calculate the total elapsed cooking time (prep + bake) in minutesImplement the elapsed_time_in_minutes() function with two parameters: number_of_layers (the number of layers added to the lasagna) and elapsed_bake_time (the number of minutes the lasagna has baked in the oven). This function should return the total number of minutes you have been cooking so far (as the sum of your preparation time and the time the lasagna has already baked in the oven). Update the code so that each function is documented correctly.Go through the code and add notes and documentation, for example as shown for the bake_time_remaining() function. Concepts- Variables and constants- Functions- Addition, subtraction and multiplication- Documenting functions Automatic check
###Code
nachricht = "Ihre Implementierung von {} ist {}"
true_or_false = bake_time_remaining(10) == 30
print(nachricht.format("bake_time_remaining()", true_or_false))
true_or_false = preparation_time_in_minutes(12) == 24
print(nachricht.format("preparation_time_in_minutes()", true_or_false))
true_or_false = elapsed_time_in_minutes(10, 20) == 40
print(nachricht.format("elapsed_time_in_minutes()", true_or_false))
###Output
Ihre Implementierung von bake_time_remaining() ist True
Ihre Implementierung von preparation_time_in_minutes() ist True
Ihre Implementierung von elapsed_time_in_minutes() ist True
###Markdown
Your Solution
###Code
EXPECTED_BAKE_TIME = 40
PREPARATION_TIME = 2

def bake_time_remaining(elapsed_bake_time):
    """Return the remaining bake time.

    :param elapsed_bake_time: int - bake time already elapsed.
    :return: int - remaining bake time derived from 'EXPECTED_BAKE_TIME'.
    """
    return EXPECTED_BAKE_TIME - elapsed_bake_time

def preparation_time_in_minutes(number_of_layers):
    """Return the preparation time, at 'PREPARATION_TIME' minutes per layer.

    :param number_of_layers: int - number of layers added to the lasagna.
    :return: int - preparation time in minutes.
    """
    return number_of_layers * PREPARATION_TIME

def elapsed_time_in_minutes(number_of_layers, elapsed_bake_time):
    """Return the total elapsed cooking time (preparation + baking).

    :param number_of_layers: int - number of layers added to the lasagna.
    :param elapsed_bake_time: int - bake time already elapsed.
    :return: int - total minutes spent preparing and baking so far.
    """
    return preparation_time_in_minutes(number_of_layers) + elapsed_bake_time

bake_time_remaining(30)
###Output
_____no_output_____ |
nbs/indexers.FaceClusteringIndexer.Models.ipynb | ###Markdown
FaceClustering Model This module contains functionality to cluster faces on images by person. It uses the `FaceEmbeddingModel` to extract faces from photos, crop them and create embeddings for each face. These embeddings are used as input for this module to cluster them based on the network topology of the graph of all embeddings.
###Code
# export
class FaceClusteringModel():
model_fname = "pretrained_gcn_v_ms1m.pth"
model_path = MODEL_DIR / model_fname
model_s3_url = f"{MEMRI_S3}/{model_fname}"
def __init__(self, tau=0.4, *args, **kwargs):
# tau used in the paper=0.65
self.tau=tau
self.rec_model = FaceEmbeddingModel()
self.clustering_model = GCN_V(feature_dim=256, nhid=512, nclass=1, dropout=0.0)
download_file(self.model_s3_url, self.model_path)
load_checkpoint(self.clustering_model, str(self.model_path), map_location="cpu", strict=True);
self.clustering_model.eval()
def get_cluster_labels(self, features):
dataset = GCNVDataset(features)
features = torch.FloatTensor(dataset.features)
adj = sparse_mx_to_torch_sparse_tensor(dataset.adj)
confidences = self.clustering_model((features, adj)).detach().numpy()
clusters = confidence2clusters(confidences, dists=dataset.dists,
nbrs=dataset.nbrs, tau=self.tau)
return clusters
def run(self, photos):
crop_photos = self.rec_model.get_crops(photos)
for c in progress_bar(crop_photos): c.embedding = self.rec_model.get_embedding(c)
crop_embeddings = np.stack([x.embedding[256:] for x in crop_photos])
cluster_labels = self.get_cluster_labels(crop_embeddings)
return crop_photos, cluster_labels
###Output
_____no_output_____
###Markdown
Running the model on a toy dataset You can test the model on your favorite images; here we use 2 images from the Modern Family TV show as input.
###Code
data_dir = PYI_TESTDATA / "photos" / "faceclustering"
photos = [IPhoto.from_path(path=x, size=640) for x in data_dir.ls() if str(x).endswith("jpg")]
show_images(photos)
###Output
_____no_output_____
###Markdown
You can initialize the model and run it on your data with a few very simple function calls
###Code
model = FaceClusteringModel()
crops, crop_cluster_labels = model.run(photos)
###Output
_____no_output_____
###Markdown
Visualize results You can group the photos from a cluster using the `group_clusters` function, and easily visualize the results.
###Code
for i, photos in enumerate(group_clusters(crops, crop_cluster_labels)[:3]):
print(f"Cluster {i}")
show_images(photos)
###Output
Cluster 0
###Markdown
Export -
###Code
# hide
from nbdev.export import *
notebook2script()
###Output
Converted basic.ipynb.
Converted importers.EmailImporter.ipynb.
Converted importers.Importer.ipynb.
Converted importers.util.ipynb.
Converted index.ipynb.
Converted indexers.FaceClusteringIndexer.Models.ipynb.
Converted indexers.FaceClusteringIndexer.Utils.ipynb.
Converted indexers.FaceClusteringIndexer.indexer.ipynb.
Converted indexers.FaceRecognitionModel.ipynb.
Converted indexers.FacerecognitionIndexer.Photo.ipynb.
Converted indexers.GeoIndexer.ipynb.
Converted indexers.NoteListIndexer.NoteList.ipynb.
Converted indexers.NoteListIndexer.Parser.ipynb.
Converted indexers.NoteListIndexer.ipynb.
Converted indexers.NoteListIndexer.util.ipynb.
Converted indexers.indexer.ipynb.
Converted itembase.ipynb.
Converted pod.client.ipynb.
|
project3/.Trash-0/files/project_3_starter 6.ipynb | ###Markdown
Project 3: Smart Beta Portfolio and Portfolio Optimization InstructionsEach problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment. After implementing the function, run the cell to test it against the unit tests we've provided. For each problem, we provide one or more unit tests from our `project_tests` package. These unit tests won't tell you if your answer is correct, but will warn you of any major errors. Your code will be checked for the correct solution when you submit it to Udacity. PackagesWhen you implement the functions, you'll only need to use the [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/) packages. Don't import any other packages, otherwise the grader won't be able to run your code.The other packages that we're importing are `helper`, `project_helper`, and `project_tests`. These are custom packages built to help you solve the problems. The `helper` and `project_helper` modules contain utility functions and graph functions. The `project_tests` package contains the unit tests for all the problems. Install Packages
###Code
import sys
!{sys.executable} -m pip install -r requirements.txt
###Output
_____no_output_____
###Markdown
Load Packages
###Code
import pandas as pd
import numpy as np
import helper
import project_helper
import project_tests
###Output
_____no_output_____
###Markdown
Market DataThe data source we'll be using is the [Wiki End of Day data](https://www.quandl.com/databases/WIKIP) hosted at [Quandl](https://www.quandl.com). This contains data for many stocks, but we'll just be looking at the S&P 500 stocks. We'll also make things a little easier to solve by narrowing our range of time from 2007-06-30 to 2017-09-30. Set API KeySet the `quandl_api_key` variable to your Quandl api key. You can find your Quandl api key [here](https://www.quandl.com/account/api).
###Code
# TODO: Add your Quandl API Key
quandl_api_key = ''
###Output
_____no_output_____
###Markdown
Download Data
###Code
import os
snp500_file_path = 'data/tickers_SnP500.txt'
wiki_file_path = 'data/WIKI_PRICES.csv'
start_date, end_date = '2013-07-01', '2017-06-30'
use_columns = ['date', 'ticker', 'adj_close', 'adj_volume', 'ex-dividend']
if not os.path.exists(wiki_file_path):
with open(snp500_file_path) as f:
tickers = f.read().split()
helper.download_quandl_dataset(quandl_api_key, 'WIKI', 'PRICES', wiki_file_path, use_columns, tickers, start_date, end_date)
else:
print('Data already downloaded')
###Output
_____no_output_____
###Markdown
Load Data
###Code
df = pd.read_csv(wiki_file_path)
###Output
_____no_output_____
###Markdown
Create the UniverseWe'll be selecting dollar volume stocks for our stock universe. This universe is similar to large market cap stocks, because they are highly liquid.
###Code
percent_top_dollar = 0.2
high_volume_symbols = project_helper.large_dollar_volume_stocks(df, 'adj_close', 'adj_volume', percent_top_dollar)
df = df[df['ticker'].isin(high_volume_symbols)]
###Output
_____no_output_____
###Markdown
2-D MatricesIn the previous projects, we used a [multiindex](https://pandas.pydata.org/pandas-docs/stable/advanced.html) to store all the data in a single dataframe. As you work with larger datasets, it becomes infeasible to store all the data in memory. Starting with this project, we'll be storing all our data as 2-D matrices to match what you'll be expecting in the real world.
###Code
close = df.reset_index().pivot(index='ticker', columns='date', values='adj_close')
volume = df.reset_index().pivot(index='ticker', columns='date', values='adj_volume')
ex_dividend = df.reset_index().pivot(index='ticker', columns='date', values='ex-dividend')
###Output
_____no_output_____
###Markdown
View DataTo see what one of these 2-d matrices looks like, let's take a look at the closing prices matrix.
###Code
project_helper.print_dataframe(close)
###Output
_____no_output_____
###Markdown
Part 1: Smart Beta PortfolioIn Part 1 of this project, you'll build a smart beta portfolio using dividend yield. To see how well it performs, you'll compare this portfolio to an index. Index WeightsAfter building the smart beta portfolio, you should compare it to a similar strategy or index.Implement `generate_dollar_volume_weights` to generate the weights for this index. For each date, generate the weights based on dollar volume traded for that date. For example, assume the following is dollar volume traded data:| | 10/02/2010 | 10/03/2010 ||----------|------------|------------|| **AAPL** | 2 | 2 || **BBC** | 5 | 6 || **GGL** | 1 | 2 || **ZGB** | 6 | 5 |The weights should be the following:| | 10/02/2010 | 10/03/2010 ||----------|------------|------------|| **AAPL** | 0.142 | 0.133 || **BBC** | 0.357 | 0.400 || **GGL** | 0.071 | 0.133 || **ZGB** | 0.428 | 0.333 |
###Code
def generate_dollar_volume_weights(close, volume):
"""
Generate dollar volume weights.
Parameters
----------
close : DataFrame
Close price for each ticker and date
volume : str
Volume for each ticker and date
Returns
-------
dollar_volume_weights : DataFrame
The dollar volume weights for each ticker and date
"""
assert close.index.equals(volume.index)
assert close.columns.equals(volume.columns)
#TODO: Implement function
return None
project_tests.test_generate_dollar_volume_weights(generate_dollar_volume_weights)
###Output
_____no_output_____
###Markdown
View DataLet's generate the index weights using `generate_dollar_volume_weights` and view them using a heatmap.
###Code
index_weights = generate_dollar_volume_weights(close, volume)
project_helper.plot_weights(index_weights, 'Index Weights')
###Output
_____no_output_____
###Markdown
ETF WeightsNow that we have the index weights, it's time to build the weights for the smart beta ETF. Let's build an ETF portfolio that is based on dividends. This is a common factor used to build portfolios. Unlike most portfolios, we'll be using a single factor for simplicity.Implement `calculate_dividend_weights` to returns the weights for each stock based on its total dividend yield over time. This is similar to generating the weight for the index, but it's dividend data instead.
###Code
def calculate_dividend_weights(ex_dividend):
"""
Calculate dividend weights.
Parameters
----------
ex_dividend : DataFrame
Ex-dividend for each stock and date
Returns
-------
dividend_weights : DataFrame
Weights for each stock and date
"""
#TODO: Implement function
return None
project_tests.test_calculate_dividend_weights(calculate_dividend_weights)
###Output
_____no_output_____
###Markdown
View DataLet's generate the ETF weights using `calculate_dividend_weights` and view them using a heatmap.
###Code
etf_weights = calculate_dividend_weights(ex_dividend)
project_helper.plot_weights(etf_weights, 'ETF Weights')
###Output
_____no_output_____
###Markdown
ReturnsImplement `generate_returns` to generate the returns. Note this isn't log returns. Since we're not dealing with volatility, we don't have to use log returns.
###Code
def generate_returns(close):
"""
Generate returns for ticker and date.
Parameters
----------
close : DataFrame
Close price for each ticker and date
Returns
-------
returns : Dataframe
The returns for each ticker and date
"""
#TODO: Implement function
return None
project_tests.test_generate_returns(generate_returns)
###Output
_____no_output_____
###Markdown
View DataLet's generate the closing returns using `generate_returns` and view them using a heatmap.
###Code
returns = generate_returns(close)
project_helper.plot_returns(returns, 'Close Returns')
###Output
_____no_output_____
###Markdown
Weighted ReturnsWith the returns of each stock computed, we can use it to compute the returns for for an index or ETF. Implement `generate_weighted_returns` to create weighted returns using returns and weights for an Index or ETF.
###Code
def generate_weighted_returns(returns, weights):
"""
Generate weighted returns.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
weights : DataFrame
Weights for each ticker and date
Returns
-------
weighted_returns : DataFrame
Weighted returns for each ticker and date
"""
assert returns.index.equals(weights.index)
assert returns.columns.equals(weights.columns)
#TODO: Implement function
return None
project_tests.test_generate_weighted_returns(generate_weighted_returns)
###Output
_____no_output_____
###Markdown
View DataLet's generate the etf and index returns using `generate_weighted_returns` and view them using a heatmap.
###Code
index_weighted_returns = generate_weighted_returns(returns, index_weights)
etf_weighted_returns = generate_weighted_returns(returns, etf_weights)
project_helper.plot_returns(index_weighted_returns, 'Index Returns')
project_helper.plot_returns(etf_weighted_returns, 'ETF Returns')
###Output
_____no_output_____
###Markdown
Cumulative ReturnsImplement `calculate_cumulative_returns` to calculate the cumulative returns over time.
###Code
def calculate_cumulative_returns(returns):
"""
Calculate cumulative returns.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
Returns
-------
cumulative_returns : Pandas Series
Cumulative returns for each date
"""
#TODO: Implement function
return None
project_tests.test_calculate_cumulative_returns(calculate_cumulative_returns)
###Output
_____no_output_____
###Markdown
View DataLet's generate the etf and index cumulative returns using `calculate_cumulative_returns` and compare the two.
###Code
index_weighted_cumulative_returns = calculate_cumulative_returns(index_weighted_returns)
etf_weighted_cumulative_returns = calculate_cumulative_returns(etf_weighted_returns)
project_helper.plot_benchmark_returns(index_weighted_cumulative_returns, etf_weighted_cumulative_returns, 'Smart Beta ETF vs Index')
###Output
_____no_output_____
###Markdown
Tracking ErrorIn order to check the performance of the smart beta portfolio, we can compare it against the index. Let's generate the tracking error using the helper function's `tracking_error` and graph it over time.
###Code
smart_beta_tracking_error = project_helper.tracking_error(index_weighted_cumulative_returns, etf_weighted_cumulative_returns)
project_helper.plot_tracking_error(smart_beta_tracking_error, 'Smart Beta Tracking Error')
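# Note (my addition): project_helper.tracking_error is a course-provided helper whose
# implementation isn't shown here. A common definition of tracking error is the standard
# deviation of the difference between portfolio and benchmark returns, e.g.
# np.std(etf_returns - benchmark_returns, ddof=1), optionally annualized with np.sqrt(252).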
###Output
_____no_output_____
###Markdown
Part 2: Portfolio OptimizationIn Part 2, you'll optimize the index you created in part 1. You'll use `cvxopt` to optimize the convex problem of finding the optimal weights for the portfolio. Just like before, we'll compare these results to the index. CovarianceImplement `get_covariance` to calculate the covariance of `returns` and `weighted_index_returns`. We'll use this to feed into our convex optimization function. By using covariance, we can prevent the optimizer from going all in on a few stocks.
###Code
def get_covariance(returns, weighted_index_returns):
"""
Calculate covariance matrices.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
weighted_index_returns : DataFrame
Weighted index returns for each ticker and date
Returns
-------
xtx, xty : (2 dimensional Ndarray, 1 dimensional Ndarray)
"""
assert returns.index.equals(weighted_index_returns.index)
assert returns.columns.equals(weighted_index_returns.columns)
#TODO: Implement function
return None, None
project_tests.test_get_covariance(get_covariance)
###Output
_____no_output_____
###Markdown
View DataLet's look at the covariance generated from `get_covariance`.
###Code
xtx, xty = get_covariance(returns, index_weighted_returns)
xtx = pd.DataFrame(xtx, returns.index, returns.index)
xty = pd.Series(xty, returns.index)
project_helper.plot_covariance(xty, xtx)
###Output
_____no_output_____
###Markdown
Quadratic ProgrammingNow that you have the covariance, we can use this to optimize the weights. Run the following cell to generate optimal weights using helper function's `solve_qp`.
###Code
raw_optim_etf_weights = project_helper.solve_qp(xtx.values, xty.values)
raw_optim_etf_weights_per_date = np.tile(raw_optim_etf_weights, (len(returns.columns), 1))
optim_etf_weights = pd.DataFrame(raw_optim_etf_weights_per_date.T, returns.index, returns.columns)
###Output
_____no_output_____
###Markdown
Optimized PortfolioWith our optimized etf weights built using quadratic programming, let's compare it to the index. Run the next cell to calculate the optimized etf returns and compare the returns to the index returns.
###Code
optim_etf_returns = generate_weighted_returns(returns, optim_etf_weights)
optim_etf_cumulative_returns = calculate_cumulative_returns(optim_etf_returns)
project_helper.plot_benchmark_returns(index_weighted_cumulative_returns, optim_etf_cumulative_returns, 'Optimized ETF vs Index')
optim_etf_tracking_error = project_helper.tracking_error(index_weighted_cumulative_returns, optim_etf_cumulative_returns)
project_helper.plot_tracking_error(optim_etf_tracking_error, 'Optimized ETF Tracking Error')
###Output
_____no_output_____
###Markdown
Rebalance PortfolioThe optimized etf portfolio used different weights for each day. After factoring in transaction fees, this amount of portfolio turnover can reduce the total returns. Let's find the optimal times to rebalance the portfolio instead of doing it every day.Implement `rebalance_portfolio` to rebalance a portfolio.
###Code
def rebalance_portfolio(returns, weighted_index_returns, shift_size, chunk_size):
"""
Get weights for each rebalancing of the portfolio.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
weighted_index_returns : DataFrame
Weighted index returns for each ticker and date
shift_size : int
The number of days between each rebalance
chunk_size : int
The number of days to look in the past for rebalancing
Returns
-------
all_rebalance_weights : list of Ndarrays
The etf weights for each point they are rebalanced
"""
assert returns.index.equals(weighted_index_returns.index)
assert returns.columns.equals(weighted_index_returns.columns)
assert shift_size > 0
assert chunk_size >= 0
#TODO: Implement function
return None
project_tests.test_rebalance_portfolio(rebalance_portfolio)
###Output
_____no_output_____
###Markdown
Run the following cell to rebalance the portfolio using `rebalance_portfolio`.
###Code
chunk_size = 250
shift_size = 5
all_rebalance_weights = rebalance_portfolio(returns, index_weighted_returns, shift_size, chunk_size)
###Output
_____no_output_____
###Markdown
Portfolio Rebalance CostWith the portfolio rebalanced, we need to use a metric to measure the cost of rebalancing the portfolio. Implement `get_rebalance_cost` to calculate the rebalance cost.
###Code
def get_rebalance_cost(all_rebalance_weights, shift_size, rebalance_count):
"""
Get the cost of all the rebalancing.
Parameters
----------
all_rebalance_weights : list of Ndarrays
ETF Returns for each ticker and date
shift_size : int
The number of days between each rebalance
rebalance_count : int
Number of times the portfolio was rebalanced
Returns
-------
rebalancing_cost : float
The cost of all the rebalancing
"""
assert shift_size > 0
assert rebalance_count > 0
#TODO: Implement function
return None
project_tests.test_get_rebalance_cost(get_rebalance_cost)
###Output
_____no_output_____
###Markdown
Run the following cell to get the rebalance cost from `get_rebalance_cost`.
###Code
unconstrained_costs = get_rebalance_cost(all_rebalance_weights, shift_size, returns.shape[1])
print(unconstrained_costs)
###Output
_____no_output_____ |
src/linear_regression_using_scikit_learn.ipynb | ###Markdown
ImportsNumPy is imported for array processing; Python doesn't have built-in array support, and the numpy library provides native arrays. Pandas is a Python library for working with tables; since imported data is mostly in table format, pandas is imported for easy manipulation of tables. Matplotlib is a Python library used to plot graphs; we will use it to visualize the results.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Reading the dataset from dataIn this line of code, the dataset is imported from the data folder using the read_csv method of the pandas library and stored in the dataset variable.
###Code
dataset = pd.read_csv(r'..\\data\\prices.csv')
###Output
_____no_output_____
###Markdown
On viewing the dataset, it consists of two columns X and Y, where X is the independent variable and Y is the dependent variable.
###Code
dataset.head()
###Output
_____no_output_____
###Markdown
Creating Dependent and Independent variablesThe X column from the dataset is extracted into a NumPy array X, and similarly for y. X is the independent variable and y is the dependent variable. Inference
###Code
X = dataset['X'].values
y = dataset['Y'].values
###Output
_____no_output_____
###Markdown
Executing the first line results in a pandas Series object. Using the values attribute results in a NumPy array.
###Code
print(type(dataset['X']))
print(type(dataset['X'].values))
###Output
<class 'pandas.core.series.Series'>
<class 'numpy.ndarray'>
###Markdown
Visualizing the data This step is just to see what the dataset looks like. On visualization the data would appear something like this; the X and Y attributes would vary based on the dataset. Each point on the plot is a data point showing the respective List Price on the x-axis and Best Price on the y-axis.
###Code
title='Linear Regression on Prices Dataset'
x_axis_label = 'List Price'
y_axis_label = 'Best Price'
plt.scatter(X,y)
plt.title(title)
plt.xlabel(x_axis_label)
plt.ylabel(y_axis_label)
plt.show()
###Output
_____no_output_____
###Markdown
Splitting the data into training set and test setWe split the whole dataset into a training set and a test set, where the training set is used for fitting the line to the data and the test set is used to check how well the fitted line generalizes to unseen data.
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)  # 80% train, 20% test
###Output
_____no_output_____
###Markdown
Reshaping the numpy arrays since the scikit-learn model expects a 2-D arrayFurther on, the scikit-learn model expects a 2-D array of shape (length, 1), so the 1-D arrays are reshaped accordingly.
###Code
X_train = np.reshape(X_train,newshape = (-1,1))
y_train = np.reshape(y_train,newshape = (-1,1))
X_test = np.reshape(X_test,newshape = (-1,1))
y_test = np.reshape(y_test,newshape = (-1,1))
###Output
_____no_output_____
###Markdown
The code above simply converts each one-dimensional array into a 2-D array in which every element is itself a single-element array.
###Code
print('Before Reshaping',np.shape(X))
print('After Reshaping',np.shape(X_train))
###Output
Before Reshaping (23,)
After Reshaping (19, 1)
###Markdown
Importing the linear model from the sklearn frameworkLinearRegression is imported from the scikit-learn library, and lr is an instance of LinearRegression. Training happens in the fit method: the independent and dependent variables are passed to fit, which tries to fit a line to the data provided.
###Code
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(X = X_train, y = y_train)
###Output
_____no_output_____
###Markdown
Predicting the ResultsUsing the trained linear regression model, we predict the values for the test data. The y_pred variable contains the predicted y-values for the test x-values.
###Code
y_pred = lr.predict(X_test)
###Output
_____no_output_____
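###Markdown
A numerical check of the fit (editorial addition): alongside the plots, standard regression metrics quantify how well the fitted line generalizes to the test set.
###Code
from sklearn.metrics import mean_squared_error, r2_score

print('MSE:', mean_squared_error(y_test, y_pred))
print('R^2:', r2_score(y_test, y_pred))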
###Markdown
Visualizing the ResultsHaving predicted the y-values for a set of x-values, we visualize the results to check how well our line fits. In the plot, the red points are the actual data points and the blue line shows the predictions.
###Code
plt.scatter(X_test,y_test,c='red')
plt.plot(X_test,y_pred)
plt.title(title)
plt.xlabel(x_axis_label)
plt.ylabel(y_axis_label)
plt.show()
###Output
_____no_output_____ |
3d_data_stacker,_extractor,_and_viewer.ipynb | ###Markdown
###Code
from google.colab import drive
drive.mount('/content/drive')
!ls "/content/drive/My Drive"
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from scipy import ndimage as ndi
import skimage as sk
from skimage import (data, exposure, feature, filters, io, measure,
                     morphology, restoration, segmentation, transform, util,
                     img_as_float32, img_as_float64, img_as_int, img_as_ubyte)
from skimage.data import camera
from skimage.exposure import histogram
from skimage.external import tifffile
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
#name your new tiff stack here
with tifffile.TiffWriter('drive/My Drive/filepath/stackname.tiff', bigtiff=True) as stack:
#put the file path to the folder of images you want to stack here
dir_name = 'drive/My Drive/filepath/'
filenames = os.listdir(dir_name)
#remove extraneous file info from the image names here by splitting before and after the image number; this will make sure the images are ordered properly in the stack
sort_idx = np.argsort([int(filename.split('ed')[1].split('.png')[0]) for filename in filenames])
for i in sort_idx:
filename = dir_name + filenames[i]
# image=np.array(filename, dtype='ubyte')
# image=np.array(filename)
stack.save(io.imread(filename))
# %matplotlib inline
#***************************load tiff stack here
data = io.imread('drive/My Drive/filepath/stackname.tiff')
#data=np.array(data, dtype=int)
#data=img_as_int(data)
print("shape: {}".format(data.shape))
print("dtype: {}".format(data.dtype))
print("range: ({}, {})".format(data.min(), data.max()))
#io.imsave('data.tiff', data)
#io.imshow('data.tiff', cmap='binary')
print(data.shape[1])
print()
#i'm leaving this in because we might be able to use it to alter the spacing of your images.
#The microscope reports the following spacing
original_spacing = np.array([1.4, 1.4, 1.4])
# We downsampled each slice 4x to make the data smaller
rescaled_spacing = original_spacing * [1, 4, 4]
# Normalize the spacing so that pixels are a distance of 1 apart
spacing = rescaled_spacing / rescaled_spacing
print("microscope spacing: {}\n".format(original_spacing))
print("after rescaling images: {}\n".format(rescaled_spacing))
print("normalized spacing: {}\n".format(spacing))
def show_plane(ax, plane, cmap="gray", title=None):
ax.imshow(plane, cmap=cmap)
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title)
def slice_in_3D(ax, i):
# From:
# https://stackoverflow.com/questions/44881885/python-draw-3d-cube
import numpy as np
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
Z = np.array([[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1]])
Z = Z * data.shape
r = [-1,1]
X, Y = np.meshgrid(r, r)
# plot vertices
ax.scatter3D(Z[:,0], Z[:,1], Z[:,2])
# list of sides' polygons of figure
verts = [[Z[0], Z[1], Z[2], Z[3]],
[Z[4], Z[5], Z[6], Z[7]],
[Z[0], Z[1], Z[5], Z[4]],
[Z[2], Z[3], Z[7], Z[6]],
[Z[1], Z[2], Z[6], Z[5]],
[Z[4], Z[7], Z[3], Z[0]],
[Z[2], Z[3], Z[7], Z[6]]]
# plot sides
ax.add_collection3d(
Poly3DCollection(verts, facecolors=(0, 1, 1, 0.25), linewidths=1,
edgecolors='darkblue')
)
verts = np.array([[[0, 0, 0],
[0, 0, 1],
[0, 1, 1],
[0, 1, 0]]])
verts = verts * (data.shape[0], data.shape[1], data.shape[2])
verts += [i, 0, 0]
ax.add_collection3d(Poly3DCollection(verts,
facecolors='magenta', linewidths=1, edgecolors='black'))
ax.set_xlabel('plane')
ax.set_ylabel('col')
ax.set_zlabel('row')
# Auto-scale plot axes
scaling = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]] * 3)
#plt.show()
from ipywidgets import interact
def slice_explorer(data, cmap='gray'):
N = len(data)
@interact(plane=(0, N - 1))
def display_slice(plane=6):
fig, ax = plt.subplots(figsize=(20, 5))
ax_3D = fig.add_subplot(133, projection='3d')
show_plane(ax, data[plane], title="Plane {}".format(plane), cmap=cmap)
slice_in_3D(ax_3D, plane)
plt.show()
return display_slice
slice_explorer(data);
print("original aka data")
def plot_hist(ax, data, title=None):
ax.hist(data.ravel(), bins=256)
ax.ticklabel_format(axis="y", style="scientific", scilimits=(0, 0))
if title:
ax.set_title(title)
# I included this code for histogram normalization because it might help you process the images initially?
#feel free not to use
equalized = exposure.equalize_hist(data)
# # i used the equalized image because the scale is from 0-1 and the region properties command seems to work best with it.
# feel free to try using the orginal file labled 'data'
interior_label = equalized
print("interior label: {}".format(np.unique(interior_label)))
interior_label=img_as_int(interior_label)
relabeled, _, _ = segmentation.relabel_sequential(interior_label)
print("relabeled labels: {}".format(np.unique(relabeled)))
regionprops = measure.regionprops(relabeled)
supported = []
unsupported = []
for prop in regionprops[0]:
try:
regionprops[0][prop]
supported.append(prop)
except NotImplementedError:
unsupported.append(prop)
print("Supported properties:")
print(" " + "\n ".join(supported))
print()
print("Unsupported properties:")
print(" " + "\n ".join(unsupported))
#check this parameter, make sure computer measured the correct number of regions
print()
print("measured regions: {}".format(np.unique(relabeled)))
#get volumes in pixels
volumes = [regionprop.area for regionprop in regionprops]
print("total pixels: {}".format(volumes))
max_volume = np.max(volumes)
mean_volume = np.mean(volumes)
min_volume = np.min(volumes)
sd_volume = np.std(volumes)
total_volume = np.sum(volumes)
print("Volume statistics")
print("total: {}".format(total_volume))
print("min: {}".format(min_volume))
print("max: {}".format(max_volume))
print("mean: {:0.2f}".format(mean_volume))
print("standard deviation: {:0.2f}".format(sd_volume))
print()
#3d image generator. It will run out of ram really fast, can only handle small image stacks (20x500x988 @ 25 gigs of ram)
#select region you want to visualize
selected_cell = 2
print('region')
print(selected_cell)
# skimage.measure.marching_cubes expects ordering (row, col, pln)
volume = (relabeled == regionprops[selected_cell].label).transpose(1, 2, 0)
verts_px, faces_px, _, _ = measure.marching_cubes_lewiner(volume, level=0)
surface_area_pixels = measure.mesh_surface_area(verts_px, faces_px)
verts, faces, _, _ = measure.marching_cubes_lewiner(volume, level=0)
surface_area_actual = measure.mesh_surface_area(verts, faces)
print("surface area (total pixels): {:0.2f}".format(surface_area_pixels))
print("surface area (actual): {:0.2f}".format(surface_area_actual))
fig = plt.figure(figsize=(22, 8))
ax = fig.add_subplot(111, projection="3d")
mesh = Poly3DCollection(verts_px[faces_px])
mesh.set_edgecolor("b")
ax.add_collection3d(mesh)
ax.set_xlabel("col")
ax.set_ylabel("row")
ax.set_zlabel("pln")
min_pln, min_row, min_col, max_pln, max_row, max_col = regionprops[selected_cell].bbox
ax.set_xlim(min_row, max_row)
ax.set_ylim(min_col, max_col)
ax.set_zlim(min_pln, max_pln)
for angle in range(0, 360):
ax.view_init(elev=90,azim=0)
plt.tight_layout()
plt.show()
print('finished')
###Output
_____no_output_____ |
00_Data_Preparation.ipynb | ###Markdown
Dataset PreparationDownload __database.sqlite__ from [here](https://www.kaggle.com/hugomathien/soccer)
###Code
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sqlite3
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
path = "datasets/" #Insert path here
database = path + 'database.sqlite'
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output\
conn = sqlite3.connect(database)
tables = pd.read_sql("""SELECT *
FROM sqlite_master
WHERE type='table';""", conn)
type(tables)
tables
team_t = pd.read_sql("""SELECT * FROM Team""", conn)
team_t
match_t = pd.read_sql("""SELECT * FROM Match""", conn)
match_t
m_master = team_t.set_index('team_api_id')['team_long_name'].to_dict()
h_master = match_t.filter(like='home_team_api_id')
match_t[h_master.columns] = h_master.replace(m_master)
a_master = match_t.filter(like='away_team_api_id')
match_t[a_master.columns] = a_master.replace(m_master)
match_t
league_t = pd.read_sql("""SELECT * FROM League""", conn)
league_t
selected_leagues = pd.read_sql("""SELECT *
FROM League
WHERE id IN(1729, 21518);""", conn)
selected_leagues
matches = pd.read_sql("""SELECT *
FROM Match
WHERE league_id IN(1729, 21518);""", conn)
matches
# Replace team_api_id with team_long_name
#m = team_t.set_index('team_api_id')['team_long_name'].to_dict()
#h = matches.filter(like='home_team_api_id')
#matches[h.columns] = h.replace(m)
#a = matches.filter(like='away_team_api_id')
#matches[a.columns] = a.replace(m)
df = matches[matches.columns.drop(list(matches.filter(regex='player|BW|IW|LB|PS|WH|SJ|VC|GB|BS')))]
df_EPL = df[df['league_id'] == 1729]
len(df_EPL)
df_LaLiga = df[df['league_id'] == 21518]
df_LaLiga.reset_index(drop=True, inplace=True)
df_LaLiga
# count shotonm shotoff, foulcommut
# for i in range(len(df_EPL['shoton'])):
# root = ET.fromstring(df_EPL['shoton'][i])
# count = len(root.getchildren())
# df_EPL.at[i,'shoton'] = count
# for i in range(len(df_EPL['shotoff'])):
# root = ET.fromstring(df_EPL['shotoff'][i])
# count = len(root.getchildren())
# df_EPL.at[i,'shotoff'] = count
# for i in range(len(df_EPL['foulcommit'])):
# root = ET.fromstring(df_EPL['foulcommit'][i])
# count = len(root.getchildren())
# df_EPL.at[i,'foulcommit'] = count
#add home and away yellow and red card stats
# Unsportsmanlike Conduct data has no card_type, treat it as a yellow card
# reference: https://en.wikipedia.org/wiki/Fouls_and_misconduct_(association_football)#Red_card_(dismissal)
# if no team record for card data, omit card row
#EPL
home_y_card =[]
away_y_card =[]
home_r_card =[]
away_r_card = []
NoneType = type(None)
for i in range(len(df_EPL['card'])):
root = ET.fromstring(df_EPL['card'][i])
home_y_card_count = 0
away_y_card_count = 0
home_r_card_count = 0
away_r_card_count = 0
for child in root:
if type(child.find('team')) != NoneType:
if child.find('team').text == str(df_EPL['home_team_api_id'][i]):
if type(child.find('card_type')) != NoneType:
if "y" in child.find('card_type').text:
home_y_card_count +=1
else:
home_r_card_count +=1
else:
home_y_card_count +=1
else:
if type(child.find('card_type')) != NoneType:
if "y" in child.find('card_type').text:
away_y_card_count +=1
else:
away_r_card_count +=1
else:
away_y_card_count +=1
home_y_card.append(home_y_card_count)
home_r_card.append(home_r_card_count)
away_y_card.append(away_y_card_count)
away_r_card.append(away_r_card_count)
df_EPL.loc[:,"home_y_card"] = home_y_card
df_EPL.loc[:,"home_r_card"] = home_r_card
df_EPL.loc[:,"away_y_card"] = away_y_card
df_EPL.loc[:,"away_r_card"] = away_r_card
#LALIGA
home_y_card =[]
away_y_card =[]
home_r_card =[]
away_r_card = []
NoneType = type(None)
for i in range(len(df_LaLiga['card'])):
home_y_card_count = 0
away_y_card_count = 0
home_r_card_count = 0
away_r_card_count = 0
if type(df_LaLiga['card'][i])!= NoneType:
root = ET.fromstring(df_LaLiga['card'][i])
for child in root:
if type(child.find('team')) != NoneType:
if child.find('team').text == str(df_LaLiga['home_team_api_id'][i]):
if type(child.find('card_type')) != NoneType:
if "y" in child.find('card_type').text:
home_y_card_count +=1
else:
home_r_card_count +=1
else:
home_y_card_count +=1
else:
if type(child.find('card_type')) != NoneType:
if "y" in child.find('card_type').text:
away_y_card_count +=1
else:
away_r_card_count +=1
else:
away_y_card_count +=1
home_y_card.append(home_y_card_count)
home_r_card.append(home_r_card_count)
away_y_card.append(away_y_card_count)
away_r_card.append(away_r_card_count)
df_LaLiga.loc[:,"home_y_card"] = home_y_card
df_LaLiga.loc[:,"home_r_card"] = home_r_card
df_LaLiga.loc[:,"away_y_card"] = away_y_card
df_LaLiga.loc[:,"away_r_card"] = away_r_card
df_LaLiga.shoton[3039]
#LALIGA
home_shoton =[]
away_shoton =[]
NoneType = type(None)
for i in range(len(df_LaLiga)):
home_shoton_count = 0
away_shoton_count = 0
if type(df_LaLiga['shoton'][i])!= NoneType:
root = ET.fromstring(df_LaLiga['shoton'][i])
for child in root:
if type(child.find('team')) != NoneType:
if child.find('team').text == str(df_LaLiga['home_team_api_id'][i]):
print(i, child.find('team').text)
home_shoton_count +=1
else:
away_shoton_count +=1
home_shoton.append(home_shoton_count)
away_shoton.append(away_shoton_count)
df_LaLiga.loc[:,"home_shoton"] = home_shoton
df_LaLiga.loc[:,"away_shoton"] = away_shoton
home_shoton[3039]
#extract shoton stat for home and away team from cross column
#EPL
home_shoton =[]
away_shoton =[]
NoneType = type(None)
for i in range(len(df_EPL)):
root = ET.fromstring(df_EPL['shoton'][i])
home_shoton_count = 0
away_shoton_count = 0
for child in root:
if type(child.find('team')) != NoneType:
if child.find('team').text == str(df_EPL['home_team_api_id'][i]):
home_shoton_count +=1
else:
away_shoton_count +=1
home_shoton.append(home_shoton_count)
away_shoton.append(away_shoton_count)
df_EPL.loc[:,"home_shoton"] = home_shoton
df_EPL.loc[:,"away_shoton"] = away_shoton
#LALIGA
home_shoton =[]
away_shoton =[]
NoneType = type(None)
for i in range(len(df_LaLiga)):
home_shoton_count = 0
away_shoton_count = 0
if type(df_LaLiga['shoton'][i])!= NoneType:
root = ET.fromstring(df_LaLiga['shoton'][i])
for child in root:
if type(child.find('team')) != NoneType:
if child.find('team').text == str(df_LaLiga['home_team_api_id'][i]):
home_shoton_count +=1
else:
away_shoton_count +=1
home_shoton.append(home_shoton_count)
away_shoton.append(away_shoton_count)
df_LaLiga.loc[:,"home_shoton"] = home_shoton
df_LaLiga.loc[:,"away_shoton"] = away_shoton
#extract shotoff stat for home and away team from cross column
#EPL
home_shotoff =[]
away_shotoff =[]
NoneType = type(None)
for i in range(len(df_EPL)):
root = ET.fromstring(df_EPL['shotoff'][i])
home_shotoff_count = 0
away_shotoff_count = 0
for child in root:
if type(child.find('team')) != NoneType:
if child.find('team').text == str(df_EPL['home_team_api_id'][i]):
home_shotoff_count +=1
else:
away_shotoff_count +=1
home_shotoff.append(home_shotoff_count)
away_shotoff.append(away_shotoff_count)
df_EPL.loc[:,"home_shotoff"] = home_shotoff
df_EPL.loc[:,"away_shotoff"] = away_shotoff
#LALIGA
home_shotoff =[]
away_shotoff =[]
NoneType = type(None)
for i in range(len(df_LaLiga)):
home_shotoff_count = 0
away_shotoff_count = 0
if type(df_LaLiga['shotoff'][i])!= NoneType:
root = ET.fromstring(df_LaLiga['shotoff'][i])
for child in root:
if type(child.find('team')) != NoneType:
if child.find('team').text == str(df_LaLiga['home_team_api_id'][i]):
home_shotoff_count +=1
else:
away_shotoff_count +=1
home_shotoff.append(home_shotoff_count)
away_shotoff.append(away_shotoff_count)
df_LaLiga.loc[:,"home_shotoff"] = home_shotoff
df_LaLiga.loc[:,"away_shotoff"] = away_shotoff
#extract foulcommit stat for home and away team from cross column
#EPL
home_foulcommit =[]
away_foulcommit =[]
NoneType = type(None)
for i in range(len(df_EPL)):
root = ET.fromstring(df_EPL['foulcommit'][i])
home_foulcommit_count = 0
away_foulcommit_count = 0
for child in root:
if type(child.find('team')) != NoneType:
if child.find('team').text == str(df_EPL['home_team_api_id'][i]):
home_foulcommit_count +=1
else:
away_foulcommit_count +=1
home_foulcommit.append(home_foulcommit_count)
away_foulcommit.append(away_foulcommit_count)
df_EPL.loc[:,"home_foulcommit"] = home_foulcommit
df_EPL.loc[:,"away_foulcommit"] = away_foulcommit
#LALIGA
home_foulcommit =[]
away_foulcommit =[]
NoneType = type(None)
for i in range(len(df_LaLiga)):
home_foulcommit_count = 0
away_foulcommit_count = 0
if type(df_LaLiga['foulcommit'][i])!= NoneType:
root = ET.fromstring(df_LaLiga['foulcommit'][i])
for child in root:
if type(child.find('team')) != NoneType:
if child.find('team').text == str(df_LaLiga['home_team_api_id'][i]):
home_foulcommit_count +=1
else:
away_foulcommit_count +=1
home_foulcommit.append(home_foulcommit_count)
away_foulcommit.append(away_foulcommit_count)
df_LaLiga.loc[:,"home_foulcommit"] = home_foulcommit
df_LaLiga.loc[:,"away_foulcommit"] = away_foulcommit
#extract cross stat for home and away team from cross column
#EPL
home_cross =[]
away_cross =[]
NoneType = type(None)
for i in range(len(df_EPL)):
root = ET.fromstring(df_EPL['cross'][i])
home_cross_count = 0
away_cross_count = 0
for child in root:
if type(child.find('team')) != NoneType:
if child.find('team').text == str(df_EPL['home_team_api_id'][i]):
home_cross_count +=1
else:
away_cross_count +=1
home_cross.append(home_cross_count)
away_cross.append(away_cross_count)
df_EPL.loc[:,"home_cross"] = home_cross
df_EPL.loc[:,"away_cross"] = away_cross
#LALIGA
home_cross =[]
away_cross =[]
NoneType = type(None)
for i in range(len(df_LaLiga)):
home_cross_count = 0
away_cross_count = 0
if type(df_LaLiga['cross'][i])!= NoneType:
root = ET.fromstring(df_LaLiga['cross'][i])
for child in root:
if type(child.find('team')) != NoneType:
if child.find('team').text == str(df_LaLiga['home_team_api_id'][i]):
home_cross_count +=1
else:
away_cross_count +=1
home_cross.append(home_cross_count)
away_cross.append(away_cross_count)
df_LaLiga.loc[:,"home_cross"] = home_cross
df_LaLiga.loc[:,"away_cross"] = away_cross
#extract corner stat for home and away team from corner column
#EPL
home_corner =[]
away_corner =[]
NoneType = type(None)
for i in range(len(df_EPL)):
root = ET.fromstring(df_EPL['corner'][i])
home_corner_count = 0
away_corner_count = 0
for child in root:
if type(child.find('team')) != NoneType:
if child.find('team').text == str(df_EPL['home_team_api_id'][i]):
home_corner_count +=1
else:
away_corner_count +=1
home_corner.append(home_corner_count)
away_corner.append(away_corner_count)
df_EPL.loc[:,"home_corner"] = home_corner
df_EPL.loc[:,"away_corner"] = away_corner
#LALIGA
home_corner =[]
away_corner =[]
NoneType = type(None)
for i in range(len(df_LaLiga)):
home_corner_count = 0
away_corner_count = 0
if type(df_LaLiga['corner'][i])!= NoneType:
root = ET.fromstring(df_LaLiga['corner'][i])
for child in root:
if type(child.find('team')) != NoneType:
if child.find('team').text == str(df_LaLiga['home_team_api_id'][i]):
home_corner_count +=1
else:
away_corner_count +=1
home_corner.append(home_corner_count)
away_corner.append(away_corner_count)
df_LaLiga.loc[:,"home_corner"] = home_corner
df_LaLiga.loc[:,"away_corner"] = away_corner
# extract full game possession for home and away team from possession column
# few data rows do not have 90 minutes data but only 80+ mins, so we set the fulltime possession threshold to 80 mins up.
#EPL
home_possession =[]
away_possession =[]
NoneType = type(None)
for i in range(len(df_EPL)):
root = ET.fromstring(df_EPL['possession'][i])
for child in root:
if type(child.find('elapsed')) != NoneType:
if int(child.find('elapsed').text) > 80:
if type(child.find('homepos')) != NoneType:
home_pos = child.find('homepos').text
if type(child.find('awaypos')) != NoneType:
away_pos = child.find('awaypos').text
home_possession.append(home_pos)
away_possession.append(away_pos)
df_EPL.loc[:,"home_possession"] = home_possession
df_EPL.loc[:,"away_possession"] = away_possession
#LALIGA
home_possession =[]
away_possession =[]
NoneType = type(None)
for i in range(len(df_LaLiga)):
if type(df_LaLiga['possession'][i])!= NoneType:
root = ET.fromstring(df_LaLiga['possession'][i])
for child in root:
if type(child.find('elapsed')) != NoneType:
if int(child.find('elapsed').text) > 80:
if type(child.find('homepos')) != NoneType:
home_pos = child.find('homepos').text
if type(child.find('awaypos')) != NoneType:
away_pos = child.find('awaypos').text
else:
home_pos = str(50)
away_pos = str(50)
home_possession.append(home_pos)
away_possession.append(away_pos)
df_LaLiga.loc[:,"home_possession"] = home_possession
df_LaLiga.loc[:,"away_possession"] = away_possession
h = df_LaLiga.filter(like='home_team_api_id')
df_LaLiga[h.columns] = h.replace(m_master)
a = df_LaLiga.filter(like='away_team_api_id')
df_LaLiga[a.columns] = a.replace(m_master)
h = df_EPL.filter(like='home_team_api_id')
df_EPL[h.columns] = h.replace(m_master)
a = df_EPL.filter(like='away_team_api_id')
df_EPL[a.columns] = a.replace(m_master)
pd.set_option('display.max_columns', None)
df_EPL_sort = df_EPL[df_EPL.columns.drop(list(matches.filter(regex='country_id|league_id|card|^goal$|^foulcommit$|^shoton$|^shotoff$|cross|corner|possession')))]
df_EPL_sort.sort_values(by=['date','home_team_api_id'], inplace=True, ascending=True)
df_EPL_sort
df_LaLiga.home_team_api_id.unique()
pd.set_option('display.max_columns', None)
df_LaLiga_sort = df_LaLiga[df_LaLiga.columns.drop(list(matches.filter(regex='country_id|league_id|card|^goal$|^foulcommit$|^shoton$|^shotoff$|cross|corner|possession')))]
df_LaLiga_sort.sort_values(by=['date','home_team_api_id'], inplace=True, ascending=True)
df_LaLiga_sort
df_EPL.to_csv(path + "EPL.csv")
df_LaLiga.to_csv(path + "LaLiga.csv")
df_epl_stat = pd.read_csv(path + 'epl_stats.csv')
df_epl_stat
len(df_epl_stat)
df_LaLiga_stat = pd.read_csv(path + 'la_liga_stats.csv')
dict1 = ['Espanol', 'Valencia', 'Ath Bilbao', 'Ath Madrid', 'Betis',
'La Coruna', 'Numancia', 'Osasuna', 'Santander', 'Sp Gijon',
'Barcelona', 'Valladolid', 'Almeria', 'Getafe', 'Malaga',
'Mallorca', 'Real Madrid', 'Recreativo', 'Sevilla', 'Villarreal',
'Zaragoza', 'Tenerife', 'Xerez', 'Hercules', 'Levante', 'Sociedad',
'Granada', 'Vallecano', 'Celta', 'Elche', 'Eibar', 'Cordoba',
'Las Palmas']
dict2 = ['RCD Espanyol','Valencia CF','Athletic Club de Bilbao','Atlético Madrid','Real Betis Balompié',
'RC Deportivo de La Coruña','CD Numancia','CA Osasuna','Racing Santander', 'Real Sporting de Gijón',
'FC Barcelona','Real Valladolid','UD Almería','Getafe CF','Málaga CF',
'RCD Mallorca','Real Madrid CF', 'RC Recreativo', 'Sevilla FC','Villarreal CF',
'Real Zaragoza', 'CD Tenerife', 'Xerez Club Deportivo', 'Hércules Club de Fútbol', 'Levante UD', 'Real Sociedad',
'Granada CF', 'Rayo Vallecano', 'RC Celta de Vigo', 'Elche CF', 'SD Eibar', 'Córdoba CF',
'UD Las Palmas']
dict3 = ['Valencia CF', 'CA Osasuna', 'RC Deportivo de La Coruña',
'CD Numancia', 'Racing Santander', 'Real Sporting de Gijón',
'Real Betis Balompié', 'RCD Espanyol', 'Athletic Club de Bilbao',
'Atlético Madrid', 'Sevilla FC', 'Villarreal CF', 'Real Madrid CF',
'FC Barcelona', 'Getafe CF', 'RCD Mallorca', 'UD Almería',
'Málaga CF', 'Real Valladolid', 'RC Recreativo', 'Real Zaragoza',
'CD Tenerife', 'Xerez Club Deportivo', 'Hércules Club de Fútbol',
'Levante UD', 'Real Sociedad', 'Rayo Vallecano', 'Granada CF',
'RC Celta de Vigo', 'Elche CF', 'SD Eibar', 'Córdoba CF',
'UD Las Palmas']
m = dict(zip(dict1, dict2))
h = df_LaLiga_stat.filter(like='HomeTeam')
df_LaLiga_stat[h.columns] = h.replace(m)
a = df_LaLiga_stat.filter(like='AwayTeam')
df_LaLiga_stat[a.columns] = a.replace(m)
df_LaLiga_stat
len(df_epl_stat)
len(df_LaLiga_stat)
df_epl_stat = df_epl_stat.iloc[-3040:]
df_epl_stat.tail(10)
df_LaLiga_stat = df_LaLiga_stat.iloc[-3040:]
df_LaLiga_stat['Date'] = pd.to_datetime(df_LaLiga_stat['Date'], format='%m/%d/%Y')
df_LaLiga_stat.sort_values(by=['Date','HomeTeam'], inplace=True, ascending=True)
df_EPL_sort.loc[:,"HTP"] = df_epl_stat['HTP'].values.tolist()
df_EPL_sort.loc[:,"ATP"] = df_epl_stat['ATP'].values.tolist()
df_EPL_sort.loc[:,"Result"] = df_epl_stat['FTR'].values.tolist()
df_EPL_sort.head()
df_EPL_sort.to_csv(path + "EPL_sort.csv")
df_LaLiga_sort.loc[:,"HTP"] = df_LaLiga_stat['HTP'].values.tolist()
df_LaLiga_sort.loc[:,"ATP"] = df_LaLiga_stat['ATP'].values.tolist()
df_LaLiga_sort.loc[:,"Result"] = df_LaLiga_stat['FTR'].values.tolist()
df_LaLiga_sort.tail(10)
df_LaLiga.head()
df_LaLiga_sort.to_csv(path + "LaLiga_sort.csv")
###Output
_____no_output_____ |
Sparkify_AWS_EMR.ipynb | ###Markdown
Churn predictive modelling using Apache Spark (PySpark) with Sparkify datasetThis project sets out to create a predictive model for churn prediction for a music streaming service: Sparkify. Two datasets are made available, a tiny set of 128MB and a full dataset of 12GB. The project first trains on the tiny dataset on a local machine to get a sense of the sample data before deciding the components necessary to model the full dataset. Aside from data exploration, the local modelling work establishes how to preprocess the data, which features to select and which learning algorithm to adopt. Doing so makes the modelling work more time- and compute-efficient. For modelling on the large dataset, an AWS EMR cluster is used for the final training. We also compare whether the full dataset behaves similarly, and is descriptively similar, to the tiny dataset, which would confirm that our choice of training features and learning algorithm is sound.
###Code
from pyspark.sql.functions import skewness, kurtosis
# import libraries
# Starter code
from pyspark.sql import SparkSession
from pyspark.sql import Window
from pyspark.sql.functions import avg, col, count, desc, stddev, udf, isnan, when, isnull, mean, min, max, skewness, kurtosis
from pyspark.sql.types import IntegerType, BooleanType
from pyspark.sql.functions import max as max_fn
from pyspark.sql.functions import min as min_fn
from pyspark.ml.feature import StandardScaler, VectorAssembler
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.classification import GBTClassifier
from pyspark.ml.classification import GBTClassificationModel
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
#import seaborn as sns
import datetime
import pandas as pd
from time import time
# create a Spark session
spark = SparkSession \
.builder \
.appName("Sparkify") \
.getOrCreate()
###Output
_____no_output_____
###Markdown
Load and Clean DatasetThe full 12GB dataset is loaded from an AWS S3 bucket.
###Code
# Read in full sparkify dataset
event_data = "s3a://udacity-dsnd/sparkify/sparkify_event_data.json"
df = spark.read.json(event_data)
df.head()
df.printSchema()
type(df)
###Output
<class 'pyspark.sql.dataframe.DataFrame'>
###Markdown
Number of data points in the dataset
###Code
df.count()
###Output
26259199
###Markdown
A brief description of the dataset
###Code
df.describe().show()
df.select("page").distinct().sort("page").show(50)
###Output
+--------------------+
| page|
+--------------------+
| About|
| Add Friend|
| Add to Playlist|
| Cancel|
|Cancellation Conf...|
| Downgrade|
| Error|
| Help|
| Home|
| Login|
| Logout|
| NextSong|
| Register|
| Roll Advert|
| Save Settings|
| Settings|
| Submit Downgrade|
| Submit Registration|
| Submit Upgrade|
| Thumbs Down|
| Thumbs Up|
| Upgrade|
+--------------------+
###Markdown
Missing Values
###Code
for col in df.columns:
missing_count = df.filter((isnan(df[col])) | (df[col].isNull()) | (df[col] == "")).count()
if missing_count > 0:
print("{}: {}".format(col, missing_count))
###Output
artist: 5408927
firstName: 778479
gender: 778479
lastName: 778479
length: 5408927
location: 778479
registration: 778479
song: 5408927
userAgent: 778479
###Markdown
Remove rows with missing values in userId and sessionId
###Code
print("Number of rows in the Pyspark dataframe: {}".format(df.count()))
df_cleaned = df.dropna(how = "any", subset = ["userId", "sessionId"])
df_cleaned = df_cleaned.filter(df["userId"] != "") # `userId` should not be empty string
print("Number of rows after clearning: {}".format(df_cleaned.count()))
if df.count() == df_cleaned.count():
print("There is no missing values in userId and sessionId")
else:
print("{} rows removed.".format(df.count() - df_cleaned.count()))
###Output
There is no missing values in userId and sessionId
###Markdown
Exploratory Data AnalysisWhen you're working with the full dataset, perform EDA by loading a small subset of the data and doing basic manipulations within Spark. In this workspace, you are already provided a small subset of data you can explore. Define ChurnOnce you've done some preliminary analysis, create a column `Churn` to use as the label for your model. I suggest using the `Cancellation Confirmation` events to define your churn, which happen for both paid and free users. As a bonus task, you can also look into the `Downgrade` events. Explore DataOnce you've defined churn, perform some exploratory data analysis to observe the behavior for users who stayed vs users who churned. You can start by exploring aggregates on these two groups of users, observing how much of a specific action they experienced per a certain time unit or number of songs played.
###Code
numerical_cols = []
categorical_cols = []
for s in df_cleaned.schema:
data_type = str(s.dataType)
if data_type == "StringType":
categorical_cols.append(s.name)
if data_type == "LongType" or data_type == "DoubleType":
numerical_cols.append(s.name)
###Output
_____no_output_____
###Markdown
Investigate categorical columns
###Code
for c in categorical_cols:
print("{} count: {}".format(c, df_cleaned.select(c).count()))
###Output
artist count: 26259199
auth count: 26259199
firstName count: 26259199
gender count: 26259199
lastName count: 26259199
level count: 26259199
location count: 26259199
method count: 26259199
page count: 26259199
song count: 26259199
userAgent count: 26259199
userId count: 26259199
###Markdown
Investigate numerical columns
###Code
for n in numerical_cols:
print("{} count: {}".format(n, df_cleaned.select(n).count()))
df_cleaned.select([mean(n), min(n), max(n), stddev(n)]).show()
###Output
itemInSession count: 26259199
+------------------+------------------+------------------+--------------------------+
|avg(itemInSession)|min(itemInSession)|max(itemInSession)|stddev_samp(itemInSession)|
+------------------+------------------+------------------+--------------------------+
|106.56267561702853| 0| 1428| 117.65812617523798|
+------------------+------------------+------------------+--------------------------+
length count: 26259199
+------------------+-----------+-----------+-------------------+
| avg(length)|min(length)|max(length)|stddev_samp(length)|
+------------------+-----------+-----------+-------------------+
|248.72543296748836| 0.522| 3024.66567| 97.28710387078071|
+------------------+-----------+-----------+-------------------+
registration count: 26259199
+--------------------+-----------------+-----------------+-------------------------+
| avg(registration)|min(registration)|max(registration)|stddev_samp(registration)|
+--------------------+-----------------+-----------------+-------------------------+
|1.535220665260512...| 1508018725000| 1543821822000| 3.2402990978250685E9|
+--------------------+-----------------+-----------------+-------------------------+
sessionId count: 26259199
+------------------+--------------+--------------+----------------------+
| avg(sessionId)|min(sessionId)|max(sessionId)|stddev_samp(sessionId)|
+------------------+--------------+--------------+----------------------+
|100577.99253503505| 1| 240381| 71909.21077875949|
+------------------+--------------+--------------+----------------------+
status count: 26259199
+------------------+-----------+-----------+-------------------+
| avg(status)|min(status)|max(status)|stddev_samp(status)|
+------------------+-----------+-----------+-------------------+
|210.06768953615074| 200| 404| 31.550728788197617|
+------------------+-----------+-----------+-------------------+
ts count: 26259199
+--------------------+-------------+-------------+--------------------+
| avg(ts)| min(ts)| max(ts)| stddev_samp(ts)|
+--------------------+-------------+-------------+--------------------+
|1.540905636113773E12|1538352001000|1543622402000|1.5158105552719693E9|
+--------------------+-------------+-------------+--------------------+
###Markdown
Investigate every column
###Code
df_cleaned.select("artist").distinct().count()
df_cleaned.select("auth").distinct().show()
df_cleaned.select("firstName").distinct().count()
df_cleaned.select("gender").distinct().show()
df_cleaned.select("itemInSession").distinct().count()
df_cleaned.select("lastName").distinct().count()
df_cleaned.select("length").distinct().count()
df_cleaned.select("level").distinct().show()
df_cleaned.select("location").distinct().count()
df_cleaned.select("location").distinct().show(20)
df_cleaned.select("method").distinct().show()
df_cleaned.select("page").distinct().show()
df_cleaned.select("registration").distinct().count()
df_cleaned.select("sessionId").distinct().count()
df_cleaned.select("song").distinct().count()
df_cleaned.select("status").distinct().show()
df_cleaned.select("userAgent").distinct().show(10, truncate=False)
df_cleaned.select("userId").distinct().count()
###Output
22278
###Markdown
Define Churn Number of cancellations:
###Code
df_cleaned.filter(df_cleaned.page=="Cancellation Confirmation").select("userId").dropDuplicates().count()
churn_list = df_cleaned.filter(df_cleaned.page=="Cancellation Confirmation" ).select("userId").dropDuplicates()
churned_users = [(row['userId']) for row in churn_list.collect()]
df_churn = df_cleaned.withColumn("churn", df_cleaned.userId.isin(churned_users))
df_churn.dropDuplicates(["userId", "gender"]).groupby(["churn", "gender"]).count().sort("churn").show()
churn_events = udf(lambda x: 1 if x == "Cancellation Confirmation" else 0, IntegerType())
df_cleaned = df_cleaned.withColumn("churn_flag", churn_events("page"))
# Calculate percentage of users who churned
churn_flag = df_cleaned.groupBy('userId').agg({'churn_flag': 'sum'})\
.select(avg('sum(churn_flag)')).collect()[0]['avg(sum(churn_flag))']
print("{} % of users have churned by cancelling subscription.".format(round(churn_flag*100, 3)))
###Output
22.457 % of users have churned by cancelling subscription.
###Markdown
The user churn percentage of 22.457% is very close to that of the tiny dataset (22.098%). We can at least assume that the label skewness is likely similar. Number of upgrades
###Code
df_cleaned.filter(df_cleaned.page=="Submit Upgrade").select("userId").dropDuplicates().count()
upgrade_list = df_cleaned.filter(df_cleaned.page=="Submit Upgrade" ).select("userId").distinct()
upgraded_users = [(row['userId']) for row in upgrade_list.collect()]
df_upgrade = df_cleaned.withColumn("upgrade", df_cleaned.userId.isin(upgraded_users))
df_upgrade.dropDuplicates(["userId", "gender"]).groupby(["upgrade", "gender"]).count().sort("upgrade").show()
###Output
+-------+------+-----+
|upgrade|gender|count|
+-------+------+-----+
| false| F| 4894|
| false| M| 5301|
| false| null| 1|
| true| M| 6350|
| true| F| 5732|
+-------+------+-----+
###Markdown
Number of downgrades:
###Code
df_cleaned.filter(df_cleaned.page=="Submit Downgrade").select("userId").dropDuplicates().count()
downgrade_list = df_cleaned.filter(df_cleaned.page=="Submit Downgrade" ).select("userId").distinct()
downgraded_users = [(row['userId']) for row in downgrade_list.collect()]
df_downgrade = df_cleaned.withColumn("downgrade", df_cleaned.userId.isin(downgraded_users))
df_downgrade.dropDuplicates(["userId", "gender"]).groupby(["downgrade", "gender"]).count().sort("downgrade").show()
###Output
+---------+------+-----+
|downgrade|gender|count|
+---------+------+-----+
| false| M| 9036|
| false| F| 8138|
| false| null| 1|
| true| M| 2615|
| true| F| 2488|
+---------+------+-----+
###Markdown
Feature EngineeringOnce you've familiarized yourself with the data, build out the features you find promising to train your model on. To work with the full dataset, you can follow the following steps.- Write a script to extract the necessary features from the smaller subset of data- Ensure that your script is scalable, using the best practices discussed in Lesson 3- Try your script on the full data set, debugging your script if necessaryIf you are working in the classroom workspace, you can just extract features based on the small subset of data contained here. Be sure to transfer over this work to the larger dataset when you work on your Spark cluster. Gender (binary)
###Code
# Latest level
fn_gender = udf(lambda x: 1 if x=="F" else 0, IntegerType())
feat_gender = df_cleaned.select(['userId', 'gender'])\
.dropDuplicates(['userId'])\
.select(['userId', 'gender'])\
.withColumn('gender', fn_gender('gender').cast(IntegerType()))
feat_gender.describe().show(5)
feat_gender.select(skewness("gender"), kurtosis("gender")).show()
###Output
+-------------------+-------------------+
| skewness(gender)| kurtosis(gender)|
+-------------------+-------------------+
|0.09220664431939639|-1.9914979347433575|
+-------------------+-------------------+
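###Markdown
Editorial note: every feature-extraction cell below triggers Spark actions on `df_cleaned`, so caching it once avoids re-reading the 12GB JSON from S3 each time. `cache()` is lazy; the data is materialized on the next action.
###Code
# Keep the cleaned events available across the repeated feature aggregations
df_cleaned.cache()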
###Markdown
Paid or Free (binary)
###Code
# Latest level
fn_level = udf(lambda x: 1 if x=="paid" else 0, IntegerType())
feat_level = df_cleaned.select(['userId', 'level', 'ts'])\
.orderBy(desc('ts'))\
.dropDuplicates(['userId'])\
.select(['userId', 'level'])\
.withColumn('level', fn_level('level').cast(IntegerType()))
feat_level.describe().show(5)
feat_level.select(skewness("level"), kurtosis("level")).show()
###Output
+-------------------+-------------------+
| skewness(level)| kurtosis(level)|
+-------------------+-------------------+
|-0.4025645802550173|-1.8379417587241018|
+-------------------+-------------------+
###Markdown
Total number of songs listened
###Code
feat_song = df_cleaned \
.select(["userId","song"]) \
.groupby("userID") \
.count()\
.withColumnRenamed("count", "num_song") \
.orderBy("userId")
feat_song.describe().show(5)
feat_song.select(skewness("num_song"), kurtosis("num_song")).show()
###Output
+------------------+------------------+
|skewness(num_song)|kurtosis(num_song)|
+------------------+------------------+
|135.95349045633083|19660.671563405533|
+------------------+------------------+
###Markdown
Total number of artist listened
###Code
# Number of artists listened
feat_artist = df_cleaned \
.filter(df_cleaned.page=="NextSong") \
.select("userId", "artist") \
.dropDuplicates() \
.groupby("userId") \
.count() \
.withColumnRenamed("count", "num_artist") \
.orderBy("userId")
feat_artist.describe().show()
feat_artist.select(skewness("num_artist"), kurtosis("num_artist")).show()
###Output
+--------------------+--------------------+
|skewness(num_artist)|kurtosis(num_artist)|
+--------------------+--------------------+
| 1.5260667285754526| 2.656182841474317|
+--------------------+--------------------+
###Markdown
Number of songs in playlist(s)
###Code
feat_playlist = df_cleaned \
.select('userID','page') \
.where(df_cleaned.page == 'Add to Playlist') \
.groupBy('userID') \
.count() \
.withColumnRenamed('count', 'num_playlist_song') \
.orderBy("userId")
feat_playlist.describe().show()
feat_playlist.select(skewness("num_playlist_song"), kurtosis("num_playlist_song")).show()
###Output
+---------------------------+---------------------------+
|skewness(num_playlist_song)|kurtosis(num_playlist_song)|
+---------------------------+---------------------------+
| 2.3914875986095625| 8.073009618134558|
+---------------------------+---------------------------+
###Markdown
Number of friends
###Code
feat_friends = df_cleaned \
.select('userID','page') \
.where(df_cleaned.page == 'Add Friend') \
.groupBy('userID') \
.count() \
.withColumnRenamed('count', 'num_friend') \
.orderBy("userId")
feat_friends.describe().show()
feat_friends.select(skewness("num_friend"), kurtosis("num_friend")).show()
###Output
+--------------------+--------------------+
|skewness(num_friend)|kurtosis(num_friend)|
+--------------------+--------------------+
| 2.3834675795984976| 8.182711524378096|
+--------------------+--------------------+
###Markdown
Total length of listening
###Code
# Total length of listening
feat_listentime = df_cleaned \
.select('userID','length') \
.groupBy('userID') \
.sum() \
.withColumnRenamed('sum(length)', 'time_listen') \
.orderBy("userId")
feat_listentime.describe().show()
feat_listentime.select(skewness("time_listen"), kurtosis("time_listen")).show()
###Output
+---------------------+---------------------+
|skewness(time_listen)|kurtosis(time_listen)|
+---------------------+---------------------+
| 2.439989611449916| 8.466901311178267|
+---------------------+---------------------+
###Markdown
Average number of songs per session
###Code
feat_avgsongs = df_cleaned.filter(df_cleaned.page =="NextSong") \
.groupBy(["userId", "sessionId"]) \
.count() \
.groupby(['userId']) \
.agg({'count':'avg'}) \
.withColumnRenamed('avg(count)', 'avg_songs') \
.orderBy("userId")
feat_avgsongs.describe().show()
feat_avgsongs.select(skewness("avg_songs"), kurtosis("avg_songs")).show()
###Output
+-------------------+-------------------+
|skewness(avg_songs)|kurtosis(avg_songs)|
+-------------------+-------------------+
| 1.736381217684329| 8.096277081017185|
+-------------------+-------------------+
###Markdown
Average time per session
###Code
feat_sesstime = df_cleaned.groupBy(["userId", "sessionId"]) \
.agg(((max_fn(df_cleaned.ts)-min_fn(df_cleaned.ts))/(1000*60))
.alias("sessTime"))
feat_avgtime = feat_sesstime.groupby("userId") \
.agg(avg(feat_sesstime.sessTime).alias("avgSessTime")) \
.orderBy("userId")
feat_avgtime.describe().show()
feat_avgtime.select(skewness("avgSessTime"), kurtosis("avgSessTime")).show()
feat_avgtime.show(5)
###Output
+-------+------------------+
| userId| avgSessTime|
+-------+------------------+
|1000025| 404.793137254902|
|1000035| 235.9363636363636|
|1000083|186.10454545454547|
|1000103| 68.93333333333334|
|1000164|218.88981481481483|
+-------+------------------+
only showing top 5 rows
###Markdown
Number of session per user
###Code
feat_session = df_cleaned.select("userId", "sessionId") \
                    .dropDuplicates() \
                    .groupby("userId") \
                    .count() \
                    .withColumnRenamed('count', 'session') \
                    .orderBy("userId")
feat_session.describe().show()
feat_session.select(skewness("session"), kurtosis("session")).show()
###Output
+------------------+-----------------+
| skewness(session)|kurtosis(session)|
+------------------+-----------------+
|149.21426452603328|22266.26341038374|
+------------------+-----------------+
###Markdown
Label (churn)
###Code
# label user who churned using the churn_flag defined earlier.
user_partitions = Window.partitionBy('userId')
df_cleaned = df_cleaned.withColumn('churn', max('churn_flag').over(user_partitions))
label = df_cleaned \
.select(['userId', 'churn']) \
.dropDuplicates() \
.withColumnRenamed("churn", "label") \
.orderBy("userId")
label.describe().show()
label.select(skewness("label"), kurtosis("label")).show()
###Output
+------------------+--------------------+
| skewness(label)| kurtosis(label)|
+------------------+--------------------+
|1.3200520841972045|-0.25746249500661467|
+------------------+--------------------+
###Markdown
Construct dataset
###Code
dataset = feat_gender.join(feat_level,'userID','outer') \
.join(feat_song,'userID','outer') \
.join(feat_artist,'userID','outer') \
.join(feat_playlist,'userID','outer') \
.join(feat_friends,'userID','outer') \
.join(feat_listentime,'userID','outer') \
.join(feat_avgsongs,'userID','outer') \
.join(feat_avgtime,'userID','outer') \
.join(feat_session,'userID','outer') \
.join(label,'userID','outer') \
.drop('userID') \
.fillna(0)
dataset.show(5)
dataset.head()
###Output
Row(gender=0, level=0, num_song=1317, num_artist=767, num_playlist_song=25, num_friend=14, time_listen=259349.89726000009, avg_songs=48.666666666666664, avgSessTime=194.9060606060606, session=22, label=1)
###Markdown
ModelingSplit the full dataset into train, test, and validation sets. Test out several of the machine learning methods you learned. Evaluate the accuracy of the various models, tuning parameters as necessary. Determine your winning model based on test accuracy and report results on the validation set. Since the churned users are a fairly small subset, I suggest using F1 score as the metric to optimize. Features
###Code
dataset.printSchema()
###Output
root
|-- gender: integer (nullable = true)
|-- level: integer (nullable = true)
|-- num_song: long (nullable = true)
|-- num_artist: long (nullable = true)
|-- num_playlist_song: long (nullable = true)
|-- num_friend: long (nullable = true)
|-- time_listen: double (nullable = false)
|-- avg_songs: double (nullable = false)
|-- avgSessTime: double (nullable = false)
|-- session: long (nullable = true)
|-- label: integer (nullable = true)
###Markdown
Labels
###Code
dataset.groupby('label').count().show()
###Output
+-----+-----+
|label|count|
+-----+-----+
| 1| 5003|
| 0|17275|
+-----+-----+
###Markdown
Vector assembler
###Code
cols = dataset.columns[:-1]
assembler = VectorAssembler(inputCols=cols, outputCol="NumericFeatures")
data = assembler.transform(dataset)
data
###Output
DataFrame[gender: int, level: int, num_song: bigint, num_artist: bigint, num_playlist_song: bigint, num_friend: bigint, time_listen: double, avg_songs: double, avgSessTime: double, session: bigint, label: int, NumericFeatures: vector]
###Markdown
Standard scaler
###Code
std_scaler = StandardScaler(inputCol="NumericFeatures", outputCol="features", withStd=True)
scalerModel = std_scaler.fit(data)
data = scalerModel.transform(data)
# Train test split
train, test = data.randomSplit([0.8, 0.2], seed=36)
def train_model(train, estimator, paramGrid, folds=3):
"""
Fit an estimator with training data and tune it with the defined parameter grid using 3-folds cross validation
"""
crossval = CrossValidator(estimator=estimator,
estimatorParamMaps=paramGrid,
evaluator=MulticlassClassificationEvaluator(),
numFolds=folds)
model = crossval.fit(train)
return model
def eval_model(model, data):
"""
Evaluate a learned model given an unseen dataset
"""
pred = model.transform(data)
evaluator = MulticlassClassificationEvaluator()
evalMetrics = {}
evalMetrics["precision"] = evaluator.evaluate(pred, {evaluator.metricName: "weightedPrecision"})
evalMetrics["recall"] = evaluator.evaluate(pred, {evaluator.metricName: "weightedRecall"})
evalMetrics["f1"] = evaluator.evaluate(pred, {evaluator.metricName: "f1"})
evalMetrics["accuracy"] = evaluator.evaluate(pred, {evaluator.metricName: "accuracy"})
# Build a Spark dataframe from the metrics
metrics_to_display = {
k:round(v, 4) for k,v in evalMetrics.items() if ('confusion_matrix' not in k)
}
summary = spark.createDataFrame(pd.DataFrame([metrics_to_display], columns=metrics_to_display.keys()))
return summary
gbt = GBTClassifier(labelCol="label", featuresCol="features")
paramGrid_gbt = ParamGridBuilder()\
.addGrid(gbt.maxIter,[30])\
.addGrid(gbt.maxBins, [40])\
.addGrid(gbt.maxDepth,[8]) \
.build()
start = time()
print("Training & tuning GBTClassifier model >")
model = train_model(train, gbt, paramGrid_gbt)
end = time()
print('Training time {} minutes'.format(round((end - start)/60,2)))
summary = eval_model(model, test)
print("Evaluation result:")
summary.show()
###Output
Evaluation result:
+------+------+---------+--------+
| f1|recall|precision|accuracy|
+------+------+---------+--------+
|0.7254|0.7868| 0.7444| 0.7908|
+------+------+---------+--------+
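###Markdown
Feature importances (editorial addition): the fitted GBT model exposes per-feature importance scores, which helps sanity-check which engineered features drive the churn prediction.
###Code
# Map Spark's featureImportances vector back to the engineered feature names
importances = model.bestModel.featureImportances.toArray()
for name, score in sorted(zip(cols, importances), key=lambda x: -x[1]):
    print("{:<20s} {:.4f}".format(name, score))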
###Markdown
Using the same classifier and the same parameters to learn from the full dataset, the evaluation shows less promising results. Save the trained model
###Code
model.bestModel.write().overwrite().save('GBTClassifier')
###Output
_____no_output_____
###Markdown
Load a trained model
###Code
best_model = GBTClassificationModel.load('GBTClassifier')
###Output
_____no_output_____ |
1_Data_Aquisition_&_Preparation.ipynb | ###Markdown
###Code
%%capture
%cd ..
%load_ext autoreload
%autoreload 2
from pathlib import Path
import pandas as pd
import numpy as np
import json
###Output
_____no_output_____
###Markdown
Data Acquisition and Preparation
###Code
RAW_DATA_FOLDER = Path('data/raw/')
INTERMEDIATE_DATA_FOLDER = Path('data/interim/')
REFERENCE_FOLDER = Path('references/')
###Output
_____no_output_____
###Markdown
Downloading Data
###Code
# TODO: Data Acquisition
###Output
_____no_output_____
###Markdown
Preparing Dataset
###Code
FAKE_DATA_FOLDER = RAW_DATA_FOLDER / 'fake'
TRUE_DATA_FOLDER = RAW_DATA_FOLDER / 'true'
FAKE_META_FOLDER = RAW_DATA_FOLDER / 'fake-meta-information'
TRUE_META_FOLDER = RAW_DATA_FOLDER / 'true-meta-information'
###Output
_____no_output_____
###Markdown
Text datasets
###Code
def create_text_dataframe(folder):
df_dict = {}
for filepath in folder.glob("*.txt"):
with open(filepath, 'r', encoding='utf-8') as f:
df_dict[filepath.stem] = f.read()
return pd.DataFrame.from_dict(df_dict, orient='index', columns=['text'])
fake_text_df = create_text_dataframe(FAKE_DATA_FOLDER)
true_text_df = create_text_dataframe(TRUE_DATA_FOLDER)
###Output
_____no_output_____
###Markdown
Metadata Datasets
###Code
def create_metadata_datasets(folder, metadata_columns, metadata_dtypes):
df_dict = {}
df_dict = {k:[] for k in metadata_columns}
df_dict["index"] = []
for filepath in list(folder.glob("*.txt")):
with open(filepath, 'r') as f:
df_dict["index"].append(filepath.stem.split("-")[0])
for col, value in zip(metadata_columns, f.readlines()):
df_dict[col].append(value[0:-1])
df = pd.DataFrame(df_dict)
df = df.replace("None", np.nan)
df = df.astype(metadata_dtypes, errors='ignore').set_index("index", drop=True)
df.index.name = None
return df
metadata_columns = [
"author", "link", "category", "date_of_publication",
"tokens", "words_no_punctuation", "types", "links_inside",
"upper_words", "verbs", "subjuntive_imperative_verbs",
"nouns", "adjectives", "adverbs", "modal_verbs",
"singular_first_second_personal_pronouns",
"plural_first_personal_pronouns", "pronouns",
"pausality", "characters", "average_sentence_length",
"average_word_lenght", "percentage_spelling_errors",
"emotiveness", "diversity"
]
metadata_translate = [
"author", "link", "category", "date of publication", "number of tokens",
"number of words without punctuation", "number of types",
"number of links inside the news", "number of words in upper case",
"number of verbs", "number of subjuntive and imperative verbs",
"number of nouns", "number of adjectives", "number of adverbs",
"number of modal verbs (mainly auxiliary verbs)",
"number of singular first and second personal pronouns",
"number of plural first personal pronouns", "number of pronouns",
"pausality", "number of characters", "average sentence length",
"average word length", "percentage of news with speeling errors",
"emotiveness", "diversity"
]
metadata_dtypes = {
"author": "string", "link": "string", "category": "string",
"date_of_publication": "datetime64[ns]",
"tokens": "float", "words_no_punctuation": "float",
"types": "float","links_inside": "float", "upper_words": "float",
"verbs": "float", "subjuntive_imperative_verbs": "float", "nouns": "float",
"adjectives": "float", "adverbs": "float","modal_verbs": "float",
"singular_first_second_personal_pronouns": "float",
"plural_first_personal_pronouns": "float", "pronouns": "float","characters": "float",
"pausality": "float", "average_sentence_length": "float",
"average_word_lenght": "float", "percentage_spelling_errors": "float",
"emotiveness": "float", "diversity": "float"
}
fake_metadata_df = create_metadata_datasets(FAKE_META_FOLDER, metadata_columns, metadata_dtypes)
true_metadata_df = create_metadata_datasets(TRUE_META_FOLDER, metadata_columns, metadata_dtypes)
fake_metadata_df.links_inside.unique()
fake_metadata_df.links_inside.isna().sum()
true_metadata_df.loc[["69", "61"]]
true_text_df.loc[['68']].text
###Output
_____no_output_____
###Markdown
Merging Created Datasets Fake Dataset
###Code
fake_df = pd.concat([fake_text_df, fake_metadata_df], axis=1, sort=False)
fake_df.index = fake_df.index.astype(int)
fake_df = fake_df.sort_index()
fake_df = fake_df.reset_index(drop=True)
###Output
_____no_output_____
###Markdown
True Dataset
###Code
true_df = pd.concat([true_text_df, true_metadata_df], axis=1, sort=False)
true_df.index = true_df.index.astype(int)
true_df = true_df.sort_index()
true_df = true_df.reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Merge All Datasets
###Code
result = pd.concat([true_df, fake_df], keys=['True', 'Fake'])
result = result.reset_index(level=0).rename(columns={"level_0": "class"})
result.to_csv(INTERMEDIATE_DATA_FOLDER/"fake_true_news.csv", index=False)
###Output
_____no_output_____
###Markdown
Columns Information
###Code
columns_info ={}
columns_info['text'] = 'Text extracted from the news'
for var, desc in zip(metadata_columns, metadata_translate):
columns_info[var] = desc
with open(REFERENCE_FOLDER / "news_data_dictionary.json","w") as f:
f.write(json.dumps(columns_info))
f.close()
###Output
_____no_output_____ |
Fintech_deteccion_de_fraude.ipynb | ###Markdown
Fraud Detection The goal of this project is to compare several classification models for detecting fraud in transactions. Problem Definition We will use the Kaggle data: https://www.kaggle.com/mlg-ulb/creditcardfraud For this project we define the value 1 ($y=1$) when the transaction was fraudulent, and 0 ($y=0$) otherwise. The data contains credit card transactions from September 2013, all from people living in Europe. In total we have access to 2 days of transactions, with a total of 284k transactions and 492 frauds. Frauds represent only **0.172%** of all transactions. The task is to predict fraud. The variable `Class` is our target ($y$). The features have already been preprocessed with PCA to preserve anonymity, so they are somewhat hard to interpret directly. Libraries
###Code
# Load libraries
import numpy as np
import pandas as pd
from matplotlib import pyplot
from pandas import read_csv, set_option
from pandas.plotting import scatter_matrix
import seaborn as sns
from sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import ElasticNet
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, RandomForestClassifier, ExtraTreesClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
#Libraries for Deep Learning Models
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.optimizers import SGD
#Libraries for Saving the Model
from pickle import dump
from pickle import load
from pprint import pprint
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
ETL Let's load the data with the pandas function [pd.read_csv](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html)
###Code
df= read_csv('https://github.com/uumami/workshop_riskmathics/blob/main/creditcard.csv.zip?raw=true',
compression='zip')
df.head()
###Output
_____no_output_____
###Markdown
Let's look at its dimensions
###Code
df.shape
###Output
_____no_output_____
###Markdown
Let's look at its general information
###Code
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 284807 entries, 0 to 284806
Data columns (total 31 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Time 284807 non-null float64
1 V1 284807 non-null float64
2 V2 284807 non-null float64
3 V3 284807 non-null float64
4 V4 284807 non-null float64
5 V5 284807 non-null float64
6 V6 284807 non-null float64
7 V7 284807 non-null float64
8 V8 284807 non-null float64
9 V9 284807 non-null float64
10 V10 284807 non-null float64
11 V11 284807 non-null float64
12 V12 284807 non-null float64
13 V13 284807 non-null float64
14 V14 284807 non-null float64
15 V15 284807 non-null float64
16 V16 284807 non-null float64
17 V17 284807 non-null float64
18 V18 284807 non-null float64
19 V19 284807 non-null float64
20 V20 284807 non-null float64
21 V21 284807 non-null float64
22 V22 284807 non-null float64
23 V23 284807 non-null float64
24 V24 284807 non-null float64
25 V25 284807 non-null float64
26 V26 284807 non-null float64
27 V27 284807 non-null float64
28 V28 284807 non-null float64
29 Amount 284807 non-null float64
30 Class 284807 non-null int64
dtypes: float64(30), int64(1)
memory usage: 67.4 MB
###Markdown
In this case we did not run into many problems, since the data was already preprocessed with PCA, a dimensionality reduction technique. Exploratory Data Analysis
###Code
df.head()
df.describe()
###Output
_____no_output_____
###Markdown
**Class balance of Y**
###Code
class_names = {0:'Not Fraud', 1:'Fraud'}
print(df.Class.value_counts().rename(index = class_names))
###Output
Not Fraud 284315
Fraud 492
Name: Class, dtype: int64
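###Markdown
Before modelling, it helps to make the imbalance explicit: a trivial classifier that always predicts "Not Fraud" is already highly accurate, which is why accuracy alone can be misleading here. The cell below is a small sketch of that baseline check (not part of the original notebook).
###Code
# Sketch: quantify the imbalance and the accuracy of a trivial "never fraud" baseline.
counts = df['Class'].value_counts()
print(f'Imbalance ratio (legit : fraud): {counts[0] / counts[1]:.1f} : 1')
print(f'Accuracy of always predicting "Not Fraud": {counts[0] / counts.sum():.5f}')
###Output
_____no_output_____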
###Markdown
We can see that our classes are extremely imbalanced; the vast majority of transactions are not fraud. Visual and Graphical Descriptions
###Code
# histograms
df.hist(sharex=False, sharey=False, xlabelsize=1, ylabelsize=1, figsize=(12,12))
pyplot.show()
###Output
_____no_output_____
###Markdown
We can see that the data distributions are skewed. However, since we do not know the meaning of each feature, it is hard to interpret them directly.
###Code
df[['Time', 'Amount']].describe()
###Output
_____no_output_____
###Markdown
The `Time` variable appears to vary a lot and take very large values. The `Amount` variable seems to behave better and vary less, although it appears to have extreme values. Compare the 75th percentile with the max.
###Code
sns.histplot(x=df['Time'])
df['Time'].unique().shape
###Output
_____no_output_____
###Markdown
The `Time` variable contains the seconds elapsed between the current transaction and the first transaction in the dataset.
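Since `Time` is measured in seconds from the first transaction, folding it into hour of day can reveal a daily cycle. The next cell is a small exploratory sketch (not in the original notebook); the raw `Time` column is still what gets used for modelling.
###Code
# Sketch: fold Time into hour-of-day to look for a daily transaction pattern.
hours_of_day = (df['Time'] // 3600) % 24
sns.histplot(x=hours_of_day, bins=24)
###Output
_____no_output_____
###Markdown
The cell below goes back to the original exploration of `Amount`.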
###Code
# plot the fourth root of the distribution so it is easier to see, since there is one very large outlier
sns.displot(x=(np.power(df['Amount'], .25)))
###Output
_____no_output_____
###Markdown
Data Preparation
###Code
print('Null Values =',df.isnull().values.any())
###Output
Null Values = False
###Markdown
Luckily there are no null values in the data! Data split
###Code
Y = df["Class"]
X = df.loc[:, df.columns != 'Class']
test_size = 0.2
seed = 7
# Split train y test
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=test_size, random_state=seed)
# Split para validacion
X_train, X_val, Y_train, Y_val = train_test_split(
X_train, Y_train, test_size=test_size, random_state=seed)
print(f'Numero de fraudes en test: {Y_test.value_counts()}')
print(f'Numero de fraudes en train: {Y_train.value_counts()}')
print(f'Numero de fraudes en validacion: {Y_val.value_counts()}')
###Output
Numero de fraudes en test: 0 56862
1 100
Name: Class, dtype: int64
Numero de fraudes en train: 0 181967
1 309
Name: Class, dtype: int64
Numero de fraudes en validacion: 0 45486
1 83
Name: Class, dtype: int64
###Markdown
It looks like the fraud cases ($y=1$) are balanced across train and test. However, the dataset as a whole is imbalanced. For now we will ignore this and try several models; later we will tackle the problem directly and compare results. Feature Selection Feature selection notebook: https://github.com/IEXE-Tec/aprendizaje-maquina-2/blob/master/01_seleccion_de_variables.ipynb There are many classical statistical techniques for feature selection, such as ANOVA or ANCOVA tables, or different types of correlation for different data types. Post on [Feature Selection](https://machinelearningmastery.com/feature-selection-with-real-and-categorical-data/)  In our case we will skip **Feature Selection** to focus on the algorithms and regularization. The following snippet contains Kaggle code for feature selection using Chi2. Strictly speaking this is not quite correct, since our features are not categorical, but it gives an idea of the flexibility of ML.
###Code
# ## Do not run
# from sklearn.feature_selection import SelectKBest
# from sklearn.feature_selection import chi2
# bestfeatures = SelectKBest( k=10)
# bestfeatures
# Y_train = df["Class"]
# X_train = df.loc[:, df.columns != 'Class']
# fit = bestfeatures.fit(X_train,Y_train)
# dfscores = pd.DataFrame(fit.scores_)
# dfcolumns = pd.DataFrame(X_train.columns)
# #concat two dataframes for better visualization
# featureScores = pd.concat([dfcolumns,dfscores],axis=1)
# featureScores.columns = ['Specs','Score'] #naming the dataframe columns
# print(featureScores.nlargest(10,'Score')) #print 10 best features
###Output
_____no_output_____
###Markdown
Exercise: Analysis of Time **Exercise** Propose a metric to evaluate the importance of the `Time` variable! It seems to take many values and not necessarily contribute much to the modelling.
###Code
# Time is a numerical variable, and the target Class is a categorical variable with two possible values (fraud or not fraud);
# following the feature selection scheme above, the association between Time and Class can be checked with an ANOVA
# analysis
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
# scikit-learn's ANOVA implementation is the f_classif() score function
fs = SelectKBest(score_func=f_classif, k='all')
y_train_fs = df["Class"]
X_train_fs = df.loc[:, df.columns != 'Class']
fit = fs.fit(X_train_fs, y_train_fs)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(X_train_fs.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['feature','Score'] #naming the dataframe columns
featureScores.sort_values(by="Score", ascending = False)
# Indeed, the Time variable contributes very little to fraud detection, although it is not the least informative one.
###Output
_____no_output_____
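###Markdown
As a complement to the ANOVA ranking above, mutual information also captures non-linear dependence between each feature and `Class`. The sketch below reuses the `X_train_fs` / `y_train_fs` objects defined above (it can be slow on the full dataset).
###Code
from sklearn.feature_selection import mutual_info_classif

# Sketch: rank features by mutual information with the target.
mi = mutual_info_classif(X_train_fs, y_train_fs, random_state=0)
mi_scores = pd.Series(mi, index=X_train_fs.columns).sort_values(ascending=False)
print(mi_scores.head(10))
###Output
_____no_output_____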
###Markdown
Feature engineering Let's scale the numerical variables. The remaining features do not need scaling, since they were already scaled when PCA was applied. **Note**: Strictly speaking, scaling or normalization should be fit on the train set and then applied to the test set. However, it is very common to fit the scaling on the whole dataset on the assumption that it will not produce **data leakage**. + For the **Time** variable we will use [MinMaxScaler](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html), so the variable will lie between 0 and 1; we choose it because Time **does not appear to have outliers** and is distributed more or less uniformly. + For **Amount** we will use [RobustScaler](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html), which scales the variable using the interquartile range; we choose it because its distribution **has outliers**. 
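To make the "fit on train only" rule automatic during cross-validation, the scalers can also be wrapped in a pipeline. The sketch below is an optional alternative that only assumes the column names used in this dataset; the manual version in the next cell does the same thing explicitly.
###Code
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, RobustScaler

# Sketch: scalers inside a Pipeline are refit on each training fold during CV,
# so no information leaks from validation or test data.
preprocess = ColumnTransformer(
    transformers=[('time', MinMaxScaler(), ['Time']),
                  ('amount', RobustScaler(), ['Amount'])],
    remainder='passthrough')  # the PCA components V1..V28 pass through unchanged
leak_free_lr = Pipeline([('prep', preprocess),
                         ('clf', LogisticRegression(max_iter=1000))])
###Output
_____no_output_____
###Markdown
The cells below keep the notebook's original, explicit approach: fit each scaler on the training split and reuse it on validation and test.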
###Code
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import RobustScaler
# ##Time
time_scaler = MinMaxScaler().fit(X_train['Time'].values.reshape(-1,1))
# Entrenamos con train
X_train['time_scaled'] = time_scaler.transform(X_train['Time'].values.reshape(-1,1))
# Escalamos el test
X_test['time_scaled'] = time_scaler.transform(X_test['Time'].values.reshape(-1,1))
# Val
X_val['time_scaled'] = time_scaler.transform(X_val['Time'].values.reshape(-1,1))
# ##Amount
amount_scaler = RobustScaler().fit(X_train['Amount'].values.reshape(-1,1))
# Entrenamos con train
X_train['amount_scaled'] = amount_scaler.transform(X_train['Amount'].values.reshape(-1,1))
# Escalamos el test
X_test['amount_scaled'] = amount_scaler.transform(X_test['Amount'].values.reshape(-1,1))
# Val
X_val['amount_scaled'] = amount_scaler.transform(X_val['Amount'].values.reshape(-1,1))
X_train[['time_scaled', 'amount_scaled']].describe()
sns.pairplot(data=X_train[['time_scaled', 'amount_scaled']])
X_val[['time_scaled', 'amount_scaled']].describe()
X_train.drop(['Time', 'Amount'], axis=1, inplace=True)
X_test.drop(['Time', 'Amount'], axis=1, inplace=True)
X_val.drop(['Time', 'Amount'], axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
Modelling Model Evaluation We will use cross-validation to choose models. Once the models are chosen, we will use CV again to tune the hyperparameters of the selected model. Let's choose the folds and the models to use
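Because the positive class is so rare, plain KFold can produce folds with very few frauds. The sketch below is an optional variation on the loop that follows, using StratifiedKFold and the F1 score.
###Code
from sklearn.model_selection import StratifiedKFold

# Sketch: stratified folds keep the fraud rate roughly constant across folds,
# and F1 is more informative than accuracy for the minority class.
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=7)
f1_scores = cross_val_score(LogisticRegression(max_iter=1000), X_train, Y_train,
                            cv=skf, scoring='f1', n_jobs=-1)
print(f'F1: {f1_scores.mean():.3f} (+/- {f1_scores.std():.3f})')
###Output
_____no_output_____
###Markdown
The original evaluation below keeps KFold and accuracy, which is why all models look almost identical (around 0.999); the per-class metrics later in the notebook tell a more useful story.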
###Code
# Opciones de Evaluacion de Modelos
num_folds = 5
seed = 7
# scoring ='f1'
scoring = 'accuracy'
# Modelos
models = []
models.append(('LR', LogisticRegression()))
#models.append(('RF', RandomForestClassifier()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
###Output
_____no_output_____
###Markdown
CV over different models
###Code
results = []
names = []
for name, model in models:
kfold = KFold(n_splits=num_folds, random_state=seed, shuffle=True)
cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring,
n_jobs=-1)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
###Output
LR: 0.999210 (0.000122)
LDA: 0.999386 (0.000158)
KNN: 0.999462 (0.000102)
CART: 0.999161 (0.000191)
###Markdown
LDA Evaluation [LDA doc](https://scikit-learn.org/0.16/modules/generated/sklearn.lda.LDA.html)
###Code
# Llamar al Modelo
model = LinearDiscriminantAnalysis()
model.fit(X_train, Y_train)
rescaledValidationX = X_train
predictions = model.predict(rescaledValidationX)
print(accuracy_score(Y_train, predictions))
print(confusion_matrix(Y_train, predictions))
print(classification_report(Y_train, predictions))
df_cm = pd.DataFrame(confusion_matrix(Y_train, predictions),
columns=np.unique(Y_train), index = np.unique(Y_train))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
sns.heatmap(df_cm, cmap="Blues", annot=True,annot_kws={"size": 16})# font size
###Output
_____no_output_____
###Markdown
LDA Validation
###Code
predictions = model.predict(X_val)
df_cm = pd.DataFrame(confusion_matrix(Y_val, predictions),
columns=np.unique(Y_val), index = np.unique(Y_val))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
sns.heatmap(df_cm, cmap="Blues", annot=True,annot_kws={"size": 16})# font size
###Output
_____no_output_____
###Markdown
Logistic Regression **Exercise**: Add `l1_ratio` to the grid search.
###Code
# Penalty types to try (note: the default 'lbfgs' solver only supports 'l2' and 'none')
penalty = ['l1', 'l2', 'elasticnet', 'none']
# l1_ratio
l1_ratio =[0,1]
# Inverso de la Regularizacion
C = np.logspace(-4, 4, 20)
# Crea diccionario de busqueda
random_grid = {'penalty': penalty,
'C': C,
'l1_ratio':l1_ratio }
pprint(random_grid)
# Construyamos el algoritmo
lr = LogisticRegression()
# Tuneo de Hyperparametros con CV
lr_random = GridSearchCV(estimator = lr, param_grid = random_grid,
scoring = 'accuracy',
cv = 5, verbose=2, n_jobs = -1)
# Entrenar
lr_random.fit(X_train, Y_train)
print(f'Mejores parametros: {lr_random.best_params_}')
print(f'Mejor Desempeño: {lr_random.best_score_}')
lr_opt = LogisticRegression(**lr_random.best_params_)
lr_opt.fit(X_train, Y_train)
# Estimar Test
predictions = lr_opt.predict(X_train)
df_cm = pd.DataFrame(confusion_matrix(Y_train, predictions),
columns=np.unique(Y_train), index = np.unique(Y_train))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
sns.heatmap(df_cm, cmap="Blues", annot=True,annot_kws={"size": 16})# font size
###Output
_____no_output_____
###Markdown
Logistic Regression Validation
###Code
lr_opt = LogisticRegression(**lr_random.best_params_)
lr_opt.fit(X_train, Y_train)
# Estimar Test
predictions = lr_opt.predict(X_val)
print(accuracy_score(Y_val, predictions))
print(confusion_matrix(Y_val, predictions))
print(classification_report(Y_val, predictions))
df_cm = pd.DataFrame(confusion_matrix(Y_val, predictions),
columns=np.unique(Y_val), index = np.unique(Y_val))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
sns.heatmap(df_cm, cmap="Blues", annot=True,annot_kws={"size": 16})
###Output
0.9990344313019817
[[45478 8]
[ 36 47]]
precision recall f1-score support
0 1.00 1.00 1.00 45486
1 0.85 0.57 0.68 83
accuracy 1.00 45569
macro avg 0.93 0.78 0.84 45569
weighted avg 1.00 1.00 1.00 45569
###Markdown
Modelling with Undersampling Balancing with Undersampling Now we will train models using **Random Under Sampling**. It consists of randomly removing observations from the class that has more of them, so that our models do not overfit to the dominant class. Steps: 1. Determine how imbalanced the classes are (use value counts). 2. Balance the classes by randomly sampling the larger one, usually aiming for something close to 50/50. 3. Train and evaluate on the balanced classes. The main risk of this technique is that we can lose a lot of data, which can hurt the performance of our models. For example, we go from 284,315 non-fraud observations to 492. Rebalancing on the train set
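The manual undersampling in the next cell can also be done with the imbalanced-learn package. The sketch below is an optional equivalent; it assumes `imbalanced-learn` is installed, which the rest of this notebook does not require.
###Code
from imblearn.under_sampling import RandomUnderSampler

# Sketch: 50/50 random undersampling of the majority class.
rus = RandomUnderSampler(sampling_strategy=1.0, random_state=42)
X_train_rus, Y_train_rus = rus.fit_resample(X_train, Y_train)
print(pd.Series(Y_train_rus).value_counts())
###Output
_____no_output_____
###Markdown
Either way, the balanced training set keeps only 309 non-fraud rows, which is the price paid for removing the imbalance.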
###Code
df_train = pd.concat([X_train, Y_train], axis=1)
fraud_df = df_train.loc[df_train['Class'] == 1]
non_fraud_df = df_train.loc[df_train['Class'] == 0][:fraud_df.shape[0]]
normal_distributed_df = pd.concat([fraud_df, non_fraud_df])
# Shuffle de datos de nuevo
df_new = normal_distributed_df.sample(frac=1, random_state=42)
# separate the features and target of the undersampled training set
Y_train_new= df_new["Class"]
X_train_new = df_new.loc[:, df.columns != 'Class']
print('Distribucion de las Clases en el Data Set')
print(df_new['Class'].value_counts()/len(df_new))
sns.countplot('Class', data=df_new)
pyplot.title('Clases balanceadas', fontsize=14)
pyplot.show()
class_names = {0:'Not Fraud', 1:'Fraud'}
print(df_new.Class.value_counts().rename(index = class_names))
###Output
Fraud 309
Not Fraud 309
Name: Class, dtype: int64
###Markdown
Model Evaluation with Undersampling
###Code
# Opciones de Evaluacion de Modelos
num_folds = 10
seed = 7
# scoring ='f1'
scoring = 'accuracy'
# ### Modelos
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))
#Neural Network
models.append(('NN', MLPClassifier()))
# #Ensable Models
# Boosting methods
models.append(('AB', AdaBoostClassifier()))
models.append(('GBM', GradientBoostingClassifier()))
# Bagging methods
models.append(('RF', RandomForestClassifier()))
models.append(('ET', ExtraTreesClassifier()))
###Output
_____no_output_____
###Markdown
**Deep Neural Network with Keras**
###Code
# Neural network model
# If you want to train it, set the flag to 1
EnableDLModelsFlag = 1
if EnableDLModelsFlag == 1 :
    # Build the model with Keras
def create_model(neurons=12, activation='relu', learn_rate = 0.01, momentum=0):
# create model
model = Sequential()
model.add(Dense(X_train.shape[1], input_dim=X_train.shape[1], activation=activation))
model.add(Dense(32, activation=activation))
model.add(Dense(1, activation='sigmoid'))
        # Compile the model (note: the SGD optimizer on the next line is created but not used; the model compiles with 'adam')
optimizer = SGD(lr=learn_rate, momentum=momentum)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
models.append(('DNN', KerasClassifier(build_fn=create_model, epochs=50, batch_size=10, verbose=0)))
results = []
names = []
for name, model in models:
kfold = KFold(n_splits=num_folds, random_state=seed, shuffle=True)
cv_results = cross_val_score(model, X_train_new, Y_train_new, cv=kfold,
scoring=scoring, n_jobs=-1)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# compare algorithms
fig = pyplot.figure()
fig.suptitle('Comparar Algoritmos')
ax = fig.add_subplot(111)
pyplot.boxplot(results)
ax.set_xticklabels(names)
fig.set_size_inches(8,4)
pyplot.show()
###Output
_____no_output_____
###Markdown
Logistic Regression Documentation: [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) Logistic Regression Training **Exercise** Grid search with `l1_ratio`.
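One caveat for this exercise: scikit-learn's default 'lbfgs' solver only supports the 'l2' and 'none' penalties, while 'elasticnet' (the only penalty that uses `l1_ratio`) requires `solver='saga'` and 'l1' needs 'liblinear' or 'saga'. The sketch below is an optional solver-aware grid; the next cell keeps the original, simpler grid, in which the invalid combinations simply fail and are ignored.
###Code
# Sketch: separate parameter grids per solver so every combination is valid.
solver_aware_grid = [
    {'solver': ['saga'], 'penalty': ['elasticnet'], 'l1_ratio': [0.0, 0.5, 1.0],
     'C': np.logspace(-2, 2, 5), 'max_iter': [5000]},
    {'solver': ['lbfgs'], 'penalty': ['l2'], 'C': np.logspace(-2, 2, 5)},
]
lr_solver_search = GridSearchCV(LogisticRegression(), solver_aware_grid,
                                scoring='accuracy', cv=5, n_jobs=-1)
###Output
_____no_output_____
###Markdown
The solver-aware version mainly avoids the wasted fits on incompatible solver/penalty pairs.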
###Code
# Penalty types to try (note: the default 'lbfgs' solver only supports 'l2' and 'none')
penalty = ['l1', 'l2', 'elasticnet', 'none']
# Inverso de la Regularizacion
C = np.logspace(-4, 4, 20)
l1_ratio = [0,.5,1]
# Crea diccionario de busqueda
random_grid = {'penalty': penalty,
'C': C,
'l1_ratio': l1_ratio}
pprint(random_grid)
# Construyamos el algoritmo
lr = LogisticRegression()
# Tuneo de Hyperparametros con CV
lr_random = GridSearchCV(estimator = lr, param_grid = random_grid,
scoring = 'accuracy',
cv = 5, verbose=2, n_jobs = -1)
# Entrenar
lr_random.fit(X_train_new, Y_train_new)
print(f'Mejores parametros: {lr_random.best_params_}')
print(f'Mejor Desempeño: {lr_random.best_score_}')
lr_opt = LogisticRegression(**lr_random.best_params_)
lr_opt.fit(X_train_new, Y_train_new)
# Estimar Test
predictions = lr_opt.predict(X_train_new)
print(accuracy_score(Y_train_new, predictions))
print(confusion_matrix(Y_train_new, predictions))
print(classification_report(Y_train_new, predictions))
df_cm = pd.DataFrame(confusion_matrix(Y_train_new, predictions),
columns=np.unique(Y_train_new), index = np.unique(Y_train_new))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
sns.heatmap(df_cm, cmap="Blues", annot=True,annot_kws={"size": 16})
###Output
0.9611650485436893
[[303 6]
[ 18 291]]
precision recall f1-score support
0 0.94 0.98 0.96 309
1 0.98 0.94 0.96 309
accuracy 0.96 618
macro avg 0.96 0.96 0.96 618
weighted avg 0.96 0.96 0.96 618
###Markdown
Logistic Regression Validation
###Code
lr_opt = LogisticRegression(**lr_random.best_params_)
lr_opt.fit(X_train_new, Y_train_new)
# Estimar Test
predictions = lr_opt.predict(X_val)
print(accuracy_score(Y_val, predictions))
print(confusion_matrix(Y_val, predictions))
print(classification_report(Y_val, predictions))
df_cm = pd.DataFrame(confusion_matrix(Y_val, predictions),
columns=np.unique(Y_val), index = np.unique(Y_val))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
sns.heatmap(df_cm, cmap="Blues", annot=True,annot_kws={"size": 16})
###Output
0.9603677938949725
[[43690 1796]
[ 10 73]]
precision recall f1-score support
0 1.00 0.96 0.98 45486
1 0.04 0.88 0.07 83
accuracy 0.96 45569
macro avg 0.52 0.92 0.53 45569
weighted avg 1.00 0.96 0.98 45569
###Markdown
Random Forest Random Forest Tuning Documentation: [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
###Code
rf = RandomForestClassifier(random_state = 42)
print('Parameters currently in use:\n')
pprint(rf.get_params())
###Output
Parameters currently in use:
{'bootstrap': True,
'ccp_alpha': 0.0,
'class_weight': None,
'criterion': 'gini',
'max_depth': None,
'max_features': 'auto',
'max_leaf_nodes': None,
'max_samples': None,
'min_impurity_decrease': 0.0,
'min_samples_leaf': 1,
'min_samples_split': 2,
'min_weight_fraction_leaf': 0.0,
'n_estimators': 100,
'n_jobs': None,
'oob_score': False,
'random_state': 42,
'verbose': 0,
'warm_start': False}
###Markdown
There are many parameters, so let's focus on the most important ones:  Parameters to search for Random Forest
###Code
# Numero de Arboles en el Bosque
n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1000, num = 5)]
# Numero de Variables por split, tecnica
max_features = ['auto']
# Profundidad del Arbol
max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
max_depth.append(None)
# Numero minimo de observaciones para hacer el Split
min_samples_split = [2, 5, 10]
# Minimo numero de observaciones en las hojas
min_samples_leaf = [1, 2, 4]
# Metodo para elegir las observaciones en cada paso
bootstrap = [True]
# Crea el grid de busqueda
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
pprint(random_grid)
###Output
{'bootstrap': [True],
'max_depth': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, None],
'max_features': ['auto'],
'min_samples_leaf': [1, 2, 4],
'min_samples_split': [2, 5, 10],
'n_estimators': [100, 325, 550, 775, 1000]}
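###Markdown
Although the dictionary above is named `random_grid`, the cells below search it exhaustively with `GridSearchCV` (540 candidates, 5 folds each). A cheaper option, sketched below, is `RandomizedSearchCV`, which samples a fixed number of combinations from the same grid.
###Code
from sklearn.model_selection import RandomizedSearchCV

# Sketch: try 50 random combinations from the grid instead of all 540.
rf_rand_search = RandomizedSearchCV(RandomForestClassifier(random_state=42),
                                    param_distributions=random_grid, n_iter=50,
                                    scoring='accuracy', cv=5, n_jobs=-1, random_state=42)
# rf_rand_search.fit(X_train_new, Y_train_new)
###Output
_____no_output_____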
###Markdown
Accuracy (run time: ~45 mins)
###Code
rf = RandomForestClassifier(random_state = 42)
# Tuneo de Hyperparametros con CV
rf_under = GridSearchCV(estimator = rf, param_grid = random_grid,
scoring = 'accuracy',
cv = 5, verbose=2, n_jobs = -1)
# Entrenar
rf_under.fit(X_train_new, Y_train_new)
###Output
Fitting 5 folds for each of 540 candidates, totalling 2700 fits
###Markdown
Best Parameters
###Code
print(f'Mejores parametros: {rf_under.best_params_}')
print(f'Mejor Desempeño: {rf_under.best_score_}')
###Output
Mejores parametros: {'bootstrap': True, 'max_depth': 10, 'max_features': 'auto', 'min_samples_leaf': 4, 'min_samples_split': 10, 'n_estimators': 325}
Mejor Desempeño: 0.940112772095463
###Markdown
F1 Score
###Code
# Numero de Arboles en el Bosque
n_estimators = [int(x) for x in np.linspace(start = 300, stop = 500, num = 3)]
# Numero de Variables por split, tecnica
max_features = ['auto']
# Profundidad del Arbol
max_depth = [int(x) for x in np.linspace(8, 12, num = 3)]
max_depth.append(None)
# Numero minimo de observaciones para hacer el Split
min_samples_split = [ 8, 10, 12]
# Minimo numero de observaciones en las hojas
min_samples_leaf = [3, 4]
# Metodo para elegir las observaciones en cada paso
bootstrap = [True]
# Crea el grid de busqueda
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
pprint(random_grid)
# Construyamos el algoritmo
rf = RandomForestClassifier(random_state = 42)
# Tuneo de Hyperparametros con CV
rf_under_f1 = GridSearchCV(estimator = rf, param_grid = random_grid,
scoring = 'f1',
cv = 5, verbose=2, n_jobs = -1)
# Entrenar
rf_under_f1.fit(X_train_new, Y_train_new)
print('Anterior')
print(f'Mejores parametros: {rf_under.best_params_}')
print(f'Mejor Desempeño: {rf_under.best_score_}')
print('F1 score')
print(f'Mejores parametros f1: {rf_under_f1.best_params_}')
print(f'Mejor Desempeño f1: {rf_under_f1.best_score_}')
###Output
Anterior
Mejores parametros: {'bootstrap': True, 'max_depth': 10, 'max_features': 'auto', 'min_samples_leaf': 4, 'min_samples_split': 10, 'n_estimators': 325}
Mejor Desempeño: 0.940112772095463
F1 score
Mejores parametros f1: {'bootstrap': True, 'max_depth': 10, 'max_features': 'auto', 'min_samples_leaf': 4, 'min_samples_split': 10, 'n_estimators': 300}
Mejor Desempeño f1: 0.9369254492519727
###Markdown
In both cases it seems that more estimators (`n_estimators`) improve the prediction. Let's tune this hyperparameter a little more. Random Search RF V2
###Code
# Numero de Arboles en el Bosque
n_estimators = [int(x) for x in np.linspace(start = 280, stop = 330, num = 5)]
# Numero de Variables por split, tecnica
max_features = ['auto']
# Profundidad del Arbol
max_depth = [10]
max_depth.append(None)
# Numero minimo de observaciones para hacer el Split
min_samples_split = [9,10,11,12]
# Minimo numero de observaciones en las hojas
min_samples_leaf = [1]
# Metodo para elegir las observaciones en cada paso
bootstrap = [True]
# Crea el grid de busqueda
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
pprint(random_grid)
# Construyamos el algoritmo
rf = RandomForestClassifier(random_state = 42)
# Tuneo de Hyperparametros con CV
rf_random2 = GridSearchCV(estimator = rf, param_grid = random_grid,
scoring = 'accuracy',
cv = 5, verbose=2, n_jobs = -1)
# Entrenar
rf_random2.fit(X_train_new, Y_train_new)
print('Ultimo RF')
print(f'Mejores parametros: {rf_random2.best_params_}')
print(f'Mejor Desempeño 2: {rf_random2.best_score_}')
print('\n RF optimo anteriro')
print(f'Mejor Desempeño 1: {rf_under.best_score_}')
print(f'Mejores parametros: {rf_under.best_params_}')
rf_cv = pd.DataFrame(rf_random2.cv_results_)
rf_cv[['mean_test_score', 'std_test_score', 'params']].sort_values('mean_test_score'
, ascending=False).head(20)
rf_cv = pd.DataFrame(rf_under.cv_results_)
rf_cv[['mean_test_score', 'std_test_score', 'params']].sort_values('mean_test_score'
, ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Performance seems marginally better with more estimators; even so, let's use the more parsimonious model. Feature Importance Run `feature_importance` with the best Random Forest model (training)
###Code
rf_fi= RandomForestClassifier(**rf_under.best_params_, random_state=9847156)
rf_fi.fit(X_train_new, Y_train_new)
rf_fi.feature_importances_
rf_fi.feature_names_in_
feature_importances = pd.DataFrame({'name': rf_fi.feature_names_in_, 'importance': rf_fi.feature_importances_ })
feature_importances.sort_values(by='importance', ascending=False)
###Output
_____no_output_____
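###Markdown
Impurity-based `feature_importances_` (used above) can be biased towards features with many distinct values. As an optional cross-check, the sketch below computes permutation importance on the validation set with the `rf_fi` model fitted above.
###Code
from sklearn.inspection import permutation_importance

# Sketch: measure how much shuffling each feature degrades validation performance.
perm = permutation_importance(rf_fi, X_val, Y_val, n_repeats=10, random_state=0, n_jobs=-1)
perm_scores = pd.Series(perm.importances_mean, index=X_val.columns).sort_values(ascending=False)
print(perm_scores.head(10))
###Output
_____no_output_____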
###Markdown
Random Forest Validation
###Code
rf_opt = RandomForestClassifier(**rf_under.best_params_)
rf_opt.fit(X_train_new, Y_train_new)
# Estimar Test
predictions = rf_opt.predict(X_val)
print(accuracy_score(Y_val, predictions))
print(confusion_matrix(Y_val, predictions))
print(classification_report(Y_val, predictions))
df_cm = pd.DataFrame(confusion_matrix(Y_val, predictions),
columns=np.unique(Y_val), index = np.unique(Y_val))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
sns.heatmap(df_cm, cmap="Blues", annot=True,annot_kws={"size": 16})
rf_opt = RandomForestClassifier(**rf_random2.best_params_)
rf_opt.fit(X_train_new, Y_train_new)
# Estimar Test
predictions = rf_opt.predict(X_val)
print(accuracy_score(Y_val, predictions))
print(confusion_matrix(Y_val, predictions))
print(classification_report(Y_val, predictions))
df_cm = pd.DataFrame(confusion_matrix(Y_val, predictions),
columns=np.unique(Y_val), index = np.unique(Y_val))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
sns.heatmap(df_cm, cmap="Blues", annot=True,annot_kws={"size": 16})
###Output
0.9742368715574184
[[44323 1163]
[ 11 72]]
precision recall f1-score support
0 1.00 0.97 0.99 45486
1 0.06 0.87 0.11 83
accuracy 0.97 45569
macro avg 0.53 0.92 0.55 45569
weighted avg 1.00 0.97 0.99 45569
###Markdown
Best model + Think about which model would be best if the cost of false positives were almost the same as the cost of false negatives. + What happens if the cost of false positives is much higher?
###Code
rf_opt = RandomForestClassifier(**rf_random2.best_params_, random_state=35)
rf_opt.fit(X_train_new, Y_train_new)
# Estimar Test
predictions = rf_opt.predict(X_test)
print(accuracy_score(Y_test, predictions))
print(confusion_matrix(Y_test, predictions))
print(classification_report(Y_test, predictions))
df_cm = pd.DataFrame(confusion_matrix(Y_test, predictions),
columns=np.unique(Y_test), index = np.unique(Y_test))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
df_cm
###Output
0.9742635441171307
[[55405 1457]
[ 9 91]]
precision recall f1-score support
0 1.00 0.97 0.99 56862
1 0.06 0.91 0.11 100
accuracy 0.97 56962
macro avg 0.53 0.94 0.55 56962
weighted avg 1.00 0.97 0.99 56962
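###Markdown
One way to answer the questions above is to attach explicit costs to each error type and score the confusion matrix with them. The sketch below reuses the test predictions from the previous cell; the `cost_fp` and `cost_fn` values are made-up assumptions, not figures from the source.
###Code
# Sketch with hypothetical per-error costs.
cost_fp = 10    # assumed cost of blocking a legitimate transaction
cost_fn = 500   # assumed cost of missing a fraud
tn, fp, fn, tp = confusion_matrix(Y_test, predictions).ravel()
print(f'FP={fp}, FN={fn}, expected cost under the assumed prices: {fp * cost_fp + fn * cost_fn}')
###Output
_____no_output_____
###Markdown
If false positives become much more expensive, the undersampled model (many false alarms, few missed frauds) quickly loses to the model trained on the full imbalanced data; tuning the decision threshold on `predict_proba` is the usual way to trade the two error types off.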
|
Model backlog/ResNet50/19 - ResNet50 - Brightness range.ipynb | ###Markdown
Dependencies
###Code
import os
import random
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, cohen_kappa_score
from keras import backend as K
from keras.models import Model
from keras import optimizers, applications
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, Callback
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, Input
# Set seeds to make the experiment more reproducible.
from tensorflow import set_random_seed
def seed_everything(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
set_random_seed(0)
seed_everything()
%matplotlib inline
sns.set(style="whitegrid")
warnings.filterwarnings("ignore")
###Output
Using TensorFlow backend.
###Markdown
Load data
###Code
train = pd.read_csv('../input/aptos2019-blindness-detection/train.csv')
test = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
print('Number of train samples: ', train.shape[0])
print('Number of test samples: ', test.shape[0])
# Preprocecss data
train["id_code"] = train["id_code"].apply(lambda x: x + ".png")
test["id_code"] = test["id_code"].apply(lambda x: x + ".png")
train['diagnosis'] = train['diagnosis'].astype('str')
display(train.head())
###Output
Number of train samples: 3662
Number of test samples: 1928
###Markdown
Model parameters
###Code
# Model parameters
BATCH_SIZE = 8
EPOCHS = 30
WARMUP_EPOCHS = 2
LEARNING_RATE = 1e-4
WARMUP_LEARNING_RATE = 1e-3
HEIGHT = 512
WIDTH = 512
CANAL = 3
N_CLASSES = train['diagnosis'].nunique()
ES_PATIENCE = 5
RLROP_PATIENCE = 3
DECAY_DROP = 0.5
def kappa(y_true, y_pred, n_classes=5):
y_trues = K.cast(K.argmax(y_true), K.floatx())
y_preds = K.cast(K.argmax(y_pred), K.floatx())
n_samples = K.cast(K.shape(y_true)[0], K.floatx())
distance = K.sum(K.abs(y_trues - y_preds))
max_distance = n_classes - 1
kappa_score = 1 - ((distance**2) / (n_samples * (max_distance**2)))
return kappa_score
###Output
_____no_output_____
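###Markdown
The `kappa` function above is only a rough batch-level proxy for the competition metric. For offline evaluation, scikit-learn's `cohen_kappa_score` (already imported) with quadratic weights gives the standard quadratic weighted kappa; the sketch below uses made-up label vectors just to show the call.
###Code
# Sketch: quadratic weighted kappa on hypothetical label vectors.
y_true_example = [0, 1, 2, 3, 4, 2]
y_pred_example = [0, 1, 1, 3, 4, 2]
print(cohen_kappa_score(y_true_example, y_pred_example, weights='quadratic'))
###Output
_____no_output_____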
###Markdown
Train test split
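The diagnosis classes are imbalanced, so a stratified split keeps the class proportions similar in train and validation. The sketch below is an optional alternative to the plain split in the next cell.
###Code
# Sketch: stratify the split on the diagnosis label.
X_train_strat, X_val_strat = train_test_split(train, test_size=0.25, random_state=0,
                                              stratify=train['diagnosis'])
###Output
_____no_output_____
###Markdown
The original notebook keeps the unstratified split below.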
###Code
X_train, X_val = train_test_split(train, test_size=0.25, random_state=0)
###Output
_____no_output_____
###Markdown
Data generator
###Code
train_datagen=ImageDataGenerator(rescale=1./255,
brightness_range=[0.5, 1.5],
fill_mode='reflect',
horizontal_flip=True,
vertical_flip=True)
train_generator=train_datagen.flow_from_dataframe(
dataframe=X_train,
directory="../input/aptos2019-blindness-detection/train_images/",
x_col="id_code",
y_col="diagnosis",
batch_size=BATCH_SIZE,
class_mode="categorical",
target_size=(HEIGHT, WIDTH))
valid_generator=train_datagen.flow_from_dataframe(
dataframe=X_val,
directory="../input/aptos2019-blindness-detection/train_images/",
x_col="id_code",
y_col="diagnosis",
batch_size=BATCH_SIZE,
class_mode="categorical",
target_size=(HEIGHT, WIDTH))
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_dataframe(
dataframe=test,
directory = "../input/aptos2019-blindness-detection/test_images/",
x_col="id_code",
target_size=(HEIGHT, WIDTH),
batch_size=1,
shuffle=False,
class_mode=None)
###Output
Found 2746 validated image filenames belonging to 5 classes.
Found 916 validated image filenames belonging to 5 classes.
Found 1928 validated image filenames.
###Markdown
Model
###Code
def create_model(input_shape, n_out):
input_tensor = Input(shape=input_shape)
base_model = applications.ResNet50(weights=None,
include_top=False,
input_tensor=input_tensor)
base_model.load_weights('../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
x = GlobalAveragePooling2D()(base_model.output)
x = Dropout(0.5)(x)
x = Dense(2048, activation='relu')(x)
x = Dropout(0.5)(x)
final_output = Dense(n_out, activation='softmax', name='final_output')(x)
model = Model(input_tensor, final_output)
return model
model = create_model(input_shape=(HEIGHT, WIDTH, CANAL), n_out=N_CLASSES)
for layer in model.layers:
layer.trainable = False
for i in range(-5, 0):
model.layers[i].trainable = True
class_weights = class_weight.compute_class_weight('balanced', np.unique(train['diagnosis'].astype('int').values), train['diagnosis'].astype('int').values)
metric_list = ["accuracy", kappa]
optimizer = optimizers.Adam(lr=WARMUP_LEARNING_RATE)
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=metric_list)
model.summary()
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 512, 512, 3) 0
__________________________________________________________________________________________________
conv1_pad (ZeroPadding2D) (None, 518, 518, 3) 0 input_1[0][0]
__________________________________________________________________________________________________
conv1 (Conv2D) (None, 256, 256, 64) 9472 conv1_pad[0][0]
__________________________________________________________________________________________________
bn_conv1 (BatchNormalization) (None, 256, 256, 64) 256 conv1[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 256, 256, 64) 0 bn_conv1[0][0]
__________________________________________________________________________________________________
pool1_pad (ZeroPadding2D) (None, 258, 258, 64) 0 activation_1[0][0]
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 128, 128, 64) 0 pool1_pad[0][0]
__________________________________________________________________________________________________
res2a_branch2a (Conv2D) (None, 128, 128, 64) 4160 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
bn2a_branch2a (BatchNormalizati (None, 128, 128, 64) 256 res2a_branch2a[0][0]
__________________________________________________________________________________________________
activation_2 (Activation) (None, 128, 128, 64) 0 bn2a_branch2a[0][0]
__________________________________________________________________________________________________
res2a_branch2b (Conv2D) (None, 128, 128, 64) 36928 activation_2[0][0]
__________________________________________________________________________________________________
bn2a_branch2b (BatchNormalizati (None, 128, 128, 64) 256 res2a_branch2b[0][0]
__________________________________________________________________________________________________
activation_3 (Activation) (None, 128, 128, 64) 0 bn2a_branch2b[0][0]
__________________________________________________________________________________________________
res2a_branch2c (Conv2D) (None, 128, 128, 256 16640 activation_3[0][0]
__________________________________________________________________________________________________
res2a_branch1 (Conv2D) (None, 128, 128, 256 16640 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
bn2a_branch2c (BatchNormalizati (None, 128, 128, 256 1024 res2a_branch2c[0][0]
__________________________________________________________________________________________________
bn2a_branch1 (BatchNormalizatio (None, 128, 128, 256 1024 res2a_branch1[0][0]
__________________________________________________________________________________________________
add_1 (Add) (None, 128, 128, 256 0 bn2a_branch2c[0][0]
bn2a_branch1[0][0]
__________________________________________________________________________________________________
activation_4 (Activation) (None, 128, 128, 256 0 add_1[0][0]
__________________________________________________________________________________________________
res2b_branch2a (Conv2D) (None, 128, 128, 64) 16448 activation_4[0][0]
__________________________________________________________________________________________________
bn2b_branch2a (BatchNormalizati (None, 128, 128, 64) 256 res2b_branch2a[0][0]
__________________________________________________________________________________________________
activation_5 (Activation) (None, 128, 128, 64) 0 bn2b_branch2a[0][0]
__________________________________________________________________________________________________
res2b_branch2b (Conv2D) (None, 128, 128, 64) 36928 activation_5[0][0]
__________________________________________________________________________________________________
bn2b_branch2b (BatchNormalizati (None, 128, 128, 64) 256 res2b_branch2b[0][0]
__________________________________________________________________________________________________
activation_6 (Activation) (None, 128, 128, 64) 0 bn2b_branch2b[0][0]
__________________________________________________________________________________________________
res2b_branch2c (Conv2D) (None, 128, 128, 256 16640 activation_6[0][0]
__________________________________________________________________________________________________
bn2b_branch2c (BatchNormalizati (None, 128, 128, 256 1024 res2b_branch2c[0][0]
__________________________________________________________________________________________________
add_2 (Add) (None, 128, 128, 256 0 bn2b_branch2c[0][0]
activation_4[0][0]
__________________________________________________________________________________________________
activation_7 (Activation) (None, 128, 128, 256 0 add_2[0][0]
__________________________________________________________________________________________________
res2c_branch2a (Conv2D) (None, 128, 128, 64) 16448 activation_7[0][0]
__________________________________________________________________________________________________
bn2c_branch2a (BatchNormalizati (None, 128, 128, 64) 256 res2c_branch2a[0][0]
__________________________________________________________________________________________________
activation_8 (Activation) (None, 128, 128, 64) 0 bn2c_branch2a[0][0]
__________________________________________________________________________________________________
res2c_branch2b (Conv2D) (None, 128, 128, 64) 36928 activation_8[0][0]
__________________________________________________________________________________________________
bn2c_branch2b (BatchNormalizati (None, 128, 128, 64) 256 res2c_branch2b[0][0]
__________________________________________________________________________________________________
activation_9 (Activation) (None, 128, 128, 64) 0 bn2c_branch2b[0][0]
__________________________________________________________________________________________________
res2c_branch2c (Conv2D) (None, 128, 128, 256 16640 activation_9[0][0]
__________________________________________________________________________________________________
bn2c_branch2c (BatchNormalizati (None, 128, 128, 256 1024 res2c_branch2c[0][0]
__________________________________________________________________________________________________
add_3 (Add) (None, 128, 128, 256 0 bn2c_branch2c[0][0]
activation_7[0][0]
__________________________________________________________________________________________________
activation_10 (Activation) (None, 128, 128, 256 0 add_3[0][0]
__________________________________________________________________________________________________
res3a_branch2a (Conv2D) (None, 64, 64, 128) 32896 activation_10[0][0]
__________________________________________________________________________________________________
bn3a_branch2a (BatchNormalizati (None, 64, 64, 128) 512 res3a_branch2a[0][0]
__________________________________________________________________________________________________
activation_11 (Activation) (None, 64, 64, 128) 0 bn3a_branch2a[0][0]
__________________________________________________________________________________________________
res3a_branch2b (Conv2D) (None, 64, 64, 128) 147584 activation_11[0][0]
__________________________________________________________________________________________________
bn3a_branch2b (BatchNormalizati (None, 64, 64, 128) 512 res3a_branch2b[0][0]
__________________________________________________________________________________________________
activation_12 (Activation) (None, 64, 64, 128) 0 bn3a_branch2b[0][0]
__________________________________________________________________________________________________
res3a_branch2c (Conv2D) (None, 64, 64, 512) 66048 activation_12[0][0]
__________________________________________________________________________________________________
res3a_branch1 (Conv2D) (None, 64, 64, 512) 131584 activation_10[0][0]
__________________________________________________________________________________________________
bn3a_branch2c (BatchNormalizati (None, 64, 64, 512) 2048 res3a_branch2c[0][0]
__________________________________________________________________________________________________
bn3a_branch1 (BatchNormalizatio (None, 64, 64, 512) 2048 res3a_branch1[0][0]
__________________________________________________________________________________________________
add_4 (Add) (None, 64, 64, 512) 0 bn3a_branch2c[0][0]
bn3a_branch1[0][0]
__________________________________________________________________________________________________
activation_13 (Activation) (None, 64, 64, 512) 0 add_4[0][0]
__________________________________________________________________________________________________
res3b_branch2a (Conv2D) (None, 64, 64, 128) 65664 activation_13[0][0]
__________________________________________________________________________________________________
bn3b_branch2a (BatchNormalizati (None, 64, 64, 128) 512 res3b_branch2a[0][0]
__________________________________________________________________________________________________
activation_14 (Activation) (None, 64, 64, 128) 0 bn3b_branch2a[0][0]
__________________________________________________________________________________________________
res3b_branch2b (Conv2D) (None, 64, 64, 128) 147584 activation_14[0][0]
__________________________________________________________________________________________________
bn3b_branch2b (BatchNormalizati (None, 64, 64, 128) 512 res3b_branch2b[0][0]
__________________________________________________________________________________________________
activation_15 (Activation) (None, 64, 64, 128) 0 bn3b_branch2b[0][0]
__________________________________________________________________________________________________
res3b_branch2c (Conv2D) (None, 64, 64, 512) 66048 activation_15[0][0]
__________________________________________________________________________________________________
bn3b_branch2c (BatchNormalizati (None, 64, 64, 512) 2048 res3b_branch2c[0][0]
__________________________________________________________________________________________________
add_5 (Add) (None, 64, 64, 512) 0 bn3b_branch2c[0][0]
activation_13[0][0]
__________________________________________________________________________________________________
activation_16 (Activation) (None, 64, 64, 512) 0 add_5[0][0]
__________________________________________________________________________________________________
res3c_branch2a (Conv2D) (None, 64, 64, 128) 65664 activation_16[0][0]
__________________________________________________________________________________________________
bn3c_branch2a (BatchNormalizati (None, 64, 64, 128) 512 res3c_branch2a[0][0]
__________________________________________________________________________________________________
activation_17 (Activation) (None, 64, 64, 128) 0 bn3c_branch2a[0][0]
__________________________________________________________________________________________________
res3c_branch2b (Conv2D) (None, 64, 64, 128) 147584 activation_17[0][0]
__________________________________________________________________________________________________
bn3c_branch2b (BatchNormalizati (None, 64, 64, 128) 512 res3c_branch2b[0][0]
__________________________________________________________________________________________________
activation_18 (Activation) (None, 64, 64, 128) 0 bn3c_branch2b[0][0]
__________________________________________________________________________________________________
res3c_branch2c (Conv2D) (None, 64, 64, 512) 66048 activation_18[0][0]
__________________________________________________________________________________________________
bn3c_branch2c (BatchNormalizati (None, 64, 64, 512) 2048 res3c_branch2c[0][0]
__________________________________________________________________________________________________
add_6 (Add) (None, 64, 64, 512) 0 bn3c_branch2c[0][0]
activation_16[0][0]
__________________________________________________________________________________________________
activation_19 (Activation) (None, 64, 64, 512) 0 add_6[0][0]
__________________________________________________________________________________________________
res3d_branch2a (Conv2D) (None, 64, 64, 128) 65664 activation_19[0][0]
__________________________________________________________________________________________________
bn3d_branch2a (BatchNormalizati (None, 64, 64, 128) 512 res3d_branch2a[0][0]
__________________________________________________________________________________________________
activation_20 (Activation) (None, 64, 64, 128) 0 bn3d_branch2a[0][0]
__________________________________________________________________________________________________
res3d_branch2b (Conv2D) (None, 64, 64, 128) 147584 activation_20[0][0]
__________________________________________________________________________________________________
bn3d_branch2b (BatchNormalizati (None, 64, 64, 128) 512 res3d_branch2b[0][0]
__________________________________________________________________________________________________
activation_21 (Activation) (None, 64, 64, 128) 0 bn3d_branch2b[0][0]
__________________________________________________________________________________________________
res3d_branch2c (Conv2D) (None, 64, 64, 512) 66048 activation_21[0][0]
__________________________________________________________________________________________________
bn3d_branch2c (BatchNormalizati (None, 64, 64, 512) 2048 res3d_branch2c[0][0]
__________________________________________________________________________________________________
add_7 (Add) (None, 64, 64, 512) 0 bn3d_branch2c[0][0]
activation_19[0][0]
__________________________________________________________________________________________________
activation_22 (Activation) (None, 64, 64, 512) 0 add_7[0][0]
__________________________________________________________________________________________________
res4a_branch2a (Conv2D) (None, 32, 32, 256) 131328 activation_22[0][0]
__________________________________________________________________________________________________
bn4a_branch2a (BatchNormalizati (None, 32, 32, 256) 1024 res4a_branch2a[0][0]
__________________________________________________________________________________________________
activation_23 (Activation) (None, 32, 32, 256) 0 bn4a_branch2a[0][0]
__________________________________________________________________________________________________
res4a_branch2b (Conv2D) (None, 32, 32, 256) 590080 activation_23[0][0]
__________________________________________________________________________________________________
bn4a_branch2b (BatchNormalizati (None, 32, 32, 256) 1024 res4a_branch2b[0][0]
__________________________________________________________________________________________________
activation_24 (Activation) (None, 32, 32, 256) 0 bn4a_branch2b[0][0]
__________________________________________________________________________________________________
res4a_branch2c (Conv2D) (None, 32, 32, 1024) 263168 activation_24[0][0]
__________________________________________________________________________________________________
res4a_branch1 (Conv2D) (None, 32, 32, 1024) 525312 activation_22[0][0]
__________________________________________________________________________________________________
bn4a_branch2c (BatchNormalizati (None, 32, 32, 1024) 4096 res4a_branch2c[0][0]
__________________________________________________________________________________________________
bn4a_branch1 (BatchNormalizatio (None, 32, 32, 1024) 4096 res4a_branch1[0][0]
__________________________________________________________________________________________________
add_8 (Add) (None, 32, 32, 1024) 0 bn4a_branch2c[0][0]
bn4a_branch1[0][0]
__________________________________________________________________________________________________
activation_25 (Activation) (None, 32, 32, 1024) 0 add_8[0][0]
__________________________________________________________________________________________________
res4b_branch2a (Conv2D) (None, 32, 32, 256) 262400 activation_25[0][0]
__________________________________________________________________________________________________
bn4b_branch2a (BatchNormalizati (None, 32, 32, 256) 1024 res4b_branch2a[0][0]
__________________________________________________________________________________________________
activation_26 (Activation) (None, 32, 32, 256) 0 bn4b_branch2a[0][0]
__________________________________________________________________________________________________
res4b_branch2b (Conv2D) (None, 32, 32, 256) 590080 activation_26[0][0]
__________________________________________________________________________________________________
bn4b_branch2b (BatchNormalizati (None, 32, 32, 256) 1024 res4b_branch2b[0][0]
__________________________________________________________________________________________________
activation_27 (Activation) (None, 32, 32, 256) 0 bn4b_branch2b[0][0]
__________________________________________________________________________________________________
res4b_branch2c (Conv2D) (None, 32, 32, 1024) 263168 activation_27[0][0]
__________________________________________________________________________________________________
bn4b_branch2c (BatchNormalizati (None, 32, 32, 1024) 4096 res4b_branch2c[0][0]
__________________________________________________________________________________________________
add_9 (Add) (None, 32, 32, 1024) 0 bn4b_branch2c[0][0]
activation_25[0][0]
__________________________________________________________________________________________________
activation_28 (Activation) (None, 32, 32, 1024) 0 add_9[0][0]
__________________________________________________________________________________________________
res4c_branch2a (Conv2D) (None, 32, 32, 256) 262400 activation_28[0][0]
__________________________________________________________________________________________________
bn4c_branch2a (BatchNormalizati (None, 32, 32, 256) 1024 res4c_branch2a[0][0]
__________________________________________________________________________________________________
activation_29 (Activation) (None, 32, 32, 256) 0 bn4c_branch2a[0][0]
__________________________________________________________________________________________________
res4c_branch2b (Conv2D) (None, 32, 32, 256) 590080 activation_29[0][0]
__________________________________________________________________________________________________
bn4c_branch2b (BatchNormalizati (None, 32, 32, 256) 1024 res4c_branch2b[0][0]
__________________________________________________________________________________________________
activation_30 (Activation) (None, 32, 32, 256) 0 bn4c_branch2b[0][0]
__________________________________________________________________________________________________
res4c_branch2c (Conv2D) (None, 32, 32, 1024) 263168 activation_30[0][0]
__________________________________________________________________________________________________
bn4c_branch2c (BatchNormalizati (None, 32, 32, 1024) 4096 res4c_branch2c[0][0]
__________________________________________________________________________________________________
add_10 (Add) (None, 32, 32, 1024) 0 bn4c_branch2c[0][0]
activation_28[0][0]
__________________________________________________________________________________________________
activation_31 (Activation) (None, 32, 32, 1024) 0 add_10[0][0]
__________________________________________________________________________________________________
res4d_branch2a (Conv2D) (None, 32, 32, 256) 262400 activation_31[0][0]
__________________________________________________________________________________________________
bn4d_branch2a (BatchNormalizati (None, 32, 32, 256) 1024 res4d_branch2a[0][0]
__________________________________________________________________________________________________
activation_32 (Activation) (None, 32, 32, 256) 0 bn4d_branch2a[0][0]
__________________________________________________________________________________________________
res4d_branch2b (Conv2D) (None, 32, 32, 256) 590080 activation_32[0][0]
__________________________________________________________________________________________________
bn4d_branch2b (BatchNormalizati (None, 32, 32, 256) 1024 res4d_branch2b[0][0]
__________________________________________________________________________________________________
activation_33 (Activation) (None, 32, 32, 256) 0 bn4d_branch2b[0][0]
__________________________________________________________________________________________________
res4d_branch2c (Conv2D) (None, 32, 32, 1024) 263168 activation_33[0][0]
__________________________________________________________________________________________________
bn4d_branch2c (BatchNormalizati (None, 32, 32, 1024) 4096 res4d_branch2c[0][0]
__________________________________________________________________________________________________
add_11 (Add) (None, 32, 32, 1024) 0 bn4d_branch2c[0][0]
activation_31[0][0]
__________________________________________________________________________________________________
activation_34 (Activation) (None, 32, 32, 1024) 0 add_11[0][0]
__________________________________________________________________________________________________
res4e_branch2a (Conv2D) (None, 32, 32, 256) 262400 activation_34[0][0]
__________________________________________________________________________________________________
bn4e_branch2a (BatchNormalizati (None, 32, 32, 256) 1024 res4e_branch2a[0][0]
__________________________________________________________________________________________________
activation_35 (Activation) (None, 32, 32, 256) 0 bn4e_branch2a[0][0]
__________________________________________________________________________________________________
res4e_branch2b (Conv2D) (None, 32, 32, 256) 590080 activation_35[0][0]
__________________________________________________________________________________________________
bn4e_branch2b (BatchNormalizati (None, 32, 32, 256) 1024 res4e_branch2b[0][0]
__________________________________________________________________________________________________
activation_36 (Activation) (None, 32, 32, 256) 0 bn4e_branch2b[0][0]
__________________________________________________________________________________________________
res4e_branch2c (Conv2D) (None, 32, 32, 1024) 263168 activation_36[0][0]
__________________________________________________________________________________________________
bn4e_branch2c (BatchNormalizati (None, 32, 32, 1024) 4096 res4e_branch2c[0][0]
__________________________________________________________________________________________________
add_12 (Add) (None, 32, 32, 1024) 0 bn4e_branch2c[0][0]
activation_34[0][0]
__________________________________________________________________________________________________
activation_37 (Activation) (None, 32, 32, 1024) 0 add_12[0][0]
__________________________________________________________________________________________________
res4f_branch2a (Conv2D) (None, 32, 32, 256) 262400 activation_37[0][0]
__________________________________________________________________________________________________
bn4f_branch2a (BatchNormalizati (None, 32, 32, 256) 1024 res4f_branch2a[0][0]
__________________________________________________________________________________________________
activation_38 (Activation) (None, 32, 32, 256) 0 bn4f_branch2a[0][0]
__________________________________________________________________________________________________
res4f_branch2b (Conv2D) (None, 32, 32, 256) 590080 activation_38[0][0]
__________________________________________________________________________________________________
bn4f_branch2b (BatchNormalizati (None, 32, 32, 256) 1024 res4f_branch2b[0][0]
__________________________________________________________________________________________________
activation_39 (Activation) (None, 32, 32, 256) 0 bn4f_branch2b[0][0]
__________________________________________________________________________________________________
res4f_branch2c (Conv2D) (None, 32, 32, 1024) 263168 activation_39[0][0]
__________________________________________________________________________________________________
bn4f_branch2c (BatchNormalizati (None, 32, 32, 1024) 4096 res4f_branch2c[0][0]
__________________________________________________________________________________________________
add_13 (Add) (None, 32, 32, 1024) 0 bn4f_branch2c[0][0]
activation_37[0][0]
__________________________________________________________________________________________________
activation_40 (Activation) (None, 32, 32, 1024) 0 add_13[0][0]
__________________________________________________________________________________________________
res5a_branch2a (Conv2D) (None, 16, 16, 512) 524800 activation_40[0][0]
__________________________________________________________________________________________________
bn5a_branch2a (BatchNormalizati (None, 16, 16, 512) 2048 res5a_branch2a[0][0]
__________________________________________________________________________________________________
activation_41 (Activation) (None, 16, 16, 512) 0 bn5a_branch2a[0][0]
__________________________________________________________________________________________________
res5a_branch2b (Conv2D) (None, 16, 16, 512) 2359808 activation_41[0][0]
__________________________________________________________________________________________________
bn5a_branch2b (BatchNormalizati (None, 16, 16, 512) 2048 res5a_branch2b[0][0]
__________________________________________________________________________________________________
activation_42 (Activation) (None, 16, 16, 512) 0 bn5a_branch2b[0][0]
__________________________________________________________________________________________________
res5a_branch2c (Conv2D) (None, 16, 16, 2048) 1050624 activation_42[0][0]
__________________________________________________________________________________________________
res5a_branch1 (Conv2D) (None, 16, 16, 2048) 2099200 activation_40[0][0]
__________________________________________________________________________________________________
bn5a_branch2c (BatchNormalizati (None, 16, 16, 2048) 8192 res5a_branch2c[0][0]
__________________________________________________________________________________________________
bn5a_branch1 (BatchNormalizatio (None, 16, 16, 2048) 8192 res5a_branch1[0][0]
__________________________________________________________________________________________________
add_14 (Add) (None, 16, 16, 2048) 0 bn5a_branch2c[0][0]
bn5a_branch1[0][0]
__________________________________________________________________________________________________
activation_43 (Activation) (None, 16, 16, 2048) 0 add_14[0][0]
__________________________________________________________________________________________________
res5b_branch2a (Conv2D) (None, 16, 16, 512) 1049088 activation_43[0][0]
__________________________________________________________________________________________________
bn5b_branch2a (BatchNormalizati (None, 16, 16, 512) 2048 res5b_branch2a[0][0]
__________________________________________________________________________________________________
activation_44 (Activation) (None, 16, 16, 512) 0 bn5b_branch2a[0][0]
__________________________________________________________________________________________________
res5b_branch2b (Conv2D) (None, 16, 16, 512) 2359808 activation_44[0][0]
__________________________________________________________________________________________________
bn5b_branch2b (BatchNormalizati (None, 16, 16, 512) 2048 res5b_branch2b[0][0]
__________________________________________________________________________________________________
activation_45 (Activation) (None, 16, 16, 512) 0 bn5b_branch2b[0][0]
__________________________________________________________________________________________________
res5b_branch2c (Conv2D) (None, 16, 16, 2048) 1050624 activation_45[0][0]
__________________________________________________________________________________________________
bn5b_branch2c (BatchNormalizati (None, 16, 16, 2048) 8192 res5b_branch2c[0][0]
__________________________________________________________________________________________________
add_15 (Add) (None, 16, 16, 2048) 0 bn5b_branch2c[0][0]
activation_43[0][0]
__________________________________________________________________________________________________
activation_46 (Activation) (None, 16, 16, 2048) 0 add_15[0][0]
__________________________________________________________________________________________________
res5c_branch2a (Conv2D) (None, 16, 16, 512) 1049088 activation_46[0][0]
__________________________________________________________________________________________________
bn5c_branch2a (BatchNormalizati (None, 16, 16, 512) 2048 res5c_branch2a[0][0]
__________________________________________________________________________________________________
activation_47 (Activation) (None, 16, 16, 512) 0 bn5c_branch2a[0][0]
__________________________________________________________________________________________________
res5c_branch2b (Conv2D) (None, 16, 16, 512) 2359808 activation_47[0][0]
__________________________________________________________________________________________________
bn5c_branch2b (BatchNormalizati (None, 16, 16, 512) 2048 res5c_branch2b[0][0]
__________________________________________________________________________________________________
activation_48 (Activation) (None, 16, 16, 512) 0 bn5c_branch2b[0][0]
__________________________________________________________________________________________________
res5c_branch2c (Conv2D) (None, 16, 16, 2048) 1050624 activation_48[0][0]
__________________________________________________________________________________________________
bn5c_branch2c (BatchNormalizati (None, 16, 16, 2048) 8192 res5c_branch2c[0][0]
__________________________________________________________________________________________________
add_16 (Add) (None, 16, 16, 2048) 0 bn5c_branch2c[0][0]
activation_46[0][0]
__________________________________________________________________________________________________
activation_49 (Activation) (None, 16, 16, 2048) 0 add_16[0][0]
__________________________________________________________________________________________________
global_average_pooling2d_1 (Glo (None, 2048) 0 activation_49[0][0]
__________________________________________________________________________________________________
dropout_1 (Dropout) (None, 2048) 0 global_average_pooling2d_1[0][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 2048) 4196352 dropout_1[0][0]
__________________________________________________________________________________________________
dropout_2 (Dropout) (None, 2048) 0 dense_1[0][0]
__________________________________________________________________________________________________
final_output (Dense) (None, 5) 10245 dropout_2[0][0]
==================================================================================================
Total params: 27,794,309
Trainable params: 4,206,597
Non-trainable params: 23,587,712
__________________________________________________________________________________________________
###Markdown
Train top layers
###Code
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
history_warmup = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=WARMUP_EPOCHS,
class_weight=class_weights,
verbose=1).history
###Output
Epoch 1/2
343/343 [==============================] - 461s 1s/step - loss: 1.3821 - acc: 0.6301 - kappa: 0.7207 - val_loss: 2.6848 - val_acc: 0.4890 - val_kappa: 0.2442
Epoch 2/2
343/343 [==============================] - 439s 1s/step - loss: 0.8302 - acc: 0.7095 - kappa: 0.8470 - val_loss: 3.7802 - val_acc: 0.4912 - val_kappa: 0.2589
###Markdown
Fine-tune the complete model
###Code
for layer in model.layers:
layer.trainable = True
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
rlrop = ReduceLROnPlateau(monitor='val_loss', mode='min', patience=RLROP_PATIENCE, factor=DECAY_DROP, min_lr=1e-6, verbose=1)
callback_list = [es, rlrop]
optimizer = optimizers.Adam(lr=LEARNING_RATE)
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=metric_list)
model.summary()
history_finetunning = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=EPOCHS,
callbacks=callback_list,
class_weight=class_weights,
verbose=1).history
###Output
Epoch 1/30
343/343 [==============================] - 474s 1s/step - loss: 0.6763 - acc: 0.7474 - kappa: 0.9081 - val_loss: 0.5887 - val_acc: 0.7841 - val_kappa: 0.9221
Epoch 2/30
343/343 [==============================] - 452s 1s/step - loss: 0.5715 - acc: 0.7912 - kappa: 0.9296 - val_loss: 0.5435 - val_acc: 0.7885 - val_kappa: 0.9439
Epoch 3/30
343/343 [==============================] - 453s 1s/step - loss: 0.5040 - acc: 0.8101 - kappa: 0.9399 - val_loss: 0.6270 - val_acc: 0.7852 - val_kappa: 0.9235
Epoch 4/30
343/343 [==============================] - 451s 1s/step - loss: 0.4829 - acc: 0.8098 - kappa: 0.9494 - val_loss: 0.5634 - val_acc: 0.7985 - val_kappa: 0.9365
Epoch 5/30
343/343 [==============================] - 447s 1s/step - loss: 0.4501 - acc: 0.8364 - kappa: 0.9601 - val_loss: 0.5185 - val_acc: 0.8194 - val_kappa: 0.9442
Epoch 6/30
343/343 [==============================] - 452s 1s/step - loss: 0.4429 - acc: 0.8294 - kappa: 0.9568 - val_loss: 0.5558 - val_acc: 0.8051 - val_kappa: 0.9396
Epoch 7/30
343/343 [==============================] - 448s 1s/step - loss: 0.4160 - acc: 0.8455 - kappa: 0.9623 - val_loss: 0.4607 - val_acc: 0.8370 - val_kappa: 0.9535
Epoch 8/30
343/343 [==============================] - 453s 1s/step - loss: 0.3647 - acc: 0.8684 - kappa: 0.9719 - val_loss: 0.4658 - val_acc: 0.8447 - val_kappa: 0.9567
Epoch 9/30
343/343 [==============================] - 451s 1s/step - loss: 0.3481 - acc: 0.8637 - kappa: 0.9687 - val_loss: 0.5907 - val_acc: 0.8084 - val_kappa: 0.9548
Epoch 10/30
343/343 [==============================] - 451s 1s/step - loss: 0.3487 - acc: 0.8630 - kappa: 0.9722 - val_loss: 0.5212 - val_acc: 0.8249 - val_kappa: 0.9457
Epoch 00010: ReduceLROnPlateau reducing learning rate to 4.999999873689376e-05.
Epoch 11/30
343/343 [==============================] - 448s 1s/step - loss: 0.2794 - acc: 0.8965 - kappa: 0.9796 - val_loss: 0.4810 - val_acc: 0.8491 - val_kappa: 0.9551
Epoch 12/30
343/343 [==============================] - 450s 1s/step - loss: 0.2610 - acc: 0.9045 - kappa: 0.9848 - val_loss: 0.4461 - val_acc: 0.8579 - val_kappa: 0.9673
Epoch 13/30
343/343 [==============================] - 454s 1s/step - loss: 0.2200 - acc: 0.9169 - kappa: 0.9852 - val_loss: 0.4263 - val_acc: 0.8612 - val_kappa: 0.9623
Epoch 14/30
343/343 [==============================] - 456s 1s/step - loss: 0.2127 - acc: 0.9209 - kappa: 0.9864 - val_loss: 0.4674 - val_acc: 0.8524 - val_kappa: 0.9613
Epoch 15/30
343/343 [==============================] - 452s 1s/step - loss: 0.1870 - acc: 0.9322 - kappa: 0.9889 - val_loss: 0.5328 - val_acc: 0.8425 - val_kappa: 0.9596
Epoch 16/30
109/343 [========>.....................] - ETA: 3:44 - loss: 0.2042 - acc: 0.9358 - kappa: 0.9904
###Markdown
Model loss graph
###Code
history = {'loss': history_warmup['loss'] + history_finetunning['loss'],
'val_loss': history_warmup['val_loss'] + history_finetunning['val_loss'],
'acc': history_warmup['acc'] + history_finetunning['acc'],
'val_acc': history_warmup['val_acc'] + history_finetunning['val_acc'],
'kappa': history_warmup['kappa'] + history_finetunning['kappa'],
'val_kappa': history_warmup['val_kappa'] + history_finetunning['val_kappa']}
sns.set_style("whitegrid")
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex='col', figsize=(20, 18))
ax1.plot(history['loss'], label='Train loss')
ax1.plot(history['val_loss'], label='Validation loss')
ax1.legend(loc='best')
ax1.set_title('Loss')
ax2.plot(history['acc'], label='Train accuracy')
ax2.plot(history['val_acc'], label='Validation accuracy')
ax2.legend(loc='best')
ax2.set_title('Accuracy')
ax3.plot(history['kappa'], label='Train kappa')
ax3.plot(history['val_kappa'], label='Validation kappa')
ax3.legend(loc='best')
ax3.set_title('Kappa')
plt.xlabel('Epochs')
sns.despine()
plt.show()
###Output
_____no_output_____
###Markdown
Model Evaluation
###Code
lastFullTrainPred = np.empty((0, N_CLASSES))
lastFullTrainLabels = np.empty((0, N_CLASSES))
lastFullValPred = np.empty((0, N_CLASSES))
lastFullValLabels = np.empty((0, N_CLASSES))
for i in range(STEP_SIZE_TRAIN+1):
im, lbl = next(train_generator)
scores = model.predict(im, batch_size=train_generator.batch_size)
lastFullTrainPred = np.append(lastFullTrainPred, scores, axis=0)
lastFullTrainLabels = np.append(lastFullTrainLabels, lbl, axis=0)
for i in range(STEP_SIZE_VALID+1):
im, lbl = next(valid_generator)
scores = model.predict(im, batch_size=valid_generator.batch_size)
lastFullValPred = np.append(lastFullValPred, scores, axis=0)
lastFullValLabels = np.append(lastFullValLabels, lbl, axis=0)
###Output
_____no_output_____
###Markdown
Threshold optimization
###Code
def find_best_fixed_threshold(preds, targs, do_plot=True):
best_thr_list = [0 for i in range(preds.shape[1])]
for index in reversed(range(1, preds.shape[1])):
score = []
thrs = np.arange(0, 1, 0.01)
for thr in thrs:
preds_thr = [index if x[index] > thr else np.argmax(x) for x in preds]
score.append(cohen_kappa_score(targs, preds_thr))
score = np.array(score)
pm = score.argmax()
best_thr, best_score = thrs[pm], score[pm].item()
best_thr_list[index] = best_thr
        print(f'thr={best_thr:.3f}', f'F2={best_score:.3f}')  # note: the value labelled "F2" here is actually the Cohen kappa score
if do_plot:
plt.plot(thrs, score)
plt.vlines(x=best_thr, ymin=score.min(), ymax=score.max())
plt.text(best_thr+0.03, best_score-0.01, ('Kappa[%s]=%.3f'%(index, best_score)), fontsize=14);
plt.show()
return best_thr_list
lastFullComPred = np.concatenate((lastFullTrainPred, lastFullValPred))
lastFullComLabels = np.concatenate((lastFullTrainLabels, lastFullValLabels))
complete_labels = [np.argmax(label) for label in lastFullComLabels]
threshold_list = find_best_fixed_threshold(lastFullComPred, complete_labels, do_plot=True)
threshold_list[0] = 0 # fallback: if no other threshold is exceeded, assign label 0
train_preds = [np.argmax(pred) for pred in lastFullTrainPred]
train_labels = [np.argmax(label) for label in lastFullTrainLabels]
validation_preds = [np.argmax(pred) for pred in lastFullValPred]
validation_labels = [np.argmax(label) for label in lastFullValLabels]
train_preds_opt = [0 for i in range(lastFullTrainPred.shape[0])]
for idx, thr in enumerate(threshold_list):
for idx2, pred in enumerate(lastFullTrainPred):
if pred[idx] > thr:
train_preds_opt[idx2] = idx
validation_preds_opt = [0 for i in range(lastFullValPred.shape[0])]
for idx, thr in enumerate(threshold_list):
for idx2, pred in enumerate(lastFullValPred):
if pred[idx] > thr:
validation_preds_opt[idx2] = idx
###Output
thr=0.480 F2=0.884
###Markdown
Confusion Matrix
###Code
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 7))
labels = ['0 - No DR', '1 - Mild', '2 - Moderate', '3 - Severe', '4 - Proliferative DR']
train_cnf_matrix = confusion_matrix(train_labels, train_preds)
validation_cnf_matrix = confusion_matrix(validation_labels, validation_preds)
train_cnf_matrix_norm = train_cnf_matrix.astype('float') / train_cnf_matrix.sum(axis=1)[:, np.newaxis]
validation_cnf_matrix_norm = validation_cnf_matrix.astype('float') / validation_cnf_matrix.sum(axis=1)[:, np.newaxis]
train_df_cm = pd.DataFrame(train_cnf_matrix_norm, index=labels, columns=labels)
validation_df_cm = pd.DataFrame(validation_cnf_matrix_norm, index=labels, columns=labels)
sns.heatmap(train_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax1).set_title('Train')
sns.heatmap(validation_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax2).set_title('Validation')
plt.show()
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 7))
labels = ['0 - No DR', '1 - Mild', '2 - Moderate', '3 - Severe', '4 - Proliferative DR']
train_cnf_matrix = confusion_matrix(train_labels, train_preds_opt)
validation_cnf_matrix = confusion_matrix(validation_labels, validation_preds_opt)
train_cnf_matrix_norm = train_cnf_matrix.astype('float') / train_cnf_matrix.sum(axis=1)[:, np.newaxis]
validation_cnf_matrix_norm = validation_cnf_matrix.astype('float') / validation_cnf_matrix.sum(axis=1)[:, np.newaxis]
train_df_cm = pd.DataFrame(train_cnf_matrix_norm, index=labels, columns=labels)
validation_df_cm = pd.DataFrame(validation_cnf_matrix_norm, index=labels, columns=labels)
sns.heatmap(train_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax1).set_title('Train optimized')
sns.heatmap(validation_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax2).set_title('Validation optimized')
plt.show()
###Output
_____no_output_____
###Markdown
Quadratic Weighted Kappa
###Code
print("Train Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds, train_labels, weights='quadratic'))
print("Validation Cohen Kappa score: %.3f" % cohen_kappa_score(validation_preds, validation_labels, weights='quadratic'))
print("Complete set Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds+validation_preds, train_labels+validation_labels, weights='quadratic'))
print("Train optimized Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds_opt, train_labels, weights='quadratic'))
print("Validation optimized Cohen Kappa score: %.3f" % cohen_kappa_score(validation_preds_opt, validation_labels, weights='quadratic'))
print("Complete optimized set Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds_opt+validation_preds_opt, train_labels+validation_labels, weights='quadratic'))
###Output
Train Cohen Kappa score: 0.968
Validation Cohen Kappa score: 0.888
Complete set Cohen Kappa score: 0.948
Train optimized Cohen Kappa score: 0.939
Validation optimized Cohen Kappa score: 0.871
Complete optimized set Cohen Kappa score: 0.923
###Markdown
Apply model to test set and output predictions
###Code
test_generator.reset()
STEP_SIZE_TEST = test_generator.n//test_generator.batch_size
preds = model.predict_generator(test_generator, steps=STEP_SIZE_TEST)
predictions = [np.argmax(pred) for pred in preds]
predictions_opt = [0 for i in range(preds.shape[0])]
for idx, thr in enumerate(threshold_list):
for idx2, pred in enumerate(preds):
if pred[idx] > thr:
predictions_opt[idx2] = idx
filenames = test_generator.filenames
results = pd.DataFrame({'id_code':filenames, 'diagnosis':predictions})
results['id_code'] = results['id_code'].map(lambda x: str(x)[:-4])
results_opt = pd.DataFrame({'id_code':filenames, 'diagnosis':predictions_opt})
results_opt['id_code'] = results_opt['id_code'].map(lambda x: str(x)[:-4])
###Output
_____no_output_____
###Markdown
Predictions class distribution
###Code
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 8.7))
sns.countplot(x="diagnosis", data=results, palette="GnBu_d", ax=ax1)
sns.countplot(x="diagnosis", data=results_opt, palette="GnBu_d", ax=ax2)
sns.despine()
plt.show()
val_kappa = cohen_kappa_score(validation_preds, validation_labels, weights='quadratic')
val_opt_kappa = cohen_kappa_score(validation_preds_opt, validation_labels, weights='quadratic')
if val_kappa > val_opt_kappa:
results_name = 'submission.csv'
results_opt_name = 'submission_opt.csv'
else:
results_name = 'submission_norm.csv'
results_opt_name = 'submission.csv'
results.to_csv(results_name, index=False)
results.head(10)
results_opt.to_csv(results_opt_name, index=False)
results_opt.head(10)
###Output
_____no_output_____ |
HSE Lab (linear regression).ipynb | ###Markdown
HSE Lab Linear regression

This project was part of my Linear Algebra university course; the mathematical introduction, explanations and conclusions were originally written in Russian. In code blocks, my variables may have meaningless names. I intentionally did it to match the same variables in the formulas above.

Least squares method

Consider the system of equations $Xa = y$, in which $a$ is the column of unknowns. It can be rewritten in vector form
$$x_1 a_1 + x_2 a_2 + \ldots + x_k a_k = y,$$
where $x_1,\ldots,x_n$ are the columns of the matrix $X$. Thus, solving the original system means finding a linear combination of the vectors $x_1,\ldots,x_n$ equal to the right-hand side. But what if no such linear combination exists? Geometrically this means that the vector $y$ does not lie in the subspace $U = \langle x_1,\ldots, x_k\rangle$. In this case we can find a *pseudo-solution*: a coefficient vector $\hat{a}$ for which the linear combination $x_1 \hat{a}_1 + x_2 \hat{a}_2 + \ldots + x_k \hat{a}_k$, while not exactly equal to $y$, is the best approximation --- that is, the point $\hat{y}$ of the subspace $U$ closest to $y$ (in other words, the orthogonal projection of $y$ onto this subspace). So the goal of our search can be stated in two equivalent ways:
1. Find the vector $\hat{a}$ for which the length of the difference $|X\hat{a} - y|$ is minimal (hence the name "least squares method");
2. Find the orthogonal projection $\hat{y}$ of the vector $y$ onto the subspace $U$ and represent it in the form $X\hat{a}$.
From here on we assume that the vectors $x_1,\ldots,x_n$ are linearly independent (if they are not, it makes sense to first extract a maximal linearly independent subsystem).
It was shown in the lectures that the projection of the vector $y$ onto the subspace $U = \langle x_1,\ldots, x_k\rangle$ is written as
$$\hat{y} = X\left(X^TX\right)^{-1}X^Ty,$$
and, accordingly, the desired vector $\hat{a}$ equals
$$\hat{a} = \left(X^TX\right)^{-1}X^Ty.$$

Linear regression problem

Let us start with an example. Suppose you want to find how the GPA S of a Faculty of Computer Science student depends on their height H, weight W, hair length L and N --- the number of hours they devote to studying every day. Imagine that we measured all these parameters for $n$ students and obtained the sets of values $S_1,\ldots, S_n$, $H_1,\ldots, H_n$ and so on.
One could try many different clever models here, but it makes sense to start with the simplest, linear one:
$$S = a_1H + a_2W + a_3L + a_4N + a_5.$$
Of course, there is no strict linear dependence (otherwise exams could happily be abolished), but we can try to pick coefficients $a_1, a_2, a_3, a_4, a_5$ for which the deviation of the right-hand side from the observed values is smallest:
$$\sum_{i=1}^n\left(S_i - ( a_1H_i + a_2W_i + a_3L_i + a_4N_i + a_5)\right)^2 \longrightarrow \min$$
And we immediately see that this is a least squares problem! Namely, here
$$X =\begin{pmatrix}H_1 & W_1 & L_1 & N_1 & 1\\H_2 & W_2 & L_2 & N_2 & 1\\\dots & \dots & \dots & \dots & \dots \\H_n & W_n & L_n & N_n & 1\end{pmatrix},\qquad y=\begin{pmatrix}S_1\\ S_2\\ \vdots \\ S_n\end{pmatrix}$$
Solving this problem with the formulas we already know, we obtain estimates of the coefficients $\hat{a}_i$ ($i = 1,\ldots,5$).
Now let us state the general linear regression problem. We have $k$ variables $x_1,\ldots,x_k$ ("regressors") through which we want to express the "explained variable" $y$:
$$y = a_1x_1 + a_2x_2 + \ldots + a_kx_k$$
The values of all variables were measured $n$ times (for $n$ different objects, or at $n$ different moments in time --- it depends on the problem). Substituting these data into the previous equality:
$$\begin{pmatrix}y_1\\ y_2 \\ \vdots \\ y_n\end{pmatrix} = a_1\begin{pmatrix}x_{11} \\ x_{21} \\ \vdots \\ x_{n1} \end{pmatrix} + a_2\begin{pmatrix}x_{12} \\ x_{22} \\ \vdots \\ x_{n2} \end{pmatrix} + \ldots + a_k\begin{pmatrix}x_{1k} \\ x_{2k} \\ \vdots \\ x_{nk} \end{pmatrix}$$
(here $x_{ij}$ is the value of the $j$-th feature in the $i$-th measurement). It is convenient to rewrite this in matrix form:
$$\begin{pmatrix}x_{11} & x_{12} & \ldots & x_{1k}\\x_{21} & x_{22} & \ldots & x_{2k}\\\dots & \dots & \dots & \dots\\x_{n1} & x_{n2} & \ldots & x_{nk}\end{pmatrix} \cdot\begin{pmatrix}a_1 \\ a_2 \\ \vdots \\ a_k\end{pmatrix} = \begin{pmatrix}y_1 \\ y_2 \\ \vdots \\ y_n\end{pmatrix}$$
or, for short, $Xa = y$. Since in practice this system of equations often has no solution (because real-life dependencies are rarely truly linear), a pseudo-solution is found by the least squares method.

Quality estimation. Training and testing

After you have built a regression and obtained some dependence of the explained variable on the regressors, it is time to evaluate the quality of the regression. There are many different quality functionals; for now we will discuss only the simplest and most obvious of them: the mean square error. It equals
$$\frac1{n}|X\hat{a} - y|^2 = \frac1{n}\sum_{i=1}^n\left(\hat{a}_1x_{i1} + \hat{a}_2x_{i2} + \ldots + \hat{a}_kx_{ik} - y_i\right)^2$$
In general, we want to look for models with the smallest mean square error on the available data. However, chasing error minimization too fanatically can have sad consequences. For example, if we approximate a function of one variable from its values at $n$ points, then the best model in terms of this error is a polynomial of degree $(n-1)$, for which the error is zero. Nevertheless, the true dependence is hardly a polynomial of high degree. Moreover, the values are most likely given to you with some noise, so you have fitted your model to your noisy data, and on any other data (that is, at other points) the accuracy will most likely not be nearly as good. This effect is called **overfitting**; one also says that the **generalization ability** of such a model is poor.
To avoid this trap, the data are usually split into a training set (used to build the model and estimate the coefficients) and a test set. The better model is the one with the smaller value of the quality functional on the test set.

Task 1. Least squares method

Download the files ``train.txt`` and ``test.txt``. The first contains the training set and the second the test set. Each file contains two columns of numbers: the first holds $n$ points (values of the argument $x$), the second the values of some function $y = f(x)$ at these points, corrupted by random noise. Your task is to use the training set to pick a function $y = g(x)$ that decently approximates the unknown dependence. Let us load the training and test data (do not forget to enter the correct path!).
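Before turning to the data, here is a minimal NumPy sketch of the pseudo-solution formula $\hat{a} = (X^TX)^{-1}X^Ty$ and the mean square error (the toy data and variable names below are my own, not part of the assignment):

```python
import numpy as np

rng = np.random.default_rng(0)
n, k = 50, 3
# design matrix: two random features plus a column of ones for the intercept
X = np.column_stack([rng.normal(size=(n, k - 1)), np.ones(n)])
true_a = np.array([2.0, -1.0, 0.5])
y = X @ true_a + rng.normal(scale=0.1, size=n)   # noisy observations

# pseudo-solution via the normal equation: (X^T X) a_hat = X^T y
a_hat = np.linalg.solve(X.T @ X, X.T @ y)

# mean square error of the fit
mse = np.mean((X @ a_hat - y) ** 2)
print(a_hat, mse)
```

In practice `np.linalg.lstsq(X, y, rcond=None)` computes the same pseudo-solution in a numerically more stable way.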
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import copy
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.metrics import mean_squared_error
data_train = np.loadtxt('train.txt', delimiter=',')
data_test = np.loadtxt('test.txt', delimiter=',')
###Output
_____no_output_____
###Markdown
Let us split the values of $x$ and $y$
###Code
X_train = np.matrix(data_train[:, 0])
y_train = np.matrix(data_train[:, 1]).T
# Do the same for test data:
X_test = np.matrix(data_test[:, 0])
y_test = np.matrix(data_test[:, 1]).T
###Output
_____no_output_____
###Markdown
Using the least squares method, find the linear function ($y = kx + b$) that best approximates the unknown dependence. Useful functions: ``numpy.ones(n)`` for creating an array of ones of length $n$, and ``numpy.concatenate((A, B), axis=1)`` for merging two matrices column-wise (the pair ``A``, ``B`` becomes the matrix ``[A B]``).
###Code
k = np.matrix((np.ones(X_train.shape[1])))
X = np.array(np.concatenate((X_train.T, k.T), axis=1))
a = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y_train)
###Output
_____no_output_____
###Markdown
Plot the points $(x_i, y_i)$ in the plane together with the obtained linear function. Looking at the data, think about which degree of polynomial would approximate this function best. Find this polynomial and plot its graph.
###Code
xv = np.linspace(-0.5, 1.2, 2)
print(a.item([0][0]), "x + ", a.item([1][0]), sep="")
myline = a.item([0][0]) * xv + a.item([1][0])
pointsx = [X_train.item([x][0]) for x in range(X_train.shape[1])]
pointsy = [y_train.item([y][0]) for y in range(y_train.shape[0])]
plt.plot(pointsx, pointsy, 'ro', label='train points')
plt.plot(xv, myline, label = 'Approximation function')
plt.legend(loc='lower right')
plt.show()
###Output
2.279134498051949x + 4.433230905064934
###Markdown
For $k = 1,2,3,\ldots,10$ find the polynomial $\hat{f}_k$ of degree $k$ that best approximates the unknown dependence. For each of them compute the mean square error on the training data and on the test data: $\frac1{n}\sum_{i=1}^n\left( \hat{f}_k(x_i) - y_i \right)^2$ (in the first case the sum runs over the pairs $(x_i, y_i)$ from the training data, in the second over the pairs from the test data).
For $k = 1,2,3,4,6$ print the coefficients of the obtained polynomials and plot their graphs on one figure together with the points $(x_i, y_i)$ (you may want to make the figure larger; this is done with the command `plt.figure(figsize=(width, height))`).
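As a compact illustration of the same idea (my own sketch; the `np.vander` helper and the toy sine data are not part of the assignment), the degree-$k$ design matrix and both errors can be computed like this:

```python
import numpy as np

def poly_fit_mse(x_tr, y_tr, x_te, y_te, k):
    # columns are [x^k, ..., x, 1], i.e. a polynomial of degree k
    X_tr = np.vander(x_tr, k + 1)
    X_te = np.vander(x_te, k + 1)
    coefs, *_ = np.linalg.lstsq(X_tr, y_tr, rcond=None)
    mse_tr = np.mean((X_tr @ coefs - y_tr) ** 2)
    mse_te = np.mean((X_te @ coefs - y_te) ** 2)
    return coefs, mse_tr, mse_te

# toy usage on synthetic data
x = np.linspace(0, 1, 30)
y = np.sin(2 * np.pi * x) + 0.1 * np.random.randn(30)
print(poly_fit_mse(x[:20], y[:20], x[20:], y[20:], k=3))
```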
###Code
def f(p, x):
res = 0
for i in range(p.shape[0]):
res = p.item([i][0]) + res * x # Horner's method
return res
tmp = data_train[:, 0]
Xi = np.matrix((np.ones(X_train.shape[1]))).T
for i in range(1, 11):
tt = np.array([tmp ** i])
Xi = np.array(np.concatenate((tt.T, Xi), axis=1))
ai = np.matmul(np.matmul(np.linalg.inv(np.matmul(Xi.T, Xi)), Xi.T), y_train)
# Calculating MSE (Mean squared error)
mse_train = 0
for j in range(X_train.shape[1]):
mse_train += (f(ai, X_train.item([j][0])) - y_train.item([j][0])) ** 2
mse_train *= 1/(X_train.shape[1])
mse_test = 0
for j in range(X_test.shape[1]):
mse_test += (f(ai, X_test.item([j][0])) - y_test.item([j][0])) ** 2
mse_test *= 1/(X_test.shape[1])
print("k =", i, "\t mse_train =", mse_train, "\t mse_test =", mse_test, " \t diff =", mse_test - mse_train)
if i <= 6:
print("Coefficients from lowest to higher:")
print(ai.T)
pl_x = np.linspace(-0.3, 1.15, 100)
pl_y = f(ai, pl_x)
plt.figure(figsize=(8, 8))
plt.plot(pl_x, pl_y, label=('Approximation function of degree: ', i))
pointsx = [X_train.item([x][0]) for x in range(X_train.shape[1])]
pointsy = [y_train.item([y][0]) for y in range(y_train.shape[0])]
plt.plot(pointsx, pointsy, 'ro', label='train points')
plt.legend(loc='lower right')
plt.show()
###Output
k = 1 mse_train = 0.2968966332625196 mse_test = 0.43512020040488775 diff = 0.13822356714236816
Coefficients from lowest to higher:
[[2.2791345 4.43323091]]
###Markdown
What happens to the error as the degree of the polynomial grows? One might think that the higher the degree, the more complex the polynomial and the better it approximates our function. Do your observations confirm this? What, in your opinion, explains the behaviour of the test error at $k = 10$?
**Answer:** as the degree of the polynomial grows, the error grows, because the polynomial adapts itself to the training set and reflects the true dependence in our data less and less accurately. Thus, at $k = 10$ the error on the training set becomes minimal (0.1531661099), while on the test set it is, on the contrary, maximal (14.63202521).
The smallest gap between the training and test errors is observed at $k = 3$. The smallest error on the test data is also at $k = 3$.

Task 2. Linear regression

Download the files ``flats_moscow_mod.txt`` and ``flats_moscow_description.txt``. The first contains data on apartments in Moscow. Each row contains six characteristics of an apartment, separated by tabs; the first row contains the code names of the characteristics. The second file gives brief descriptions of the features. Your task is to build, using the least squares method, a (linear) dependence between the price of an apartment and the other available parameters.
Using the formulas you already know, find the regression coefficients. What is the meaning of their signs? Do they agree with your view of life?
Evaluate the quality of the approximation by computing the mean square error.
###Code
flats_data_train = np.loadtxt('flats_moscow_mod.txt', delimiter='\t')
y = flats_data_train[:, 0] # 'price'
x = flats_data_train[:, 1:] # everything else
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)
k = np.matrix((np.ones(X_train.shape[0])))
X = np.array(np.concatenate((X_train, k.T), axis=1))
a = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y_train.T)
X_t = np.array(np.concatenate((X_test, np.matrix((np.ones(X_test.shape[0]))).T), axis=1))
print(a)
###Output
[ 1.50592246 1.32591493 2.07801758 -3.45754795 -1.30314818
-13.56375539]
###Markdown
The resulting dependence has the form $1.505\,totsp + 1.325\,livesp + 2.078\,kitsp - 3.457\,dist - 1.303\,metrdist - 13.563$. Judging by absolute values, the price depends most of all on the distance to the city centre and the kitchen area (food above all, of course), and least of all on the distance to the metro (a 15-minute walk in the morning is survivable and even useful for waking up and burning the calories gained in the kitchen, so we consider this factor last). A positive sign of a coefficient means that the larger the feature value, the higher the price of the apartment; a negative sign means the opposite: the smaller the feature value, the higher the price. In my opinion, all of this agrees quite well with reality.
###Code
def my_mean_squared_error(a, y, X):
mse = 0
for i in range(X.shape[0]):
val = 0
for j in range(a.shape[0]):
val += a[j] * X[i][j]
mse += (val - y[i]) ** 2
mse *= 1/(X.shape[0])
return mse
mse_train = my_mean_squared_error(a, y_train, X)
mse_test = my_mean_squared_error(a, y_test, X_t)
print("mse_train =", mse_train, " \t mse_test =", mse_test)
###Output
mse_train = 863.1784099407217 mse_test = 1059.913530999778
###Markdown
Improving the model

Of course, no one guarantees that the explained variable (the price of an apartment) depends on the other characteristics linearly. The dependence may be, for example, quadratic or logarithmic; moreover, not only individual features may matter but also their combinations. This can be taken into account by adding various functions of the existing characteristics as extra features: their squares, logarithms, pairwise products.
In this task you should try to improve the quality of the model by adding extra features that are functions of the existing ones. But be careful: overcomplicating the model will lead to overfitting.

**Model comparison**
Once you have built a new model, you will want to know whether it is better or worse than the original one. Checking this on the same sample you trained on is pointless and even harmful (recall the polynomial example: how beautifully the training error dropped as the degree grew!). Therefore you will need to split the sample into a training part and a test part. It is better to do this randomly (after all, you do not know how the dataset creators ordered the objects); we recommend the function `sklearn.model_selection.train_test_split` for this.
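A minimal sketch of what such a feature expansion plus a random split might look like (the synthetic matrix and the particular column choices below are placeholders of my own, not the actual flats features):

```python
import numpy as np
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(42)
x = rng.normal(size=(200, 5))   # stand-in for the flats feature matrix
y = rng.normal(size=200)        # stand-in for the prices

# add squares of each feature and one pairwise product as extra columns
x_ext = np.column_stack([x, x ** 2, x[:, 0] * x[:, 1]])

X_train, X_test, y_train, y_test = train_test_split(x_ext, y, test_size=0.33, random_state=42)
print(X_train.shape, X_test.shape)
```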
###Code
# Feature and coefficients playground #
sp = np.matrix((X_train[:, 0] + X_train[:, 1] + 2 * X_train[:, 2]) ** 3) # (totsp + livesp + 2*kitsp)**3
logdist = np.matrix((X_train[:, 3] + X_train[:, 4]) ** 0.5) # (dist + metrdist) ** 0.5
nm = np.matrix(X_train[:, 3] * X_train[:, 4]) # (dist * metrdist)
mm = np.matrix(X_train[:, 2] * X_train[:, 0]) # (kitsp * totsp)
new_X = np.array(np.concatenate((X_train, sp.T, logdist.T, nm.T, mm.T), axis=1))
new_X = np.array(np.concatenate((new_X, np.matrix((np.ones(X_train.shape[0]))).T), axis=1))
new_a = np.matmul(np.matmul(np.linalg.inv(np.matmul(new_X.T, new_X)), new_X.T), y_train.T)
sp_t = np.matrix((X_test[:, 0] + X_test[:, 1] + 2 * X_test[:, 2]) ** 3)
logdist_t = np.matrix((X_test[:, 3] + X_test[:, 4]) ** 0.5)
nm_t = np.matrix(X_test[:, 3] * X_test[:, 4])
mm_t = np.matrix(X_test[:, 2] * X_test[:, 0])
newX_t = np.array(np.concatenate((X_test, sp_t.T, logdist_t.T, nm_t.T, mm_t.T), axis=1))
newX_t = np.array(np.concatenate((newX_t, np.matrix((np.ones(X_test.shape[0]))).T), axis=1))
clf3 = LinearRegression(fit_intercept=False).fit(X, y_train)
clf4 = LinearRegression(fit_intercept=False).fit(new_X, y_train)
print(clf3.coef_)
print(clf4.coef_)
print('Sklearn Train_Before MSE:', mean_squared_error(y_train, clf3.predict(X)))
print('Sklearn Test_Before MSE:', mean_squared_error(y_test, clf3.predict(X_t)))
print('Sklearn Train_After MSE:', mean_squared_error(y_train, clf4.predict(new_X)))
print('Sklearn Test_After MSE:', mean_squared_error(y_test, clf4.predict(newX_t)))
print('My Train_After MSE:', my_mean_squared_error(new_a, y_train, new_X))
print('My Test_After MSE:', my_mean_squared_error(new_a, y_test, newX_t))
###Output
Sklearn Train_Before MSE: 863.1784099407236
Sklearn Test_Before MSE: 1059.9135309997778
Sklearn Train_After MSE: 810.7519736572178
Sklearn Test_After MSE: 895.2356375823234
My Train_After MSE: 810.7519736572181
My Test_After MSE: 895.2356375831185
###Markdown
I will add the following features:
$(totsp + livesp + 2kitsp)^{3} \\(dist + metrdist)^{0.5} \\ (dist \cdot metrdist) \\(kitsp \cdot totsp) \\(livesp)^{4}$
The greatest improvement of the model came from the last two features.
I wonder, were we actually allowed to remove the original features? And it is a bit sad that only the data description file was updated while the data itself was not :(( Playing with the features would have been more fun.
###Code
# Testing new model and estimating improvement #
res_train = []
res_test = []
n = 1000
for i in range(n):
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.33)
k = np.matrix((np.ones(X_train.shape[0])))
X = np.array(np.concatenate((X_train, k.T), axis=1))
sp = np.matrix((X_train[:, 0] + X_train[:, 1] + 2 * X_train[:, 2]) ** 3)
logdist = np.matrix((X_train[:, 3] + X_train[:, 4]) ** 0.5)
nm = np.matrix(X_train[:, 3] * X_train[:, 4])
mm = np.matrix(X_train[:, 2] * X_train[:, 0])
xd = np.matrix((X_train[:, 1]) ** 4)
new_X = np.array(np.concatenate((X_train, sp.T, logdist.T, nm.T, mm.T, xd.T), axis=1))
new_X = np.array(np.concatenate((new_X, np.matrix((np.ones(X_train.shape[0]))).T), axis=1))
new_a = np.matmul(np.matmul(np.linalg.inv(np.matmul(new_X.T, new_X)), new_X.T), y_train.T)
sp_t = np.matrix((X_test[:, 0] + X_test[:, 1] + 2 * X_test[:, 2]) ** 3)
logdist_t = np.matrix((X_test[:, 3] + X_test[:, 4]) ** 0.5)
nm_t = np.matrix(X_test[:, 3] * X_test[:, 4])
mm_t = np.matrix(X_test[:, 2] * X_test[:, 0])
xd_t = np.matrix((X_test[:, 1]) ** 4)
newX_t = np.array(np.concatenate((X_test, sp_t.T, logdist_t.T, nm_t.T, mm_t.T, xd_t.T), axis=1))
newX_t = np.array(np.concatenate((newX_t, np.matrix((np.ones(X_test.shape[0]))).T), axis=1))
res_train.append(my_mean_squared_error(new_a, y_train, new_X))
res_test.append(my_mean_squared_error(new_a, y_test, newX_t))
print("train_before:", mse_train, "\t test_before:", mse_test)
print("\n \t After 1000 iterations we have: \n")
print("train_avg:", sum(res_train)/n, "\t train_max:", max(res_train), "\t train_min:", min(res_train))
print("%improve_train_avg:", (1 - (sum(res_train)/n)/mse_train) * 100, " \t %best_improve:", (1 - min(res_train)/mse_train) * 100)
print()
print("test_avg:", sum(res_test)/n, " \t test_max:", max(res_test), " \t test_min:", min(res_test))
print("%improve_test_avg:", (1 - (sum(res_test)/n)/mse_test) * 100, " \t %best_improve:", (1 - min(res_test)/mse_test) * 100)
print()
print("%improve_other_avg:", (1 - (sum(res_train)/n)/924) * 100, ",", (1 - (sum(res_test)/n)/924) * 100)
###Output
train_before: 863.1784099407217 test_before: 1059.913530999778
After 1000 iterations we have:
train_avg: 753.8231102423669 train_max: 861.6581632055196 train_min: 607.055315219035
%improve_train_avg: 12.668910440642833 %best_improve: 29.67209232437542
test_avg: 806.102567879204 test_max: 1239.3376980084108 test_min: 579.7911226118063
%improve_test_avg: 23.94638389804905 %best_improve: 45.298262013420064
%improve_other_avg: 18.41741231143216 , 12.759462350735495
###Markdown
On average the new model gives about a 12% improvement on the training set and about a 24% improvement on the test set.

Task 3. Regularization.

Recall that the linear regression problem is formulated as the problem of finding the projection of the vector of values of the explained variable $y$ onto the linear span $\langle x_1,\ldots,x_k\rangle$ of the regressor value vectors. If the vectors $x_1,\ldots,x_k$ are linearly dependent, then the matrix $X^TX$ is singular and the problem cannot be solved (more precisely, it can, but not with the formula given above). In real life, fortunately, different features are rarely *exactly* linearly dependent, but in many situations they are correlated and become "almost" linearly dependent. Examples are a person's salary, their level of education, the price of their car and the total area of the real estate they own. In this case the matrix $X^TX$ is close to singular, which leads to numerical instability and poor-quality solutions; as a consequence, overfitting occurs. One of the symptoms of this problem is unusually large (in absolute value) components of the vector $a$.
There are many ways to fight this evil. One of them is regularization. Here we consider one of its varieties --- **L2-regularization**. The idea is to adjust the matrix $X^TX$, making it "nicer". For example, this can be done by replacing it with $(X^TX + \lambda E)$, where $\lambda$ is some scalar. By sacrificing accuracy on the training set we nevertheless obtain a numerically more stable pseudo-solution $a = (X^TX + \lambda E)^{-1}X^Ty$ and reduce the overfitting effect. The parameter $\lambda$ has to be tuned, and there is no universal way to do this, but it can often be chosen so that the error on the test set decreases.
Now recall the first task. If you did it, you remember that the error of the approximation by a polynomial of degree six is rather high. Check that, with regularization and a well-chosen coefficient $\lambda$, the test error can be made no larger than for the polynomial of optimal degree in the model without regularization. For this $\lambda$ compare $\det(X^TX)$ and $\det(X^TX + \lambda E)$.
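A minimal sketch of the regularized pseudo-solution $a = (X^TX + \lambda E)^{-1}X^Ty$ (the helper name and the toy data are my own assumptions, not part of the assignment):

```python
import numpy as np

def ridge_solution(X, y, lam):
    k = X.shape[1]
    # solve (X^T X + lambda * E) a = X^T y
    return np.linalg.solve(X.T @ X + lam * np.eye(k), X.T @ y)

rng = np.random.default_rng(1)
X = rng.normal(size=(40, 6))
X[:, 5] = X[:, 4] + 1e-3 * rng.normal(size=40)   # nearly collinear columns -> ill-conditioned X^T X
y = rng.normal(size=40)
print(ridge_solution(X, y, lam=0.0))   # unstable, large coefficients
print(ridge_solution(X, y, lam=1.0))   # damped by the regularization
```

Note that $\lambda = 0$ recovers the ordinary least squares solution, which is exactly where near-collinear columns cause trouble.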
###Code
X_train = np.matrix(data_train[:, 0])
y_train = np.matrix(data_train[:, 1]).T
X_test = np.matrix(data_test[:, 0])
y_test = np.matrix(data_test[:, 1]).T
ans = []
t = -1  # note: the code below uses (X^T X - t*E), so the effective lambda is -t = 1
print("Coefficient =", t)
print()
tmp_2 = data_train[:, 0]
Xi_2 = np.matrix((np.ones(X_train.shape[1]))).T
det_before = 0
det_after = 0
for i in range(1, 11):
tt_2 = np.array([tmp_2 ** i])
Xi_2 = np.array(np.concatenate((tt_2.T, Xi_2), axis=1))
xtx = np.matmul(Xi_2.T, Xi_2)
ai_2_no = np.matmul(np.matmul(np.linalg.inv(xtx), Xi_2.T), y_train)
ai_2 = np.matmul(np.matmul(np.linalg.inv(xtx - t * np.eye(xtx.shape[0])), Xi_2.T), y_train)
mse_train_2 = 0
for j in range(X_train.shape[1]):
mse_train_2 += (f(ai_2, X_train.item([j][0])) - y_train.item([j][0])) ** 2
mse_train_2 *= 1/(X_train.shape[1])
mse_test_2 = 0
for j in range(X_test.shape[1]):
mse_test_2 += (f(ai_2, X_test.item([j][0])) - y_test.item([j][0])) ** 2
mse_test_2 *= 1/(X_test.shape[1])
print("k =", i, "\t mse_train_2 =", mse_train_2, "\t mse_test_2 =", mse_test_2, " \t diff =", mse_test_2 - mse_train_2)
if i == 6:
det_before = np.linalg.det(xtx)
det_after = np.linalg.det(xtx - t * np.eye(xtx.shape[0]))
pl_x_2 = np.linspace(-0.3, 1.15, 100)
pl_y_2 = f(ai_2, pl_x_2)
pl_y_2_no = f(ai_2_no, pl_x_2)
print()
print("det without regularization", det_before, " \t det with regularization", det_after)
###Output
Coefficient = -1
k = 1 mse_train_2 = 0.3415526305150493 mse_test_2 = 0.404287009194863 diff = 0.06273437867981368
k = 2 mse_train_2 = 0.31222705028447084 mse_test_2 = 0.3027033800584491 diff = -0.009523670226021741
k = 3 mse_train_2 = 0.3144711960793258 mse_test_2 = 0.293324829447128 diff = -0.021146366632197766
k = 4 mse_train_2 = 0.3173082949716485 mse_test_2 = 0.2974359267709365 diff = -0.019872368200711976
k = 5 mse_train_2 = 0.31886405344795055 mse_test_2 = 0.30331263904372946 diff = -0.015551414404221087
k = 6 mse_train_2 = 0.3196110739903543 mse_test_2 = 0.308219923571227 diff = -0.011391150419127305
k = 7 mse_train_2 = 0.3199495546048587 mse_test_2 = 0.3117636451760249 diff = -0.008185909428833793
k = 8 mse_train_2 = 0.3200922473541476 mse_test_2 = 0.3140132325023869 diff = -0.006079014851760711
k = 9 mse_train_2 = 0.32014174166237686 mse_test_2 = 0.315128510461803 diff = -0.005013231200573842
k = 10 mse_train_2 = 0.3201464462599162 mse_test_2 = 0.3152719355314195 diff = -0.0048745107284966505
det without regularization 9.08286483934784e-12 det with regularization 509.21645079421813
###Markdown
On the test set, the mean square error for the polynomial of degree 6 (and for almost all of the others) in the model with regularization is smaller than the mean square error for the polynomial of degree 3 (0.35534645) in the model without regularization. Moreover, $\det(X^TX + \lambda E)$ is roughly $10^{13}$ times larger than $\det(X^TX)$ and is quite far from zero.
Plot on one figure the graphs of the degree-six polynomials approximating the unknown function for the model with regularization and for the model without it. In what way does the first compare favourably with the second?
###Code
plt.figure(figsize=(8, 8))
plt.plot(pl_x_2, pl_y_2, label="with regularization")
plt.plot(pl_x_2, pl_y_2_no, label="without regularization")
plt.plot(X_train[0], y_train.T[0], 'ro')
plt.legend(loc='lower right')
plt.show()
###Output
_____no_output_____ |
GloVeRNN.ipynb | ###Markdown
Load Data
###Code
train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
output_names = ['toxic','severe_toxic','obscene','threat','insult','identity_hate']
tok=text.Tokenizer(filters = '!"#$%&()*+,-./:;<=>?@[\\]^_\'`{|}~\t\n', lower=True)
tok.fit_on_texts(np.concatenate((train.comment_text.values, test.comment_text.values)))
###Output
_____no_output_____
###Markdown
Load GloVe
###Code
f = open('data/glove.42B.300d.txt', 'r', encoding = 'utf-8')
all_unique_tokens = tok.word_index.keys()
embeddings = {}
for line in f:
values = line.split()
word = values[0]
    # The full GloVe embedding matrix doesn't fit in my GPU memory, so only take words which appear in the data for now.
# Can always swap weights for embedding layer after model training
if word in all_unique_tokens:
coefs = np.array(values[1:], dtype = 'float32')
embeddings[word] = coefs
for i in list(tok.word_index.keys()):
if i not in embeddings.keys():
del tok.word_index[i]
for counter, i in enumerate(tok.word_index.keys()):
tok.word_index[i] = counter+1
idx2word = {b:a for a,b in tok.word_index.items()}
idx2word[0] = '<UNK>'
word2idx = defaultdict(lambda: 0, tok.word_index)  # default_factory takes no arguments; unknown words map to index 0 ('<UNK>')
embeddings['<UNK>'] = np.zeros((300,))
###Output
_____no_output_____
###Markdown
Data Processing
###Code
train['toks'] = tok.texts_to_sequences(train.comment_text.values)
test['toks'] = tok.texts_to_sequences(test.comment_text.values)
vocab_size = len(embeddings)
max_len = 300
n_factors = 300
def create_emb():
emb = np.zeros((vocab_size+1,n_factors), dtype = 'float32')
for i in range(0, vocab_size):
word = idx2word[i]
emb[i,:] = embeddings[word] #each row is a word
return emb
emb = create_emb()
emb.shape
# train val split
np.random.seed(10)
indexTrain = np.random.choice(range(train.shape[0]), size = int(0.9*train.shape[0]), replace = False)
indexVal = list(set(range(train.shape[0])) - set(indexTrain))
traindf = train.loc[indexTrain]
valdf = train.loc[indexVal]
dataInputTrain=sequence.pad_sequences(traindf.toks,maxlen=max_len)
dataInputVal=sequence.pad_sequences(valdf.toks,maxlen=max_len)
dataInputTest=sequence.pad_sequences(test.toks,maxlen=max_len)
' '.join([idx2word[i] for i in dataInputTrain[10,:]])
def makeModel(counter, denseNodes, convFilters, dropOut):
sequence_input = Input(shape=(max_len, ))
x = Embedding(vocab_size+1, n_factors, input_length=max_len, weights=[emb],trainable = False)(sequence_input)
x = Bidirectional(LSTM(128, return_sequences=True,dropout=0.15,recurrent_dropout=0.15))(x)
x = Conv1D(convFilters, kernel_size = 3, padding = "valid", kernel_initializer = "glorot_uniform")(x)
avg_pool = GlobalAveragePooling1D()(x)
max_pool = GlobalMaxPooling1D()(x)
x = Concatenate()([avg_pool, max_pool])
x = Dense(denseNodes, activation = 'relu')(x)
x = BatchNormalization(axis = -1)(x)
x = Dropout(dropOut)(x)
preds = Dense(6, activation="sigmoid")(x)
model = Model(sequence_input, preds)
model.compile(loss='binary_crossentropy',optimizer=Adam(lr=1e-3))
earlyStopping = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='min')
mcp_save = ModelCheckpoint('weights/lstm_mdl' + str(counter), save_best_only=True, monitor='val_loss', mode='min')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1, epsilon=1e-4, mode='min')
roc_callback = ROCCallBack(validation_data = [dataInputVal, valdf[output_names].values])
model.fit(x = dataInputTrain,
y = traindf[output_names].values,
batch_size = 64, epochs = 200,
validation_data = [dataInputVal, valdf[output_names].values],
callbacks=[earlyStopping, mcp_save, reduce_lr_loss, roc_callback])
pred = model.predict(dataInputTest, verbose = 1)
for c,i in enumerate(output_names):
test[i] = pred[:,c]
test[['id'] + output_names].to_csv('data/answers/lstm' + str(counter) + '.csv', index = False)
return model
params = [
{'denseNodes': 128, 'convFilters': 128, 'dropOut': 0.4},
{'denseNodes': 256, 'convFilters': 128, 'dropOut': 0.5},
{'denseNodes': 512, 'convFilters': 128, 'dropOut': 0.55},
]
models = [makeModel(counter, **i) for counter, i in enumerate(params)]
###Output
Train on 143613 samples, validate on 15958 samples
Epoch 1/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0841
roc-auc_val: 0.9801
143613/143613 [==============================] - 1716s 12ms/step - loss: 0.0841 - val_loss: 0.3135
Epoch 2/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0488
roc-auc_val: 0.9877
143613/143613 [==============================] - 1699s 12ms/step - loss: 0.0488 - val_loss: 0.0426
Epoch 3/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0451
roc-auc_val: 0.9851
143613/143613 [==============================] - 1698s 12ms/step - loss: 0.0451 - val_loss: 0.0663
Epoch 4/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0422
roc-auc_val: 0.989
143613/143613 [==============================] - 1698s 12ms/step - loss: 0.0422 - val_loss: 0.0404
Epoch 5/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0395
roc-auc_val: 0.9892
143613/143613 [==============================] - 1698s 12ms/step - loss: 0.0395 - val_loss: 0.0456
Epoch 6/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0370
roc-auc_val: 0.989
143613/143613 [==============================] - 1699s 12ms/step - loss: 0.0370 - val_loss: 0.0405
Epoch 7/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0350
roc-auc_val: 0.9891
143613/143613 [==============================] - 1699s 12ms/step - loss: 0.0350 - val_loss: 0.0456
Epoch 8/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0346
Epoch 00008: reducing learning rate to 0.00010000000474974513.
roc-auc_val: 0.9882
143613/143613 [==============================] - 1698s 12ms/step - loss: 0.0346 - val_loss: 0.0442
Epoch 9/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0284
roc-auc_val: 0.9884
143613/143613 [==============================] - 1698s 12ms/step - loss: 0.0284 - val_loss: 0.0439
153164/153164 [==============================] - 906s 6ms/step
Train on 143613 samples, validate on 15958 samples
Epoch 1/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0812
roc-auc_val: 0.9857
143613/143613 [==============================] - 1703s 12ms/step - loss: 0.0812 - val_loss: 0.0465
Epoch 2/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0481
roc-auc_val: 0.9879
143613/143613 [==============================] - 1702s 12ms/step - loss: 0.0481 - val_loss: 0.0444
Epoch 3/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0448
roc-auc_val: 0.9891
143613/143613 [==============================] - 1702s 12ms/step - loss: 0.0448 - val_loss: 0.0443
Epoch 4/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0423
roc-auc_val: 0.9893
143613/143613 [==============================] - 1702s 12ms/step - loss: 0.0423 - val_loss: 0.0418
Epoch 5/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0396
roc-auc_val: 0.9892
143613/143613 [==============================] - 1704s 12ms/step - loss: 0.0396 - val_loss: 0.0400
Epoch 6/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0372
roc-auc_val: 0.9886
143613/143613 [==============================] - 1702s 12ms/step - loss: 0.0372 - val_loss: 0.0445
Epoch 7/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0347
roc-auc_val: 0.9886
143613/143613 [==============================] - 1702s 12ms/step - loss: 0.0347 - val_loss: 0.0422
Epoch 8/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0317
roc-auc_val: 0.9881
143613/143613 [==============================] - 1701s 12ms/step - loss: 0.0317 - val_loss: 0.0439
Epoch 9/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0305
Epoch 00009: reducing learning rate to 0.00010000000474974513.
roc-auc_val: 0.9877
143613/143613 [==============================] - 1705s 12ms/step - loss: 0.0305 - val_loss: 0.0432
Epoch 10/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0249
roc-auc_val: 0.9876
143613/143613 [==============================] - 1706s 12ms/step - loss: 0.0249 - val_loss: 0.0465
153164/153164 [==============================] - 908s 6ms/step
Train on 143613 samples, validate on 15958 samples
Epoch 1/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0754
roc-auc_val: 0.9847
143613/143613 [==============================] - 1716s 12ms/step - loss: 0.0754 - val_loss: 0.0451
Epoch 2/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0487
roc-auc_val: 0.984
143613/143613 [==============================] - 1713s 12ms/step - loss: 0.0487 - val_loss: 0.1674
Epoch 3/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0448
roc-auc_val: 0.9887
143613/143613 [==============================] - 1713s 12ms/step - loss: 0.0448 - val_loss: 0.0407
Epoch 4/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0418
roc-auc_val: 0.9885
143613/143613 [==============================] - 1713s 12ms/step - loss: 0.0418 - val_loss: 0.0427
Epoch 5/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0395
roc-auc_val: 0.9888
143613/143613 [==============================] - 1712s 12ms/step - loss: 0.0395 - val_loss: 0.0422
Epoch 6/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0373
roc-auc_val: 0.9893
143613/143613 [==============================] - 1713s 12ms/step - loss: 0.0373 - val_loss: 0.0414
Epoch 7/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0347
Epoch 00007: reducing learning rate to 0.00010000000474974513.
roc-auc_val: 0.9886
143613/143613 [==============================] - 1713s 12ms/step - loss: 0.0347 - val_loss: 0.0431
Epoch 8/200
143552/143613 [============================>.] - ETA: 0s - loss: 0.0288
roc-auc_val: 0.9888
143613/143613 [==============================] - 1712s 12ms/step - loss: 0.0288 - val_loss: 0.0426
153164/153164 [==============================] - 906s 6ms/step
Personal_Loan_Campaign_Modelling_SALINAS.ipynb | ###Markdown
Personal Loan Campaign Modelling Project*by: Garey Salinas* Description Background and ContextAllLife Bank is a US bank that has a growing customer base. The majority of these customers are liability customers (depositors) with varying sizes of deposits. The number of customers who are also borrowers (asset customers) is quite small, and the bank is interested in expanding this base rapidly to bring in more loan business and in the process, earn more through the interest on loans. In particular, the management wants to explore ways of converting its liability customers to personal loan customers (while retaining them as depositors).A campaign that the bank ran last year for liability customers showed a healthy conversion rate of over 9% success. This has encouraged the retail marketing department to devise campaigns with better target marketing to increase the success ratio.You as a Data scientist at AllLife bank have to build a model that will help the marketing department to identify the potential customers who have a higher probability of purchasing the loan. Objective1. To predict whether a liability customer will buy a personal loan or not.2. Which variables are most significant.3. Which segment of customers should be targeted more. Data DictionaryLABELS | DESCRIPTION-------|:------------ID | Customer IDAge | Customer’s age in completed yearsExperience | years of professional experienceIncome | Annual income of the customer (in thousand dollars)ZIP Code | Home Address ZIP code.Family | the Family size of the customerCCAvg | Average spending on credit cards per month (in thousand dollars)Education | Education Level. 1: Undergrad; 2: Graduate;3: Advanced/ProfessionalMortgage | Value of house mortgage if any. (in thousand dollars)Personal_Loan | Did this customer accept the personal loan offered in the last campaign?Securities_Account | Does the customer have securities account with the bank?CD_Account | Does the customer have a certificate of deposit (CD) account with the bank?Online | Do customers use internet banking facilities?CreditCard | Does the customer use a credit card issued by any other Bank (excluding All life Bank)? Import libraries and load dataset Import libraries
###Code
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
from sklearn import metrics, tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import (confusion_matrix, classification_report,
accuracy_score, precision_score, recall_score, f1_score)
import warnings
warnings.filterwarnings("ignore") # ignore warnings
%matplotlib inline
sns.set()
###Output
_____no_output_____
###Markdown
Read Dataset
###Code
data = pd.read_csv("Loan_Modelling.csv")
df = data.copy()
print(f"There is {df.shape[0]} rows and {df.shape[1]} columns in this dataset.")
###Output
_____no_output_____
###Markdown
Overview of Dataset
###Code
pd.concat([df.head(10), df.tail(10)])
df.columns
###Output
_____no_output_____
###Markdown
Edit column names
###Code
df.columns = df.columns.str.lower()
df.columns = df.columns.str.replace("creditcard", "credit_card")
df.columns
df.info()
###Output
_____no_output_____
###Markdown
**Observation**
- All column names are lowercase.
- There are 5000 observations in this dataset.
- All values are of a numerical type (int, float).
- There are zero missing values in all columns. We will confirm.

Check for duplicates
###Code
df[df.duplicated()].count()
###Output
_____no_output_____
###Markdown
Describe dataset
###Code
df.nunique()
###Output
_____no_output_____
###Markdown
**Observations**- `id` has 5000 unique values. We can drop this column.- We can change `family, education` to categorical.
###Code
df.drop(['id'], axis=1, inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
Change dtypes
###Code
cat_features = ['family', 'education']
for feature in cat_features:
df[feature] = pd.Categorical(df[feature])
df.info()
df.describe(include='all').T
###Output
_____no_output_____
###Markdown
**Observations**
- All columns have a count of 5000, meaning there are zero missing values in these columns.
- There are 4 unique values in `family` and 3 unique values in the `education` column.
- There are only 2 unique values in the `personal_loan`, `securities_account`, `cd_account`, `online` and `credit_card` columns.
- `age` has a mean of 45 and a standard deviation of about 11.4. The min `age` is 23 and the max is 67.
- `experience` has a mean of 20 and a standard deviation of 11.5. The min is -3 and the max is 43 years. We will inspect the negative value further.
- `income` has a mean of 74K and a standard deviation of 46K. The values range from 8K to 224K.
- `ccavg` has a mean of 1.93 and a standard deviation of 1.7. The values range from 0.0 to 10.0.
- `mortgage` has a mean of 56.5K and a standard deviation of 101K. The standard deviation is greater than the mean. We will investigate further.
- There are zero values in the `mortgage` column. We will inspect.
###Code
df.isnull().sum().sort_values(ascending=False)
df.isnull().values.any() # If there are any null values in data set
###Output
_____no_output_____
###Markdown
**Observations**- Confirming dtype changed to categorical variables for the columns mentioned previously.- Confirming there are zero missing values. Not to be confused with values that are zero. We have alot of those in the `mortgage` column. Also, we will investigate the outliers.
###Code
numerical_feature_df = df.select_dtypes(include=['int64','float64'])
numerical_feature_df.skew()
###Output
_____no_output_____
###Markdown
**Observations**
- `income`, `ccavg` and `mortgage` are heavily skewed. We will investigate further.

Exploratory Data Analysis

Univariate Analysis
###Code
def histogram_boxplot(feature, figsize=(15, 7), bins=None):
"""
Boxplot and histogram combined
feature: 1-d feature array
figsize: size of fig (default (15,10))
bins: number of bins (default None / auto)
"""
f2, (ax_box2, ax_hist2) = plt.subplots(nrows = 2, # Number of rows of the subplot grid= 2
sharex = True, # x-axis will be shared among all subplots
gridspec_kw = {"height_ratios": (.25, .75)},
figsize = figsize
) # creating the 2 subplots
sns.boxplot(feature, ax=ax_box2, showmeans=True, color='yellow') # boxplot will be created and a star will indicate the mean value of the column
sns.distplot(feature, kde=True, ax=ax_hist2, bins=bins) if bins else sns.distplot(feature, kde=True, ax=ax_hist2) # For histogram
ax_hist2.axvline(np.mean(feature), color='green', linestyle='--') # Add mean to the histogram
ax_hist2.axvline(np.median(feature), color='blue', linestyle='-');# Add median to the histogram
def create_outliers(feature: str, data=df):
"""
Returns dataframe object of feature outliers.
feature: 1-d feature array
data: pandas dataframe (default is df)
"""
Q1 = data[feature].quantile(0.25)
Q3 = data[feature].quantile(0.75)
IQR = Q3 - Q1
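    # Tukey's rule: values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are treated as outliers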
#print(((df.Mileage < (Q1 - 1.5 * IQR)) | (df.Mileage > (Q3 + 1.5 * IQR))).sum())
return data[((data[feature] < (Q1 - 1.5 * IQR)) | (data[feature] > (Q3 + 1.5 * IQR)))]
###Output
_____no_output_____
###Markdown
Observations on `age`
###Code
histogram_boxplot(df.age)
###Output
_____no_output_____
###Markdown
**Observations**
- No outliers in the `age` column. The mean is near the median.
- Average `age` is about 45 years old.
- The `age` column distribution is uniform.

Observations on `income`
###Code
histogram_boxplot(df.income)
###Output
_____no_output_____
###Markdown
**Observations**- The average `income` is about 60K, with a median value of about 70K.- `income` column is right skewed and has many outliers to the upside. Observations on `income` outliers
###Code
outliers = create_outliers('income')
outliers.sort_values(by='income', ascending=False).head(20)
print(f"There are {outliers.shape[0]} outliers.")
###Output
_____no_output_____
###Markdown
Observations on `ccavg`
###Code
histogram_boxplot(df.ccavg)
###Output
_____no_output_____
###Markdown
**Observations**- `ccavg` has an average of about 1.5 and a median of about 2.- `ccavg` column is right skewed and has many outliers to the upside. Observations on `ccavg` outliers
###Code
outliers = create_outliers('ccavg')
outliers.sort_values(by='ccavg', ascending=False).head(20)
print(f"There are {outliers.shape[0]} outliers.")
###Output
_____no_output_____
###Markdown
Observations on `mortgage`
###Code
histogram_boxplot(df.mortgage)
###Output
_____no_output_____
###Markdown
**Observations**- `mortgage` has many values that aren't null but are equal to zero. We will dissect further.- `mortgage` column has many outliers to the upside. Observations on `mortgage` outliers
###Code
outliers = create_outliers('mortgage')
outliers.sort_values(by='mortgage', ascending=False)
print(f"There are {outliers.shape[0]} outliers in the outlier column.")
###Output
_____no_output_____
###Markdown
Check zero values in `mortgage` column
###Code
print(f'There are {df[df.mortgage==0].shape[0]} rows where mortgage equals to ZERO!')
###Output
_____no_output_____
###Markdown
Check `zipcodes` frequency where `mortgage` equals zero.
###Code
plt.figure(figsize=(15, 10))
sns.countplot(y=df[df.mortgage==0]['zipcode'],
data=df,
order=df[df.mortgage==0]['zipcode'].value_counts().index[:40]);
###Output
_____no_output_____
###Markdown
**Observations**- The `zipcode` 94720 has the most frequent number of mortgages that equal zero with over 120 values.- The second highest number of zero values is 94305, and the third highest is 95616. Observations on `experience`
###Code
histogram_boxplot(df.experience)
###Output
_____no_output_____
###Markdown
**Observations**- The `experience` column is uniform and has no outliers.- The average and median `experience` is about 20 years.- `experience` column is uniformly distributed. The mean is close to the median.
###Code
plt.figure(figsize=(15, 10))
sns.countplot(y=df.experience,
data=df,
order=df.experience.value_counts().index[:]);
###Output
_____no_output_____
###Markdown
**Observations**- 32 years is the greatest number of `experience` years observed with about 150 observations.- The plot shows negative values.
###Code
print(f"There are {df[df.experience<0].shape[0]} rows that have professional experience less than zero.")
df[df.experience<0].sort_values(by='experience', ascending=True).head()
###Output
_____no_output_____
###Markdown
Countplot for `experience` less than zero vs. `age`.
###Code
plt.figure(figsize=(10, 4))
sns.countplot(y=df[df.experience<0]['age'],
data=df,
order=df[df.experience<0]['age'].value_counts().index[:]);
###Output
_____no_output_____
###Markdown
**Observations**
- Most of the negative values are from the 25-year-old `age` group, with over 17 observations.
- This is an error in the data entry: you can't have negative years of `experience`, so we will take the absolute value of `experience`.

Taking absolute values of the `experience` column
###Code
df['abs_experience'] = np.abs(df.experience)
df.sort_values(by='experience', ascending=True).head(10)
histogram_boxplot(df.abs_experience)
###Output
_____no_output_____
###Markdown
**Observations**- It didn't change the distribution that much.
###Code
plt.figure(figsize=(15, 10))
sns.countplot(y=df.abs_experience,
data=df,
order=df.abs_experience.value_counts().index[:]);
###Output
_____no_output_____
###Markdown
- There are no more negative `experience` values. Overview on distributions of numerical columns.
###Code
# lets plot histogram of all plots
features = ['age', 'experience', 'income',
'ccavg', 'mortgage', 'zipcode',
'abs_experience']
n_rows = math.ceil(len(features)/3)
plt.figure(figsize=(15, n_rows*3.5))
for i, feature in enumerate(list(features)):
plt.subplot(n_rows, 3, i+1)
plt.hist(df[feature])
plt.tight_layout()
plt.title(feature, fontsize=15);
###Output
_____no_output_____
###Markdown
Overview on the dispersion of numerical columns.
###Code
# outlier detection using boxplot
plt.figure(figsize=(15, n_rows*4))
for i, feature in enumerate(features):
plt.subplot(n_rows, 3, i+1)
plt.boxplot(df[feature], whis=1.5)
plt.tight_layout()
plt.title(feature, fontsize=15);
###Output
_____no_output_____
###Markdown
Display value counts from categorical columns
###Code
# looking at value counts for non-numeric features
num_to_display = 10 # defining this up here so it's easy to change later if I want
for colname in df.dtypes[df.dtypes=='category'].index:
val_counts = df[colname].value_counts(dropna=False) # i want to see NA counts
print(f"Column: {colname}")
print("="*40)
print(val_counts[:num_to_display])
if len(val_counts) > num_to_display:
print(f"Only displaying first {num_to_display} of {len(val_counts)} values.")
print("\n") # just for more space between
###Output
_____no_output_____
###Markdown
Observations on `zipcode`
###Code
plt.figure(figsize=(15, 10))
sns.countplot(y="zipcode", data=df, order=df.zipcode.value_counts().index[0:50]);
###Output
_____no_output_____
###Markdown
**Observations**- Most of the values come from the `zipcode` 94720 with over 160.
###Code
def perc_on_bar(plot, feature):
    """
    Shows the percentage on top of each bar in the plot.
    plot: matplotlib Axes returned by sns.countplot
    feature: categorical feature
    The function won't work if a column is passed in the hue parameter
    """
    total = len(feature) # length of the column
    for p in plot.patches:
        # percentage = '{:.1f}%'.format(100 * p.get_height()/total) # alternative formatting
        percentage = 100 * p.get_height()/total # percentage of each class of the category
        percentage_label = f"{percentage:.1f}%"
        x = p.get_x() + p.get_width() / 2 - 0.05 # x position of the annotation
        y = p.get_y() + p.get_height() # height of the bar
        plot.annotate(percentage_label, (x, y), size = 12) # annotate the percentage
    plt.show() # show the plot
###Output
_____no_output_____
###Markdown
Observations on `family`
###Code
plt.figure(figsize=(15, 7))
ax = sns.countplot(df.family, palette='mako')
perc_on_bar(ax, df.family)
###Output
_____no_output_____
###Markdown
**Observations**- The largest category of the `family` column is 1 with a percentage of 29.4%.- The second largest category of the `family` column is a size of 2, then 4. A size of 3 is the smallest portion in our dataset. Observations on `education`
###Code
plt.figure(figsize=(15, 7))
ax = sns.countplot(df.education, palette='mako')
perc_on_bar(ax, df.education)
###Output
_____no_output_____
###Markdown
**Observations**- The `education` column has 3 categories.- Category 1 (undergrad) hold the greatest proportion with 41.9%.- Category 3 holds the second highest with 30%.- Category 2 holds the third highest proportion with 28.1%. Oberservations on `personal_loan`
###Code
plt.figure(figsize=(15, 7))
ax = sns.countplot(df.personal_loan, palette='mako')
perc_on_bar(ax, df.personal_loan)
###Output
_____no_output_____
###Markdown
**Observations**- Those that didn't accept a `personal_loan` from the last campaign make up the greatest percentage with 90.4%. Observations on `securities_account`
###Code
plt.figure(figsize=(15,7))
ax = sns.countplot(df.securities_account, palette='mako')
perc_on_bar(ax, df.securities_account)
###Output
_____no_output_____
###Markdown
**Observations**- Those customers without a `securities_account` make up the greatest proportion with 89.6%. Observations on `cd_account`
###Code
plt.figure(figsize=(15, 7))
ax = sns.countplot(df.cd_account, palette='mako')
perc_on_bar(ax, df.cd_account)
###Output
_____no_output_____
###Markdown
**Observations**- Those customers without a `cd_account` make up the greatest percentage with 94% Observations on `online`
###Code
plt.figure(figsize=(15, 7))
ax = sns.countplot(df.online, palette='mako')
perc_on_bar(ax, df.online)
###Output
_____no_output_____
###Markdown
**Observations**- Those customers that use `online` banking facilities makes up the majority with 59.7%. Observations on `credit_card`
###Code
plt.figure(figsize=(15, 7))
ax = sns.countplot(df.credit_card, palette='mako')
perc_on_bar(ax, df.credit_card)
###Output
_____no_output_____
###Markdown
**Observations**- Those customers that don't use `credit_cards` issued by other banks makes up the majority with 70.6%. Bivariate Analysis
###Code
## Function to plot stacked bar chart
def stacked_plot(x, y):
"""
Shows stacked plot from x and y pandas data series
x: pandas data series
y: pandas data series
"""
info = pd.crosstab(x, y, margins=True)
info['% - 0'] = round(info[0]/info['All']*100, 2)
info['% - 1'] = round(info[1]/info['All']*100, 2)
print(info)
print('='*80)
visual = pd.crosstab(x, y, normalize='index')
visual.plot(kind='bar', stacked=True, figsize=(10,5));
def show_boxplots(cols: list, feature: str, show_fliers=True, data=df): #method call to show bloxplots
n_rows = math.ceil(len(cols)/2)
plt.figure(figsize=(15, n_rows*5))
for i, variable in enumerate(cols):
plt.subplot(n_rows, 2, i+1)
if show_fliers:
sns.boxplot(data[feature], data[variable], palette="mako", showfliers=True)
else:
sns.boxplot(data[feature], data[variable], palette="mako", showfliers=False)
plt.tight_layout()
plt.title(variable, fontsize=12)
plt.show()
###Output
_____no_output_____
###Markdown
Correlation and heatmap
###Code
plt.figure(figsize=(12, 7))
sns.heatmap(df.corr(), annot=True, cmap="coolwarm");
###Output
_____no_output_____
###Markdown
**Observations**- `age` and `experience` are heavily positively correlated.- `ccavg` and `income` are positively correlated.
###Code
sns.pairplot(data=df[['age','income','zipcode','ccavg',
'mortgage','abs_experience','personal_loan']],
hue='personal_loan');
###Output
_____no_output_____
###Markdown
**Observations**
- The plot shows that income is higher among customers with personal loans.
- ccavg is also higher among customers with personal loans. We will investigate.
###Code
cols = ['age','income','ccavg','mortgage','abs_experience']
show_boxplots(cols, 'personal_loan')
###Output
_____no_output_____
###Markdown
Show without outliers in boxplots
###Code
show_boxplots(cols, 'personal_loan', show_fliers=False);
###Output
_____no_output_____
###Markdown
**Observations**
- On average, customers with higher incomes have personal loans.
- On average, customers with higher credit card usage have personal loans.
- 75% of customers with personal loans have mortgage payments of 500K or less.

`personal_loan` vs `family`
###Code
stacked_plot(df.family, df.personal_loan)
###Output
_____no_output_____
###Markdown
**Observations** - Those customers with a `family` of 4 have more `personal loans`. - A family of 3 have the second most personal loans followed by a family of 1 and 2. `personal_loan` vs `education`
###Code
stacked_plot(df.education, df.personal_loan)
###Output
_____no_output_____
###Markdown
**Observations**
- Customers with an education of '2' and '3' hold a greater percentage of personal loans than those customers with an education of '1'.

`personal_loan` vs `securities_account`
###Code
stacked_plot(df.securities_account, df.personal_loan)
###Output
_____no_output_____
###Markdown
**Observations**
- There is not much difference in securities accounts versus personal loans.

`personal_loan` vs `cd_account`
###Code
stacked_plot(df.cd_account, df.personal_loan)
###Output
_____no_output_____
###Markdown
**Observations**
- Those customers with cd accounts have a greater percentage of personal loans than those customers without a cd account.

`personal_loan` vs `online`
###Code
stacked_plot(df.online, df.personal_loan)
###Output
_____no_output_____
###Markdown
**Observations**
- There isn't much difference between customers who use online facilities and those who don't with respect to personal loans.

`personal_loan` vs `credit_card`
###Code
stacked_plot(df.credit_card, df.personal_loan)
###Output
_____no_output_____
###Markdown
**Observations**- There isn't much difference between those who have credit cards from other banks versus personal loans. `cd_account` vs `family`
###Code
stacked_plot(df.family, df.cd_account)
###Output
_____no_output_____
###Markdown
**Observations**- A family of 3 has the greatest percentage(8.12) of customers with cd accounts. `cd_account` vs `education`
###Code
stacked_plot(df.education, df.cd_account)
###Output
_____no_output_____
###Markdown
**Observations**
- There isn't much of a difference between education categories.

`cd_account` vs `securities_account`
###Code
stacked_plot(df.securities_account, df.cd_account)
###Output
_____no_output_____
###Markdown
**Observations**
- A greater percentage of those customers with securities accounts also have cd accounts compared to those customers that don't have securities accounts.

`cd_account` vs `online`
###Code
stacked_plot(df.online, df.cd_account)
###Output
_____no_output_____
###Markdown
**Observations**
- Customers who use the online facilities have a greater percentage of cd accounts than those customers who don't use online facilities.

`cd_account` vs `credit_card`
###Code
stacked_plot(df.credit_card, df.cd_account)
###Output
_____no_output_____
###Markdown
**Observations**- A greater percentage of those customers who have credit cards with other bank institutions have personal cd accounts than those customers who dont have credit cards from other institutions. Let us check which of these differences are statistically significant.The Chi-Square test is a statistical method to determine if two categorical variables have a significant correlation between them. **$H_0$:** There is no association between the two variables. **$H_a$:** There is an association between two variables.
###Code
def check_significance(feature1: str, feature2: str, data=df):
"""
Checks the significance of feature1 agaisnt feature2
feature1: column name
feature2: column name
data: pandas dataframe object (defaults to df)
"""
    crosstab = pd.crosstab(data[feature1], data[feature2]) # Contingency table of feature1 vs feature2
chi, p_value, dof, expected = stats.chi2_contingency(crosstab)
Ho = f"{feature1} has no effect on {feature2}" # Stating the Null Hypothesis
Ha = f"{feature1} has an effect on {feature2}" # Stating the Alternate Hypothesis
if p_value < 0.05: # Setting our significance level at 5%
print(f'{Ha.upper()} as the p_value ({p_value.round(3)}) < 0.05')
else:
print(f'{Ho} as the p_value ({p_value.round(3)}) > 0.05')
def show_significance(features: list, data=df):
"""
Prints out the significance of all the list of features passed.
features: list of column names
data: pandas dataframe object (defaults to df)
"""
for feature in features:
print("="*30, feature, "="*(50-len(feature)))
for col in list(data.columns):
if col != feature: check_significance(col , feature)
show_significance(['personal_loan', 'cd_account'])
###Output
_____no_output_____
###Markdown
Key Observations
* `cd_account`, `family` and `education` seem to be strong indicators of customers who received a personal loan.
* `securities_account`, `online` and `credit_card` seem to be strong indicators of customers who have cd accounts.
* Other factors appear to be not very good indicators of customers who have cd accounts.

Build Model, Train and Evaluate
1. Data preparation
2. Partition the data into train and test sets.
3. Build a CART model on the train data.
4. Tune the model and prune the tree, if required.
5. Test the model on the test set.
###Code
try:
df.drop(['experience'], axis=1, inplace=True)
except KeyError:
print(f"Column experience must already be dropped.")
df.head()
df_dummies = pd.get_dummies(df, columns=['education', 'family'], drop_first=True)
df_dummies.head()
df_dummies.info()
###Output
_____no_output_____
###Markdown
Partition Data
###Code
X = df_dummies.drop(['personal_loan'], axis=1)
X.head(10)
y = df_dummies['personal_loan']
y.head(10)
# Splitting data into training and test set:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
print("The shape of X_train: ", X_train.shape)
print("The shape of X_test: ", X_test.shape)
###Output
_____no_output_____
###Markdown
Build Initial Decision Tree Model
* We will build our model using the DecisionTreeClassifier function, using the default 'gini' criterion to split.
* If the frequency of class A is 10% and the frequency of class B is 90%, then class B will become the dominant class and the decision tree will become biased toward the dominant classes.
* In this case, we can pass a dictionary {0:0.15, 1:0.85} to the model to specify the weight of each class, and the decision tree will give more weight to class 1.
* class_weight is a hyperparameter of the decision tree classifier.
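As a rough illustration of what the class weights do (the node counts here are hypothetical, not taken from this data): with $w_0=0.15$ and $w_1=0.85$, the impurity calculation uses weighted class proportions,

$$p_k = \frac{w_k\,n_k}{\sum_j w_j\,n_j}, \qquad \text{Gini} = 1 - \sum_k p_k^2,$$

so a node with $n_0 = 90$ negatives and $n_1 = 10$ positives is treated as if $p_1 \approx 0.39$ rather than $0.10$, which makes splits that isolate the minority class look much more attractive.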
###Code
model = DecisionTreeClassifier(criterion='gini',
class_weight={0:0.15, 1:0.85},
random_state=1)
model.fit(X_train, y_train)
## Function to create confusion matrix
def make_confusion_matrix(model, y_actual, labels=[1, 0], xtest=X_test):
"""
model : classifier to predict values of X
y_actual : ground truth
"""
y_predict = model.predict(xtest)
cm = metrics.confusion_matrix(y_actual, y_predict, labels=[0, 1])
df_cm = pd.DataFrame(cm, index=["Actual - No","Actual - Yes"],
columns=['Predicted - No','Predicted - Yes'])
#print(df_cm)
#print("="*80)
group_counts = [f"{value:0.0f}" for value in cm.flatten()]
group_percentages = [f"{value:.2%}" for value in cm.flatten()/np.sum(cm)]
labels = [f"{gc}\n{gp}" for gc, gp in zip(group_counts, group_percentages)]
labels = np.asarray(labels).reshape(2,2)
plt.figure(figsize = (10, 7))
sns.heatmap(df_cm, annot=labels, fmt='')
plt.ylabel('True label', fontsize=14)
plt.xlabel('Predicted label', fontsize=14);
make_confusion_matrix(model, y_test)
y_train.value_counts(normalize=True)
###Output
_____no_output_____
###Markdown
**Observations**
- Only ~10% of the samples belong to the positive class, so even a model that marks every sample as negative reaches about 90% accuracy; accuracy is therefore not a good evaluation metric here.
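For intuition, a quick worked example with hypothetical counts: if 90 out of 100 samples are negative and a model predicts every sample as negative, then

$$\text{accuracy} = \frac{90}{100} = 0.90, \qquad \text{recall} = \frac{TP}{TP + FN} = \frac{0}{0 + 10} = 0,$$

which is why recall is used as the evaluation metric below.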
###Code
## Function to calculate recall score
def get_recall_score(model):
'''
Prints the recall score from model
model : classifier to predict values of X
'''
pred_train = model.predict(X_train)
pred_test = model.predict(X_test)
print("Recall on training set : ", metrics.recall_score(y_train, pred_train))
print("Recall on test set : ", metrics.recall_score(y_test, pred_test))
###Output
_____no_output_____
###Markdown
Recall score from baseline model.
###Code
# Recall on train and test
get_recall_score(model)
###Output
_____no_output_____
###Markdown
Visualizing the decision tree from baseline model
###Code
feature_names = list(X.columns)
print(feature_names)
plt.figure(figsize=(20, 30))
out = tree.plot_tree(model,
feature_names=feature_names,
filled=True,
fontsize=9,
node_ids=False,
class_names=None,)
#below code will add arrows to the decision tree split if they are missing
for o in out:
arrow = o.arrow_patch
if arrow is not None:
arrow.set_edgecolor('black')
arrow.set_linewidth(1)
plt.show()
# Text report showing the rules of a decision tree -
print(tree.export_text(model,feature_names=feature_names,show_weights=True))
###Output
_____no_output_____
###Markdown
Feature importance from baseline model
###Code
def importance_plot(model):
"""
Displays feature importance barplot
model: decision tree classifier
"""
importances = model.feature_importances_
indices = np.argsort(importances)
size = len(indices)//2 # to help scale the plot.
plt.figure(figsize=(10, size))
plt.title("Feature Importances", fontsize=14)
plt.barh(range(len(indices)), importances[indices], color='blue', align='center')
plt.yticks(range(len(indices)), [feature_names[i] for i in indices])
plt.xlabel("Relative Importance", fontsize=12);
importance_plot(model=model)
# importance of features in the tree building ( The importance of a feature is computed as the
#(normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance )
pd.DataFrame(model.feature_importances_,
columns=["Imp"],
index=X_train.columns).sort_values(by='Imp', ascending=False)
###Output
_____no_output_____
###Markdown
Using GridSearch for hyperparameter tuning of our tree model.
###Code
# Choose the type of classifier.
estimator = DecisionTreeClassifier(random_state=1, class_weight={0:.15,1:.85})
# Grid of parameters to choose from
parameters = {'max_depth': np.arange(1,10),
'criterion': ['entropy','gini'],
'splitter': ['best','random'],
'min_impurity_decrease': [0.000001,0.00001,0.0001],
'max_features': ['log2','sqrt']}
# Type of scoring used to compare parameter combinations
scorer = metrics.make_scorer(metrics.recall_score)
# Run the grid search
grid_obj = GridSearchCV(estimator, param_grid=parameters, scoring=scorer, cv=5)
grid_obj = grid_obj.fit(X_train, y_train)
# Set the clf to the best combination of parameters
estimator = grid_obj.best_estimator_
# Fit the best algorithm to the data.
estimator.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Confusion matrix using GridSearchCV
###Code
make_confusion_matrix(estimator, y_test)
###Output
_____no_output_____
###Markdown
Recall score using GridSearchCV
###Code
get_recall_score(estimator)
###Output
_____no_output_____
###Markdown
Visualizing the decision tree from the best fit estimator using GridSearchCV
###Code
plt.figure(figsize=(15, 10))
out = tree.plot_tree(estimator,
feature_names=feature_names,
filled=True,
fontsize=10,
node_ids=True,
class_names=None)
for o in out:
arrow = o.arrow_patch
if arrow is not None:
arrow.set_edgecolor('black')
arrow.set_linewidth(1)
plt.show()
# Text report showing the rules of a decision tree -
print(tree.export_text(estimator,
feature_names=feature_names,
show_weights=True))
###Output
_____no_output_____
###Markdown
Feature importance using GridSearchCV
###Code
# importance of features in the tree building ( The importance of a feature is computed as the
#(normalized) total reduction of the 'criterion' brought by that feature. It is also known as the Gini importance )
pd.DataFrame(estimator.feature_importances_,
columns=["Imp"],
index=X_train.columns).sort_values(by='Imp', ascending=False)
#Here we will see that importance of features has increased
importance_plot(model=estimator)
###Output
_____no_output_____
###Markdown
Cost Complexity Pruning

The `DecisionTreeClassifier` provides parameters such as ``min_samples_leaf`` and ``max_depth`` to prevent a tree from overfitting. Cost complexity pruning provides another option to control the size of a tree. In `DecisionTreeClassifier`, this pruning technique is parameterized by the cost complexity parameter, ``ccp_alpha``. Greater values of ``ccp_alpha`` increase the number of nodes pruned. Here we only show the effect of ``ccp_alpha`` on regularizing the trees and how to choose a ``ccp_alpha`` based on validation scores.

Total impurity of leaves vs effective alphas of pruned tree

Minimal cost complexity pruning recursively finds the node with the "weakest link". The weakest link is characterized by an effective alpha, where the nodes with the smallest effective alpha are pruned first. To get an idea of what values of ``ccp_alpha`` could be appropriate, scikit-learn provides `DecisionTreeClassifier.cost_complexity_pruning_path` that returns the effective alphas and the corresponding total leaf impurities at each step of the pruning process. As alpha increases, more of the tree is pruned, which increases the total impurity of its leaves.
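The criterion being minimized can be written out explicitly (this is the standard cost-complexity measure used by scikit-learn):

$$R_\alpha(T) = R(T) + \alpha\,|\widetilde{T}|,$$

where $R(T)$ is the total impurity of the leaves of tree $T$ and $|\widetilde{T}|$ is its number of terminal nodes, so larger values of $\alpha$ favor smaller trees.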
###Code
clf = DecisionTreeClassifier(random_state=1, class_weight = {0:0.15, 1:0.85})
path = clf.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas, impurities = path.ccp_alphas, path.impurities
pd.DataFrame(path)
fig, ax = plt.subplots(figsize=(15, 7))
ax.plot(ccp_alphas[:-1], impurities[:-1], marker='o', drawstyle="steps-post")
ax.set_xlabel("Effective alpha")
ax.set_ylabel("Total impurity of leaves")
ax.set_title("Total Impurity vs effective alpha for training set")
plt.show()
clfs = []
for ccp_alpha in ccp_alphas:
clf = DecisionTreeClassifier(random_state=1,
ccp_alpha=ccp_alpha,
class_weight = {0:0.15,1:0.85})
clf.fit(X_train, y_train)
clfs.append(clf)
print(f"Number of nodes in the last tree is: {clfs[-1].tree_.node_count} with ccp_alpha: {ccp_alphas[-1]}")
clfs = clfs[:-1]
ccp_alphas = ccp_alphas[:-1]
node_counts = [clf.tree_.node_count for clf in clfs]
depth = [clf.tree_.max_depth for clf in clfs]
fig, ax = plt.subplots(2, 1, figsize=(15, 10), sharex=True)
ax[0].plot(ccp_alphas, node_counts, marker='o', drawstyle="steps-post")
ax[0].set_ylabel("Number of nodes")
ax[0].set_title("Number of nodes vs alpha")
ax[1].plot(ccp_alphas, depth, marker='o', drawstyle="steps-post")
ax[1].set_xlabel("alpha")
ax[1].set_ylabel("depth of tree")
ax[1].set_title("Depth vs alpha")
fig.tight_layout()
recall_train = []
for clf in clfs:
pred_train3 = clf.predict(X_train)
values_train = metrics.recall_score(y_train, pred_train3)
recall_train.append(values_train)
recall_test = []
for clf in clfs:
pred_test3 = clf.predict(X_test)
values_test = metrics.recall_score(y_test, pred_test3)
recall_test.append(values_test)
train_scores = [clf.score(X_train, y_train) for clf in clfs]
test_scores = [clf.score(X_test, y_test) for clf in clfs]
fig, ax = plt.subplots(figsize=(15, 7))
ax.set_xlabel("alpha")
ax.set_ylabel("Recall")
ax.set_title("Recall vs alpha for training and testing sets")
ax.plot(ccp_alphas,
recall_train,
marker='o',
label="train",
drawstyle="steps-post",)
ax.plot(ccp_alphas,
recall_test,
marker='o',
label="test",
drawstyle="steps-post")
ax.legend()
plt.show()
# creating the model where we get highest train and test recall
index_best_model = np.argmax(recall_test)
best_model = clfs[index_best_model]
print(best_model)
best_model.fit(X_train, y_train)
make_confusion_matrix(best_model, y_test)
get_recall_score(best_model)
###Output
_____no_output_____
###Markdown
Visualizing the Decision Tree
###Code
plt.figure(figsize=(20, 8))
out = tree.plot_tree(best_model,
feature_names=feature_names,
filled=True,
fontsize=12,
node_ids=True,
class_names=None)
for o in out:
arrow = o.arrow_patch
if arrow is not None:
arrow.set_edgecolor('black')
arrow.set_linewidth(1)
plt.show()
# Text report showing the rules of a decision tree -
print(tree.export_text(best_model, feature_names=feature_names, show_weights=True))
importance_plot(model=best_model)
best_model2 = DecisionTreeClassifier(ccp_alpha=0.01,
class_weight={0: 0.15, 1: 0.85},
random_state=1)
best_model2.fit(X_train, y_train)
make_confusion_matrix(best_model2, y_test)
get_recall_score(best_model2)
plt.figure(figsize=(20, 8))
out = tree.plot_tree(best_model2,
feature_names=feature_names,
filled=True,
fontsize=12,
node_ids=True,
class_names=None)
for o in out:
arrow = o.arrow_patch
if arrow is not None:
arrow.set_edgecolor('black')
arrow.set_linewidth(1)
plt.show()
print(tree.export_text(best_model2, feature_names=feature_names, show_weights=True))
importance_plot(model=best_model2)
comparison_frame = pd.DataFrame({'Model':['Initial decision tree model','Decision tree with hyperparameter tuning',
'Decision tree with post-pruning'],
'Train_Recall':[1, 0.95, 0.99],
'Test_Recall':[0.91, 0.91, 0.98]})
comparison_frame
###Output
_____no_output_____ |
1_image_classification/Implementaion1.ipynb | ###Markdown
1. Using a pretrained VGG model
In this section we learn about **transfer learning** and **fine tuning**, which reuse a pretrained VGG model so that a deep learning model can be built even from a small amount of data.

Goals
1. Be able to load a model pretrained on the ImageNet dataset with PyTorch
2. Understand the VGG model
3. Be able to transform the size and color of input images

Preparation

Import libraries
###Code
import numpy as np
import json
from PIL import Image
import matplotlib.pyplot as plt
%matplotlib inline
import torch
import torchvision
from torchvision import models, transforms
import os
path = 'drive/MyDrive/pytorch_deeplearning/pytorch_advanced/1_image_classification/'
!ls drive/MyDrive/pytorch_deeplearning/pytorch_advanced/
print(torch.__version__)
print(torchvision.__version__)
###Output
1.10.0+cu111
0.11.1+cu111
###Markdown
Load the pretrained VGG model
Classify an image of a golden retriever using the pretrained VGG-16 model.
###Code
# 学習済みVGG-16モデルをロード
net = models.vgg16(pretrained=True)
net.eval() # 推論モードに設定
print(net) # ネットワークの構造を出力
###Output
Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.cache/torch/hub/checkpoints/vgg16-397923af.pth
###Markdown
The VGG-16 network is split into two modules, features and classifier, and each module contains convolutional layers (Conv2d) and fully connected layers (Linear). The features module extracts features, and the classifier module performs classification (class prediction).

Create a preprocessing class for input images
Create the preprocessing step needed to feed images into the VGG model. Images fed to the VGG model must be preprocessed.

Preprocessing
1. Resize the image to $224\times224$
2. Normalize the color information

In step 2, the RGB values are standardized with mean (0.485, 0.456, 0.406) and standard deviation (0.229, 0.224, 0.225). These normalization constants are computed from the training data of the ILSVRC2012 dataset. Since VGG-16 was trained with this normalization, the same normalization must be applied here.

An implementation caveat is that PyTorch and Pillow order the image elements differently:
- PyTorch handles images as (color channels, height, width)
- PIL handles images as (height, width, color channels)

We therefore reorder the axes with numpy.transpose.
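As a small, self-contained illustration of this axis reordering (using a dummy array rather than a real image):

```python
# Dummy tensor shaped like a preprocessed image: (channels, height, width)
import numpy as np

chw = np.zeros((3, 224, 224))
hwc = chw.transpose((1, 2, 0))  # reorder to (height, width, channels) for PIL/Matplotlib
print(chw.shape, '->', hwc.shape)  # (3, 224, 224) -> (224, 224, 3)
```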
###Code
# 入力画像の前処理のクラス
class BaseTransform():
'''画像をリサイズし,色を標準化する esize the image and standardize the colors
Attributes
------------------------
resize: int
リサイズ先の画像の大きさ The size of the image to be resized
mean: (R,G,B) tupple
各色チャネルの平均値 Mean for each color channel
std: (R,G,B) tupple Standard deviation for each color channel
各色チャネルの標準偏差
'''
def __init__(self, resize, mean, std):
self.base_transform = transforms.Compose(transforms=[
transforms.Resize(resize), # 短い辺の長さがresizeの大きさになる
transforms.CenterCrop(resize), # 画像中央をresize×resizeで切り取り
transforms.ToTensor(), # Torchテンソルに変換
transforms.Normalize(mean, std) # 標準化
])
def __call__(self, img): # インスタンス名で実行すると__call__が実行されるメソッド名を記述する手間が省ける
return self.base_transform(img)
# 前処理の動作確認
# 1. 読み込み
image_file_path = path+'data/goldenretriever-3724972_640.jpg'
img = Image.open(image_file_path)
# 2. 元画像の表示
plt.imshow(img)
plt.show()
# 3. 画像の前処理と処理済み画像の表示
resize = 224
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
transform = BaseTransform(resize, mean, std)
img_transformed = transform(img) # torch.size([3, 224, 224])
# (color, height, width) to (height, width, color) and Normalize to 0,1
img_transformed = img_transformed.numpy().transpose((1,2,0))
img_transformed = np.clip(img_transformed, 0, 1) # normalization
plt.imshow(img_transformed)
plt.show()
###Output
_____no_output_____
###Markdown
Create a post-processing class that predicts the label from the model output
Create the ILSVRCPredictor class, which converts the 1000-dimensional output of the VGG-16 model into a label name. For the ILSVRC label names, we use the prepared JSON file "imagenet_class_index.json". The value output by the VGG model is a tensor of torch.Size([1, 1000]); we convert it to a NumPy array and then use np.argmax to get the index with the highest probability.
###Code
os.listdir(path+'data/')
# ILSVRCラベル情報をロード
ILSVRC_class_index = json.load(open(path+'data/imagenet_class_index.json', 'r'))
ILSVRC_class_index
class ILSVRCPredictor():
'''ILSVRデータに対するモデルの出力からラベルを求める
Attributes
----------------
class_index: dictionary
クラスindexとラベル名を対応させた辞書
'''
def __init__(self, class_index):
self.class_index = class_index
def predict_max(self, out):
'''確率最大のILSVRCラベル名を取得する
Parameters
-----------------
out: tourch.Size([1, 1000])
Returns
----------------
predicted_label_name: str
最も予測確率が高いラベルの名前
'''
maxid = np.argmax(out.detach().numpy()) # detach()でネットワークから出力を切り離す
predicted_label_name = self.class_index[str(maxid)][1]
return predicted_label_name
###Output
_____no_output_____
###Markdown
Predict a local image with the pretrained VGG model
Prediction flow: input image → BaseTransform (preprocessing) → VGG model (prediction) → ILSVRCPredictor (retrieve the label with the highest predicted probability)
###Code
# ILSVRラベル情報をロードし辞書が多変数を生成
ILSVRC_class_index = json.load(open(path+'data/imagenet_class_index.json', 'r'))
# ILSVRPredictorのインスタンスを生成
predictor = ILSVRCPredictor(ILSVRC_class_index)
# 入力画像を読み込む
image_file_path = path + 'data/goldenretriever-3724972_640.jpg'
img = Image.open(image_file_path)
# 前処理+バッチサイズの次元を追加
transform = BaseTransform(resize, mean, std)
img_transformed = transform(img)
inputs = img_transformed.unsqueeze_(0)
# モデルに入力し、モデル出力をラベルに変換する
out = net(inputs)
result = predictor.predict_max(out)
# 予測結果を出力
print(f'出力画像の予測結果: {result}')
###Output
出力画像の予測結果: golden_retriever
###Markdown
In this way, a pretrained model can be used as-is to predict the class label of an unseen image from the 1000 ILSVRC classes.

2. The flow of a deep learning implementation in PyTorch
In practice, the labels of the images we want to predict are naturally different from the 1000 classes prepared for ILSVRC, so the deep learning model must be retrained using our own data. In this section we learn the flow of a deep learning implementation in PyTorch; from the next section on, we learn how to retrain a neural network with our own data.

Goals of this section
1. Understand PyTorch's Dataset and DataLoader
2. Understand the flow of implementing deep learning with PyTorch
###Code
# ライブラリインポート
import numpy as np
import json
from PIL import Image
import matplotlib.pyplot as plt
%matplotlib inline
import torch
import torchvision
from torchvision import models, transforms
import os
par_path = 'drive/MyDrive/pytorch_deeplearning/pytorch_advanced/'
path = par_path+'1_image_classification/'
orig_path = os.path.abspath('/content/')
os.chdir(orig_path+'/'+path)
cur_path = os.path.abspath(os.curdir)
print(orig_path)
print(cur_path)
from flowchart import flowchart
from IPython.display import SVG
g = flowchart(fontsize=20, dpi=4, figsize=(3,1))
SVG(g)
import warnings
warnings.filterwarnings('ignore')
nodes = [
{'name': 0, 'shape':'box', 'description':'Check pre-processing, \npost-processing, and network model I / O'},
{'name': 1, 'shape':'terminal', 'description':'create Dataset'},
{'name': 2, 'shape':'terminal', 'description':'create DataLoader'},
{'name': 3, 'shape':'terminal', 'description':'create Network model'},
{'name': 4, 'shape':'terminal', 'description':'def foward'},
{'name': 5, 'shape':'terminal', 'description':'def loss'},
{'name': 6, 'shape':'terminal', 'description':'setting opt method'},
{'name': 7, 'shape':'terminal', 'description':'train and eval'},
{'name': 8, 'shape':'terminal', 'description':'inference using test data'}
]
edges = [
{'start':(0,'S'),'end':(1,'N')},
{'start':(1,'S'),'end':(2,'N')},
{'start':(2,'S'),'end':(3,'N')},
{'start':(3,'S'),'end':(4,'N')},
{'start':(4,'S'),'end':(5,'N')},
{'start':(5,'S'),'end':(6,'N')},
{'start':(6,'S'),'end':(7,'N')},
{'start':(7,'S'),'end':(8,'N')},
]
g = flowchart(nodes,edges, fontsize=10, dpi=4, figsize=(4,8))
SVG(g)
###Output
_____no_output_____
###Markdown
1. **Check the preprocessing, post-processing, and network model I/O**
2. **Create the Dataset** A Dataset is a class that holds inputs and labels as pairs. Create a Dataset for the training data and for the validation data.
3. **Create the DataLoader** A DataLoader is a class that configures how data is drawn from a Dataset. Deep learning uses mini-batch training, where several samples are taken from the Dataset at once to train the network; the DataLoader makes it easy to extract these mini-batches.
4. **Create the network model**
 - Building everything from scratch yourself
 - Loading and using a pretrained model
 - Modifying a pretrained model yourself
5. **Define the forward pass** If the model is simple, the forward pass just propagates from the first layer to the last, but applied deep learning models are often complex, for example with branching networks. Define forward carefully so that the forward pass of a complex network can be computed.
6. **Define the loss function**
 - Define the loss function so that backpropagation can be computed
 - For simple networks, e.g. squared error
 - For complex networks, the loss function also becomes complex
7. **Set the optimization method**
 - Choose the optimization method used to learn the connection weights
 - Backpropagation gives the gradient of the error with respect to the connection weights
 - Momentum SGD is one example of an optimization method
8. **Run training and validation**
 - Check performance on the training data and the validation data every epoch
 - Stop training if performance on the validation data stops improving (to prevent overfitting)
 - Stopping training early like this is called early stopping
9. **Inference on test data**
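A minimal, generic sketch of steps 2 and 3 (Dataset and DataLoader) with dummy tensors, independent of the ant/bee data used later:

```python
import torch
import torch.utils.data as data

class ToyDataset(data.Dataset):
    """Tiny Dataset holding (input, label) pairs of random dummy data."""
    def __init__(self, n=8):
        self.x = torch.randn(n, 3)          # dummy inputs
        self.y = torch.randint(0, 2, (n,))  # dummy binary labels

    def __len__(self):
        return len(self.x)

    def __getitem__(self, index):
        return self.x[index], self.y[index]

loader = data.DataLoader(ToyDataset(), batch_size=4, shuffle=True)
for xb, yb in loader:
    print(xb.shape, yb.shape)  # torch.Size([4, 3]) torch.Size([4])
```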
###Code
# forループの経過時間と残りの時間を計測するtqdmをインストール
!pip install tqdm
# ライブラリのインポート
import glob
import os.path as osp
import random
import numpy as np
import json
from PIL import Image
from tqdm import tqdm
import matplotlib.pyplot as plt
%matplotlib inline
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import torchvision
from torchvision import models, transforms
# 乱数シードを設定
torch.manual_seed(1234)
np.random.seed(1234)
random.seed(1234)
MEAN = (0.485, 0.456, 0.406)
STD = (0.229, 0.224, 0.225)
###Output
_____no_output_____
###Markdown
Create the Dataset
1. Create the image preprocessing class ImageTransform
2. Create the function make_datapath_list, which stores the file paths to the images in a list
3. Create the Dataset class HymenopteraDataset

For a simple image classification task like this one, the easiest way to create a Dataset is to use the torchvision.datasets.ImageFolder class. Here we instead learn how to write our own Dataset class so that the approach also works in more general cases.

1. Creating the preprocessing class ImageTransform
- Apply different preprocessing at training time and at inference time
- Apply data augmentation at training time
  - Data augmentation is a technique that inflates the data by applying a different image transformation in each epoch
- Use RandomResizedCrop and RandomHorizontalFlip for the training-time preprocessing
###Code
# 入力画像の前処理を行うクラス
# 訓練時と推論時で処理が異なる
class ImageTransform():
'''
画像の前処理クラス 訓練時、検証時で異なる動作をする
画像のサイズをリサイズし、色を標準化する
訓練時はRandomResizedCropとRandomHorizontakFlipでデータ拡張を行う
Attributes
--------------------
resize: int
リサイズ先の画像の大きさ
mean: (R, G, B)
各色チャネルの平均値
std: (R, G, B)
各色チャネルの標準偏差
'''
def __init__(self, resize, mean, std):
self.data_transform = {
'train': transforms.Compose([ # for training
transforms.RandomResizedCrop(
resize, scale=(0.5, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean, std)
]),
'val': transforms.Compose([ # for validation
transforms.Resize(resize), # リサイズ
transforms.CenterCrop(resize), # 画像中央をresize×resizeで切り取る
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
}
def __call__(self, img, phase='train'):
'''
Parameters
---------------
phase: 'train' or 'val'
前処理のモードを指定
'''
return self.data_transform[phase](img)
os.listdir(path+'data/')
# ImageTransformの動作確認
# 実行するたびに処理結果の画像が変化する
# read path of file
image_file_path = path+'data/goldenretriever-3724972_640.jpg'
img = Image.open(image_file_path)
# show original image
plt.imshow(img)
plt.show()
# show resized image
size = 224
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
# rsize
transform = ImageTransform(size, mean, std)
img_transformed = transform(img, 'train')
img_transformed = img_transformed.numpy().transpose((1,2,0)) # height, width, channel
img_transformed = np.clip(img_transformed, 0, 1) # normalize
plt.imshow(img_transformed)
plt.show()
print([p for p in glob.glob(path+'data/hymenoptera_data/**', recursive=True) if os.path.isfile(p)])
# アリとハチの画像へのファイルパスへのリストを作成
import os
def make_detapath_list(phase='train'):
'''
データのパスを格納したリストを作成
Parameters
--------------------------
phase: 'train' or 'val'
訓練モードか検証モードか
Returns
--------------------------
path_list: list
データへのパスを格納したリスト
'''
rootpath ='/content/drive/MyDrive/pytorch_deeplearning/pytorch_advanced/1_image_classification/data/hymenoptera_data/'
target_path = os.path.join(rootpath + phase + '/**/*.jpg')
print(target_path)
path_list = []
# サブディレクトリまでファイルを取得
for path in glob.glob(target_path):
path_list.append(path)
return path_list
train_list = make_detapath_list('train')
val_list = make_detapath_list('val')
train_list
len(train_list[0])
print(train_list[0][30:34])
['ants' if 'ants' in path else 'bees' for path in train_list]
###Output
_____no_output_____
###Markdown
Create the Dataset
label = 0 when the image is an ant, label = 1 when the image is a bee (matching the implementation below).
Implement the following methods:
- \__getitem__(): retrieve one sample from the Dataset
- \__len__(): return the number of files in the Dataset
###Code
# アリとハチのDatasetを作成
class HymenopteraDataseet(data.Dataset):
'''
アリとハチの画像のDatasetクラス PyTorchのDatasetクラスを継承
Attributes
--------------------------
file_list: list
画像のパスを格納したリスト
transform: object
前処理クラスのインスタンス
phase: 'train' or 'val'
training or validation
'''
def __init__(self, file_list, transform=None, phase='train'):
self.file_list = file_list
self.transform = transform
self.phase = phase
def __len__(self):
'''Datasetのファイル数を作成'''
return len(self.file_list)
def __getitem__(self, index):
'''
前処理した画像のTensor形式のデータとラベルを取得
'''
# index番目の画像をロード
img_path = self.file_list[index]
img = Image.open(img_path) # height, width, channel
# 画像の前処理を実施
img_transformed = self.transform(img, self.phase) # channel, height, width
# 画像のラベルをファイル名から抜き出す
label = 'ants' if 'ants' in img_path else 'bees'
# ラベルを数値に変換
if label == 'ants':
label=0
elif label == 'bees':
label=1
return img_transformed, label
size = 224
mean = MEAN
std = STD
train_dataset = HymenopteraDataseet(file_list=train_list, transform=ImageTransform(size, mean, std), phase='train')
val_dataset = HymenopteraDataseet(file_list=val_list, transform=ImageTransform(size, mean, std), phase='val')
index = 0
print(train_dataset.__getitem__(index)[0].size()) # データ
print(train_dataset.__getitem__(index)[1]) # ラベル
###Output
torch.Size([3, 224, 224])
0
###Markdown
Create the DataLoader
- Use the torch.utils.data.DataLoader class as-is
- Create two DataLoaders, one for training and one for validation
- Set shuffle=True for the training DataLoader so that the images are drawn in random order
###Code
# ミニバッチのサイズを指定
batch_size = 32
# DataLoaderを作成
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True
)
val_dataloader = torch.utils.data.DataLoader(
val_dataset, batch_size=batch_size, shuffle=False
)
# 辞書型変数にまとめる
dataloaders_dict = {'train': train_dataloader, 'val': val_dataloader}
# 動作確認
batch_iterator = iter(dataloaders_dict['train']) # イテレータに変換
inputs, labels = next(batch_iterator)
print(inputs.size())
print(labels)
print(len(labels))
###Output
torch.Size([32, 3, 224, 224])
tensor([0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0,
1, 1, 1, 0, 0, 1, 1, 1])
32
###Markdown
Create the network model
Load the VGG-16 model and build our model from it. Since we now need 2 output units instead of 1000, we replace the last fully connected layer of the VGG classifier module.
###Code
models.vgg16(True).classifier
# 学習済みのVGG-16モデルをロード
# VGGモデルのインスタンスを作成
use_pretrained = True
net = models.vgg16(use_pretrained)
# VGG16モデルの最後の出力層の出力ユニットをアリとハチの2つに付け替える
net.classifier[6] = nn.Linear(in_features=4096, out_features=2)
# 訓練モードに設定
net.train()
print('Network settign has completed !')
###Output
Network settign has completed !
###Markdown
Define the loss function
Since this image classification task is an ordinary multi-class classification, we use the cross-entropy loss. The cross-entropy criterion applies the softmax function to the output of the fully connected layer and then computes the negative log likelihood loss used for classification.
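Written out, for a logit vector $z$ produced by the last fully connected layer and the true class index $y$, the criterion computes

$$\mathrm{CE}(z, y) = -\log\frac{e^{z_y}}{\sum_{k} e^{z_k}},$$

i.e. softmax followed by the negative log likelihood, averaged over the mini-batch by default.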
###Code
criterion = nn.CrossEntropyLoss()
###Output
_____no_output_____
###Markdown
Set the optimization method
- Specify which parameters are learned (updated) in transfer learning
- Setting requires_grad=True on a network parameter means its gradient is computed by backpropagation and the parameter is updated
- Setting requires_grad=False means the parameter is not updated; this is also the setting used for parameters kept fixed during validation and inference
###Code
list(net.named_parameters())
net.classifier.named_parameters()
# 学習で更新するパラメータを格納
params_to_update = []
# 学習させるパラメータ名
update_params_names = ['classifier.6.weight', 'classifier.6.bias']
for name, param in net.named_parameters():
if name in update_params_names:
param.requires_grad = True
params_to_update.append(param)
print(name)
else:
param.requires_grad = False
print('----------------------')
print(params_to_update)
# 最適化手法の設定
optimizer = optim.SGD(params=params_to_update, lr=0.001, momentum=0.9)
###Output
_____no_output_____
###Markdown
Training and validation
- train_model is the function that trains the model
  - Training and validation are repeated every epoch
  - net is put in train mode during training and in eval mode during validation
- loss records the mean loss of a mini-batch, so to accumulate the total loss it must be multiplied by the mini-batch size
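A worked example with made-up numbers: if one mini-batch of 32 samples has `loss.item() = 0.25`, it contributes $0.25 \times 32 = 8.0$ to `epoch_loss`; dividing the accumulated sum by the dataset size at the end of the epoch then gives the per-sample average loss,

$$\text{epoch\_loss} = \frac{\sum_b \bar{\ell}_b \, |B_b|}{N}.$$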
###Code
# モデルを訓練させる関数
def train_model(net, dataloaders_dict, criterion, optimizer, num_epochs):
# epochのループ
for epoch in range(num_epochs):
print(f'{epoch+1}/{num_epochs}')
print('-----------------------')
for phase in ['train', 'val']:
if phase == 'train':
net.train()
else:
net.eval()
epoch_loss = 0.0 # epochの損失和
epoch_corrects = 0 # epochの正解数
# 未学習時の検証性能をたしかめるためepoch=0の訓練は省略
if (epoch==0) and (phase=='train'):
continue
# Dataloaderからミニバッチを取りだす
for inputs, labels in tqdm(dataloaders_dict[phase]):
# optimizerを初期化
optimizer.zero_grad()
# 順伝播
with torch.set_grad_enabled(phase=='train'):
outputs = net(inputs)
loss = criterion(outputs, labels)
_, preds = torch.max(outputs, 1) # ラベル torch.maxは最大値と最大インデックスのタプルを返す
# 訓練時はbachpropagation
if phase == 'train':
loss.backward()
optimizer.step()
# イテレーション結果の確認
# lossの合計を更新
epoch_loss += loss.item() * inputs.size(0) # ミニバッチ分足す loss.item()はミニバッチの平均値になっている
epoch_corrects += torch.sum(preds==labels.data) # ミニバッチごとに足す
# epochごとにlossと正解率を表示
epoch_loss = epoch_loss / len(dataloaders_dict[phase].dataset)
epoch_acc = epoch_corrects.double() / len(dataloaders_dict[phase].dataset)
print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')
# 学習・検証を実施
num_epochs = 2
train_model(net, dataloaders_dict, criterion=criterion, optimizer=optimizer, num_epochs=num_epochs)
len(dataloaders_dict['train'].dataset)
n = torch.rand(4,5)
n
_, index = torch.max(n, 1)
index
m = torch.tensor([0,1,4,2])
index == m
###Output
_____no_output_____
###Markdown
4. Implementing fine-tuning
Learn how to use fine-tuning to train on the small amount of data at hand and build an original image classification model.

Goals
- Be able to write an implementation that uses the GPU with PyTorch
- Be able to implement fine-tuning with a different learning rate per layer group when setting up the optimizer
- Be able to save and load the trained network

Fine-tuning
**`Fine-tuning`** builds a model based on a pretrained model with the output layer (and possibly other layers) replaced, and trains the network's connection weights on your own data, using the pretrained parameters as initial values.

Unlike transfer learning, **`fine-tuning`** retrains the parameters of **all layers**, not only the output layer and the layers close to it. However, it is common to give the layers near the input a small learning rate (or not retrain them at all) and to give the layers near the output a larger learning rate. Like transfer learning, it has the advantage that a well-performing model can be obtained even with a small amount of your own data.

Create the Dataset and DataLoader
Same as for transfer learning.
###Code
import os
os.chdir('/content/drive/MyDrive/pytorch_deeplearning/pytorch_advanced/1_image_classification')
!pwd
from utils.dataloader_image_classification import ImageTransform, make_datapath_list, HymenopteraDataset
# アリとハチの画像へのファイルパスのリストを作成
train_list = make_datapath_list(phase='train')
val_list = make_datapath_list(phase='val')
# Datasetを作成
size = 224
mean = MEAN
std = STD
train_dataset = HymenopteraDataset(train_list, transform=ImageTransform(size, mean, std), phase='train')
val_dataset = HymenopteraDataset(val_list, transform=ImageTransform(size, mean, std), phase='val')
# DataLoaderを作成
batch_size=32
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
dataloaders_dict = {'train':train_dataloader, 'val':val_dataloader}
# ネットワークモデルを作成
use_pretrained = True
net = models.vgg16(use_pretrained)
net.classifier[6] = nn.Linear(4096, 2)
net.train()
print('Network setting has completed !')
# 損失関数の設定
criterion = nn.CrossEntropyLoss()
###Output
_____no_output_____
###Markdown
Set the optimization method
For fine-tuning, the optimizer setup differs from transfer learning: configure it so that the parameters of all layers can be trained.
- update_param_name_1: parameters of the features module
- update_param_name_2: parameters of the first two fully connected layers of the classifier module
- update_param_name_3: parameters of the last fully connected layer of the classifier module

Each group is trained with its own learning rate.
###Code
net
[list(net.named_parameters())[i][0] for i in range(len(list(net.named_parameters())))]
# ファインチューニングで学習するパラメータを設定
params_to_update_1 = []
params_to_update_2 = []
params_to_update_3 = []
# 学習させる層のパラメータを設定
update_param_name_1 = ['features'] # featuresすべて
update_param_name_2 = ['classifier.0.weights', 'classifier.0.bias', 'classifier.3.weight', 'classifier.3.bias'] # classifierの全結合層
update_param_name_3 = ['classifier.6.weight', 'classifier.6.bias'] # classifierの出力層
for name, param in net.named_parameters():
if update_param_name_1[0] in name:
param.requires_grad = True
params_to_update_1.append(param)
elif name in update_param_name_2:
param.requires_grad = True
params_to_update_2.append(param)
elif name in update_param_name_3:
param.requires_grad = True
params_to_update_3.append(param)
else:
param.requires_grad = False
# 学習率を設定
optimizer = optim.SGD([
{'params': params_to_update_1, 'lr': 1e-4},
{'params': params_to_update_2, 'lr': 5e-4},
{'params': params_to_update_3, 'lr': 1e-3}
], momentum=0.9)
###Output
_____no_output_____
###Markdown
Run training and validation
Define train_model so that it uses the GPU. `device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')` makes the GPU available, and calling .to(device) on the network model and on tensors switches the computation to the GPU. In PyTorch, if the forward pass and loss computation are roughly the same from iteration to iteration, setting `torch.backends.cudnn.benchmark = True` speeds up computation on the GPU.
###Code
# モデルを学習させる関数を作成
def train_model(net, dataloaders_dict, criterion, optimizer, num_epochs):
# 初期設定
# GPUの設定
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('using device: ', device)
# ネットワークをGPUへ
net.to(device)
# GPUでの計算を高速化させる(epochごとの処理がある程度共通の場合)
torch.backends.cudnn.benchmark = True
for epoch in range(num_epochs):
print(f'Epoch: {epoch+1}/{num_epochs}')
print('-----------------------------')
for phase in ['train', 'val']:
if phase == 'train':
net.train() # 訓練モード
else:
net.eval() # 検証モード
epoch_loss = 0.0
epoch_corrects = 0
# 未学習時は損失を計算しない
if (epoch==0) and (phase=='train'):
continue
# DataLoaderからミニバッチを取り出す
for inputs, labels in tqdm(dataloaders_dict[phase]):
# GPU設定
inputs = inputs.to(device)
labels = labels.to(device)
# optimizerを初期化
optimizer.zero_grad()
# 順伝播
# 訓練モードの場合は勾配を保持
with torch.set_grad_enabled(phase=='train'):
outputs = net(inputs)
loss = criterion(outputs, labels)
_, preds = torch.max(outputs, 1)
# 訓練時はバックプロパゲーション
if phase =='train':
loss.backward() # compute gradients
optimizer.step() # update parameters
epoch_loss += loss.item() * inputs.size(0) # accumulate the loss over the whole mini-batch
epoch_corrects += torch.sum(preds == labels)
# Display the loss and accuracy for each epoch
epoch_loss /= len(dataloaders_dict[phase].dataset)
epoch_acc = epoch_corrects.double() / len(dataloaders_dict[phase].dataset)
print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')
# Contents of the dataloader labels, and what .data returns
data, labels = next(iter(dataloaders_dict['train']))
print(labels)
print(labels.data)
# Difference between a dataloader and a dataset
# A dataloader splits the dataset into shuffled mini-batches and serves them
print(dataloaders_dict['train'].dataset)
print(dataloaders_dict['train'])
print(len(dataloaders_dict['train'].dataset))
print(len(dataloaders_dict['train']))
# Run training and validation
num_epochs = 5
train_model(net, dataloaders_dict, criterion, optimizer, num_epochs=num_epochs)
###Output
using device: cuda:0
Epoch: 1/5
-----------------------------
###Markdown
Saving and loading the trained network- To save: call .state_dict() on the network model net to extract its parameters as a dictionary, then write them out with torch.save()- To load: read the dictionary object back with torch.load() and store it in the network with load_state_dict()To load a file that was saved on a GPU onto a CPU, use map_location.
###Code
# Save the parameters
save_path = './weights_fine_tuning.pth'
torch.save(net.state_dict(), save_path)
os.listdir(os.curdir)
# Load the saved parameters
load_path = './weights_fine_tuning.pth'
load_weights = torch.load(load_path)
net.load_state_dict(load_weights)
print(next(iter(net.parameters())))
# To load weights that were saved on a GPU onto a CPU
'''load_weights = torch.load(load_path, map_location={'cuda:0': 'cpu'})
net.load_state_dict(load_weights)
print(next(iter(net.parameters())))'''
###Output
_____no_output_____ |
analysis/01__mpra/09__cis_trans_effects/09__cis_trans_effects.ipynb | ###Markdown
09__cis_trans_effectsin this notebook, i investigate the co-occurrence of cis and trans effects
###Code
import warnings
warnings.filterwarnings('ignore')
import itertools
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import sys
from itertools import combinations
from scipy.integrate import cumtrapz
from scipy.stats import linregress
from scipy.stats import spearmanr
from scipy.stats import pearsonr
from scipy import stats
#from sklearn.preprocessing import StandardScaler
#from sklearn.neighbors import NearestNeighbors
# import utils
sys.path.append("../../../utils")
from plotting_utils import *
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
mpl.rcParams['figure.autolayout'] = False
sns.set(**PAPER_PRESET)
fontsize = PAPER_FONTSIZE
np.random.seed(2019)
QUANT_ALPHA = 0.05
###Output
_____no_output_____
###Markdown
functions
###Code
def cis_trans_status(row):
if row.cis_status_one == "significant cis effect":
if row.trans_status_one == "significant trans effect":
if "higher in human" in row.cis_status_det_one:
if "higher in human" in row.trans_status_det_one:
return "cis/trans directional"
else:
return "cis/trans compensatory"
else:
if "higher in human" in row.trans_status_det_one:
return "cis/trans compensatory"
else:
return "cis/trans directional"
else:
return "cis effect only"
else:
if row.trans_status_one == "significant trans effect":
return "trans effect only"
else:
return "no cis or trans effects"
###Output
_____no_output_____
###Markdown
variables
###Code
data_f = "../../../data/02__mpra/03__results/all_processed_results.txt"
###Output
_____no_output_____
###Markdown
1. import data
###Code
data = pd.read_table(data_f, sep="\t")
data.head()
data["cis_trans_status"] = data.apply(cis_trans_status, axis=1)
###Output
_____no_output_____
###Markdown
2. filter data
###Code
data = data[~pd.isnull(data["minimal_biotype_hg19"])]
len(data)
data_filt = data[((data["HUES64_padj_hg19"] < QUANT_ALPHA) | (data["mESC_padj_mm9"] < QUANT_ALPHA))]
len(data_filt)
data_filt_sp = data_filt.drop("orig_species", axis=1)
data_filt_sp.drop_duplicates(inplace=True)
len(data_filt_sp)
data_filt_sp.cis_trans_status.value_counts()
###Output
_____no_output_____
###Markdown
3. count cis trans effects
###Code
# fisher's exact to see if cis/trans effects are enriched
cis_trans = len(data_filt_sp[data_filt_sp["cis_trans_status"].isin(["cis/trans directional", "cis/trans compensatory"])])
cis_no_trans = len(data_filt_sp[data_filt_sp["cis_trans_status"] == "cis effect only"])
trans_no_cis = len(data_filt_sp[data_filt_sp["cis_trans_status"] == "trans effect only"])
n_no_cis_trans = len(data_filt_sp[data_filt_sp["cis_trans_status"] == "no cis or trans effects"])
# fisher's exact test
arr = np.zeros((2, 2))
arr[0, 0] = cis_trans
arr[0, 1] = cis_no_trans
arr[1, 0] = trans_no_cis
arr[1, 1] = n_no_cis_trans
print(arr)
odds, p = stats.fisher_exact(arr)
print(odds)
print(p)
stats.binom_test(95, 159)
cis_trans = data_filt_sp[data_filt_sp["cis_trans_status"].isin(["cis/trans directional", "cis/trans compensatory"])]
tmp = cis_trans[((cis_trans["minimal_biotype_hg19"] == "mRNA") & (cis_trans["minimal_biotype_mm9"] == "mRNA"))]
tmp.cis_trans_status.value_counts()
tmp = cis_trans[((cis_trans["minimal_biotype_hg19"] == "lncRNA") & (cis_trans["minimal_biotype_mm9"] == "lncRNA"))]
tmp.cis_trans_status.value_counts()
###Output
_____no_output_____
###Markdown
4. look at directionality of cis/trans
###Code
min_switch_order = ["CAGE turnover - eRNA", "CAGE turnover - lncRNA", "CAGE turnover - mRNA",
"eRNA", "lncRNA", "mRNA"]
min_switch_pal = {"CAGE turnover - eRNA": sns.color_palette("Set2")[2],
"CAGE turnover - lncRNA": sns.color_palette("Set2")[2],
"CAGE turnover - mRNA": sns.color_palette("Set2")[2],
"eRNA": sns.color_palette("Set2")[7],
"lncRNA": sns.color_palette("Set2")[7],
"mRNA": sns.color_palette("Set2")[7]}
def cage_status(row):
if "CAGE turnover" in row.biotype_switch_minimal:
return "turnover"
else:
return "conserved"
def one_biotype(row):
if row.minimal_biotype_hg19 == "no CAGE activity":
return row.minimal_biotype_mm9
elif row.biotype_switch_minimal == "biotype switch":
return "biotype switch"
else:
return row.minimal_biotype_hg19
pal = {"conserved": sns.color_palette("Set2")[7], "turnover": sns.color_palette("Set2")[2]}
df = data_filt_sp
res = {}
cis_trans = df[(df["cis_status_one"] == "significant cis effect") &
(df["trans_status_one"] == "significant trans effect")]
tots = len(cis_trans)
print(tots)
res["total"] = [tots]
direc = cis_trans[((cis_trans["cis_status_det_one"].str.contains("higher in human") &
cis_trans["trans_status_det_one"].str.contains("higher in human")) |
(cis_trans["cis_status_det_one"].str.contains("higher in mouse") &
cis_trans["trans_status_det_one"].str.contains("higher in mouse")))]
direc = len(direc)
res["directional"] = [direc]
comp = cis_trans[((cis_trans["cis_status_det_one"].str.contains("higher in human") &
cis_trans["trans_status_det_one"].str.contains("higher in mouse")) |
(cis_trans["cis_status_det_one"].str.contains("higher in mouse") &
cis_trans["trans_status_det_one"].str.contains("higher in human")))]
comp = len(comp)
res["compensatory"] = [comp]
res = pd.DataFrame.from_dict(res, orient="index").reset_index()
res["perc"] = (res[0]/tots)*100
res["tmp"] = "tmp"
print(res)
fig, ax = plt.subplots(figsize=(0.5, 1.5), nrows=1, ncols=1)
sns.barplot(data=res[res["index"] == "total"],
x="tmp", y="perc", color=sns.color_palette("Set2")[7], ax=ax)
sns.barplot(data=res[res["index"] == "directional"],
x="tmp", y="perc", color=sns.color_palette("Set2")[2], ax=ax)
ax.set_xlabel("")
ax.set_ylabel("% of sequence pairs")
ax.set_xticklabels(["all pairs"], rotation=50, ha="right", va="top")
ax.annotate(str(tots), xy=(0, 5), xycoords="data", xytext=(0, 0),
textcoords="offset pixels", ha='center', va='bottom',
color="white", size=fontsize)
# fig.savefig("direc_v_comp.pdf", dpi="figure", bbox_inches="tight")
cis_trans = df[(df["cis_status_one"] == "significant cis effect") &
(df["trans_status_one"] == "significant trans effect")]
tots = cis_trans.groupby("biotype_switch_minimal")["hg19_id"].agg("count").reset_index()
direc = cis_trans[((cis_trans["cis_status_det_one"].str.contains("higher in human") &
cis_trans["trans_status_det_one"].str.contains("higher in human")) |
(cis_trans["cis_status_det_one"].str.contains("higher in mouse") &
cis_trans["trans_status_det_one"].str.contains("higher in mouse")))]
sig = direc.groupby("biotype_switch_minimal")["hg19_id"].agg("count").reset_index()
clean_sig = tots.merge(sig, on="biotype_switch_minimal", how="left").fillna(0)
clean_sig["percent_sig"] = (clean_sig["hg19_id_y"]/clean_sig["hg19_id_x"])*100
clean_sig["percent_tot"] = (clean_sig["hg19_id_x"]/clean_sig["hg19_id_x"])*100
fig = plt.figure(figsize=(2.5, 1.5))
ax = sns.barplot(data=clean_sig, x="biotype_switch_minimal", y="percent_tot",
order=min_switch_order, color=sns.color_palette("Set2")[7])
sns.barplot(data=clean_sig, x="biotype_switch_minimal", y="percent_sig",
order=min_switch_order, color=sns.color_palette("Set2")[2])
ax.set_xticklabels(["eRNA", "lncRNA", "mRNA", "eRNA", "lncRNA", "mRNA"], rotation=50, ha='right', va='top')
ax.set_xlabel("")
ax.set_ylabel("percentage")
ax.axvline(x=2.5, linestyle="dashed", color="black")
for i, l in enumerate(min_switch_order):
sub = clean_sig[clean_sig["biotype_switch_minimal"] == l]
print("%s perc sig: %s | (# sig: %s)" % (l, sub["percent_sig"].iloc[0], sub["hg19_id_y"].iloc[0]))
n = sub["hg19_id_x"].iloc[0]
ax.annotate(str(n), xy=(i, 5), xycoords="data", xytext=(0, 0),
textcoords="offset pixels", ha='center', va='bottom',
color="white", size=fontsize)
plt.show()
fig.savefig("Fig6C.pdf", dpi="figure", bbox_inches="tight")
plt.close()
sub
cis_trans_order = ["cis/trans compensatory", "cis/trans directional"]
cis_trans_pal = {"cis/trans compensatory": sns.color_palette("Set2")[7],
"cis/trans directional": sns.color_palette("Set2")[2]}
df["abs_logFC_native"] = np.abs(df["logFC_native"])
fig = plt.figure(figsize=(1, 1.75))
ax = sns.boxplot(data=df, x="cis_trans_status", y="abs_logFC_native",
flierprops = dict(marker='o', markersize=5),
order=cis_trans_order, palette=cis_trans_pal)
mimic_r_boxplot(ax)
ax.set_xticklabels(["compensatory", "directional"], rotation=50, ha='right', va='top')
ax.set_xlabel("")
ax.set_ylabel(r'$\vert$ native effect size $\vert$')
for i, l in enumerate(cis_trans_order):
sub = df[df["cis_trans_status"] == l]
n = len(sub)
color = cis_trans_pal[l]
ax.annotate(str(n), xy=(i, -0.7), xycoords="data", xytext=(0, 0),
textcoords="offset pixels", ha='center', va='bottom',
color=color, size=fontsize)
sub1 = df[df["cis_trans_status"] == "cis/trans compensatory"]
sub2 = df[df["cis_trans_status"] == "cis/trans directional"]
vals1 = np.asarray(sub1["abs_logFC_native"])
vals2 = np.asarray(sub2["abs_logFC_native"])
vals1 = vals1[~np.isnan(vals1)]
vals2 = vals2[~np.isnan(vals2)]
u, pval = stats.mannwhitneyu(vals1, vals2, alternative="less", use_continuity=False)
annotate_pval(ax, 0, 1, 5, 0, 5, pval, fontsize-1)
ax.set_ylim((-0.8, 6))
fig.savefig("Fig6F.pdf", dpi="figure", bbox_inches="tight")
fig, ax = plt.subplots(figsize=(1.75, 1.75), nrows=1, ncols=1)
ax.scatter(df["logFC_cis_one"], df["logFC_trans_one"], s=12, alpha=1,
color="black", linewidths=0.5, edgecolors="white")
plt.xlabel("cis effect size")
plt.ylabel("trans effect size")
ax.axhline(y=0, color="black", linestyle="dashed")
ax.axvline(x=0, color="black", linestyle="dashed")
ax.set_xlim((-6, 6))
ax.set_ylim((-3, 3))
# annotate corr
no_nan = df[(~pd.isnull(df["logFC_cis_one"])) & (~pd.isnull(df["logFC_trans_one"]))]
r, p = spearmanr(no_nan["logFC_cis_one"], no_nan["logFC_trans_one"])
print(p)
ax.text(0.05, 0.97, "r = {:.2f}".format(r), ha="left", va="top", fontsize=fontsize,
transform=ax.transAxes)
ax.text(0.05, 0.90, "n = %s" % (len(no_nan)), ha="left", va="top", fontsize=fontsize,
transform=ax.transAxes)
plt.show()
fig.savefig("Fig_S12.pdf", dpi="figure", bbox_inches="tight")
plt.close()
###Output
3.0063833411683962e-06
###Markdown
5. plot some examples compensatory
###Code
ex = df[df["hg19_id"] == "h.1433"]
ex = ex[["hg19_id", "mm9_id", "minimal_biotype_hg19", "minimal_biotype_mm9", "HUES64_hg19", "HUES64_mm9",
"mESC_hg19", "mESC_mm9", "trans_human_status_det", "fdr_trans_human", "trans_mouse_status_det",
"fdr_trans_mouse", "cis_HUES64_status_det", "fdr_cis_HUES64", "cis_mESC_status_det", "fdr_cis_mESC",
"logFC_trans_human", "logFC_trans_mouse", "logFC_cis_HUES64", "logFC_cis_mESC"]]
ex
ex = pd.melt(ex, id_vars=["hg19_id", "mm9_id", "minimal_biotype_hg19", "minimal_biotype_mm9"])
ex = ex[ex["variable"].isin(["HUES64_hg19", "HUES64_mm9", "mESC_hg19", "mESC_mm9",
"fdr_cis_HUES64", "fdr_cis_mESC",
"fdr_trans_human", "fdr_trans_mouse", "logFC_cis_HUES64", "logFC_cis_mESC",
"logFC_trans_human", "logFC_trans_mouse"])]
ex["cell"] = ex["variable"].str.split("_", expand=True)[0]
ex["seq"] = ex["variable"].str.split("_", expand=True)[1]
ex.head()
order = ["HUES64", "mESC"]
hue_order = ["hg19", "mm9"]
pal = {"hg19": sns.color_palette("Set2")[1], "mm9": sns.color_palette("Set2")[0]}
fig = plt.figure(figsize=(1.5, 1.5))
sub = ex[ex["cell"].isin(["HUES64", "mESC"])]
ax = sns.barplot(data=sub, x="cell", y="value", hue="seq", order=order, hue_order=hue_order, palette=pal)
ax.set_xticklabels(["hESCs", "mESCs"], rotation=50, va="top", ha="right")
ax.set_ylabel("MPRA activity")
ax.set_xlabel("")
ax.get_legend().remove()
ax.set_ylim((0, 14))
annotate_pval(ax, -0.25, 0.25, 9.5, 0, 9.5, ex[ex["variable"] == "fdr_cis_HUES64"]["value"].iloc[0], fontsize-1)
annotate_pval(ax, 0.75, 1.25, 8.25, 0, 8.25, ex[ex["variable"] == "fdr_cis_mESC"]["value"].iloc[0], fontsize-1)
annotate_pval(ax, -0.25, 0.75, 11.5, 0, 11.5, ex[ex["variable"] == "fdr_trans_human"]["value"].iloc[0], fontsize-1)
annotate_pval(ax, 0.25, 1.25, 12.75, 0, 12.75, ex[ex["variable"] == "fdr_trans_mouse"]["value"].iloc[0], fontsize-1)
# fig.savefig("compensatory_example_barplot.pdf", dpi="figure", bbox_inches="tight")
ex_sub = ex[ex["variable"].str.contains("logFC")]
ex_sub["sp"] = ex_sub["variable"].str.split("_", expand=True)[2]
ex_sub = ex_sub.sort_values(by=["seq", "sp"])
ex_sub
def sp(row):
if row.sp in ["HUES64", "human"]:
return "human"
else:
return "mouse"
ex_sub["sp"] = ex_sub.apply(sp, axis=1)
ex_sub
order = ["cis", "trans"]
fig, axarr = plt.subplots(figsize=(1.5, 1.5), nrows=1, ncols=2, sharey=True)
human_ax = axarr[0]
mouse_ax = axarr[1]
sub = ex_sub[ex_sub["sp"] == "human"]
sns.barplot(data=sub, x="seq", y="value", ax=human_ax, color=sns.color_palette("Set2")[7])
human_ax.set_xticklabels(order, rotation=50, va="top", ha="right")
human_ax.set_ylabel("effect size")
human_ax.set_xlabel("")
human_ax.axhline(y=0, linestyle="dashed", color="black")
sub = ex_sub[ex_sub["sp"] == "mouse"]
sns.barplot(data=sub, x="seq", y="value", ax=mouse_ax, color=sns.color_palette("Set2")[7])
mouse_ax.set_xticklabels(order, rotation=50, va="top", ha="right")
mouse_ax.set_ylabel("")
mouse_ax.set_xlabel("")
mouse_ax.axhline(y=0, linestyle="dashed", color="black")
fig.savefig("Fig6A.pdf", dpi="figure", bbox_inches="tight")
###Output
_____no_output_____
###Markdown
directional
###Code
ex = df[df["hg19_id"] == "h.1389"]
ex = ex[["hg19_id", "mm9_id", "minimal_biotype_hg19", "minimal_biotype_mm9", "HUES64_hg19", "HUES64_mm9",
"mESC_hg19", "mESC_mm9", "trans_human_status_det", "fdr_trans_human", "trans_mouse_status_det",
"fdr_trans_mouse", "cis_HUES64_status_det", "fdr_cis_HUES64", "cis_mESC_status_det", "fdr_cis_mESC",
"logFC_trans_human", "logFC_trans_mouse", "logFC_cis_HUES64", "logFC_cis_mESC"]]
ex
ex = pd.melt(ex, id_vars=["hg19_id", "mm9_id", "minimal_biotype_hg19", "minimal_biotype_mm9"])
ex = ex[ex["variable"].isin(["HUES64_hg19", "HUES64_mm9", "mESC_hg19", "mESC_mm9",
"fdr_cis_HUES64", "fdr_cis_mESC",
"fdr_trans_human", "fdr_trans_mouse", "logFC_cis_HUES64", "logFC_cis_mESC",
"logFC_trans_human", "logFC_trans_mouse"])]
ex["cell"] = ex["variable"].str.split("_", expand=True)[0]
ex["seq"] = ex["variable"].str.split("_", expand=True)[1]
ex.head()
order = ["HUES64", "mESC"]
hue_order = ["hg19", "mm9"]
pal = {"hg19": sns.color_palette("Set2")[1], "mm9": sns.color_palette("Set2")[0]}
fig = plt.figure(figsize=(1.5, 1.5))
sub = ex[ex["cell"].isin(["HUES64", "mESC"])]
ax = sns.barplot(data=sub, x="cell", y="value", hue="seq", order=order, hue_order=hue_order, palette=pal)
ax.set_xticklabels(["hESCs", "mESCs"], rotation=50, va="top", ha="right")
ax.set_ylabel("MPRA activity")
ax.set_xlabel("")
ax.get_legend().remove()
ax.set_ylim((0, 7))
annotate_pval(ax, -0.25, 0.25, 5, 0, 5, ex[ex["variable"] == "fdr_cis_HUES64"]["value"].iloc[0], fontsize-1)
annotate_pval(ax, 0.75, 1.25, 2.25, 0, 2.25, ex[ex["variable"] == "fdr_cis_mESC"]["value"].iloc[0], fontsize-1)
annotate_pval(ax, -0.25, 0.75, 6.15, 0, 6.15, ex[ex["variable"] == "fdr_trans_human"]["value"].iloc[0], fontsize-1)
annotate_pval(ax, 0.25, 1.25, 3.25, 0, 3.25, ex[ex["variable"] == "fdr_trans_mouse"]["value"].iloc[0], fontsize-1)
# fig.savefig("directional_example_barplot.pdf", dpi="figure", bbox_inches="tight")
ex_sub = ex[ex["variable"].str.contains("logFC")]
ex_sub["sp"] = ex_sub["variable"].str.split("_", expand=True)[2]
ex_sub = ex_sub.sort_values(by=["seq", "sp"])
ex_sub["sp"] = ex_sub.apply(sp, axis=1)
ex_sub
order = ["cis", "trans"]
fig, axarr = plt.subplots(figsize=(1.5, 1.5), nrows=1, ncols=2, sharey=True)
human_ax = axarr[0]
mouse_ax = axarr[1]
sub = ex_sub[ex_sub["sp"] == "human"]
sns.barplot(data=sub, x="seq", y="value", ax=human_ax, color=sns.color_palette("Set2")[2])
human_ax.set_xticklabels(order, rotation=50, va="top", ha="right")
human_ax.set_ylabel("effect size")
human_ax.set_xlabel("")
human_ax.axhline(y=0, linestyle="dashed", color="black")
sub = ex_sub[ex_sub["sp"] == "mouse"]
sns.barplot(data=sub, x="seq", y="value", ax=mouse_ax, color=sns.color_palette("Set2")[2])
mouse_ax.set_xticklabels(order, rotation=50, va="top", ha="right")
mouse_ax.set_ylabel("")
mouse_ax.set_xlabel("")
mouse_ax.axhline(y=0, linestyle="dashed", color="black")
fig.savefig("Fig6B.pdf", dpi="figure", bbox_inches="tight")
###Output
_____no_output_____ |
VNN/notebooks/network_experiments/kinematics/shapes.ipynb | ###Markdown
S(X,X,X)
###Code
model_fun = lambda: get_scalar_model(dataset_shapes, hidden_layer_units=[6,5,3], activation='relu', output_activation=None, \
kernel_initializer='random_normal', bias_initializer='random_normal', \
optimizer=keras.optimizers.Adam(), loss=keras.losses.MeanSquaredError(), metrics=[keras.metrics.MeanSquaredError()])
test_model(model_fun(), train_dataset, test_dataset, epochs=1, loss_name="mean_squared_error", measure_name="val_mean_squared_error", \
print_summary=True)
###Output
_____no_output_____
###Markdown
V1(X):U(2)
###Code
model_fun = lambda: get_vector_model(dataset_shapes, fractal_depth=1, hidden_layer_units=(4,), inner_hidden_layer_units=(2,), \
activation='relu', output_activation=None, \
weight_type="unique", weight_initializer='random_normal', \
optimizer=keras.optimizers.Adam(), loss=keras.losses.MeanSquaredError(), metrics=[keras.metrics.MeanSquaredError()])
test_model(model_fun(), train_dataset, test_dataset, epochs=1, loss_name="mean_squared_error", measure_name="val_mean_squared_error", \
print_summary=True)
###Output
_____no_output_____
###Markdown
V1(X):U(3)
###Code
model_fun = lambda: get_vector_model(dataset_shapes, fractal_depth=1, hidden_layer_units=(3,), inner_hidden_layer_units=(3,), \
activation='relu', output_activation=None, \
weight_type="unique", weight_initializer='random_normal', \
optimizer=keras.optimizers.Adam(), loss=keras.losses.MeanSquaredError(), metrics=[keras.metrics.MeanSquaredError()])
test_model(model_fun(), train_dataset, test_dataset, epochs=1, loss_name="mean_squared_error", measure_name="val_mean_squared_error", \
print_summary=True)
###Output
_____no_output_____ |
Examples/Jupyter Notebook Examples/Examples of batch,pfr.ipynb | ###Markdown
Examples of pfr UsageThere are 3 types of estimation that can be performed in the batch/pfr module: maximum a posteriori (MAP), Markov chain Monte Carlo (MCMC), and variational inference (VI). Below, we demonstrate all of these types on the same dataset. Please view the referenced Excel input file to see how the data should be input.
###Code
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import arviz
import seaborn as sns
import matplotlib.patches as mpatches
from ckbit import pfr
from scipy.stats.kde import gaussian_kde
from scipy.integrate import solve_ivp
###Output
_____no_output_____
###Markdown
First, we generate the PFR data with experimental noise added to the concentrations. The smooth lines are the unperturbed data, and the data points are the noisy measurements we use as our data points.
###Code
#Seed 5 was standalone
np.random.seed(5)
HMFinit = 1
LFinit = 0.5
Huinit = 0.2
numPoints = 6
taus = np.linspace (0,60,numPoints)
Hconc = 0.1
T = [413,423]
R = 8.31446261815324*(1/1000) #units of kJ/(mol*K)
#True params
sigma = HMFinit*0.05
A0HLF = 11.31
A0HHu = 16.69
EaHLF = 94.72
EaHHu = 141.94
try:
del cHMF0,cHMF1,cLF0,cLF1,cHu0,cHu1
except:
pass
size0 = len(taus)
cHMF0 = np.linspace(10,20,size0)
cHMF1 = np.linspace(10,20,size0)
cLF0 = np.linspace(10,20,size0)
cLF1 = np.linspace(10,20,size0)
cHu0 = np.linspace(10,20,size0)
cHu1 = np.linspace(10,20,size0)
total0 = np.linspace(10,20,size0)
total1 = np.linspace(10,20,size0)
def d0(t,y):
dHMF = -(y[0]*Hconc)*(kHLF0+kHHu0)
dLF = kHLF0*y[0]*Hconc
dHu = kHHu0*y[0]*Hconc
return [dHMF, dLF, dHu]
def d1(t,y):
dHMF = -(y[0]*Hconc)*(kHLF1+kHHu1)
dLF = kHLF1*y[0]*Hconc
dHu = kHHu1*y[0]*Hconc
return [dHMF, dLF, dHu]
kHLF0 = (10**A0HLF)*np.exp(-EaHLF/(R*T[0]))
kHLF1 = (10**A0HLF)*np.exp(-EaHLF/(R*T[1]))
kHHu0 = (10**A0HHu)*np.exp(-EaHHu/(R*T[0]))
kHHu1 = (10**A0HHu)*np.exp(-EaHHu/(R*T[1]))
tspan = [min(taus),max(taus)]
#clean data
c0 = solve_ivp(d0, tspan, [HMFinit, LFinit, Huinit], method='BDF', t_eval=taus).y
c1 = solve_ivp(d1, tspan, [HMFinit, LFinit, Huinit], method='BDF', t_eval=taus).y
cHMF0 = c0[0]
cLF0 = c0[1]
cHu0 = c0[2]
cHMF1 = c1[0]
cLF1 = c1[1]
cHu1 = c1[2]
f, ax = plt.subplots(1)
ax.plot(taus,cHMF0, label='HMF, 140°C')
ax.plot(taus,cHMF1, label='HMF, 150°C')
ax.plot(taus,cLF0, label='LA+FA, 140°C')
ax.plot(taus,cLF1, label='LA+FA, 150°C')
ax.plot(taus,cHu0, label='Humins, 140°C')
ax.plot(taus,cHu1, label='Humins, 150°C')
#noisy data
for i in range(size0):
cHMF0[i] = cHMF0[i]+np.random.normal(0,sigma,1)
cHMF1[i] = cHMF1[i]+np.random.normal(0,sigma,1)
cLF0[i] = cLF0[i]+np.random.normal(0,sigma,1)
cLF1[i] = cLF1[i]+np.random.normal(0,sigma,1)
cHu0[i] = cHu0[i]+np.random.normal(0,sigma,1)
cHu1[i] = cHu1[i]+np.random.normal(0,sigma,1)
total0[i] = cHMF0[i]+cLF0[i]+cHu0[i]
total1[i] = cHMF1[i]+cLF1[i]+cHu1[i]
cHMF0[0] = HMFinit
cHMF1[0] = HMFinit
cLF0[0] = LFinit
cLF1[0] = LFinit
cHu0[0] = Huinit
cHu1[0] = Huinit
for i in range (len(cHMF0)):
cHMF0[i] = max(0,cHMF0[i])
cHMF1[i] = max(0,cHMF1[i])
cLF0[i] = max(0,cLF0[i])
cLF1[i] = max(0,cLF1[i])
cHu0[i] = max(0,cHu0[i])
cHu1[i] = max(0,cHu1[i])
#Plot the data
ax.scatter(taus,cHMF0)
ax.scatter(taus,cHMF1)
ax.scatter(taus,cLF0)
ax.scatter(taus,cLF1)
ax.scatter(taus,cHu0)
ax.scatter(taus,cHu1)
ax.set_xlabel('Time [min]')
ax.set_ylabel('Concentration [mol/L]')
#ax.set_title('Simulated Data with 5% Noise Addition')
ax.legend(bbox_to_anchor = (1,0.8))
plt.show()
###Output
_____no_output_____
###Markdown
We write this data into the appropriate format in an Excel file (see the Excel file named PFR_Data in this folder). Then we can use this data to obtain our estimates. First, the MAP estimation. This yields point estimates of the modes of the posterior. These estimates are the values that best fit the model given the data and priors.
###Code
#Import data
file = './PFR_Data.xlsx'
#Run MAP estimation with standard priors
map1 = pfr.MAP(filename=file, pH=True, rel_tol=5E-6, abs_tol=5E-6, max_num_steps=1000)
###Output
INFO:pystan:COMPILING THE C++ CODE FOR MODEL anon_model_82e6a9b54a623ac5604ef005a61f1bc0 NOW.
###Markdown
Now, the MCMC estimation. This yields estimates of the posterior distributions of each parameter being estimated.
###Code
#Run MCMC estimation with standard priors
m1, m2 = pfr.MCMC(filename=file, pH=True)
###Output
INFO:pystan:COMPILING THE C++ CODE FOR MODEL anon_model_1ae81a4b787362df01df8087f4bc0be3 NOW.
###Markdown
There are convergence checks to ensure that these samples can be relied upon. These checks are discussed in detail in the published article. This run passes all of those checks, so the inference can be trusted. It is also important to visualize the correlation between the samples of the parameters, which we can do with a pair plot.
###Code
#Generate pairplot
arviz.plot_pair(m1)
plt.show()
###Output
_____no_output_____
###Markdown
Now, the VI estimation. This yields estimates of the posterior distributions of each parameter being estimated, but uses the VI technique instead of MCMC. VI is better than MCMC at generating a large number of samples, but it is a less robust technique. It is still in its experimental implementation phase and does not interact well with the PFR estimation module, as we demonstrate below.
###Code
#Run VI estimation with standard priors
v1, v2 = pfr.VI(filename=file, pH=True)
###Output
Using cached StanModel
###Markdown
We can also specify prior distributions and run inference with them. The following example is for a prior distribution for the Ea term of rxn 1 that is normally distributed with a mean of 100 and standard deviation of 5 and a prior distribution for the A0 term of rxn 1 that is normally distributed with a mean of 10 and standard deviation of 5. All prior distribution specification must follow Stan's implementation forms: https://mc-stan.org/docs/2_23/functions-reference/unbounded-continuous-distributions.html
###Code
#Run MCMC estimation with specified priors
p1, p2 = pfr.MCMC(filename=file, pH=True,
priors = ['A0[1] ~ normal(10,5)',
'Ea[1] ~ normal(100,5)'])
###Output
_____no_output_____
###Markdown
Finally, we demonstrate how to construct visually appealing distribution plots.
###Code
#Process datasets
data1 = m2['Ea'][:,0]
datalabel1 = 'MCMC Without Prior'
data1mean = np.mean(data1)
kdedata1 = gaussian_kde(data1)
data1x = np.linspace(min(data1), max(data1), 100)
data2 = p2['Ea'][:,0]
datalabel2 = 'MCMC With Prior'
data2mean = np.mean(data2)
kdedata2 = gaussian_kde(data2)
data2x = np.linspace(min(data2), max(data2), 100)
#Generate probability distribution graphs
sns.set(color_codes=True)
sns.set(style="white", font_scale=1.3)
f, ax = plt.subplots(1)
ax = sns.kdeplot(data1, gridsize=10000, shade=True, color='r')
ax = sns.kdeplot(data2, gridsize=10000, shade=True, color='b')
ax.axvline(data1mean, linestyle = "--", color = 'r')
ax.axvline(data2mean, linestyle = "--", color = 'b')
ax.set_title('Comparison of Inference Techniques')
ax.set_xlabel('$E_a [kJ/mol]$')
ax.set_ylabel('Probability Density')
ax.axes.get_yaxis().set_ticks([])
ax.axes.get_yaxis().set_ticklabels([])
ax.axes.set_xlim([50,125])
red_line = mpatches.Patch(color='red', label=datalabel1)
blue_line = mpatches.Patch(color='blue', label=datalabel2)
ax.legend(handles=[red_line, blue_line])
plt.show()
###Output
_____no_output_____ |
Hager Ashour WT-21-098/11.ipynb | ###Markdown
Exercise Notebook (DS)
###Code
# this code conceals irrelevant warning messages
import warnings
warnings.simplefilter('ignore', FutureWarning)
import numpy as np
###Output
_____no_output_____
###Markdown
Numpy 2D Array
###Code
a = np.array([[1,2,3],[3,4,5]])
print(a)
# Baseball players' heights as a 2D array (illustrative values)
a = np.array([[1,2,3], [4,1,5]])
print (a)
# Addition
a+3
# Multiplication
a*2
# Subtraction
a-2
# Division
a/3
###Output
_____no_output_____
###Markdown
**Subsetting 2D NumPy Arrays**
###Code
a[0]
a[0][0] # Accessing the first element of the first row
a[1][0] # Accessing the first element of the second row
a[0][1:2] # Slicing from index 1 up to (but not including) index 2 of the first row
a[1][1:2] # Slicing from index 1 up to (but not including) index 2 of the second row
###Output
_____no_output_____
###Markdown
**2D Arithmetic**
###Code
# Addition
a+3
# Multiplication
a*2
# Subtraction
a-2
# Division
a/3
###Output
_____no_output_____
###Markdown
Task 1. Write a NumPy program to test whether two arrays are element-wise equal within a tolerance.
###Code
print(np.allclose([2e10,5e-8], [2.0001e10,5e-9]))
print(np.allclose([10.0, np.nan], [7.0, np.nan], equal_nan=True))
print(np.allclose([8.0, np.nan], [8.0, np.nan], equal_nan=True))
###Output
False
False
True
###Markdown
2. Write a NumPy program to create an element-wise comparison (greater, greater_equal, less and less_equal) of two given arrays.
###Code
a = np.array([10,5])
b = np.array([10,8])
print(np.greater(a, b))
print(np.greater_equal(a, b))
print(np.less(a, b))
print(np.less_equal(a, b))
###Output
[False False]
[ True False]
[False True]
[ True True]
|
Coursera NLP Specialization/Course 1/Week 4/Machine Translation.ipynb | ###Markdown
Assignment 4 - Naive Machine Translation and LSHYou will now implement your first machine translation system and then youwill see how locality sensitive hashing works. Let's get started by importingthe required functions!If you are running this notebook in your local computer, don't forget todownload the twitter samples and stopwords from nltk.```nltk.download('stopwords')nltk.download('twitter_samples')``` **NOTE**: The `Exercise xx` numbers in this assignment **_are inconsistent_** with the `UNQ_Cx` numbers. This assignment covers the folowing topics:- [1. The word embeddings data for English and French words](1) - [1.1 Generate embedding and transform matrices](1-1) - [Exercise 1](ex-01)- [2. Translations](2) - [2.1 Translation as linear transformation of embeddings](2-1) - [Exercise 2](ex-02) - [Exercise 3](ex-03) - [Exercise 4](ex-04) - [2.2 Testing the translation](2-2) - [Exercise 5](ex-05) - [Exercise 6](ex-06) - [3. LSH and document search](3) - [3.1 Getting the document embeddings](3-1) - [Exercise 7](ex-07) - [Exercise 8](ex-08) - [3.2 Looking up the tweets](3-2) - [3.3 Finding the most similar tweets with LSH](3-3) - [3.4 Getting the hash number for a vector](3-4) - [Exercise 9](ex-09) - [3.5 Creating a hash table](3-5) - [Exercise 10](ex-10) - [3.6 Creating all hash tables](3-6) - [Exercise 11](ex-11)
###Code
import pdb
import pickle
import string
import time
import gensim
import matplotlib.pyplot as plt
import nltk
import numpy as np
import scipy
import sklearn
from gensim.models import KeyedVectors
from nltk.corpus import stopwords, twitter_samples
from nltk.tokenize import TweetTokenizer
from utils import (cosine_similarity, get_dict,
process_tweet)
from os import getcwd
# add folder, tmp2, from our local workspace containing pre-downloaded corpora files to nltk's data path
filePath = f"{getcwd()}/../tmp2/"
nltk.data.path.append(filePath)
###Output
_____no_output_____
###Markdown
1. The word embeddings data for English and French wordsWrite a program that translates English to French. The dataThe full dataset for English embeddings is about 3.64 gigabytes, and the Frenchembeddings are about 629 megabytes. To prevent the Coursera workspace fromcrashing, we've extracted a subset of the embeddings for the words that you'lluse in this assignment.If you want to run this on your local computer and use the full dataset,you can download the* English embeddings from Google code archive word2vec[look for GoogleNews-vectors-negative300.bin.gz](https://code.google.com/archive/p/word2vec/) * You'll need to unzip the file first.* and the French embeddings from[cross_lingual_text_classification](https://github.com/vjstark/crosslingual_text_classification). * in the terminal, type (in one line) `curl -o ./wiki.multi.fr.vec https://dl.fbaipublicfiles.com/arrival/vectors/wiki.multi.fr.vec`Then copy-paste the code below and run it. ```python Use this code to download and process the full dataset on your local computerfrom gensim.models import KeyedVectorsen_embeddings = KeyedVectors.load_word2vec_format('./GoogleNews-vectors-negative300.bin', binary = True)fr_embeddings = KeyedVectors.load_word2vec_format('./wiki.multi.fr.vec') loading the english to french dictionariesen_fr_train = get_dict('en-fr.train.txt')print('The length of the english to french training dictionary is', len(en_fr_train))en_fr_test = get_dict('en-fr.test.txt')print('The length of the english to french test dictionary is', len(en_fr_train))english_set = set(en_embeddings.vocab)french_set = set(fr_embeddings.vocab)en_embeddings_subset = {}fr_embeddings_subset = {}french_words = set(en_fr_train.values())for en_word in en_fr_train.keys(): fr_word = en_fr_train[en_word] if fr_word in french_set and en_word in english_set: en_embeddings_subset[en_word] = en_embeddings[en_word] fr_embeddings_subset[fr_word] = fr_embeddings[fr_word]for en_word in en_fr_test.keys(): fr_word = en_fr_test[en_word] if fr_word in french_set and en_word in english_set: en_embeddings_subset[en_word] = en_embeddings[en_word] fr_embeddings_subset[fr_word] = fr_embeddings[fr_word]pickle.dump( en_embeddings_subset, open( "en_embeddings.p", "wb" ) )pickle.dump( fr_embeddings_subset, open( "fr_embeddings.p", "wb" ) )``` The subset of dataTo do the assignment on the Coursera workspace, we'll use the subset of word embeddings.
###Code
en_embeddings_subset = pickle.load(open("en_embeddings.p", "rb"))
fr_embeddings_subset = pickle.load(open("fr_embeddings.p", "rb"))
###Output
_____no_output_____
###Markdown
Look at the data* en_embeddings_subset: the key is an English word, and the value is a 300-dimensional array, which is the embedding for that word.```'the': array([ 0.08007812,  0.10498047,  0.04980469,  0.0534668 , -0.06738281, ....```* fr_embeddings_subset: the key is a French word, and the value is a 300-dimensional array, which is the embedding for that word.```'la': array([-6.18250e-03, -9.43867e-04, -8.82648e-03,  3.24623e-02,...``` Load two dictionaries mapping the English to French words* A training dictionary* and a testing dictionary.
###Code
# loading the english to french dictionaries
en_fr_train = get_dict('en-fr.train.txt')
print('The length of the English to French training dictionary is', len(en_fr_train))
en_fr_test = get_dict('en-fr.test.txt')
print('The length of the English to French test dictionary is', len(en_fr_train))
###Output
The length of the English to French training dictionary is 5000
The length of the English to French test dictionary is 5000
###Markdown
Looking at the English French dictionary* `en_fr_train` is a dictionary where the key is the English word and the valueis the French translation of that English word.```{'the': 'la', 'and': 'et', 'was': 'était', 'for': 'pour',```* `en_fr_test` is similar to `en_fr_train`, but is a test set. We won't look at ituntil we get to testing. 1.1 Generate embedding and transform matrices Exercise 01: Translating English dictionary to French by using embeddingsYou will now implement a function `get_matrices`, which takes the loaded dataand returns matrices `X` and `Y`.Inputs:- `en_fr` : English to French dictionary- `en_embeddings` : English to embeddings dictionary- `fr_embeddings` : French to embeddings dictionaryReturns:- Matrix `X` and matrix `Y`, where each row in X is the word embedding for anenglish word, and the same row in Y is the word embedding for the Frenchversion of that English word. Figure 2 Use the `en_fr` dictionary to ensure that the ith row in the `X` matrixcorresponds to the ith row in the `Y` matrix. **Instructions**: Complete the function `get_matrices()`:* Iterate over English words in `en_fr` dictionary.* Check if the word have both English and French embedding. Hints Sets are useful data structures that can be used to check if an item is a member of a group. You can get words which are embedded into the language by using keys method. Keep vectors in `X` and `Y` sorted in list. You can use np.vstack() to merge them into the numpy matrix. numpy.vstack stacks the items in a list as rows in a matrix.
###Code
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def get_matrices(en_fr, french_vecs, english_vecs):
"""
Input:
en_fr: English to French dictionary
french_vecs: French words to their corresponding word embeddings.
english_vecs: English words to their corresponding word embeddings.
Output:
X: a matrix where the columns are the English embeddings.
Y: a matrix where the columns correspond to the French embeddings.
R: the projection matrix that minimizes the F norm ||X R -Y||^2.
"""
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
# X_l and Y_l are lists of the english and french word embeddings
X_l = list()
Y_l = list()
# get the english words (the keys in the dictionary) and store in a set()
english_set = set(english_vecs.keys())
# get the french words (keys in the dictionary) and store in a set()
french_set = set(french_vecs.keys())
# store the french words that are part of the english-french dictionary (these are the values of the dictionary)
french_words = set(en_fr.values())
# loop through all english, french word pairs in the english french dictionary
for en_word, fr_word in en_fr.items():
# check that the french word has an embedding and that the english word has an embedding
if fr_word in french_set and en_word in english_set:
# get the english embedding
en_vec = english_vecs[en_word]
# get the french embedding
fr_vec = french_vecs[fr_word]
# add the english embedding to the list
X_l.append(en_vec)
# add the french embedding to the list
Y_l.append(fr_vec)
# stack the vectors of X_l into a matrix X
X = np.vstack(X_l)
# stack the vectors of Y_l into a matrix Y
Y = np.vstack(Y_l)
### END CODE HERE ###
return X, Y
###Output
_____no_output_____
###Markdown
Now we will use the function `get_matrices()` to obtain sets `X_train` and `Y_train` of English and French word embeddings in the corresponding vector space models.
###Code
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything
# getting the training set:
X_train, Y_train = get_matrices(
en_fr_train, fr_embeddings_subset, en_embeddings_subset)
###Output
_____no_output_____
###Markdown
2. Translations Figure 1 Write a program that translates English words to French words using word embeddings and vector space models. 2.1 Translation as linear transformation of embeddingsGiven dictionaries of English and French word embeddings you will create a transformation matrix `R`* Given an English word embedding, $\mathbf{e}$, you can multiply $\mathbf{eR}$ to get a new word embedding $\mathbf{f}$. * Both $\mathbf{e}$ and $\mathbf{f}$ are [row vectors](https://en.wikipedia.org/wiki/Row_and_column_vectors).* You can then compute the nearest neighbors to `f` in the french embeddings and recommend the word that is most similar to the transformed word embedding. Describing translation as the minimization problemFind a matrix `R` that minimizes the following equation. $$\arg \min _{\mathbf{R}}\| \mathbf{X R} - \mathbf{Y}\|_{F}\tag{1} $$ Frobenius normThe Frobenius norm of a matrix $A$ (assuming it is of dimension $m,n$) is defined as the square root of the sum of the absolute squares of its elements:$$\|\mathbf{A}\|_{F} \equiv \sqrt{\sum_{i=1}^{m} \sum_{j=1}^{n}\left|a_{i j}\right|^{2}}\tag{2}$$ Actual loss functionIn the real world applications, the Frobenius norm loss:$$\| \mathbf{XR} - \mathbf{Y}\|_{F}$$is often replaced by it's squared value divided by $m$:$$ \frac{1}{m} \| \mathbf{X R} - \mathbf{Y} \|_{F}^{2}$$where $m$ is the number of examples (rows in $\mathbf{X}$).* The same R is found when using this loss function versus the original Frobenius norm.* The reason for taking the square is that it's easier to compute the gradient of the squared Frobenius.* The reason for dividing by $m$ is that we're more interested in the average loss per embedding than the loss for the entire training set. * The loss for all training set increases with more words (training examples), so taking the average helps us to track the average loss regardless of the size of the training set. [Optional] Detailed explanation why we use norm squared instead of the norm: Click for optional details The norm is always nonnegative (we're summing up absolute values), and so is the square. When we take the square of all non-negative (positive or zero) numbers, the order of the data is preserved. For example, if 3 > 2, 3^2 > 2^2 Using the norm or squared norm in gradient descent results in the same location of the minimum. Squaring cancels the square root in the Frobenius norm formula. Because of the chain rule, we would have to do more calculations if we had a square root in our expression for summation. Dividing the function value by the positive number doesn't change the optimum of the function, for the same reason as described above. We're interested in transforming English embedding into the French. Thus, it is more important to measure average loss per embedding than the loss for the entire dictionary (which increases as the number of words in the dictionary increases). Exercise 02: Implementing translation mechanism described in this section. Step 1: Computing the loss* The loss function will be squared Frobenoius norm of the difference betweenmatrix and its approximation, divided by the number of training examples $m$.* Its formula is:$$ L(X, Y, R)=\frac{1}{m}\sum_{i=1}^{m} \sum_{j=1}^{n}\left( a_{i j} \right)^{2}$$where $a_{i j}$ is value in $i$th row and $j$th column of the matrix $\mathbf{XR}-\mathbf{Y}$. 
Instructions: complete the `compute_loss()` function* Compute the approximation of `Y` by matrix multiplying `X` and `R`* Compute difference `XR - Y`* Compute the squared Frobenius norm of the difference and divide it by $m$. Hints Useful functions: Numpy dot , Numpy sum, Numpy square, Numpy norm Be careful about which operation is elementwise and which operation is a matrix multiplication. Try to use matrix operations instead of the numpy norm function. If you choose to use norm function, take care of extra arguments and that it's returning loss squared, and not the loss itself.
###Code
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def compute_loss(X, Y, R):
'''
Inputs:
X: a matrix of dimension (m,n) where the columns are the English embeddings.
Y: a matrix of dimension (m,n) where the columns correspond to the French embeddings.
R: a matrix of dimension (n,n) - transformation matrix from English to French vector space embeddings.
Outputs:
L: a scalar - the value of the loss function for given X, Y and R.
'''
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
# m is the number of rows in X
m = X.shape[0]
# diff is XR - Y
diff = np.dot(X,R)-Y
# diff_squared is the element-wise square of the difference
diff_squared = diff**2
# sum_diff_squared is the sum of the squared elements
sum_diff_squared = np.sum(diff_squared)
# loss i the sum_diff_squard divided by the number of examples (m)
loss = sum_diff_squared/m
### END CODE HERE ###
return loss
###Output
_____no_output_____
###Markdown
Exercise 03 Step 2: Computing the gradient of loss with respect to transform matrix R* Calculate the gradient of the loss with respect to transform matrix `R`.* The gradient is a matrix that encodes how much a small change in `R` affects the change in the loss function.* The gradient gives us the direction in which we should decrease `R` to minimize the loss.* $m$ is the number of training examples (number of rows in $X$).* The formula for the gradient of the loss function $L(X,Y,R)$ is:$$\frac{d}{dR}L(X,Y,R)=\frac{d}{dR}\Big(\frac{1}{m}\| X R -Y\|_{F}^{2}\Big) = \frac{2}{m}X^{T} (X R - Y)$$**Instructions**: Complete the `compute_gradient` function below. Hints Transposing in numpy Finding out the dimensions of matrices in numpy Remember to use numpy.dot for matrix multiplication
###Code
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def compute_gradient(X, Y, R):
'''
Inputs:
X: a matrix of dimension (m,n) where the columns are the English embeddings.
Y: a matrix of dimension (m,n) where the columns correspond to the French embeddings.
R: a matrix of dimension (n,n) - transformation matrix from English to French vector space embeddings.
Outputs:
g: a matrix of dimension (n,n) - gradient of the loss function L for given X, Y and R.
'''
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
# m is the number of rows in X
m = X.shape[0]
# gradient is X^T(XR - Y) * 2/m
gradient = np.dot(X.transpose(),np.dot(X,R)-Y)*(2/m)
### END CODE HERE ###
return gradient
###Output
_____no_output_____
###Markdown
Step 3: Finding the optimal R with gradient descent algorithm Gradient descent[Gradient descent](https://ml-cheatsheet.readthedocs.io/en/latest/gradient_descent.html) is an iterative algorithm which is used in searching for the optimum of the function. * Earlier, we've mentioned that the gradient of the loss with respect to the matrix encodes how much a tiny change in some coordinate of that matrix affect the change of loss function.* Gradient descent uses that information to iteratively change matrix `R` until we reach a point where the loss is minimized. Training with a fixed number of iterationsMost of the time we iterate for a fixed number of training steps rather than iterating until the loss falls below a threshold. OPTIONAL: explanation for fixed number of iterations click here for detailed discussion You cannot rely on training loss getting low -- what you really want is the validation loss to go down, or validation accuracy to go up. And indeed - in some cases people train until validation accuracy reaches a threshold, or -- commonly known as "early stopping" -- until the validation accuracy starts to go down, which is a sign of over-fitting. Why not always do "early stopping"? Well, mostly because well-regularized models on larger data-sets never stop improving. Especially in NLP, you can often continue training for months and the model will continue getting slightly and slightly better. This is also the reason why it's hard to just stop at a threshold -- unless there's an external customer setting the threshold, why stop, where do you put the threshold? Stopping after a certain number of steps has the advantage that you know how long your training will take - so you can keep some sanity and not train for months. You can then try to get the best performance within this time budget. Another advantage is that you can fix your learning rate schedule -- e.g., lower the learning rate at 10% before finish, and then again more at 1% before finishing. Such learning rate schedules help a lot, but are harder to do if you don't know how long you're training. Pseudocode:1. Calculate gradient $g$ of the loss with respect to the matrix $R$.2. Update $R$ with the formula:$$R_{\text{new}}= R_{\text{old}}-\alpha g$$Where $\alpha$ is the learning rate, which is a scalar. Learning rate* The learning rate or "step size" $\alpha$ is a coefficient which decides how much we want to change $R$ in each step.* If we change $R$ too much, we could skip the optimum by taking too large of a step.* If we make only small changes to $R$, we will need many steps to reach the optimum.* Learning rate $\alpha$ is used to control those changes.* Values of $\alpha$ are chosen depending on the problem, and we'll use `learning_rate`$=0.0003$ as the default value for our algorithm. Exercise 04 Instructions: Implement `align_embeddings()` Hints Use the 'compute_gradient()' function to get the gradient in each step
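Before implementing the full training loop, it can help to see the update rule applied once in isolation. The snippet below runs a single gradient-descent step $R_{\text{new}} = R_{\text{old}} - \alpha g$ on tiny made-up matrices; the numbers are purely illustrative and are not part of the assignment data.

```python
import numpy as np

# one illustrative gradient-descent step on a toy 2x2 problem
X = np.array([[1.0, 2.0], [3.0, 4.0]])
Y = np.array([[1.0, 0.0], [0.0, 1.0]])
R = np.eye(2)
alpha = 0.0003
m = X.shape[0]
g = (2 / m) * X.T @ (X @ R - Y)   # gradient of the squared Frobenius loss
R_new = R - alpha * g
print(R_new)
```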
###Code
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def align_embeddings(X, Y, train_steps=100, learning_rate=0.0003):
'''
Inputs:
X: a matrix of dimension (m,n) where the columns are the English embeddings.
Y: a matrix of dimension (m,n) where the columns correspong to the French embeddings.
train_steps: positive int - describes how many steps will gradient descent algorithm do.
learning_rate: positive float - describes how big steps will gradient descent algorithm do.
Outputs:
R: a matrix of dimension (n,n) - the projection matrix that minimizes the F norm ||X R -Y||^2
'''
np.random.seed(129)
# the number of columns in X is the number of dimensions for a word vector (e.g. 300)
# R is a square matrix with length equal to the number of dimensions in th word embedding
R = np.random.rand(X.shape[1], X.shape[1])
for i in range(train_steps):
if i % 25 == 0:
print(f"loss at iteration {i} is: {compute_loss(X, Y, R):.4f}")
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
# use the function that you defined to compute the gradient
gradient = compute_gradient(X,Y,R)
# update R by subtracting the learning rate times gradient
R -= learning_rate*gradient
### END CODE HERE ###
return R
# UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything
# Testing your implementation.
np.random.seed(129)
m = 10
n = 5
X = np.random.rand(m, n)
Y = np.random.rand(m, n) * .1
R = align_embeddings(X, Y)
###Output
loss at iteration 0 is: 3.7242
loss at iteration 25 is: 3.6283
loss at iteration 50 is: 3.5350
loss at iteration 75 is: 3.4442
###Markdown
**Expected Output:**```loss at iteration 0 is: 3.7242loss at iteration 25 is: 3.6283loss at iteration 50 is: 3.5350loss at iteration 75 is: 3.4442``` Calculate transformation matrix RUsing those the training set, find the transformation matrix $\mathbf{R}$ by calling the function `align_embeddings()`.**NOTE:** The code cell below will take a few minutes to fully execute (~3 mins)
###Code
# UNQ_C7 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything
R_train = align_embeddings(X_train, Y_train, train_steps=400, learning_rate=0.8)
###Output
loss at iteration 0 is: 963.0146
loss at iteration 25 is: 97.8292
loss at iteration 50 is: 26.8329
loss at iteration 75 is: 9.7893
loss at iteration 100 is: 4.3776
loss at iteration 125 is: 2.3281
loss at iteration 150 is: 1.4480
loss at iteration 175 is: 1.0338
loss at iteration 200 is: 0.8251
loss at iteration 225 is: 0.7145
loss at iteration 250 is: 0.6534
loss at iteration 275 is: 0.6185
loss at iteration 300 is: 0.5981
loss at iteration 325 is: 0.5858
loss at iteration 350 is: 0.5782
loss at iteration 375 is: 0.5735
###Markdown
Expected Output```loss at iteration 0 is: 963.0146loss at iteration 25 is: 97.8292loss at iteration 50 is: 26.8329loss at iteration 75 is: 9.7893loss at iteration 100 is: 4.3776loss at iteration 125 is: 2.3281loss at iteration 150 is: 1.4480loss at iteration 175 is: 1.0338loss at iteration 200 is: 0.8251loss at iteration 225 is: 0.7145loss at iteration 250 is: 0.6534loss at iteration 275 is: 0.6185loss at iteration 300 is: 0.5981loss at iteration 325 is: 0.5858loss at iteration 350 is: 0.5782loss at iteration 375 is: 0.5735``` 2.2 Testing the translation k-Nearest neighbors algorithm[k-Nearest neighbors algorithm](https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm) * k-NN is a method which takes a vector as input and finds the other vectors in the dataset that are closest to it. * The 'k' is the number of "nearest neighbors" to find (e.g. k=2 finds the closest two neighbors). Searching for the translation embeddingSince we're approximating the translation function from English to French embeddings by a linear transformation matrix $\mathbf{R}$, most of the time we won't get the exact embedding of a French word when we transform embedding $\mathbf{e}$ of some particular English word into the French embedding space. * This is where $k$-NN becomes really useful! By using $1$-NN with $\mathbf{eR}$ as input, we can search for an embedding $\mathbf{f}$ (as a row) in the matrix $\mathbf{Y}$ which is the closest to the transformed vector $\mathbf{eR}$ Cosine similarityCosine similarity between vectors $u$ and $v$ calculated as the cosine of the angle between them.The formula is $$\cos(u,v)=\frac{u\cdot v}{\left\|u\right\|\left\|v\right\|}$$* $\cos(u,v)$ = $1$ when $u$ and $v$ lie on the same line and have the same direction.* $\cos(u,v)$ is $-1$ when they have exactly opposite directions.* $\cos(u,v)$ is $0$ when the vectors are orthogonal (perpendicular) to each other. Note: Distance and similarity are pretty much opposite things.* We can obtain distance metric from cosine similarity, but the cosine similarity can't be used directly as the distance metric. * When the cosine similarity increases (towards $1$), the "distance" between the two vectors decreases (towards $0$). * We can define the cosine distance between $u$ and $v$ as$$d_{\text{cos}}(u,v)=1-\cos(u,v)$$ **Exercise 05**: Complete the function `nearest_neighbor()`Inputs:* Vector `v`,* A set of possible nearest neighbors `candidates`* `k` nearest neighbors to find.* The distance metric should be based on cosine similarity.* `cosine_similarity` function is already implemented and imported for you. It's arguments are two vectors and it returns the cosine of the angle between them.* Iterate over rows in `candidates`, and save the result of similarities between current row and vector `v` in a python list. Take care that similarities are in the same order as row vectors of `candidates`.* Now you can use [numpy argsort]( https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.htmlnumpy.argsort) to sort the indices for the rows of `candidates`. Hints numpy.argsort sorts values from most negative to most positive (smallest to largest) The candidates that are nearest to 'v' should have the highest cosine similarity To get the last element of a list 'tmp', the notation is tmp[-1:]
###Code
# UNQ_C8 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def nearest_neighbor(v, candidates, k=1):
"""
Input:
- v, the vector you are going to find the nearest neighbor for
- candidates: a set of vectors where we will find the neighbors
- k: top k nearest neighbors to find
Output:
- k_idx: the indices of the top k closest vectors in sorted form
"""
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
similarity_l = []
# for each candidate vector...
for row in candidates:
# get the cosine similarity
cos_similarity = cosine_similarity(v,row)
# append the similarity to the list
similarity_l.append(cos_similarity)
# sort the similarity list and get the indices of the sorted list
sorted_ids = np.argsort(similarity_l)
# get the indices of the k most similar candidate vectors
k_idx = sorted_ids[-k:]
### END CODE HERE ###
return k_idx
# UNQ_C9 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything
# Test your implementation:
v = np.array([1, 0, 1])
candidates = np.array([[1, 0, 5], [-2, 5, 3], [2, 0, 1], [6, -9, 5], [9, 9, 9]])
print(candidates[nearest_neighbor(v, candidates, 3)])
###Output
[[9 9 9]
[1 0 5]
[2 0 1]]
###Markdown
**Expected Output**:`[[9 9 9] [1 0 5] [2 0 1]]` Test your translation and compute its accuracy**Exercise 06**:Complete the function `test_vocabulary` which takes in Englishembedding matrix $X$, French embedding matrix $Y$ and the $R$matrix and returns the accuracy of translations from $X$ to $Y$ by $R$.* Iterate over transformed English word embeddings and check if theclosest French word vector belongs to French word that is the actualtranslation.* Obtain an index of the closest French embedding by using`nearest_neighbor` (with argument `k=1`), and compare it to the indexof the English embedding you have just transformed.* Keep track of the number of times you get the correct translation.* Calculate accuracy as $$\text{accuracy}=\frac{\(\text{correct predictions})}{\(\text{total predictions})}$$
###Code
# UNQ_C10 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def test_vocabulary(X, Y, R):
'''
Input:
X: a matrix where the columns are the English embeddings.
Y: a matrix where the columns correspond to the French embeddings.
R: the transform matrix which translates word embeddings from
English to French word vector space.
Output:
accuracy: accuracy of the English to French translation
'''
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
# The prediction is X times R
pred = np.dot(X,R)
# initialize the number correct to zero
num_correct = 0
# loop through each row in pred (each transformed embedding)
for i in range(len(pred)):
# get the index of the nearest neighbor of pred at row 'i'; also pass in the candidates in Y
pred_idx = nearest_neighbor(pred[i],Y)
# if the index of the nearest neighbor equals the row of i... \
if pred_idx == i:
# increment the number correct by 1.
num_correct += 1
# accuracy is the number correct divided by the number of rows in 'pred' (also number of rows in X)
accuracy = num_correct/len(pred)
### END CODE HERE ###
return accuracy
###Output
_____no_output_____
###Markdown
Let's see how your translation mechanism is working on the unseen data:
###Code
X_val, Y_val = get_matrices(en_fr_test, fr_embeddings_subset, en_embeddings_subset)
# UNQ_C11 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything
acc = test_vocabulary(X_val, Y_val, R_train) # this might take a minute or two
print(f"accuracy on test set is {acc:.3f}")
###Output
accuracy on test set is 0.557
###Markdown
**Expected Output**:```0.557```You managed to translate words from one language to another language without ever seeing them, with almost 56% accuracy, by using some basic linear algebra and learning a mapping of words from one language to another! 3. LSH and document search In this part of the assignment, you will implement a more efficient version of k-nearest neighbors using locality sensitive hashing. You will then apply this to document search.* Process the tweets and represent each tweet as a vector (represent a document with a vector embedding).* Use locality sensitive hashing and k nearest neighbors to find tweets that are similar to a given tweet.
###Code
# get the positive and negative tweets
all_positive_tweets = twitter_samples.strings('positive_tweets.json')
all_negative_tweets = twitter_samples.strings('negative_tweets.json')
all_tweets = all_positive_tweets + all_negative_tweets
###Output
_____no_output_____
###Markdown
3.1 Getting the document embeddings Bag-of-words (BOW) document models Text documents are sequences of words.* The ordering of words makes a difference. For example, the sentences "Apple pie is better than pepperoni pizza." and "Pepperoni pizza is better than apple pie" have opposite meanings due to the word ordering.* However, for some applications, ignoring the order of words can allow us to train an efficient and still effective model.* This approach is called the Bag-of-words document model. Document embeddings* A document embedding is created by summing up the embeddings of all words in the document.* If we don't know the embedding of some word, we can ignore that word. **Exercise 07**: Complete the `get_document_embedding()` function.* The function `get_document_embedding()` encodes the entire document as a "document" embedding.* It takes in a document (as a string) and a dictionary, `en_embeddings`.* It processes the document, and looks up the corresponding embedding of each word.* It then sums them up and returns the sum of all word vectors of that processed tweet. Hints You can handle missing words more easily by using the `get()` method of the python dictionary instead of the bracket notation (i.e. "[ ]"). The default value for a missing word should be the zero vector. Numpy will broadcast the simple 0 scalar into a vector of zeros during the summation. Alternatively, skip the addition if a word is not in the dictionary. You can use your `process_tweet()` function which allows you to process the tweet. The function just takes in a tweet and returns a list of words.
###Code
# UNQ_C12 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def get_document_embedding(tweet, en_embeddings):
'''
Input:
- tweet: a string
- en_embeddings: a dictionary of word embeddings
Output:
- doc_embedding: sum of all word embeddings in the tweet
'''
doc_embedding = np.zeros(300)
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
# process the document into a list of words (process the tweet)
processed_doc = process_tweet(tweet)
for word in processed_doc:
# add the word embedding to the running total for the document embedding
doc_embedding += en_embeddings.get(word,0)
### END CODE HERE ###
return doc_embedding
# UNQ_C13 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything
# testing your function
custom_tweet = "RT @Twitter @chapagain Hello There! Have a great day. :) #good #morning http://chapagain.com.np"
tweet_embedding = get_document_embedding(custom_tweet, en_embeddings_subset)
tweet_embedding[-5:]
###Output
_____no_output_____
###Markdown
**Expected output**:```array([-0.00268555, -0.15378189, -0.55761719, -0.07216644, -0.32263184])``` Exercise 08 Store all document vectors into a dictionaryNow, let's store all the tweet embeddings into a dictionary.Implement `get_document_vecs()`
###Code
# UNQ_C14 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def get_document_vecs(all_docs, en_embeddings):
'''
Input:
- all_docs: list of strings - all tweets in our dataset.
- en_embeddings: dictionary with words as the keys and their embeddings as the values.
Output:
- document_vec_matrix: matrix of tweet embeddings.
- ind2Doc_dict: dictionary with indices of tweets in vecs as keys and their embeddings as the values.
'''
# the dictionary's key is an index (integer) that identifies a specific tweet
# the value is the document embedding for that document
ind2Doc_dict = {}
# this is list that will store the document vectors
document_vec_l = []
for i, doc in enumerate(all_docs):
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
# get the document embedding of the tweet
doc_embedding = get_document_embedding(doc, en_embeddings)
# save the document embedding into the ind2Tweet dictionary at index i
ind2Doc_dict[i] = doc_embedding
# append the document embedding to the list of document vectors
document_vec_l.append(doc_embedding)
### END CODE HERE ###
# convert the list of document vectors into a 2D array (each row is a document vector)
document_vec_matrix = np.vstack(document_vec_l)
return document_vec_matrix, ind2Doc_dict
document_vecs, ind2Tweet = get_document_vecs(all_tweets, en_embeddings_subset)
# UNQ_C15 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything
print(f"length of dictionary {len(ind2Tweet)}")
print(f"shape of document_vecs {document_vecs.shape}")
###Output
length of dictionary 10000
shape of document_vecs (10000, 300)
###Markdown
Expected Output```length of dictionary 10000shape of document_vecs (10000, 300)``` 3.2 Looking up the tweetsNow you have a vector of dimension (m,d) where `m` is the number of tweets(10,000) and `d` is the dimension of the embeddings (300). Now youwill input a tweet, and use cosine similarity to see which tweet in ourcorpus is similar to your tweet.
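The course-provided `cosine_similarity` is used below in a vectorized way (a whole matrix of document vectors against one query vector). A hedged sketch of such a vectorized variant (illustrative names only, the provided helper may differ) could be:

```python
import numpy as np

def cosine_similarity_rows(doc_matrix, query_vec):
    # one cosine similarity per row of doc_matrix, computed without an explicit loop
    dots = doc_matrix @ query_vec
    norms = np.linalg.norm(doc_matrix, axis=1) * np.linalg.norm(query_vec)
    return dots / norms

# idx = np.argmax(cosine_similarity_rows(document_vecs, tweet_embedding))
```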
###Code
my_tweet = 'i am sad'
process_tweet(my_tweet)
tweet_embedding = get_document_embedding(my_tweet, en_embeddings_subset)
# UNQ_C16 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything
# this gives you a similar tweet as your input.
# this implementation is vectorized...
idx = np.argmax(cosine_similarity(document_vecs, tweet_embedding))
print(all_tweets[idx])
###Output
@zoeeylim sad sad sad kid :( it's ok I help you watch the match HAHAHAHAHA
###Markdown
Expected Output```@zoeeylim sad sad sad kid :( it's ok I help you watch the match HAHAHAHAHA``` 3.3 Finding the most similar tweets with LSHYou will now implement locality sensitive hashing (LSH) to identify the most similar tweet.* Instead of looking at all 10,000 vectors, you can just search a subset to findits nearest neighbors.Let's say your data points are plotted like this: Figure 3 You can divide the vector space into regions and search within one region for nearest neighbors of a given vector. Figure 4
###Code
N_VECS = len(all_tweets) # This many vectors.
N_DIMS = len(ind2Tweet[1]) # Vector dimensionality.
print(f"Number of vectors is {N_VECS} and each has {N_DIMS} dimensions.")
###Output
Number of vectors is 10000 and each has 300 dimensions.
###Markdown
Choosing the number of planes* Each plane divides the space to $2$ parts.* So $n$ planes divide the space into $2^{n}$ hash buckets.* We want to organize 10,000 document vectors into buckets so that every bucket has about $~16$ vectors.* For that we need $\frac{10000}{16}=625$ buckets.* We're interested in $n$, number of planes, so that $2^{n}= 625$. Now, we can calculate $n=\log_{2}625 = 9.29 \approx 10$.
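As a quick sanity check of this calculation (a small illustrative snippet, not part of the graded code):

```python
import numpy as np

n_docs, target_bucket_size = 10000, 16
n_buckets = n_docs / target_bucket_size               # 625 buckets
n_planes = int(np.ceil(np.log2(n_buckets)))           # ceil(9.29) -> 10 planes
print(n_buckets, n_planes)
```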
###Code
# The number of planes. We use log2(625) to have ~16 vectors/bucket.
N_PLANES = 10
# Number of times to repeat the hashing to improve the search.
N_UNIVERSES = 25
###Output
_____no_output_____
###Markdown
3.4 Getting the hash number for a vectorFor each vector, we need to get a unique number associated to that vector in order to assign it to a "hash bucket". Hyperlanes in vector spaces* In $3$-dimensional vector space, the hyperplane is a regular plane. In $2$ dimensional vector space, the hyperplane is a line.* Generally, the hyperplane is subspace which has dimension $1$ lower than the original vector space has.* A hyperplane is uniquely defined by its normal vector.* Normal vector $n$ of the plane $\pi$ is the vector to which all vectors in the plane $\pi$ are orthogonal (perpendicular in $3$ dimensional case). Using Hyperplanes to split the vector spaceWe can use a hyperplane to split the vector space into $2$ parts.* All vectors whose dot product with a plane's normal vector is positive are on one side of the plane.* All vectors whose dot product with the plane's normal vector is negative are on the other side of the plane. Encoding hash buckets* For a vector, we can take its dot product with all the planes, then encode this information to assign the vector to a single hash bucket.* When the vector is pointing to the opposite side of the hyperplane than normal, encode it by 0.* Otherwise, if the vector is on the same side as the normal vector, encode it by 1.* If you calculate the dot product with each plane in the same order for every vector, you've encoded each vector's unique hash ID as a binary number, like [0, 1, 1, ... 0]. Exercise 09: Implementing hash bucketsWe've initialized hash table `hashes` for you. It is list of `N_UNIVERSES` matrices, each describes its own hash table. Each matrix has `N_DIMS` rows and `N_PLANES` columns. Every column of that matrix is a `N_DIMS`-dimensional normal vector for each of `N_PLANES` hyperplanes which are used for creating buckets of the particular hash table.*Exercise*: Your task is to complete the function `hash_value_of_vector` which places vector `v` in the correct hash bucket.* First multiply your vector `v`, with a corresponding plane. This will give you a vector of dimension $(1,\text{N_planes})$.* You will then convert every element in that vector to 0 or 1.* You create a hash vector by doing the following: if the element is negative, it becomes a 0, otherwise you change it to a 1.* You then compute the unique number for the vector by iterating over `N_PLANES`* Then you multiply $2^i$ times the corresponding bit (0 or 1).* You will then store that sum in the variable `hash_value`.**Intructions:** Create a hash for the vector in the function below.Use this formula:$$ hash = \sum_{i=0}^{N-1} \left( 2^{i} \times h_{i} \right) $$ Create the sets of planes* Create multiple (25) sets of planes (the planes that divide up the region).* You can think of these as 25 separate ways of dividing up the vector space with a different set of planes.* Each element of this list contains a matrix with 300 rows (the word vector have 300 dimensions), and 10 columns (there are 10 planes in each "universe").
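Before creating the planes, here is a small illustrative example (not part of the assignment code) of how a 0/1 pattern of plane-side indicators maps to a single bucket number via the formula above:

```python
h = [0, 1, 1, 0, 1, 0, 0, 0, 0, 0]                    # one bit per plane (10 planes)
hash_value = sum((2 ** i) * h_i for i, h_i in enumerate(h))
print(hash_value)                                      # 2 + 4 + 16 = 22
```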
###Code
np.random.seed(0)
planes_l = [np.random.normal(size=(N_DIMS, N_PLANES))
for _ in range(N_UNIVERSES)]
###Output
_____no_output_____
###Markdown
Hints numpy.squeeze() removes unused dimensions from an array; for instance, it converts a (10,1) 2D array into a (10,) 1D array
###Code
# UNQ_C17 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def hash_value_of_vector(v, planes):
"""Create a hash for a vector; hash_id says which random hash to use.
Input:
- v: vector of tweet. It's dimension is (1, N_DIMS)
- planes: matrix of dimension (N_DIMS, N_PLANES) - the set of planes that divide up the region
Output:
- res: a number which is used as a hash for your vector
"""
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
# for the set of planes,
# calculate the dot product between the vector and the matrix containing the planes
# remember that planes has shape (300, 10)
# The dot product will have the shape (1,10)
dot_product = np.dot(v,planes)
# get the sign of the dot product (1,10) shaped vector
sign_of_dot_product = np.sign(dot_product)
    # set h to be false (equivalent to 0 when used in operations) if the sign is negative,
# and true (equivalent to 1) if the sign is positive (1,10) shaped vector
h = sign_of_dot_product>=0
# remove extra un-used dimensions (convert this from a 2D to a 1D array)
h = np.squeeze(h)
# initialize the hash value to 0
hash_value = 0
n_planes = planes.shape[1]
for i in range(n_planes):
# increment the hash value by 2^i * h_i
hash_value += np.power(2,i) * h[i]
### END CODE HERE ###
# cast hash_value as an integer
hash_value = int(hash_value)
return hash_value
# UNQ_C18 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything
np.random.seed(0)
idx = 0
planes = planes_l[idx] # get one 'universe' of planes to test the function
vec = np.random.rand(1, 300)
print(f" The hash value for this vector,",
f"and the set of planes at index {idx},",
f"is {hash_value_of_vector(vec, planes)}")
###Output
The hash value for this vector, and the set of planes at index 0, is 768
###Markdown
Expected Output```The hash value for this vector, and the set of planes at index 0, is 768``` 3.5 Creating a hash table Exercise 10Given that you have a unique number for each vector (or tweet), You now want to create a hash table. You need a hash table, so that given a hash_id, you can quickly look up the corresponding vectors. This allows you to reduce your search by a significant amount of time. We have given you the `make_hash_table` function, which maps the tweet vectors to a bucket and stores the vector there. It returns the `hash_table` and the `id_table`. The `id_table` allows you know which vector in a certain bucket corresponds to what tweet. Hints a dictionary comprehension, similar to a list comprehension, looks like this: `{i:0 for i in range(10)}`, where the key is 'i' and the value is zero for all key-value pairs.
###Code
# UNQ_C19 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# This is the code used to create a hash table: feel free to read over it
def make_hash_table(vecs, planes):
"""
Input:
- vecs: list of vectors to be hashed.
- planes: the matrix of planes in a single "universe", with shape (embedding dimensions, number of planes).
Output:
- hash_table: dictionary - keys are hashes, values are lists of vectors (hash buckets)
- id_table: dictionary - keys are hashes, values are list of vectors id's
(it's used to know which tweet corresponds to the hashed vector)
"""
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
# number of planes is the number of columns in the planes matrix
num_of_planes = planes.shape[1]
# number of buckets is 2^(number of planes)
num_buckets = np.power(2,num_of_planes)
# create the hash table as a dictionary.
# Keys are integers (0,1,2.. number of buckets)
# Values are empty lists
hash_table = {i:[] for i in range(num_buckets)}
# create the id table as a dictionary.
# Keys are integers (0,1,2... number of buckets)
# Values are empty lists
id_table = {i:[] for i in range(num_buckets)}
# for each vector in 'vecs'
for i, v in enumerate(vecs):
# calculate the hash value for the vector
h = hash_value_of_vector(v, planes)
# store the vector into hash_table at key h,
# by appending the vector v to the list at key h
hash_table[h].append(v)
# store the vector's index 'i' (each document is given a unique integer 0,1,2...)
# the key is the h, and the 'i' is appended to the list at key h
id_table[h].append(i)
### END CODE HERE ###
return hash_table, id_table
# UNQ_C20 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything
np.random.seed(0)
planes = planes_l[0] # get one 'universe' of planes to test the function
vec = np.random.rand(1, 300)
tmp_hash_table, tmp_id_table = make_hash_table(document_vecs, planes)
print(f"The hash table at key 0 has {len(tmp_hash_table[0])} document vectors")
print(f"The id table at key 0 has {len(tmp_id_table[0])}")
print(f"The first 5 document indices stored at key 0 of are {tmp_id_table[0][0:5]}")
###Output
The hash table at key 0 has 3 document vectors
The id table at key 0 has 3
The first 5 document indices stored at key 0 of are [3276, 3281, 3282]
###Markdown
Expected output```The hash table at key 0 has 3 document vectorsThe id table at key 0 has 3The first 5 document indices stored at key 0 of are [3276, 3281, 3282]``` 3.6 Creating all hash tablesYou can now hash your vectors and store them in a hash table thatwould allow you to quickly look up and search for similar vectors.Run the cell below to create the hashes. By doing so, you end up havingseveral tables which have all the vectors. Given a vector, you thenidentify the buckets in all the tables. You can then iterate over thebuckets and consider much fewer vectors. The more buckets you use, themore accurate your lookup will be, but also the longer it will take.
###Code
# Creating the hashtables
hash_tables = []
id_tables = []
for universe_id in range(N_UNIVERSES): # there are 25 hashes
print('working on hash universe #:', universe_id)
planes = planes_l[universe_id]
hash_table, id_table = make_hash_table(document_vecs, planes)
hash_tables.append(hash_table)
id_tables.append(id_table)
###Output
working on hash universe #: 0
working on hash universe #: 1
working on hash universe #: 2
working on hash universe #: 3
working on hash universe #: 4
working on hash universe #: 5
working on hash universe #: 6
working on hash universe #: 7
working on hash universe #: 8
working on hash universe #: 9
working on hash universe #: 10
working on hash universe #: 11
working on hash universe #: 12
working on hash universe #: 13
working on hash universe #: 14
working on hash universe #: 15
working on hash universe #: 16
working on hash universe #: 17
working on hash universe #: 18
working on hash universe #: 19
working on hash universe #: 20
working on hash universe #: 21
working on hash universe #: 22
working on hash universe #: 23
working on hash universe #: 24
###Markdown
Approximate K-NN Exercise 11Implement approximate K nearest neighbors using locality sensitive hashing,to search for documents that are similar to a given document at theindex `doc_id`. Inputs* `doc_id` is the index into the document list `all_tweets`.* `v` is the document vector for the tweet in `all_tweets` at index `doc_id`.* `planes_l` is the list of planes (the global variable created earlier).* `k` is the number of nearest neighbors to search for.* `num_universes_to_use`: to save time, we can use fewer than the totalnumber of available universes. By default, it's set to `N_UNIVERSES`,which is $25$ for this assignment.The `approximate_knn` function finds a subset of candidate vectors thatare in the same "hash bucket" as the input vector 'v'. Then it performsthe usual k-nearest neighbors search on this subset (instead of searchingthrough all 10,000 tweets). Hints There are many dictionaries used in this function. Try to print out planes_l, hash_tables, id_tables to understand how they are structured, what the keys represent, and what the values contain. To remove an item from a list, use `.remove()` To append to a list, use `.append()` To add to a set, use `.add()`
###Code
# UNQ_C21 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# This is the code used to do the fast nearest neighbor search. Feel free to go over it
def approximate_knn(doc_id, v, planes_l, k=1, num_universes_to_use=N_UNIVERSES):
"""Search for k-NN using hashes."""
assert num_universes_to_use <= N_UNIVERSES
# Vectors that will be checked as possible nearest neighbor
vecs_to_consider_l = list()
# list of document IDs
ids_to_consider_l = list()
# create a set for ids to consider, for faster checking if a document ID already exists in the set
ids_to_consider_set = set()
# loop through the universes of planes
for universe_id in range(num_universes_to_use):
# get the set of planes from the planes_l list, for this particular universe_id
planes = planes_l[universe_id]
# get the hash value of the vector for this set of planes
hash_value = hash_value_of_vector(v, planes)
# get the hash table for this particular universe_id
hash_table = hash_tables[universe_id]
# get the list of document vectors for this hash table, where the key is the hash_value
document_vectors_l = hash_table[hash_value]
# get the id_table for this particular universe_id
id_table = id_tables[universe_id]
# get the subset of documents to consider as nearest neighbors from this id_table dictionary
new_ids_to_consider = id_table[hash_value]
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
# remove the id of the document that we're searching
if doc_id in new_ids_to_consider:
new_ids_to_consider.remove(doc_id)
print(f"removed doc_id {doc_id} of input vector from new_ids_to_search")
# loop through the subset of document vectors to consider
for i, new_id in enumerate(new_ids_to_consider):
# if the document ID is not yet in the set ids_to_consider...
if new_id not in ids_to_consider_set:
# access document_vectors_l list at index i to get the embedding
# then append it to the list of vectors to consider as possible nearest neighbors
                document_vector_at_i = document_vectors_l[i]
                vecs_to_consider_l.append(document_vector_at_i)
                # append the new_id (the index for the document) to the list of ids to consider
                ids_to_consider_l.append(new_id)
                # also add the new_id to the set of ids to consider
                # (use this to check if new_id is not already in the IDs to consider)
                ids_to_consider_set.add(new_id)
### END CODE HERE ###
# Now run k-NN on the smaller set of vecs-to-consider.
print("Fast considering %d vecs" % len(vecs_to_consider_l))
# convert the vecs to consider set to a list, then to a numpy array
vecs_to_consider_arr = np.array(vecs_to_consider_l)
# call nearest neighbors on the reduced list of candidate vectors
nearest_neighbor_idx_l = nearest_neighbor(v, vecs_to_consider_arr, k=k)
# Use the nearest neighbor index list as indices into the ids to consider
# create a list of nearest neighbors by the document ids
nearest_neighbor_ids = [ids_to_consider_l[idx]
for idx in nearest_neighbor_idx_l]
return nearest_neighbor_ids
#document_vecs, ind2Tweet
doc_id = 0
doc_to_search = all_tweets[doc_id]
vec_to_search = document_vecs[doc_id]
# UNQ_C22 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything
# Sample
nearest_neighbor_ids = approximate_knn(
doc_id, vec_to_search, planes_l, k=3, num_universes_to_use=5)
print(f"Nearest neighbors for document {doc_id}")
print(f"Document contents: {doc_to_search}")
print("")
for neighbor_id in nearest_neighbor_ids:
print(f"Nearest neighbor at document id {neighbor_id}")
print(f"document contents: {all_tweets[neighbor_id]}")
###Output
Nearest neighbors for document 0
Document contents: #FollowFriday @France_Inte @PKuchly57 @Milipol_Paris for being top engaged members in my community this week :)
|
4. Deep Learning/politifact_binarized_augmented/Fine_Tuning_RoBERTa_for_Truth_Classification.ipynb | ###Markdown
Install Transformers Library
###Code
!pip install transformers==3.0.2
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import transformers
from transformers import AutoModel, BertTokenizerFast, RobertaTokenizer
# specify GPU
device = torch.device("cuda")
###Output
_____no_output_____
###Markdown
Load Dataset
###Code
from google.colab import drive
drive.mount('/content/gdrive')
df=pd.read_csv('gdrive/My Drive/Licenta/Data/politifact_clean_binarized.csv')
# df=pd.read_csv('gdrive/My Drive/Licenta/Data/mafiascum_label_text.csv')
# df=pd.read_csv('gdrive/My Drive/Licenta/Data/mafiascum_label_words.csv')
df.head()
print(df[:50])
df.shape
# check class distribution
df['veracity'].value_counts(normalize = True)
###Output
_____no_output_____
###Markdown
Split train dataset into train, validation and test sets
###Code
train_text, temp_text, train_labels, temp_labels = train_test_split(df['statement'], df['veracity'],
random_state=2018,
test_size=0.3,
stratify=df['veracity'])
# we will use temp_text and temp_labels to create validation and test set
val_text, test_text, val_labels, test_labels = train_test_split(temp_text, temp_labels,
random_state=2018,
test_size=0.5,
stratify=temp_labels)
###Output
_____no_output_____
###Markdown
Import BERT Model and BERT Tokenizer
###Code
# import the RoBERTa-base pretrained model
bert = AutoModel.from_pretrained('roberta-base')
# Load the RoBERTa tokenizer
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
# sample data
text = ["this is a bert model tutorial", "we will fine-tune a bert model"]
# encode text
sent_id = tokenizer.batch_encode_plus(text, padding=True, return_token_type_ids=False)
# output
print(sent_id)
###Output
{'input_ids': [[0, 9226, 16, 10, 741, 2399, 1421, 35950, 2, 1, 1, 1], [0, 1694, 40, 2051, 12, 90, 4438, 10, 741, 2399, 1421, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}
###Markdown
Tokenization
###Code
# get length of all the messages in the train set
seq_len = [len(i.split()) for i in train_text]
pd.Series(seq_len).hist(bins = 30)
max_seq_len = 50
# tokenize and encode sequences in the training set
tokens_train = tokenizer.batch_encode_plus(
train_text.tolist(),
max_length = max_seq_len,
pad_to_max_length=True,
truncation=True,
return_token_type_ids=False
)
# tokenize and encode sequences in the validation set
tokens_val = tokenizer.batch_encode_plus(
val_text.tolist(),
max_length = max_seq_len,
pad_to_max_length=True,
truncation=True,
return_token_type_ids=False
)
# tokenize and encode sequences in the test set
tokens_test = tokenizer.batch_encode_plus(
test_text.tolist(),
max_length = max_seq_len,
pad_to_max_length=True,
truncation=True,
return_token_type_ids=False
)
###Output
_____no_output_____
###Markdown
Convert Integer Sequences to Tensors
###Code
# for train set
train_seq = torch.tensor(tokens_train['input_ids'])
train_mask = torch.tensor(tokens_train['attention_mask'])
train_y = torch.tensor(train_labels.tolist())
# for validation set
val_seq = torch.tensor(tokens_val['input_ids'])
val_mask = torch.tensor(tokens_val['attention_mask'])
val_y = torch.tensor(val_labels.tolist())
# for test set
test_seq = torch.tensor(tokens_test['input_ids'])
test_mask = torch.tensor(tokens_test['attention_mask'])
test_y = torch.tensor(test_labels.tolist())
###Output
_____no_output_____
###Markdown
Create DataLoaders
###Code
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
#define a batch size
batch_size = 48
# wrap tensors
train_data = TensorDataset(train_seq, train_mask, train_y)
# sampler for sampling the data during training
train_sampler = RandomSampler(train_data)
# dataLoader for train set
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
# wrap tensors
val_data = TensorDataset(val_seq, val_mask, val_y)
# sampler for sampling the data during training
val_sampler = SequentialSampler(val_data)
# dataLoader for validation set
val_dataloader = DataLoader(val_data, sampler = val_sampler, batch_size=batch_size)
###Output
_____no_output_____
###Markdown
Freeze BERT Parameters
###Code
# freeze all the parameters
for param in bert.parameters():
param.requires_grad = False
###Output
_____no_output_____
###Markdown
Define Model Architecture
###Code
class BERT_Arch(nn.Module):
def __init__(self, bert):
super(BERT_Arch, self).__init__()
self.bert = bert
# dropout layer
self.dropout = nn.Dropout(0.1)
# relu activation function
self.relu = nn.ReLU()
# dense layer 1
self.fc1 = nn.Linear(768,512)
# dense layer 2 (Output layer)
self.fc2 = nn.Linear(512,2)
#softmax activation function
self.softmax = nn.LogSoftmax(dim=1)
#define the forward pass
def forward(self, sent_id, mask):
#pass the inputs to the model
_, cls_hs = self.bert(sent_id, attention_mask=mask)
x = self.fc1(cls_hs)
x = self.relu(x)
x = self.dropout(x)
# output layer
x = self.fc2(x)
# apply softmax activation
x = self.softmax(x)
return x
# pass the pre-trained RoBERTa encoder to our defined architecture
model = BERT_Arch(bert)
# push the model to GPU
model = model.to(device)
# optimizer from hugging face transformers
from transformers import AdamW
# define the optimizer
optimizer = AdamW(model.parameters(), lr = 4e-5)
###Output
_____no_output_____
###Markdown
Find Class Weights
###Code
from sklearn.utils.class_weight import compute_class_weight
#compute the class weights
class_wts = compute_class_weight('balanced', np.unique(train_labels), train_labels)
print(class_wts)
# convert class weights to tensor
weights= torch.tensor(class_wts,dtype=torch.float)
weights = weights.to(device)
# loss function
cross_entropy = nn.NLLLoss(weight=weights)
# number of training epochs
epochs = 30
###Output
_____no_output_____
###Markdown
Fine-Tune BERT
###Code
# function to train the model
def train():
model.train()
total_loss, total_accuracy = 0, 0
# empty list to save model predictions
total_preds=[]
# iterate over batches
for step,batch in enumerate(train_dataloader):
# progress update after every 50 batches.
if step % 50 == 0 and not step == 0:
print(' Batch {:>5,} of {:>5,}.'.format(step, len(train_dataloader)))
# push the batch to gpu
batch = [r.to(device) for r in batch]
sent_id, mask, labels = batch
# clear previously calculated gradients
model.zero_grad()
# get model predictions for the current batch
preds = model(sent_id, mask)
# compute the loss between actual and predicted values
loss = cross_entropy(preds, labels)
# add on to the total loss
total_loss = total_loss + loss.item()
# backward pass to calculate the gradients
loss.backward()
# clip the the gradients to 1.0. It helps in preventing the exploding gradient problem
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# update parameters
optimizer.step()
# model predictions are stored on GPU. So, push it to CPU
preds=preds.detach().cpu().numpy()
# append the model predictions
total_preds.append(preds)
# compute the training loss of the epoch
avg_loss = total_loss / len(train_dataloader)
# predictions are in the form of (no. of batches, size of batch, no. of classes).
# reshape the predictions in form of (number of samples, no. of classes)
total_preds = np.concatenate(total_preds, axis=0)
#returns the loss and predictions
return avg_loss, total_preds
# function for evaluating the model
def evaluate():
print("\nEvaluating...")
# deactivate dropout layers
model.eval()
total_loss, total_accuracy = 0, 0
# empty list to save the model predictions
total_preds = []
# iterate over batches
for step,batch in enumerate(val_dataloader):
# Progress update every 50 batches.
if step % 50 == 0 and not step == 0:
# Calculate elapsed time in minutes.
# elapsed = format_time(time.time() - t0)
# Report progress.
print(' Batch {:>5,} of {:>5,}.'.format(step, len(val_dataloader)))
# push the batch to gpu
batch = [t.to(device) for t in batch]
sent_id, mask, labels = batch
# deactivate autograd
with torch.no_grad():
# model predictions
preds = model(sent_id, mask)
# compute the validation loss between actual and predicted values
loss = cross_entropy(preds,labels)
total_loss = total_loss + loss.item()
preds = preds.detach().cpu().numpy()
total_preds.append(preds)
# compute the validation loss of the epoch
avg_loss = total_loss / len(val_dataloader)
# reshape the predictions in form of (number of samples, no. of classes)
total_preds = np.concatenate(total_preds, axis=0)
return avg_loss, total_preds
###Output
_____no_output_____
###Markdown
Start Model Training
###Code
# set initial loss to infinite
best_valid_loss = float('inf')
# empty lists to store training and validation loss of each epoch
train_losses=[]
valid_losses=[]
#for each epoch
for epoch in range(epochs):
print('\n Epoch {:} / {:}'.format(epoch + 1, epochs))
#train model
train_loss, _ = train()
#evaluate model
valid_loss, _ = evaluate()
#save the best model
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), 'saved_weights.pt')
# append training and validation loss
train_losses.append(train_loss)
valid_losses.append(valid_loss)
print(f'\nTraining Loss: {train_loss:.3f}')
print(f'Validation Loss: {valid_loss:.3f}')
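# Optional sketch (not in the original notebook): visualize the loss curves collected above
import matplotlib.pyplot as plt
plt.plot(train_losses, label='training loss')
plt.plot(valid_losses, label='validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()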
###Output
Epoch 1 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.694
Validation Loss: 0.693
Epoch 2 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.693
Validation Loss: 0.693
Epoch 3 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.693
Validation Loss: 0.692
Epoch 4 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.693
Validation Loss: 0.691
Epoch 5 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.692
Validation Loss: 0.691
Epoch 6 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.691
Validation Loss: 0.692
Epoch 7 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.691
Validation Loss: 0.691
Epoch 8 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.691
Validation Loss: 0.692
Epoch 9 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.690
Validation Loss: 0.689
Epoch 10 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.690
Validation Loss: 0.689
Epoch 11 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.689
Validation Loss: 0.693
Epoch 12 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.689
Validation Loss: 0.688
Epoch 13 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.688
Validation Loss: 0.687
Epoch 14 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.688
Validation Loss: 0.687
Epoch 15 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.687
Validation Loss: 0.687
Epoch 16 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.686
Validation Loss: 0.686
Epoch 17 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.684
Validation Loss: 0.688
Epoch 18 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.685
Validation Loss: 0.685
Epoch 19 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.684
Validation Loss: 0.691
Epoch 20 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.684
Validation Loss: 0.683
Epoch 21 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.682
Validation Loss: 0.682
Epoch 22 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.683
Validation Loss: 0.682
Epoch 23 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.682
Validation Loss: 0.682
Epoch 24 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.680
Validation Loss: 0.680
Epoch 25 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.681
Validation Loss: 0.680
Epoch 26 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.680
Validation Loss: 0.680
Epoch 27 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.679
Validation Loss: 0.679
Epoch 28 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.678
Validation Loss: 0.678
Epoch 29 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.678
Validation Loss: 0.683
Epoch 30 / 30
Batch 50 of 164.
Batch 100 of 164.
Batch 150 of 164.
Evaluating...
Training Loss: 0.677
Validation Loss: 0.677
###Markdown
Load Saved Model
###Code
#load weights of best model
path = 'saved_weights.pt'
model.load_state_dict(torch.load(path))
###Output
_____no_output_____
###Markdown
Get Predictions for Test Data
###Code
# get predictions for test data
with torch.no_grad():
preds = model(test_seq.to(device), test_mask.to(device))
preds = preds.detach().cpu().numpy()
# model's performance
preds = np.argmax(preds, axis = 1)
print(classification_report(test_y, preds))
# confusion matrix
pd.crosstab(test_y, preds)
###Output
_____no_output_____ |
module2-random-forests/DS1_Tree_Ensembles_Assignment.ipynb | ###Markdown
###Code
!pip install kaggle
from google.colab import drive
drive.mount('/content/drive')
%env KAGGLE_CONFIG_DIR=/content/drive/My Drive/
# You also have to join the Titanic competition to have access to the data
!kaggle competitions download -c ds1-tree-ensembles
#!unzip train_features.csv.zip
#!unzip test_features.csv.zip
!pip install category_encoders
# Generic imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
X_train = pd.read_csv("train_features.csv")
y_train = pd.read_csv("train_labels.csv")['charged_off']
print(X_train.shape)
print(y_train.shape)
pd.set_option('display.max_columns', None) # Unlimited columns
pd.set_option('display.max_rows', None) # Unlimited rows
X_train.isnull().sum()
X_train.head()
"""
member_id 37745
emp_title 3565
emp_length 3277
url 37745
desc 37745
dti 92
mths_since_last_delinq 20881
mths_since_last_record 32170
revol_util 53
mths_since_last_major_derog 28640
annual_inc_joint 33007
dti_joint 33007
mths_since_rcnt_il 1234
il_util 6255
all_util 13
avg_cur_bal 3
bc_open_to_buy 643
bc_util 667
mo_sin_old_il_acct 1234
mths_since_recent_bc 597
mths_since_recent_bc_dlq 29995
mths_since_recent_inq 3146
mths_since_recent_revol_delinq 26574
num_tl_120dpd_2m 1010
pct_tl_nvr_dlq 1
percent_bc_gt_75 643
revol_bal_joint 33007
sec_app_earliest_cr_line 33007
sec_app_inq_last_6mths 33007
sec_app_mort_acc 33007
sec_app_open_acc 33007
sec_app_revol_util 33106
sec_app_open_act_il 33007
sec_app_num_rev_accts 33007
sec_app_chargeoff_within_12_mths 33007
sec_app_collections_12_mths_ex_med 33007
sec_app_mths_since_last_major_derog 36077
"""
# Feature Engineering
def feature_engineering(df_input):
df = df_input.copy()
df.drop(columns=['id',
'member_id',
'url',
'desc',
'mths_since_last_delinq',
'mths_since_last_record',
'mths_since_last_major_derog',
'annual_inc_joint',
'dti_joint',
'il_util',
'mths_since_recent_bc_dlq',
'mths_since_recent_revol_delinq',
'revol_bal_joint',
'sec_app_earliest_cr_line',
'sec_app_inq_last_6mths',
'sec_app_mort_acc',
'sec_app_open_acc',
'sec_app_revol_util',
'sec_app_open_act_il',
'sec_app_num_rev_accts',
'sec_app_chargeoff_within_12_mths',
'sec_app_collections_12_mths_ex_med',
'sec_app_mths_since_last_major_derog',
'num_tl_120dpd_2m', # No variance
'emp_title', # 16270 unique values
#'zip_code', # 855 unique values
#'earliest_cr_line', # 596 unique values
], inplace=True)
# term - Convert to int
def term_to_int(term_str):
return int(term_str.replace(" months",""))
df['term'] = df['term'].apply(term_to_int)
# int_rate - Convert to float
def int_rate_to_float(int_rate_str):
return float(int_rate_str.replace("%",""))
df['int_rate'] = df['int_rate'].apply(int_rate_to_float)
# emp_length - Fill NA as Unknown and use encoding
df['emp_length'].fillna("Unknown", inplace=True)
# dti - Fill NA as 0.00
df['dti'].fillna(0.00, inplace=True)
# revol_util - Convert to float
df['revol_util'].fillna("0.00%", inplace=True)
def revol_util_to_float(revol_util_str):
return float(revol_util_str.replace("%",""))
df['revol_util'] = df['revol_util'].apply(revol_util_to_float)
# mths_since_rcnt_il - Fill NA with mean
df['mths_since_rcnt_il'].fillna(df['mths_since_rcnt_il'].mean(),
inplace=True)
# all_util - Fill NA with mean
df['all_util'].fillna(df['all_util'].mean(), inplace=True)
    # bc_open_to_buy - Fill NA with median
df['bc_open_to_buy'].fillna(df['bc_open_to_buy'].median(),
inplace=True)
# bc_util - Fill NA with mean
df['bc_util'].fillna(df['bc_util'].mean(), inplace=True)
# mo_sin_old_il_acct - Fill NA with mean
df['mo_sin_old_il_acct'].fillna(df['mo_sin_old_il_acct'].mean(),
inplace=True)
# mths_since_recent_bc - Fill NA with mean
df['mths_since_recent_bc'].fillna(df['mths_since_recent_bc'].mean(),
inplace=True)
# mths_since_recent_inq - Fill NA with mean
df['mths_since_recent_inq'].fillna(df['mths_since_recent_inq'].mean(),
inplace=True)
# avg_cur_bal - Fill NA with mean
df['avg_cur_bal'].fillna(df['avg_cur_bal'].mean(),
inplace=True)
# pct_tl_nvr_dlq - Fill NA with mean
df['pct_tl_nvr_dlq'].fillna(df['pct_tl_nvr_dlq'].mean(),
inplace=True)
# percent_bc_gt_75 - Fill NA with mean
df['percent_bc_gt_75'].fillna(df['percent_bc_gt_75'].mean(),
inplace=True)
return df
X_train = feature_engineering(X_train)
import category_encoders as ce
from sklearn.model_selection import cross_val_predict
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
"""pipeline = make_pipeline(
ce.BinaryEncoder(),
DecisionTreeClassifier(max_depth=5)
)"""
pipeline = make_pipeline(
ce.BinaryEncoder(),
RandomForestClassifier(
n_estimators=100,
class_weight='balanced',
min_samples_leaf=0.005,
oob_score=True,
n_jobs=-1)
)
pipeline.fit(X_train, y_train)
# Out-of-Bag estimated score
from sklearn.metrics import roc_auc_score
y_pred_proba = pipeline.named_steps['randomforestclassifier'].oob_decision_function_[:, 1]
print('ROC AUC, Out-of-Bag estimate:', roc_auc_score(y_train, y_pred_proba))
X_test = pd.read_csv("test_features.csv")
X_test = feature_engineering(X_test)
# Use GridSearch CV for hyper parameter tuning
from sklearn.model_selection import GridSearchCV
param_grid = {
'randomforestclassifier__n_estimators': [400, 500, 600],
'randomforestclassifier__min_samples_leaf': [0.001, 0.002, 0.005]
}
gridsearch = GridSearchCV(pipeline, param_grid=param_grid, cv=3,
scoring='roc_auc', verbose=10)
gridsearch.fit(X_train, y_train)
# Best cross validation score
print('Cross Validation Score:', gridsearch.best_score_)
# Best parameters which resulted in the best score
print('Best Parameters:', gridsearch.best_params_)
sample_submission = pd.read_csv('sample_submission.csv')
submission = sample_submission.copy()
submission['charged_off'] = gridsearch.predict_proba(X_test)[:, 1]
submission.to_csv('submission-007.csv', index=False)
!pip install eli5
import eli5
from eli5.sklearn import PermutationImportance
encoder = ce.BinaryEncoder()
X_train_transformed = encoder.fit_transform(X_train)
model = RandomForestClassifier(
n_estimators=600,
class_weight='balanced',
min_samples_leaf=0.001,
n_jobs=-1)
model.fit(X_train_transformed, y_train)
permuter = PermutationImportance(model, scoring='roc_auc', n_iter=3, cv='prefit')
permuter.fit(X_train_transformed, y_train)
eli5.show_weights(permuter, top=None, feature_names=X_train_transformed.columns.tolist())
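# Possible follow-up (sketch, not part of the original notebook): keep only the features whose
# permutation importance clears a small threshold and retrain the forest on that subset.
importances = permuter.feature_importances_
selected_cols = X_train_transformed.columns[importances > 0.001]
model_selected = RandomForestClassifier(n_estimators=600, class_weight='balanced',
                                        min_samples_leaf=0.001, n_jobs=-1)
model_selected.fit(X_train_transformed[selected_cols], y_train)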
###Output
_____no_output_____ |
10. Kernel_Methods/Kernel_Methods_Subramani_Balaji_798924.ipynb | ###Markdown
Name : **Balaji Subramani** Matriculation Number : **798924** Kernel Methods (Primal vs. Dual View)In this lab we explore how kernel methods can be used on structured data as long as a kernel function can be defined on pairs of objects of data. Specifically, we will use the dynamic time-warping (DTW) kernel to perform learning on sequences. We then proceed to train a kernelized SVM with the DTW kernel on a sequence data set.
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
1. DTW KernelGiven a metric $d: X \times X \rightarrow \mathbb{R}_{\geq 0}$ on the input space $X$, the family of *DTW Kernels* is given as:$$ k_{\text{DTW}}(x, x') = e^{- \lambda d_{\text{DTW}}(x, x'; d)}, $$for sequences $x, x' \in X^+ := \bigcup_{n \geq 1}{X^n}$ of lengths $|x|$ and $|x'|$. The *DTW distance metric* $d_{\text{DTW}}$ is then given by $\gamma(|x|, |x'|)$, where the helper function $\gamma$ is defined recursively via:$$ \gamma(i, j) = \begin{cases} d(x_i, x_j') + \min\left(\gamma(i-1, j-1), \gamma(i-1, j), \gamma(i, j-1)\right) & (1 \leq i \leq |x|, \, 1 \leq j \leq |x'|), \\ \infty & i = 0 \vee j = 0, \\0 & (i, j) = (0, 0). \end{cases}$$The intuition is that $\gamma(i, j)$ is the minimum squared distance up to time $i$ and $j$. $i = |x|$ and $j = |x'|$ are edge cases in the sense that the if a sequence has ended it cannot be matched anymore (and thus the value are infinite or the result value as both have been matched).To compute $d_{\text{DTW}}$ the technique of Dynamic Programming is being used, where you store $\gamma$ in a $(|x|+1) \times (|x'|+1)$ grid.Exercise 1:Implement the function *d_DTW(x, x2, dist)*. The inputs x and x2 are the sequences to be compared and the parameter dist is a function on a pairs of points of the input space $X$ that outputs a real number (the distance between the pairs of points). Some code is given to help you dealing with the edge cases. The function is supposed to return the value of $d_{\text{DTW}}$ with the specified parameters, *not* the $k_{\text{DTW}}$.
###Code
#https://en.wikipedia.org/wiki/Dynamic_time_warping
def d_DTW(x, x2, dist):
t1, t2 = len(x), len(x2)
if x == [] and x2 == []:
return 0.0
elif (x == []) or (x2 == []):
return np.infty
dp = np.empty((t1+1, t2+1))
dp[0, 0] = 0
for i in range(1, t1+1):
dp[i, 0] = np.infty
for j in range(1, t2+1):
dp[0, j] = np.infty
# WRITE YOU CODE HERE
for i in range(1, t1+1):
for j in range(1, t2+1):
cost = dist(x[i-1], x2[j-1])
dp[i, j] = cost + np.min([dp[i-1, j ], dp[i , j-1], dp[i-1, j-1]])
return dp[t1, t2]
d_DTW([1, 2, 3, 3], [1, 2, 2], lambda a, b: 1 if a != b else 0)  # quick check with a simple 0/1 distance (d1 is only defined further below)
###Output
_____no_output_____
###Markdown
Check your solution:
###Code
try:
assert d_DTW([1, 2, 3, 3], [1, 2, 3], lambda x, y: 1 if x != y else 0) == 0.0
assert d_DTW([1, 2, 3, 4], [1, 2, 3], lambda x, y: 1 if x != y else 0) == 1.0
assert d_DTW([1, 2, 3, 2], [1, 2], lambda x, y: 1 if x != y else 0) == 1.0
assert d_DTW([], [1, 2], lambda x, y: 1 if x != y else 0) == np.infty
assert d_DTW([], [], lambda x, y: 1 if x != y else 0) == 0.0
print ("There is no error in your function!")
except AssertionError:
print ("There is an error in your function!")
###Output
There is no error in your function!
###Markdown
We define three distance functions on two values $x_1, x_2 \in X$: $d_1(x_1, x_2) = \mathbb{1}[x_1 \neq x_2]$, $d_2(x_1, x_2) = (x_1 - x_2)^2$, $d_3(x_1, x_2) = |x_1 - x_2|$, Optional: $d_4(\Delta x_i, \Delta x'_i) = (\Delta x_i - \Delta x'_i)^2$, with $$ \Delta x_i = \frac{1}{2}\left( x_i - x_{i-1} + \frac{x_{i+1} - x_{i-1}}{2}\right) $$ as *approximate derivatives of order 2*. Note that the edge cases are $\Delta x_1 = 0$ and $\Delta x_{|x|} = x_{|x|} - x_{|x|-1}$. *Hint*: It's best to map the sequences $x = (x_1, \dots, x_{|x|})$ to $\Delta x = \left(\Delta x_1, \dots, \Delta x_{|x|}\right)$ and then apply $d_2$. Exercise 2: Implement the missing distance metrics.
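The optional $d_4$ is not implemented in the cell below; one possible sketch (an assumption on my part: first map each sequence to its approximate derivatives, then let DTW use $d_2$ on the transformed values) is:

```python
import numpy as np

def delta_transform(x):
    # approximate derivatives of order 2, with the edge cases from the definition above
    x = np.asarray(x, dtype=float)
    dx = np.zeros_like(x)
    if len(x) > 1:
        dx[-1] = x[-1] - x[-2]
        for i in range(1, len(x) - 1):
            dx[i] = 0.5 * (x[i] - x[i - 1] + (x[i + 1] - x[i - 1]) / 2)
    return dx

# the DTW distance with d4 would then be d_DTW(list(delta_transform(x)), list(delta_transform(x2)), d2)
```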
###Code
def d1(x, x2):
    # indicator distance: 1 if the two values differ, 0 otherwise
    return 1.0 if x != x2 else 0.0

def d2(x, x2):
    # squared difference
    return np.square(x - x2)

def d3(x, x2):
    # absolute difference
    return np.abs(x - x2)

# quick sanity check on scalar inputs (these metrics compare single values, not whole sequences)
d1(1, 2), d2(1, 3), d3(1, 3)
###Output
_____no_output_____
###Markdown
The following code lifts the distance metrics to maps that map a given hyperparameter $\lambda$ return the corresponding kernel function $k_{\text{DTW}}$.
###Code
k1_hyp, k2_hyp, k3_hyp = [lambda lmbd: (lambda x, x2: np.exp(-lmbd * d_DTW(x, x2, d))) for d in [d1, d2, d3]]
k1 = k1_hyp(2.0)
k2 = k2_hyp(2.0)
k3 = k3_hyp(2.0)
###Output
_____no_output_____
###Markdown
The following code computes the Gram matrix $K$ with respect to the kernel $k$ (a parameter) and the data $xs$ (another parameter), see slide 28 and 29 in Kernel Methods lecture.
###Code
def build_dtw_gram_matrix(xs, x2s, k):
"""
xs: collection of sequences (vectors of possibly varying length)
x2s: the same, needed for prediction
k: a kernel function that maps two sequences of possibly different length to a real
The function returns the Gram matrix with respect to k of the data xs.
"""
t1, t2 = len(xs), len(x2s)
K = np.empty((t1, t2))
for i in range(t1):
for j in range(i, t2):
K[i, j] = k(xs[i], x2s[j])
if i < t2 and j < t1:
K[j, i] = K[i, j]
return K
build_dtw_gram_matrix([[1, 2], [2, 3]], [[1, 2, 3], [4]], k2)
###Output
_____no_output_____
###Markdown
2. Kernel SVMNow we implement the training algorithm for kernel SVMs. We adjust the ERM learning algorithm from the linear classification lab. First we are reusing the code for the $\mathcal{L}_2$-regularizer and the hinge loss.
###Code
def L2_reg(w, lbda):
return 0.5 * lbda * (np.dot(w.T, w)), lbda*w
def hinge_loss(h, y):
n = len(h)
l = np.maximum(0, np.ones(n) - y*h)
    g = -y * (l > 0)  # hinge subgradient: -y where the margin is violated (y*h < 1), 0 elsewhere
return l, g
###Output
_____no_output_____
###Markdown
Exercise 3:Adjust the old code (Lab 06) to actually learn the kernel linear regression. Note that there is a new parameter $k$ that encodes the kernel function. Note that lbda is not the $\lambda$ used in the definition of $k$, but the regularization coefficient (as before). Note also that the learning rate $\alpha$ has been renamed to $\eta$, because $\alpha$ coincides with the dual coefficients (see lecture).Also make sure to return the Gram matrix $K$ together with the weight vector $w$ (or $\alpha$), as it is costly to compute and needed for the inference.
###Code
def learn_reg_kernel_ERM(X, y, lbda, k, loss=hinge_loss, reg=L2_reg, max_iter=200, tol=0.001, eta=1., verbose=False):
"""Kernel Linear Regression (default: kernelized L_2 SVM)
X -- data, each row = instance
y -- vector of labels, n_rows(X) == y.shape[0]
lbda -- regularization coefficient lambda
k -- the kernel function
loss -- loss function, returns vector of losses (for each instance) AND the gradient
reg -- regularization function, returns reg-loss and gradient
max_iter -- max. number of iterations of gradient descent
tol -- stop if norm(gradient) < tol
eta -- learning rate
"""
num_features = X.shape[1]
g_old = None
K = build_dtw_gram_matrix(X, X, k) # MODIFY; fill in; hint: use gram matrix defined above
w = np.random.randn(K.shape[0]) # modify; hint: w has as many entries as training examples (K.shape[0])
for _ in range(max_iter):
h = np.dot(K, w) # MODIFY; hint: see slide 20,21, and 35 (primal vs. dual view)
l,lg = loss(h, y)
if verbose:
print('training loss: ' + str(np.mean(l)))
print('eta: ' + str(eta))
r,rg = reg(w, lbda)
g = lg + rg
if g_old is not None:
#eta = eta*(np.dot(g_old.T,g_old))/(np.dot((g_old - g).T, g_old)) # MODIFY
eta = eta*(np.dot(np.dot(g_old.T,K),g_old))/(np.dot((g_old - g).T, g_old))
# hint: gram matrix K changes scalar product from <x, x'> = x^T x to x^T K x
w = w - eta*g
if (np.linalg.norm(eta*g)<tol):
break
g_old = g
return w, K
###Output
_____no_output_____
###Markdown
The adjusted inference function is given as (for binary classification):
###Code
def predict(alpha, X, X_train, k):
K = build_dtw_gram_matrix(X_train, X, k)
y_pred = np.dot(K, alpha)
y_pred[y_pred >= 0] = 1
y_pred[y_pred < 0] = -1
return y_pred
###Output
_____no_output_____
###Markdown
3. DTW Kernel SVM in ActionNow we put our results from section $1$ and $2$ together to use a kernelized SVM for a classification task on sequence data.
###Code
import os
from scipy.io import loadmat # for matlab *.mat format, for modern once need to install hdf5
file_path = "laser_small.mat" # file path for multi os support
mat = loadmat(file_path)
X = mat['X']
y = mat['Y'].reshape(50)
print (X.shape, y.shape)
###Output
(50, 60) (50,)
###Markdown
We have only 50 training instances and thus only go for a simple train-test-split (we cannot afford a simple train-val-test-split). If we try several kernels, we are actually tuning a hyperparameter and thus are fitting on the test set. The solution to this problem would be the nested cross-validation procedure, which we learn in the evaluation lecture.
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
print (X_train.shape, X_test.shape)
alpha, K = learn_reg_kernel_ERM(X_train, y_train, lbda=1, k=k2, max_iter=20000, eta=1, tol=1e-3, verbose=True)
###Output
training loss: 1.2790708942897533
eta: 1
training loss: 0.42424242424242425
eta: 1
training loss: 0.5390841027436225
eta: 0.7579550781079082
training loss: 0.5757575757575758
eta: 1.0
training loss: 0.4134603356701362
eta: 0.29754494016030597
training loss: 0.45067732993010673
eta: 0.22931378401702648
training loss: 0.5757575757575758
eta: 1.0000000000000002
training loss: 0.474009553849984
eta: 0.18653804016391834
training loss: 0.4900055657389712
eta: 0.1572120183674418
training loss: 0.5757575757575758
eta: 0.9999999999999994
###Markdown
And evaluation of the model.
###Code
y_pred = predict(alpha, X_train, X_train, k2)
print ("Training Accuracy: {}".format(np.mean(y_train == y_pred)))
print ("Test Accuracy: {}".format(np.mean(y_test == predict(alpha,X_train, X_test, k2))))
print ("Shape of alpha {}".format(alpha.shape))
###Output
Training Accuracy: 0.9696969696969697
Test Accuracy: 0.7647058823529411
Shape of alpha (33,)
###Markdown
We see that the training accuracy is far better than the test accuracy. This *could* - but does not have to - mean that we are overfitting. Vary the choices of the kernel functions, regularization parameters and kernel smoothing parameters (the $\lambda$ in the definition of $k_{\text{DTW}}$). In the rest of the notebook you learn how you can draw learning curves we have discussed in the tutorial. To be able to use the helper function, the estimator needs to be wrapped in a scikit-learn conform way. You can find and use the example class KernelEstimator.
###Code
#from sklearn.learning_curve import learning_curve
from sklearn.model_selection import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1,
train_sizes=10, # list of floats that describe ratio of test data sets tried
# OR an int = # how many trials
scoring=None):
if type(train_sizes) == int:
train_sizes=np.linspace(.1, 1.0, train_sizes)
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, scoring=scoring)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
if cv is not None:
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
if cv is not None:
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
from sklearn.base import BaseEstimator
class KernelEstimator(BaseEstimator):
def __init__(self, k, lbda):
self.k = k
self.lbda = lbda
def fit(self, X, y):
self._X_train = X
self._alpha, _ = learn_reg_kernel_ERM(X, y, lbda=self.lbda, k=self.k, max_iter=20000, eta=1, tol=1e-3)
return self
def predict(self, X):
return predict(self._alpha, self._X_train, X, self.k)
def score(self, X, y):
y_pred = self.predict(X)
return np.mean(y == y_pred)
###Output
_____no_output_____
###Markdown
Exercise 4:Vary the choices of the kernel functions, regularization parameters and kernel smoothing parameters (the $\lambda$ in the definition of $k_{\text{DTW}}$).
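One way to explore these choices systematically (a rough sketch, not part of the graded solution; it reuses the kernel factories `k1_hyp`, `k2_hyp`, `k3_hyp` and the train/test split from above, and is slow because every fit rebuilds a DTW Gram matrix):

```python
results = []
for name, k_hyp in [('d1', k1_hyp), ('d2', k2_hyp), ('d3', k3_hyp)]:
    for smoothing in [0.5, 1.0, 2.0]:          # lambda inside k_DTW
        for reg in [0.1, 1.0, 10.0]:           # regularization coefficient
            est = KernelEstimator(k_hyp(smoothing), reg)
            est.fit(X_train, y_train)
            results.append((name, smoothing, reg, est.score(X_test, y_test)))
sorted(results, key=lambda r: -r[-1])[:5]
```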
###Code
estimator = KernelEstimator(k2,2) # MODIFY
estimator.fit(X_train, y_train)
print("Accuracy {}".format(estimator.score(X_train, y_train)))
#plot_learning_curve(KernelEstimator(k2, 2.0), 'Euclidean distance DTW, lambda = 1.0', X_train, y_train, cv=None, scoring="accuracy")#, train_sizes=[0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
estimator1 = KernelEstimator(k1,1.0) # MODIFY
estimator1.fit(X_train, y_train)
print("Accuracy {}".format(estimator1.score(X_test, y_test)))
estimator2 = KernelEstimator(k3,1.5) # MODIFY
estimator2.fit(X_train, y_train)
print("Train Accuracy {}".format(estimator2.score(X_train, y_train)))
print("Test Accuracy {}".format(estimator2.score(X_test, y_test)))
###Output
Train Accuracy 0.42424242424242425
Test Accuracy 0.4117647058823529
|
handon-ml2/plot_ols_in_class_05.2020.ipynb | ###Markdown
Linear Regression ExampleThis example uses only the first feature of the `diabetes` dataset, in order to illustrate a two-dimensional plot of this regression technique. The straight line can be seen in the plot, showing how linear regression attempts to draw a straight line that will best minimize the residual sum of squares between the observed responses in the dataset and the responses predicted by the linear approximation.The coefficients, the residual sum of squares and the variance score are also calculated.
###Code
from sklearn import linear_model
reg = linear_model.LinearRegression()
regLasso = linear_model.Lasso()
import random
X = [[n] for n in range(20)] # for sci-kit we need even 1-d data to be inside a list(or ndarray)
X
y_truth = [3.4*el[0]+5.2 for el in X]
y = [3.4*el[0]+5.2-3+6*random.random() for el in X] # so our underlying function is f(x) = 3.4x+5.2 plus some noise
y
import matplotlib.pyplot as plt
plt.scatter(range(20),y)
reg.fit(X,y)
regLasso.fit(X,y)
regLasso.intercept_,regLasso.coef_
# we can compare our R2 score for different algorithms and choose the one with the better fit
from sklearn.metrics import r2_score  # imported here because this cell runs before the later imports
r2_score(y_truth, regLasso.predict(X))
r2_score(y_truth, reg.predict(X))
reg.intercept_
reg.coef_
plt.scatter(range(20),y)
plt.plot(range(20),[x*reg.coef_+reg.intercept_ for x in range(20)], color = "red" )
plt.show()
# we did not have to calculate the results, we could have used predict
plt.scatter(range(20),y)
plt.plot(range(20),reg.predict(X), color = "red" )
plt.show()
reg.predict([[100]])
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
# the datasets module bundles many popular datasets: iris, wine, housing, diabetes, ...
# (type "datasets." and press Tab in the notebook to explore them)
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2] #i get third column
len(diabetes_X), diabetes_X[:5]
print(diabetes.DESCR)
sum(diabetes_X) # normalized
min(diabetes_X), max(diabetes_X)
# this is our y the measurement we are trying to predict
diabetes.target[:10]
diabetes.data.shape
diabetes.data[:10]
diabetes_X.shape
print(diabetes.DESCR)
type(diabetes)
diabetes.feature_names
import pandas as pd
df = pd.DataFrame(diabetes.data, columns = diabetes.feature_names)
df.head()
df['progress'] = diabetes.target
df.head()
df['sex'].unique()
df['sex'].sum()
df['age'].sum()
df.sum()
(df**2).sum()
len(df['age'].unique())
df['age'].sum()
diabetes.data[:10]
print(diabetes.DESCR)
diabetes_X[:5]
diList = list(diabetes_X)
diList[:5]
diList2 = [el[0] for el in diabetes_X]
diList2[:5]
diX = pd.Series(diList2)
diX.head()
diX.describe()
diabetes_X.mean()
diabetes_X.std()
diabetes_X.max(),diabetes_X.min()
# Exercise load https://www4.stat.ncsu.edu/~boos/var.select/diabetes.tab.txt
df = pd.read_csv('https://www4.stat.ncsu.edu/~boos/var.select/diabetes.tab.txt', sep='\t')
df.head()
df.Y.describe()
df.BMI.describe()
df['BMI'].describe()
df['BMI'][:-20]
# Split the data into training/testing sets
diabetes_X_train = df['BMI'][:-20]
diabetes_X_test = df['BMI'][-20:]
# Split the targets into training/testing sets
diabetes_y_train = df['Y'][:-20]
diabetes_y_test = df['Y'][-20:]
type(diabetes_X_train)
diabetes_X_train[:5]
# for small data sets we could use regular python list of lists
myX = []
for x in diabetes_X_train:
myX.append([x])
myX[:5]
[[x] for x in diabetes_X_train] # might be more readable to use a list comprehension
type(diabetes_X_train.values)
diabetes_X_train.values[:5] # just a regular 1D array but we need 2d array!
# for larger data sets we stick with numpy ndarray because of efficiency (speed and space)
# remember for single feature /column we need to add [[],[],[]]
X = diabetes_X_train.values.reshape(-1,1)
X[:5]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(X, diabetes_y_train)
regr.intercept_
regr.coef_
# thus our fitted formula is y = 10.112x - 113.804 ...
# for testing we also need to reshape our answers
X_test = diabetes_X_test.values.reshape(-1,1)
X_test[:5]
# Make predictions using the testing set
# we make our model prove its worth here! so rubber meets the road here :)
diabetes_y_pred = regr.predict(X_test)
# The coefficients
#ax + b
# a == regr.coef_
print('Coefficients: ', regr.coef_)
#ax + b
# b == regr.intercept_
regr.intercept_
type(r2_score)
print("Mean squared error: %.2f"
% mean_squared_error(diabetes_y_test, diabetes_y_pred))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(diabetes_y_test, diabetes_y_pred))
type(diabetes_y_test)
# Plot outputs
plt.scatter(X_test, diabetes_y_test, color='blue')
plt.plot(X_test, diabetes_y_pred, color='red', linewidth=3)
# plt.xticks(())
plt.xlabel("BMI")
# plt.yticks(())
plt.ylabel("Progress after year")
plt.show()
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# Make predictions using the testing set
diabetes_y_pred = regr.predict(diabetes_X_test)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% mean_squared_error(diabetes_y_test, diabetes_y_pred))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(diabetes_y_test, diabetes_y_pred))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, diabetes_y_pred, color='blue', linewidth=3)
# plt.xticks(())
# plt.yticks(())
plt.show()
print(max(X))
def plotit(X,y_test,y_pred):
plt.scatter(X, y_test, color='black')
plt.plot(X, y_pred, color='blue', linewidth=2)
xstep = np.around((max(X)-min(X))/10,decimals=0)
ystep = np.around((max(y_test)-min(y_test))/10,decimals=-1)
print(xstep,ystep)
plt.xticks(np.arange(np.around(min(X), decimals=-1), max(X)+10, xstep))
plt.yticks(np.arange(np.around(min(y_test), decimals=-1), max(y_test), ystep))
plt.show()
plotit(diabetes_X_test, diabetes_y_test, diabetes_y_pred)
plotit(X_test, diabetes_y_test, diabetes_y_pred)
bmi_y_pred = diabetes_y_pred
plotit(X_test, diabetes_y_test, bmi_y_pred)
df.columns
BP_train = df['BP'].values[:-20]
BP_train = BP_train.reshape(-1,1)
BP_train[:5]
BP_test = df['BP'].values[-20:]
BP_test = BP_test.reshape(-1,1)
BP_test[:5]
regrBP = linear_model.LinearRegression()
type(regrBP)
regrBP.fit(BP_train, diabetes_y_train)
diabetes_X_test[:5]
type(diabetes.data)
predictBP = regrBP.predict(BP_test)
len(predictBP)
plotit(BP_test, diabetes_y_test, predictBP)
X_test
X_train = df[['BMI', 'BP']][:-20]
X_train.head(),len(X_train)
type(X_train)
X_train.head()
X_test = df[['BMI', 'BP']][-20:]
len(X_test)
# Train the model using the training sets
regr.fit(X_train, diabetes_y_train)
y_pred = regr.predict(X_test)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% mean_squared_error(diabetes_y_test, y_pred))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(diabetes_y_test, y_pred))
df['BP'].describe()
df['BMI'].describe()
from sklearn.model_selection import train_test_split
# d_train, d_test = train_test_split(diab, test_size=0.2, random_state=42)
# if i want a specific column(s) only
X_train, X_test, y_train, y_test = train_test_split(diabetes.data[:,2:4], diabetes.target, test_size=0.2, random_state=42)
X_train[:5], y_train
regr.fit(X_train, y_train)
y_predict = regr.predict(X_test)
X_test[:5]
single_predict = regr.predict([[5, 27]])
single_predict
r2_score(y_test, y_predict)
mean_squared_error(y_test, y_predict)
type(train_test_split)
train_test_split
diabetes.data.shape
df.head()
df.corr()
corrdf = df.corr()
print(diabetes.DESCR)
corrdf.sort_values(['Y'],ascending=False)
# our X
inputs = df[['BMI','S5','BP','S4']]
X_train, X_test, y_train, y_test = train_test_split(inputs, df['Y'], test_size=0.2, random_state=42)
regr.fit(X_train,y_train)
y_predict = regr.predict(X_test)
r2_score(y_test, y_predict), mean_squared_error(y_test, y_predict)
def getScore(regr, X, y):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
regr.fit(X_train,y_train)
y_predict = regr.predict(X_test)
return r2_score(y_test, y_predict), mean_squared_error(y_test, y_predict)
getScore(regr, inputs, df["Y"])
print(regr )
X = inputs
y = df["Y"]
ridge = linear_model.Ridge(alpha=.5)
getScore(ridge, X, y)
reglist = [
linear_model.LinearRegression(),
linear_model.Ridge(alpha=.5),
linear_model.Lasso(alpha=0.1),
linear_model.LassoLars(alpha=.1)
]
for reg in reglist:
print(reg)
print(getScore(reg, X, y))
print('*'*40)
dfc = df.drop(['SEX'], axis=1)
dfc.head()
dfc_X = dfc.drop(['Y'], axis=1).values
dfc_X[:5]
# alternative to filter for all columns except one
dfc_Xb = dfc[[col for col in dfc.columns if col != "Y"]].values
dfc_Xb[:5]
# I avoided doing reshape(-1,1)
dfc_y = dfc[['Y']].values
dfc_y[:5]
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
dfc_X_train, dfc_X_test, dfc_y_train, dfc_y_test = train_test_split(dfc_X, dfc_y, test_size=0.2, random_state=42)
dfc_X_train.shape
dfc_y_test.shape
regressor = linear_model.LinearRegression()
regressor.fit(dfc_X_train, dfc_y_train)
regressor.coef_
regressor.intercept_
dfc_predict = regressor.predict(dfc_X_test)
mean_squared_error(dfc_y_test, dfc_predict)
# # Explained variance score: 1 is perfect prediction
# print('Variance score: %.2f' % r2_score(diabetes_y_test, diabetes_y_pred))
print(f"Variance score: {r2_score(dfc_y_test, dfc_predict)} ")
# we could test multiple regressor models with this function
def getScore(X, y, regressor=linear_model.LinearRegression(), random_state=43):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
regressor.fit(X_train, y_train)
predictions = regressor.predict(X_test)
mse = mean_squared_error(y_test, predictions)
variance = r2_score(y_test, predictions)
return mse, variance
getScore(dfc_X, dfc_y)
getScore(dfc_X, dfc_y, regressor=linear_model.LassoCV())
getScore(dfc_X, dfc_y, regressor=linear_model.LassoLars(alpha=.1))
###Output
_____no_output_____ |
_notebooks/2018-05-24-who-needs-loss-functions.ipynb | ###Markdown
"Who Needs Loss Functions? Mixing Manual and Automatic Differentiation"> "Exploring mixing manual and automatic differentiation"- toc: true- branch: master- badges: false- comments: false- categories: [deep learning, pytorch]- hide: false- search_exclude: false- image: images/blog_posts/who_needs_loss_functions.png- redirect_from: blog/who_needs The purpose of this post is to demonstrate that sometimes while using modern deep learning frameworks such as PyTorch or Tensorflow it's useful to not rely wholly on automatic differentiation.The example application I'll use is regression where the labels/targets, conditional on the input, are sampled from an exponential family distribution, and where we train the network by minimizing the negative log-likelihood of the data. I.e., we'll deal with non-linear Generalized Linear Models (GLMs), or GLMs with learned representations. This encompasses regression with squared loss, Poisson regression, and classification with cross-entropy loss, the three examples I'll use in this post.I'll show that by doing part of the backpropagation manually, we can avoid explicitly specifying a loss function, and the only thing we'll have to do to switch between label distributions is change the activation function used on the final layer. I'll use PyTorch, but the following can be achieved in TensorFlow. Setting up Synthetic DatasetsFirst we need some data. Inputs will be scalar. For regression with squared loss, we'll fit a simple sin wave (with Gaussian noise). For binary classification and Poisson regression we'll fit appropriate transformations of the same data with appropriate error distributions. (Don't worry too much about the code in this block; you can skip right ahead to the plots of the data immediately below.)
###Code
#collapse-show
import numpy as np
import matplotlib.pyplot as plt
import torch
%matplotlib inline
num_examples = 400
X = np.random.random(num_examples)
X1 = torch.unsqueeze(torch.tensor(X, dtype=torch.float32), 1)
y = np.sin(10 * X)
# Labels for regression with Gaussian noise
gaussian_regression_y = np.random.normal(loc=y, scale=0.2)
# Labels for binary classification (Categorical noise)
class_1_probabilities = 1 / (1 + np.exp(-3.5 * y))
classification_y = np.random.binomial(1, p=class_1_probabilities)
classification_y_one_hot = np.zeros((num_examples, 2))
classification_y_one_hot[np.arange(num_examples), classification_y] = 1
# Labels for Poisson regression
lambdas = 2 * np.exp(y)
poisson_regression_y = np.random.poisson(lam=lambdas)
from collections import OrderedDict
datasets = OrderedDict()
datasets['Gaussian regression'] = {'data': torch.unsqueeze(torch.tensor(gaussian_regression_y, dtype=torch.float32), 1), 'plotting_data': gaussian_regression_y}
datasets['Classification'] = {'data': torch.tensor(classification_y_one_hot, dtype=torch.float32), 'plotting_data': classification_y}
datasets['Poisson regression'] = {'data': torch.unsqueeze(torch.tensor(poisson_regression_y, dtype=torch.float32), 1), 'plotting_data': poisson_regression_y}
def plot_data(regression_type, X, y, predictions=None):
plt.scatter(X, y, s=80, label="True labels", alpha=0.2)
if predictions is not None:
if regression_type == "Classification":
predictions = np.argmax(predictions, axis=1)
plt.scatter(X, predictions, s=10, label="Predictions")
plt.xlabel("x")
plt.ylabel("y")
plt.title("{} data".format(regression_type))
plt.legend()
fig = plt.figure(figsize=(17,4.4))
for data_i, dataset_key in enumerate(datasets.keys()):
data = datasets[dataset_key]['plotting_data']
fig.add_subplot(1, 3, data_i + 1)
plot_data(dataset_key, X, data)
###Output
_____no_output_____
###Markdown
Defining the NetworkNow we'll define a simple, small feed-forward neural network with dense connectivity and ReLU activation functions. We'll use the same neural network for each of our regression problem types.
###Code
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self, output_dim=1):
super(Net, self).__init__()
self.fc1 = nn.Linear(1, 30)
self.fc2 = nn.Linear(30, 20)
self.fc3 = nn.Linear(20, output_dim)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
###Output
_____no_output_____
###Markdown
A Useful Property of Natural Exponential Family DistributionsThe Gaussian, Categorical, and Poisson distributions are all instances of the natural exponential family (a subset of the exponential family) of distributions. This means that their probability functions can be expressed as$$q(y) = h(y) \exp{(\eta \cdot y - A(\eta))} \quad ,$$where $\eta$ is called the *natural parameter* of the distribution and $A$, the *log-partition function*, simply normalizes the probability function such that it sums/integrates to $1$. For each function in the exponential family, there exists a *canonical link function* $f$ which gives the relationship between the natural parameter $\eta$ and the mean of the distribution:$$\mathbb{E}_q[y] = \sum y \cdot q(y) = f^{-1}(\eta) \quad .$$For example, for labels following a Gaussian distribution, the inverse link function is the identity function. For the Categorical distribution, it's the softmax function (in which case $\eta$ is the vector of logits). For the Poisson distribution, it's the exponential function.For each of the regression problems dealt with in this post (Gaussian, Categorical, Poisson), the label $y$ is, *conditional on the input* $x$, sampled from a natural exponential family distribution. I.e., there is some function $\eta(x)$ such that the label $y$ for input $x$ has probability function$$q(y \mid \eta(x)) \quad .$$ Often, what we want to estimate is, conditional on the input $x$, the expected value of the label $\mathbb{E}_q[y]$. Call this estimate $\hat{y}(x)$. This will be the (post-activation) output of our neural network. Suppose we use the inverse link function $f^{-1}$ as the activation function of the final layer of the network. In this case, the pre-activation final layer will be an estimate of the natural parameter, which we'll call $\hat{\eta}(x)$. (I.e., we're talking about fitting Generalised Linear Models, but where the natural parameter estimate $\hat{\eta}$ is a *nonlinear* function of the inputs.) Suppose we use the negative log-likelihood of the true labels as a loss function $L$. For a single example with input $x$ and label $y$:$$L = - \ln q(y \mid \hat{\eta}(x)) = - \ln h(y) - \hat{\eta}(x) \cdot y + A(\hat{\eta}(x)) \quad .$$ In order to do parameter updates by gradient descent, we need the derivatives of the loss with respect to the network parameters, which can be decomposed by the chain rule:$$\frac{\partial L}{\partial \theta} = \frac{\partial L}{\partial \hat{\eta}} \frac{\partial \hat{\eta}}{\partial \theta} \quad, $$where $\theta$ is a particular network parameter. For every natural exponential family label distribution, the derivative of this loss with respect to the natural parameter is the same: $$\frac{\partial L}{\partial \hat{\eta}} = \mathbb{E}_\hat{\eta}[y] - y = \hat{y} - y \quad . $$ The upshot of this is that instead of explicitly defining the loss function $L$ to be the negative log-likelihood function for the relevant label distribution and doing backpropagation from the loss, we can instead define $\partial L / \partial \hat{\eta} = \hat{y} - y$ (implicitly defining the loss by our choice of activation function on the final layer) and start backpropagation from the natural parameter estimate layer. Essentially we're doing one step of the backpropagation manually, and relying on auto-differentation for the rest. 
An Example with Gaussian Distributed LabelsIn the following code, we fit the Gaussian distributed data by explicitly specifying and minimising a mean-squared error loss function (equivalent up to irrelevant constants to the negative log-likelihood for a Gaussian target distribution). We won't worry about evaluating on a validation set.
###Code
import torch.optim as optim
torch.manual_seed(500)
net = Net()
y = datasets['Gaussian regression']['data']
optimizer = optim.SGD(net.parameters(), lr=0.2)
loss_function = nn.MSELoss()
for i in range(5000):
optimizer.zero_grad()
eta_hat = net(X1)
y_hat = eta_hat
loss = 0.5 * loss_function(y_hat, y)
loss.backward()
optimizer.step()
if i % 500 == 0:
print("Epoch: {}\tLoss: {}".format(i, loss.item()))
plot_data("Gaussian regression", X, y, y_hat.detach())
###Output
Epoch: 0 Loss: 0.23857589066028595
Epoch: 500 Loss: 0.13250748813152313
Epoch: 1000 Loss: 0.07796521484851837
Epoch: 1500 Loss: 0.047447897493839264
Epoch: 2000 Loss: 0.032297104597091675
Epoch: 2500 Loss: 0.02540348283946514
Epoch: 3000 Loss: 0.02224355936050415
Epoch: 3500 Loss: 0.02245643362402916
Epoch: 4000 Loss: 0.022122113034129143
Epoch: 4500 Loss: 0.01919456571340561
###Markdown
Compare the above to the result of running the following code, in which instead of doing backpropagation from the loss, we do backpropagation from the natural parameter prediction $\hat{\eta}$ ($\texttt{eta\_hat}$ in the code), while setting the accumulated backprop gradient explicitly to$$\frac{1}{\text{batch_size}} * (\hat{y} - y) \quad.$$Note that we don't need to specify a loss function at all in the following, and we do so only so that the loss can be reported. For optimisation purposes, the loss function has been **implicitly** set to the negative log-likelihood for the Gaussian distribution by choosing the appropriate inverse link function (the identity function, in this case).
###Code
torch.manual_seed(500)
net = Net()
optimizer = optim.SGD(net.parameters(), lr=0.2)
loss_function = nn.MSELoss()
for i in range(5000):
optimizer.zero_grad()
eta_hat = net(X1)
y_hat = eta_hat
# Specifying the loss function is not strictly necessary; it's done here so that the value can be reported
loss = 0.5 * loss_function(y_hat, y)
eta_hat.backward(1.0/num_examples * (y_hat - y))
optimizer.step()
if i % 500 == 0:
print("Epoch: {}\tLoss: {}".format(i, loss.item()))
plot_data("Gaussian regression", X, y, y_hat.detach())
###Output
Epoch: 0 Loss: 0.23857589066028595
Epoch: 500 Loss: 0.13250748813152313
Epoch: 1000 Loss: 0.07796521484851837
Epoch: 1500 Loss: 0.047447897493839264
Epoch: 2000 Loss: 0.032297104597091675
Epoch: 2500 Loss: 0.02540348283946514
Epoch: 3000 Loss: 0.02224355936050415
Epoch: 3500 Loss: 0.02245643362402916
Epoch: 4000 Loss: 0.022122113034129143
Epoch: 4500 Loss: 0.01919456571340561
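###Markdown
(Added sanity check, not part of the original post.) We can also verify numerically that seeding backprop at $\hat{\eta}$ with $(\hat{y} - y) / \text{batch size}$ produces exactly the gradients autograd would compute for the explicit Gaussian loss; the small random batch below is an assumption made only for this check.
###Code
torch.manual_seed(0)
check_net = Net()
x_check = torch.randn(8, 1)
y_check = torch.randn(8, 1)
# Gradient of the explicit 0.5 * MSE loss with respect to one weight matrix
eta = check_net(x_check)
loss = 0.5 * nn.MSELoss()(eta, y_check)
grad_auto = torch.autograd.grad(loss, check_net.fc1.weight)[0]
# Gradient obtained by manually seeding backprop at the natural parameter
check_net.zero_grad()
eta = check_net(x_check)
eta.backward(1.0 / x_check.shape[0] * (eta.detach() - y_check))
grad_manual = check_net.fc1.weight.grad
print(torch.allclose(grad_auto, grad_manual, atol=1e-6))
###Output
_____no_output_____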
###Markdown
We achieve exactly the same results as when explicitly specifying the loss function. The General CaseThe following code demonstrates how easy it is to switch between different types of regression in this way. We pass through the main loop three times, once for regression with Gaussian distributed labels, once for classification, and once for regression with Poisson distributed labels. The only differences between these cases (marked "\ \*\*\*" in the code) are:- Loading the appropriate data- Setting the network output dimension (2 for binary classification, 1 for the regression examples)- Setting the final layer activation function to be the appropriate inverse canonical link function, which implicitly sets the loss to be minimised to be the negative log-likelihood for the corresponding distribution
###Code
datasets['Gaussian regression'].update({'final layer activation': lambda x: x, 'output_dim': 1})
datasets['Classification'].update({'final layer activation': nn.Softmax(dim=1), 'output_dim': 2})
datasets['Poisson regression'].update({'final layer activation': torch.exp, 'output_dim': 1})
fig = plt.figure(figsize=(17,4.4))
for regression_type_i, regression_type in enumerate(datasets.keys()):
# *** Difference 1: data loading
y = datasets[regression_type]['data']
plotting_y = datasets[regression_type]['plotting_data']
# *** Difference 2: setting the network output dimension
net = Net(output_dim = datasets[regression_type]['output_dim'])
optimizer = optim.SGD(net.parameters(), lr=0.2)
for i in range(5000):
optimizer.zero_grad()
eta_hat = net(X1)
# *** Difference 3: The inverse of the canonical link function for the
# label distribution is used as the final layer activation function.
y_hat = datasets[regression_type]['final layer activation'](eta_hat)
# Using the appropriate activation above means that the following results in
# implicitly minimizing the negative log-likelihood of the true labels
eta_hat.backward(1.0/num_examples * (y_hat - y))
optimizer.step()
fig.add_subplot(1, 3, regression_type_i + 1)
plot_data(regression_type, X, plotting_y, y_hat.detach())
###Output
_____no_output_____ |
AIDA2_Files/Lab Activities/58090_LabNo06_Wagler.ipynb | ###Markdown
Topic 05.2: Perceptrons, Gradient Descent, and Backpropagation$_{\text{©D.J. Lopez | 2021 | Fundamentals of Machine Learning}}$ Laboratory Activity1. For the laboratory activity, obtain a dataset of your liking from a data source. Explain the purpose of the dataset and mention any publication if it is obtained from the source. Provide a needs statement and significance for the dataset.2. Identify an algorithm or method in performing a single or multiple variable classification using the Perceptron alogrithm. 3. You must re-create your Perceptron algorithm with Gradient Descent and Backpropagation using your own code in a separate Google Colab. However, you are required to observe the following:>* Enforce object-oriented programming by implementing at least two of the pillars of OOP in the entirety of the solution.* Dedicated functions for training, predicting, and evaluating the solution.* A DataFrame of the metrics of the solution* A visualization of the solution’s results. NOTES: https://github.com/dyjdlopez/fund-of-aiml/blob/main/activities/05%20-%20Classification/fund_aiml_05v1_lec2_2021.ipynb **Purpose of the Dataset** The dataset uploaded by Caner Dabakoglu in Kaggle in 2019 aims to classify if patients have heart disease or not according to the features presented on the dataset. The purpose of this dataset is to try to predict if a patient has heart disease or not **Needs statement** There are many factors to that come into play when knowing if patients have heart disease or not. Some risk factors cannot be controlled such as the age and family history. But there are also factors that can increase the likelihood of getting a heart disease e.g. High Blood Pressure and High Cholesterol. Knowing these conditions will help them know if they have a high chance of getting one. Predicting the likelihood of it happening can help them take the necessary steps in lowering the risks by changing certain factors that can be controlled in their lifestyle. **Significance** The significance of this dataset is to know if a person has heart disease or not. Knowing early on if a patient is prone to heart disease can help with stopping or mitigating the problems that the heart disease can cause. Heart disease is rampant in Americans wherein almost half of the population (47%) have at least 1 risk factors for heart disease such as high cholesterol, high blood pressure, and smoking.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import math
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import os
heartDisease = pd.read_csv("/content/heart_disease.csv")
y = heartDisease.target.values
x_data = heartDisease.drop(['target'], axis = 1)
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data)).values
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.2,random_state=0)
x_train,y_train = x_train.T,y_train.T
x_test,y_test = x_test.T,y_test.T
def initialize(dimension):
weight = np.full((dimension,1),0.01)
bias = 0.0
return weight,bias
def sigmoid(z):
sig = 1/(1+ np.exp(-z))
return sig
def propagation(weight,bias,x_train,y_train):
# FORWARD PROPAGATION
sig = sigmoid(np.dot(weight.T,x_train) + bias)
loss = -(y_train*np.log(sig) + (1-y_train)*np.log(1-sig))
cost = np.sum(loss) / x_train.shape[1]
# BACKWARD PROPAGATION
dw = np.dot(x_train,((sig-y_train).T))/x_train.shape[1]
db = np.sum(sig-y_train)/x_train.shape[1]
grads = {"Derivative Weight" : dw,
"Derivative Bias" : db}
return cost,grads
def optimize(weight,bias,x_train,y_train,learningRate,iteration, print_cost = True) :
costs = []
index = []
for i in range(iteration):
# Cost and gradient calculation
cost,grads = propagation(weight,bias,x_train,y_train)
# Retrieve derivatives from grads
weight = weight - learningRate * grads["Derivative Weight"]
bias = bias - learningRate * grads["Derivative Bias"]
costs.append(cost)
index.append(i)
if print_cost and i % 10 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
parameters = {"weight": weight,"bias": bias}
print("iteration:",iteration)
print("cost:",cost)
plt.plot(index,costs)
plt.xlabel("Number of Iteration")
plt.ylabel("Cost")
plt.show()
return parameters, grads
def predict(weight,bias,x_test):
z = np.dot(weight.T,x_test) + bias
sig = sigmoid(z)
preds = np.zeros((1,x_test.shape[1]))
for i in range(sig.shape[1]):
if sig[0,i] <= 0.5:
preds[0,i] = 0
else:
preds[0,i] = 1
return preds
def model(x_train,y_train,x_test,y_test,learningRate,iteration):
dimension = x_train.shape[0]
weight,bias = initialize(dimension)
parameters, gradients = optimize(weight,bias,x_train,y_train,learningRate,iteration)
predsTrain = predict(parameters["weight"],parameters["bias"],x_train)
predsTest = predict(parameters["weight"],parameters["bias"],x_test)
print(" Train Accuracy: {:.2f}%".format((100 - np.mean(np.abs(predsTrain - y_train))*100)))
print(" Test Accuracy: {:.2f}%".format((100 - np.mean(np.abs(predsTest - y_test))*100)))
neuronModel = model(x_train,y_train,x_test,y_test,1,300)
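# ---------------------------------------------------------------------------
# Added sketch (not the graded solution): the helper functions above wrapped in
# a class, illustrating two OOP pillars (encapsulation and abstraction) plus a
# small metrics DataFrame. Class, method and variable names here are my own.
class LogisticNeuron:
    def __init__(self, learningRate=1.0, iteration=300):
        self.learningRate = learningRate
        self.iteration = iteration
        self.weight = None
        self.bias = 0.0
    def train(self, x_tr, y_tr):
        # reuses initialize() and propagation() defined earlier in this notebook
        self.weight, self.bias = initialize(x_tr.shape[0])
        for _ in range(self.iteration):
            _, grads = propagation(self.weight, self.bias, x_tr, y_tr)
            self.weight = self.weight - self.learningRate * grads["Derivative Weight"]
            self.bias = self.bias - self.learningRate * grads["Derivative Bias"]
        return self
    def predictLabels(self, x):
        return predict(self.weight, self.bias, x)
    def evaluate(self, x, y):
        # accuracy in percent, same formula as in model() above
        return 100 - np.mean(np.abs(self.predictLabels(x) - y)) * 100
oopModel = LogisticNeuron(learningRate=1.0, iteration=300).train(x_train, y_train)
metricsDf = pd.DataFrame({"set": ["train", "test"],
                          "accuracy (%)": [oopModel.evaluate(x_train, y_train),
                                           oopModel.evaluate(x_test, y_test)]})
print(metricsDf)
# ---------------------------------------------------------------------------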
logiReg = LogisticRegression()
logiReg.fit(x_train.T,y_train.T)
sigmoid_lr = logiReg.predict(x_test.T)
c_matrix = confusion_matrix(y_test,sigmoid_lr)
sns.heatmap(c_matrix,annot=True,cmap="inferno")
plt.xlabel("Ground Truths")
plt.ylabel("Predicted")
plt.title("Confusion Matrix")
###Output
_____no_output_____ |
workspace/realtime.ipynb | ###Markdown
Official documentation:http://powietrze.gios.gov.pl/pjp/content/api
###Code
%matplotlib inline
import requests
from pandas.io.json import json_normalize
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Getting all stations:
###Code
r = requests.get('http://api.gios.gov.pl/pjp-api/rest/station/findAll')
allStations = json_normalize(r.json())
print(allStations[allStations["city.name"] == u"Gdańsk"])
###Output
addressStreet city city.commune.communeName \
27 ul. Powstańców Warszawskich NaN Gdańsk
46 ul. Leczkowa NaN Gdańsk
47 ul. Ostrzycka NaN Gdańsk
120 ul. Kaczeńce NaN Gdańsk
143 ul. Wyzwolenia NaN Gdańsk
city.commune.districtName city.commune.provinceName city.id city.name \
27 Gdańsk POMORSKIE 218.0 Gdańsk
46 Gdańsk POMORSKIE 218.0 Gdańsk
47 Gdańsk POMORSKIE 218.0 Gdańsk
120 Gdańsk POMORSKIE 218.0 Gdańsk
143 Gdańsk POMORSKIE 218.0 Gdańsk
dateEnd dateStart gegrLat gegrLon id \
27 None 1996-10-01 00:00:00 54.353336 18.635283 729
46 None 1998-10-01 00:00:00 54.380279 18.620274 736
47 None 1998-05-01 00:00:00 54.328336 18.557781 733
120 None 1996-10-01 00:00:00 54.367778 18.701111 730
143 None 1998-09-01 00:00:00 54.400833 18.657497 731
stationName
27 AM1 Gdańsk Śródmieście
46 AM8 Gdańsk Wrzeszcz
47 AM5 Gdańsk Szadółki
120 AM2 Gdańsk Stogi
143 AM3 Gdańsk Nowy Port
###Markdown
Lets see what we have in "AM5 Gdańsk Szadółki" which has id: 733
###Code
stationId = 733
r = requests.get('http://api.gios.gov.pl/pjp-api/rest/station/sensors/' + str(stationId))
sensors = json_normalize(r.json())
print(sensors)
###Output
id param.idParam param.paramCode param.paramFormula \
0 4720 8 CO CO
1 4727 3 PM10 PM10
2 4723 6 NO2 NO2
3 4725 5 O3 O3
4 4730 1 SO2 SO2
param.paramName sensorDateEnd sensorDateStart stationId
0 tlenek węgla None 1998-05-01 00:00:00 733
1 pył zawieszony PM10 None 1998-05-01 00:00:00 733
2 dwutlenek azotu None 1998-05-01 00:00:00 733
3 ozon None 1998-05-01 00:00:00 733
4 dwutlenek siarki None 1998-05-01 00:00:00 733
###Markdown
Lets now see data about O3 concentration - sensorId = 4725
###Code
sensorId = 4725
r = requests.get('http://api.gios.gov.pl/pjp-api/rest/data/getData/' + str(sensorId))
concentration = json_normalize(r.json())
concentrationFrame = pd.DataFrame()
concentrationFrame["dates"] = [d[u'date'] for d in concentration["values"].values.item()]
concentrationFrame["values"] = [d[u'value'] for d in concentration["values"].values.item()]
concentrationFrame.set_index(["dates"], inplace=True)
#concentrationFrame.sort_index(inplace=True)
# We cannot sort index, because it is not unique. There is 12 hours notation used, but without AM/PM distinction ;(
# But we can just reverse it until API will be fixed
concentrationFrame = concentrationFrame.iloc[::-1]
print(concentrationFrame)
concentrationFrame.plot(figsize=(15,5), grid=True)
###Output
_____no_output_____
###Markdown
And overall air quality index for the same station
###Code
r = requests.get('http://api.gios.gov.pl/pjp-api/rest/aqindex/getIndex/' + str(stationId))
r.json()
allStations
stationsId = allStations["id"]
sid = list(stationsId)
allsensors = pd.DataFrame()
for station in sid:
print(station)
sensorlist = json_normalize(requests.get('http://api.gios.gov.pl/pjp-api/rest/station/sensors/' + str(station)).json())
print(sensorlist)
print("--------------------------")
allsensors = allsensors.append(sensorlist)
#if station == 9000:
# break
allsensors
allStations[["id", "gegrLat", "gegrLon"]]
finalData = pd.merge(allsensors, allStations[["id", "gegrLat", "gegrLon"]], how='inner', left_on="stationId", right_on="id")
def get_latest_measurement(sensorId):
concentration = json_normalize(requests.get('http://api.gios.gov.pl/pjp-api/rest/data/getData/' + str(sensorId)).json())
concentrationFrame = pd.DataFrame()
concentrationFrame["dates"] = [d[u'date'] for d in concentration["values"].values.item()]
concentrationFrame["values"] = [d[u'value'] for d in concentration["values"].values.item()]
concentrationFrame["dates"] = pd.to_datetime(concentrationFrame["dates"])
#print(concentrationFrame[concentrationFrame["dates"] == previousHourStr]["values"])
try:
return_value = concentrationFrame[concentrationFrame["dates"] == previousHourStr]["values"].item()
except ValueError:
return_value = np.NaN
#print(return_value)
return return_value
import datetime
previousHour = datetime.datetime.now() - datetime.timedelta(hours = 1)
previousHourStr = previousHour.strftime('%Y-%m-%d %H:00:00')
# previousHourStr must exist before calling the function, since it is used as a global inside it
get_latest_measurement(sensorId=sensorId)
# scratch check of the lookup logic (the "dates" column must be parsed with pd.to_datetime first):
# concentrationFrame[pd.to_datetime(concentrationFrame["dates"]) == previousHourStr]["values"].item()
from tqdm import tqdm, tqdm_pandas
tqdm_pandas(tqdm())
finalData["value"] = finalData["id_x"].progress_map(get_latest_measurement)
finalData
###Output
_____no_output_____ |
MovieLensDataExploration/MovieLens Project Questions.ipynb | ###Markdown
`Project - MovieLens Data Analysis`The GroupLens Research Project is a research group in the Department of Computer Science and Engineering at the University of Minnesota. The data is widely used for collaborative filtering and other filtering solutions. However, we will be using this data to act as a means to demonstrate our skill in using Python to “play” with data. `Objective:`- To implement the techniques learnt as a part of the course. `Learning Outcomes:`- Exploratory Data Analysis- Visualization using Python- Pandas – groupby, merging `Domain` - Internet and Entertainment**Note that the project will need you to apply the concepts of groupby and merging extensively.** `Datasets Information:`*rating.csv:* It contains information on ratings given by the users to a particular movie.- user id: id assigned to every user- movie id: id assigned to every movie- rating: rating given by the user- timestamp: Time recorded when the user gave a rating*movie.csv:* File contains information related to the movies and their genre.- movie id: id assigned to every movie- movie title: Title of the movie- release date: Date of release of the movie- Action: Genre containing binary values (1 - for action 0 - not action)- Adventure: Genre containing binary values (1 - for adventure 0 - not adventure)- Animation: Genre containing binary values (1 - for animation 0 - not animation)- Children’s: Genre containing binary values (1 - for children's 0 - not children's)- Comedy: Genre containing binary values (1 - for comedy 0 - not comedy)- Crime: Genre containing binary values (1 - for crime 0 - not crime)- Documentary: Genre containing binary values (1 - for documentary 0 - not documentary)- Drama: Genre containing binary values (1 - for drama 0 - not drama)- Fantasy: Genre containing binary values (1 - for fantasy 0 - not fantasy)- Film-Noir: Genre containing binary values (1 - for film-noir 0 - not film-noir)- Horror: Genre containing binary values (1 - for horror 0 - not horror)- Musical: Genre containing binary values (1 - for musical 0 - not musical)- Mystery: Genre containing binary values (1 - for mystery 0 - not mystery)- Romance: Genre containing binary values (1 - for romance 0 - not romance)- Sci-Fi: Genre containing binary values (1 - for sci-fi 0 - not sci-fi)- Thriller: Genre containing binary values (1 - for thriller 0 - not thriller)- War: Genre containing binary values (1 - for war 0 - not war)- Western: Genre containing binary values (1 - for western - not western)*user.csv:* It contains information of the users who have rated the movies.- user id: id assigned to every user- age: Age of the user- gender: Gender of the user- occupation: Occupation of the user- zip code: Zip code of the use**`Please provide you insights wherever necessary.`** 1. Import the necessary packages - 2.5 marks
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
2. Read the 3 datasets into dataframes - 2.5 marks
###Code
data = pd.read_csv('Data.csv')
item_data = pd.read_csv('item.csv')
user_data = pd.read_csv('user.csv')
###Output
_____no_output_____
###Markdown
3. Apply info, shape, describe, and find the number of missing values in the data - 5 marks - Note that you will need to do it for all the three datasets separately
###Code
print('Info of data is: ')
data.info()
print('\n')
print('Shape of data is: ')
print(data.shape)
print('\n')
print('Description of data is: ')
print(data.describe())
print('\n')
print('No. of missing values in each column in the data is: ')
print(data.isnull().sum())
print('\n')
print('No. of missing values in all columns: ')
print(sum(data.isnull().sum()))
print('\n')
print('Info of item_data is: ')
item_data.info()
print('\n')
print('Shape of item_data is: ')
print(item_data.shape)
print('\n')
print('Description of item_data is: ')
print(item_data.describe())
print('\n')
print('No. of missing values in each column in the item_data is: ')
print(item_data.isnull().sum())
print('\n')
print('No. of missing values in all columns: ')
print(sum(item_data.isnull().sum()))
print('\n')
print('Info of user_data is: ')
user_data.info()
print('\n')
print('Shape of user_data is: ')
print(user_data.shape)
print('\n')
print('Description of user_data is: ')
print(user_data.describe())
print('\n')
print('No. of missing values in each column in the user_data is: ')
print(user_data.isnull().sum())
print('\n')
print('No. of missing values in all columns: ')
print(sum(user_data.isnull().sum()))
print('\n')
###Output
Info of data is:
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 100000 entries, 0 to 99999
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 user id 100000 non-null int64
1 movie id 100000 non-null int64
2 rating 100000 non-null int64
3 timestamp 100000 non-null int64
dtypes: int64(4)
memory usage: 3.1 MB
Shape of data is:
(100000, 4)
Description of data is:
user id movie id rating timestamp
count 100000.00000 100000.000000 100000.000000 1.000000e+05
mean 462.48475 425.530130 3.529860 8.835289e+08
std 266.61442 330.798356 1.125674 5.343856e+06
min 1.00000 1.000000 1.000000 8.747247e+08
25% 254.00000 175.000000 3.000000 8.794487e+08
50% 447.00000 322.000000 4.000000 8.828269e+08
75% 682.00000 631.000000 4.000000 8.882600e+08
max 943.00000 1682.000000 5.000000 8.932866e+08
No. of missing values in each column in the data is:
user id 0
movie id 0
rating 0
timestamp 0
dtype: int64
No. of missing values in all columns:
0
Info of item_data is:
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1681 entries, 0 to 1680
Data columns (total 22 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 movie id 1681 non-null int64
1 movie title 1681 non-null object
2 release date 1681 non-null object
3 unknown 1681 non-null int64
4 Action 1681 non-null int64
5 Adventure 1681 non-null int64
6 Animation 1681 non-null int64
7 Childrens 1681 non-null int64
8 Comedy 1681 non-null int64
9 Crime 1681 non-null int64
10 Documentary 1681 non-null int64
11 Drama 1681 non-null int64
12 Fantasy 1681 non-null int64
13 Film-Noir 1681 non-null int64
14 Horror 1681 non-null int64
15 Musical 1681 non-null int64
16 Mystery 1681 non-null int64
17 Romance 1681 non-null int64
18 Sci-Fi 1681 non-null int64
19 Thriller 1681 non-null int64
20 War 1681 non-null int64
21 Western 1681 non-null int64
dtypes: int64(20), object(2)
memory usage: 289.0+ KB
Shape of item_data is:
(1681, 22)
Description of item_data is:
movie id unknown Action Adventure Animation \
count 1681.000000 1681.000000 1681.000000 1681.000000 1681.000000
mean 841.841761 0.000595 0.149316 0.080309 0.024985
std 485.638077 0.024390 0.356506 0.271852 0.156126
min 1.000000 0.000000 0.000000 0.000000 0.000000
25% 422.000000 0.000000 0.000000 0.000000 0.000000
50% 842.000000 0.000000 0.000000 0.000000 0.000000
75% 1262.000000 0.000000 0.000000 0.000000 0.000000
max 1682.000000 1.000000 1.000000 1.000000 1.000000
Childrens Comedy Crime Documentary Drama \
count 1681.000000 1681.000000 1681.000000 1681.000000 1681.000000
mean 0.072576 0.300416 0.064842 0.029744 0.431291
std 0.259516 0.458576 0.246321 0.169931 0.495404
min 0.000000 0.000000 0.000000 0.000000 0.000000
25% 0.000000 0.000000 0.000000 0.000000 0.000000
50% 0.000000 0.000000 0.000000 0.000000 0.000000
75% 0.000000 1.000000 0.000000 0.000000 1.000000
max 1.000000 1.000000 1.000000 1.000000 1.000000
Fantasy Film-Noir Horror Musical Mystery \
count 1681.000000 1681.000000 1681.000000 1681.000000 1681.000000
mean 0.013087 0.014277 0.054729 0.033314 0.036288
std 0.113683 0.118667 0.227519 0.179507 0.187061
min 0.000000 0.000000 0.000000 0.000000 0.000000
25% 0.000000 0.000000 0.000000 0.000000 0.000000
50% 0.000000 0.000000 0.000000 0.000000 0.000000
75% 0.000000 0.000000 0.000000 0.000000 0.000000
max 1.000000 1.000000 1.000000 1.000000 1.000000
Romance Sci-Fi Thriller War Western
count 1681.000000 1681.000000 1681.000000 1681.000000 1681.000000
mean 0.146936 0.060083 0.149316 0.042237 0.016062
std 0.354148 0.237712 0.356506 0.201189 0.125751
min 0.000000 0.000000 0.000000 0.000000 0.000000
25% 0.000000 0.000000 0.000000 0.000000 0.000000
50% 0.000000 0.000000 0.000000 0.000000 0.000000
75% 0.000000 0.000000 0.000000 0.000000 0.000000
max 1.000000 1.000000 1.000000 1.000000 1.000000
No. of missing values in each column in the item_data is:
movie id 0
movie title 0
release date 0
unknown 0
Action 0
Adventure 0
Animation 0
Childrens 0
Comedy 0
Crime 0
Documentary 0
Drama 0
Fantasy 0
Film-Noir 0
Horror 0
Musical 0
Mystery 0
Romance 0
Sci-Fi 0
Thriller 0
War 0
Western 0
dtype: int64
No. of missing values in all columns:
0
Info of user_data is:
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 943 entries, 0 to 942
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 user id 943 non-null int64
1 age 943 non-null int64
2 gender 943 non-null object
3 occupation 943 non-null object
4 zip code 943 non-null object
dtypes: int64(2), object(3)
memory usage: 37.0+ KB
Shape of user_data is:
(943, 5)
Description of user_data is:
user id age
count 943.000000 943.000000
mean 472.000000 34.051962
std 272.364951 12.192740
min 1.000000 7.000000
25% 236.500000 25.000000
50% 472.000000 31.000000
75% 707.500000 43.000000
max 943.000000 73.000000
No. of missing values in each column in the user_data is:
user id 0
age 0
gender 0
occupation 0
zip code 0
dtype: int64
No. of missing values in all columns:
0
###Markdown
4. Find the number of movies per genre using the item data - 2.5 marks
###Code
for genre in item_data.columns[3:]:
print('The no. of movies under ', genre, ' is: ', item_data[genre].sum())
###Output
The no. of movies under unknown is: 1
The no. of movies under Action is: 251
The no. of movies under Adventure is: 135
The no. of movies under Animation is: 42
The no. of movies under Childrens is: 122
The no. of movies under Comedy is: 505
The no. of movies under Crime is: 109
The no. of movies under Documentary is: 50
The no. of movies under Drama is: 725
The no. of movies under Fantasy is: 22
The no. of movies under Film-Noir is: 24
The no. of movies under Horror is: 92
The no. of movies under Musical is: 56
The no. of movies under Mystery is: 61
The no. of movies under Romance is: 247
The no. of movies under Sci-Fi is: 101
The no. of movies under Thriller is: 251
The no. of movies under War is: 71
The no. of movies under Western is: 27
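###Markdown
(Optional sketch.) The same counts can be obtained in one line by summing the one-hot genre columns directly:
###Code
item_data.iloc[:, 3:].sum()
###Output
_____no_output_____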
###Markdown
5. Drop the movie where the genre is unknown - 2.5 marks
###Code
df5 = item_data.drop(item_data[item_data['unknown'] == 1].index, inplace = False)
print('Length of original data: ', len(item_data), ' and length data after deleting unknown genres is: ', len(df5))
###Output
Length of original data: 1681 and length data after deleting unknown genres is: 1680
###Markdown
6. Find the movies that have more than one genre - 5 marks. Hint: use sum on axis = 1. Display the movie name and number of genres for each such movie in a dataframe, and also print the total number of movies which have more than one genre.
###Code
movie_count = len(item_data)
movie_multiple_genres = 0
for movie_index in range(movie_count):
movie_genre_count = item_data.iloc[movie_index:movie_index+1, 3:].sum(axis=1).array[0]
if movie_genre_count > 1:
movie_multiple_genres = movie_multiple_genres + 1
print('The movie ', item_data['movie title'][movie_index], 'has ', movie_genre_count , ' genres')
print('No. of movies with more than one genre: ', movie_multiple_genres)
###Output
The movie Toy Story has 3 genres
The movie GoldenEye has 3 genres
The movie Four Rooms has 1 genres
The movie Get Shorty has 3 genres
The movie Copycat has 3 genres
The movie Shanghai Triad (Yao a yao yao dao waipo qiao) has 1 genres
The movie Twelve Monkeys has 2 genres
The movie Babe has 3 genres
The movie Dead Man Walking has 1 genres
The movie Richard III has 2 genres
The movie Seven (Se7en) has 2 genres
The movie Usual Suspects, The has 2 genres
The movie Mighty Aphrodite has 1 genres
The movie Postino, Il has 2 genres
The movie Mr. Holland's Opus has 1 genres
The movie French Twist (Gazon maudit) has 2 genres
The movie From Dusk Till Dawn has 5 genres
The movie White Balloon, The has 1 genres
The movie Antonia's Line has 1 genres
The movie Angels and Insects has 2 genres
The movie Muppet Treasure Island has 5 genres
The movie Braveheart has 3 genres
The movie Taxi Driver has 2 genres
The movie Rumble in the Bronx has 3 genres
The movie Birdcage, The has 1 genres
The movie Brothers McMullen, The has 1 genres
The movie Bad Boys has 1 genres
The movie Apollo 13 has 3 genres
The movie Batman Forever has 4 genres
The movie Belle de jour has 1 genres
The movie Crimson Tide has 3 genres
The movie Crumb has 1 genres
The movie Desperado has 3 genres
The movie Doom Generation, The has 2 genres
The movie Free Willy 2: The Adventure Home has 3 genres
The movie Mad Love has 2 genres
The movie Nadja has 1 genres
The movie Net, The has 2 genres
The movie Strange Days has 3 genres
The movie To Wong Foo, Thanks for Everything! Julie Newmar has 1 genres
The movie Billy Madison has 1 genres
The movie Clerks has 1 genres
The movie Disclosure has 2 genres
The movie Dolores Claiborne has 2 genres
The movie Eat Drink Man Woman has 2 genres
The movie Exotica has 1 genres
The movie Ed Wood has 2 genres
The movie Hoop Dreams has 1 genres
The movie I.Q. has 2 genres
The movie Star Wars has 5 genres
The movie Legends of the Fall has 4 genres
The movie Madness of King George, The has 1 genres
The movie Natural Born Killers has 2 genres
The movie Outbreak has 3 genres
The movie Professional, The has 4 genres
The movie Pulp Fiction has 2 genres
The movie Priest has 1 genres
The movie Quiz Show has 1 genres
The movie Three Colors: Red has 1 genres
The movie Three Colors: Blue has 1 genres
The movie Three Colors: White has 1 genres
The movie Stargate has 3 genres
The movie Santa Clause, The has 2 genres
The movie Shawshank Redemption, The has 1 genres
The movie What's Eating Gilbert Grape has 2 genres
The movie While You Were Sleeping has 2 genres
The movie Ace Ventura: Pet Detective has 1 genres
The movie Crow, The has 3 genres
The movie Forrest Gump has 3 genres
The movie Four Weddings and a Funeral has 2 genres
The movie Lion King, The has 3 genres
The movie Mask, The has 3 genres
The movie Maverick has 3 genres
The movie Faster Pussycat! Kill! Kill! has 3 genres
The movie Brother Minister: The Assassination of Malcolm X has 1 genres
The movie Carlito's Way has 2 genres
The movie Firm, The has 2 genres
The movie Free Willy has 3 genres
The movie Fugitive, The has 2 genres
The movie Hot Shots! Part Deux has 3 genres
The movie Hudsucker Proxy, The has 2 genres
The movie Jurassic Park has 3 genres
The movie Much Ado About Nothing has 2 genres
The movie Robert A. Heinlein's The Puppet Masters has 2 genres
The movie Ref, The has 1 genres
The movie Remains of the Day, The has 1 genres
The movie Searching for Bobby Fischer has 1 genres
The movie Sleepless in Seattle has 2 genres
The movie Blade Runner has 2 genres
The movie So I Married an Axe Murderer has 3 genres
The movie Nightmare Before Christmas, The has 3 genres
The movie True Romance has 3 genres
The movie Welcome to the Dollhouse has 2 genres
The movie Home Alone has 2 genres
The movie Aladdin has 4 genres
The movie Terminator 2: Judgment Day has 3 genres
The movie Dances with Wolves has 3 genres
The movie Silence of the Lambs, The has 2 genres
The movie Snow White and the Seven Dwarfs has 3 genres
The movie Fargo has 3 genres
The movie Heavy Metal has 5 genres
The movie Aristocats, The has 2 genres
The movie All Dogs Go to Heaven 2 has 3 genres
The movie Theodore Rex has 1 genres
The movie Sgt. Bilko has 1 genres
The movie Diabolique has 2 genres
The movie Moll Flanders has 1 genres
The movie Kids in the Hall: Brain Candy has 1 genres
The movie Mystery Science Theater 3000: The Movie has 2 genres
The movie Operation Dumbo Drop has 4 genres
The movie Truth About Cats & Dogs, The has 2 genres
The movie Flipper has 2 genres
The movie Horseman on the Roof, The (Hussard sur le toit, Le) has 1 genres
The movie Wallace & Gromit: The Best of Aardman Animation has 1 genres
The movie Haunted World of Edward D. Wood Jr., The has 1 genres
The movie Cold Comfort Farm has 1 genres
The movie Rock, The has 3 genres
The movie Twister has 3 genres
The movie Maya Lin: A Strong Clear Vision has 1 genres
The movie Striptease has 2 genres
The movie Independence Day (ID4) has 3 genres
The movie Cable Guy, The has 1 genres
The movie Frighteners, The has 2 genres
The movie Lone Star has 2 genres
The movie Phenomenon has 2 genres
The movie Spitfire Grill, The has 1 genres
The movie Godfather, The has 3 genres
The movie Supercop has 2 genres
The movie Bound has 4 genres
The movie Kansas City has 1 genres
The movie Breakfast at Tiffany's has 2 genres
The movie Wizard of Oz, The has 4 genres
The movie Gone with the Wind has 3 genres
The movie Citizen Kane has 1 genres
The movie 2001: A Space Odyssey has 4 genres
The movie Mr. Smith Goes to Washington has 1 genres
The movie Big Night has 1 genres
The movie D3: The Mighty Ducks has 2 genres
The movie Love Bug, The has 2 genres
The movie Homeward Bound: The Incredible Journey has 2 genres
The movie 20,000 Leagues Under the Sea has 4 genres
The movie Bedknobs and Broomsticks has 3 genres
The movie Sound of Music, The has 1 genres
The movie Die Hard has 2 genres
The movie Lawnmower Man, The has 3 genres
The movie Unhook the Stars has 1 genres
The movie Long Kiss Goodnight, The has 2 genres
The movie Ghost and the Darkness, The has 2 genres
The movie Jude has 1 genres
The movie Swingers has 2 genres
The movie Willy Wonka and the Chocolate Factory has 3 genres
The movie Sleeper has 2 genres
The movie Fish Called Wanda, A has 1 genres
The movie Monty Python's Life of Brian has 1 genres
The movie Dirty Dancing has 2 genres
The movie Reservoir Dogs has 2 genres
The movie Platoon has 2 genres
The movie Weekend at Bernie's has 1 genres
The movie Basic Instinct has 2 genres
The movie Glengarry Glen Ross has 1 genres
The movie Top Gun has 2 genres
The movie On Golden Pond has 1 genres
The movie Return of the Pink Panther, The has 1 genres
The movie Abyss, The has 4 genres
The movie Jean de Florette has 1 genres
The movie Manon of the Spring (Manon des sources) has 1 genres
The movie Private Benjamin has 1 genres
The movie Monty Python and the Holy Grail has 1 genres
The movie Wrong Trousers, The has 2 genres
The movie Cinema Paradiso has 3 genres
The movie Delicatessen has 2 genres
The movie Empire Strikes Back, The has 6 genres
The movie Princess Bride, The has 4 genres
The movie Raiders of the Lost Ark has 2 genres
The movie Brazil has 1 genres
The movie Aliens has 4 genres
The movie Good, The Bad and The Ugly, The has 2 genres
The movie 12 Angry Men has 1 genres
The movie Clockwork Orange, A has 1 genres
The movie Apocalypse Now has 2 genres
The movie Return of the Jedi has 5 genres
The movie GoodFellas has 2 genres
The movie Alien has 4 genres
The movie Army of Darkness has 5 genres
The movie Psycho has 3 genres
The movie Blues Brothers, The has 3 genres
The movie Godfather: Part II, The has 3 genres
The movie Full Metal Jacket has 3 genres
The movie Grand Day Out, A has 2 genres
The movie Henry V has 2 genres
The movie Amadeus has 2 genres
The movie Raging Bull has 1 genres
The movie Right Stuff, The has 1 genres
The movie Sting, The has 2 genres
The movie Terminator, The has 3 genres
The movie Dead Poets Society has 1 genres
The movie Graduate, The has 2 genres
The movie Nikita (La Femme Nikita) has 1 genres
The movie Bridge on the River Kwai, The has 2 genres
The movie Shining, The has 1 genres
The movie Evil Dead II has 4 genres
The movie Groundhog Day has 2 genres
The movie Unforgiven has 1 genres
The movie Back to the Future has 2 genres
The movie Patton has 2 genres
The movie Akira has 4 genres
The movie Cyrano de Bergerac has 3 genres
The movie Young Frankenstein has 2 genres
The movie This Is Spinal Tap has 3 genres
The movie Indiana Jones and the Last Crusade has 2 genres
The movie M*A*S*H has 2 genres
The movie Unbearable Lightness of Being, The has 1 genres
The movie Room with a View, A has 2 genres
The movie Pink Floyd - The Wall has 3 genres
The movie Field of Dreams has 1 genres
The movie When Harry Met Sally... has 2 genres
The movie Bram Stoker's Dracula has 2 genres
The movie Cape Fear has 1 genres
The movie Nightmare on Elm Street, A has 1 genres
The movie Mirror Has Two Faces, The has 2 genres
The movie Breaking the Waves has 1 genres
The movie Star Trek: First Contact has 3 genres
The movie Sling Blade has 2 genres
The movie Ridicule has 1 genres
The movie 101 Dalmatians has 2 genres
The movie Die Hard 2 has 2 genres
The movie Star Trek VI: The Undiscovered Country has 3 genres
The movie Star Trek: The Wrath of Khan has 3 genres
The movie Star Trek III: The Search for Spock has 3 genres
The movie Star Trek IV: The Voyage Home has 3 genres
The movie Batman Returns has 4 genres
The movie Young Guns has 3 genres
The movie Under Siege has 1 genres
The movie Jaws has 2 genres
The movie Mars Attacks! has 4 genres
The movie Citizen Ruth has 2 genres
The movie Jerry Maguire has 2 genres
The movie Raising Arizona has 1 genres
The movie Sneakers has 3 genres
The movie Beavis and Butt-head Do America has 2 genres
The movie Last of the Mohicans, The has 3 genres
The movie Kolya has 1 genres
The movie Jungle2Jungle has 2 genres
The movie Smilla's Sense of Snow has 3 genres
The movie Devil's Own, The has 4 genres
The movie Chasing Amy has 2 genres
The movie Turbo: A Power Rangers Movie has 3 genres
The movie Grosse Pointe Blank has 2 genres
The movie Austin Powers: International Man of Mystery has 1 genres
The movie Fifth Element, The has 2 genres
The movie Shall We Dance? has 1 genres
The movie Lost World: Jurassic Park, The has 4 genres
The movie Pillow Book, The has 2 genres
The movie Batman & Robin has 3 genres
The movie My Best Friend's Wedding has 2 genres
The movie When the Cats Away (Chacun cherche son chat) has 2 genres
The movie Men in Black has 4 genres
The movie Contact has 2 genres
The movie George of the Jungle has 2 genres
The movie Event Horizon has 4 genres
The movie Air Bud has 2 genres
The movie In the Company of Men has 1 genres
The movie Steel has 1 genres
The movie Mimic has 2 genres
The movie Hunt for Red October, The has 2 genres
The movie Kull the Conqueror has 2 genres
###Markdown
7. Univariate plots of columns: 'rating', 'Age', 'release year', 'Gender' and 'Occupation' - 10 marks*HINT: Use distplot for age. Use lineplot or countplot for release year.**HINT: Plot percentages in y-axis and categories in x-axis for ratings, gender and occupation**HINT: Please refer to the below snippet to understand how to get to release year from release date. You can use str.split() as depicted below or you could convert it to pandas datetime format and extract year (.dt.year)*
###Code
a = 'My*cat*is*brown'
print(a.split('*')[3])
#similarly, the release year needs to be taken out from release date
#also you can simply slice existing string to get the desired data, if we want to take out the colour of the cat
print(a[10:])
print(a[-5:])
#distplot for age
sns.distplot(user_data['age']);
#Count plot for release date
release_date_clone = item_data['release date']
df7_release_year = release_date_clone.to_frame()
#df1 = df1.apply(pd.to_datetime)
df7_release_year['year'] = df7_release_year['release date'].apply(lambda x: pd.to_numeric(x.split('-')[2]))
plt.figure(figsize=(30,10))
sns.countplot(x='year', data=df7_release_year);
#Percentages in y-axis and ratings in x-axis
total_ratings = data['rating'].count()
df_ratings = data.groupby('rating').agg({'rating': 'count'})
df_ratings.rename(columns={'rating': 'rating_count'}, inplace=True)
df_ratings.reset_index(inplace=True)
df_ratings['p'] = df_ratings['rating_count']
df_ratings['p'] = df_ratings['p'].apply(lambda n: (n/total_ratings) * 100)
df_ratings
sns.lineplot(data=df_ratings, x="rating", y="p");
#Percentages in y-axis and gender in x-axis
total_users = user_data['gender'].count()
df_gender = user_data.groupby('gender').agg({'gender': 'count'})
df_gender.rename(columns={'gender': 'gender_count'}, inplace=True)
df_gender.reset_index(inplace=True)
df_gender['p'] = df_gender['gender_count']
df_gender['p'] = df_gender['p'].apply(lambda n: (n/total_users) * 100)
sns.lineplot(data=df_gender, x="gender", y="p");
#Percentages in y-axis and occupation in x-axis
total_users = user_data['occupation'].count()
df_occu = user_data.groupby('occupation').agg({'occupation': 'count'})
df_occu.rename(columns={'occupation': 'occupation_count'}, inplace=True)
df_occu.reset_index(inplace=True)
df_occu['p'] = df_occu['occupation_count']
df_occu['p'] = df_occu['p'].apply(lambda n: (n/total_users) * 100)
plt.figure(figsize=(20,10))
sns.lineplot(data=df_occu, x="occupation", y="p");
###Output
_____no_output_____
###Markdown
8. Visualize how popularity of genres has changed over the years - 10 marksNote that you need to use the **percent of number of releases in a year** as a parameter of popularity of a genreHint 1: You need to reach to a data frame where the release year is the index and the genre is the column names (one cell shows the number of release in a year in one genre) or vice versa. (Drop unnecessary column if there are any)Hint 2: Find the total number of movies release in a year(use `sum(axis=1)` store that value in a new column as 'total'). Now divide the value of each genre in that year by total to get percentage number of release in a particular year.`(df.div(df['total'], axis= 0) * 100)`Once that is achieved, you can either use univariate plots or can use the heatmap to visualise all the changes over the years in one go. Hint 3: Use groupby on the relevant column and use sum() on the same to find out the number of releases in a year/genre.
###Code
df8 = item_data.copy() #clone the original item data
df8.drop(df8[df8['unknown'] == 1].index, inplace = True) #cleanse the unknown data
df8.reset_index(inplace=True)
df8.set_index('index', inplace=True)
#new column with year
df8['year'] = df8['release date'].apply(lambda x: pd.to_numeric(x.split('-')[2]))
df8_pivot = df8.groupby('year').sum() #group by year and sum all columns
del df8_pivot['movie id'] #delete the movie id because it does not provide useful info after sum
df8_pivot
#Calculate total movies per year and add it to df8_pivot
df8_totalMovies = df8.groupby('year').agg({'movie title': 'count'})
df8_totalMovies.rename(columns={'movie title': 'Total'}, inplace=True)
df8_pivot = pd.concat([df8_pivot,df8_totalMovies], axis = 1)
df8_pivot
#Calculate percentage number of release in a particular year
df8_pivot_perc = (df8_pivot.div(df8_pivot['Total'], axis= 0) * 100)
del df8_pivot_perc['Total'] # remove total columns because it does not provide useful data after calc percentage
df8_pivot_perc
#Draw heatmap
plt.figure(figsize=(20, 10))
sns.heatmap(df8_pivot_perc);
###Output
_____no_output_____
###Markdown
9. Find the top 25 movies according to average ratings such that each movie has number of ratings more than 100 - 10 marksHints : 1. Find the count of ratings and average ratings for every movie.2. Slice the movies which have ratings more than 100.3. Sort values according to average rating such that movie which highest rating is on top.4. Select top 25 movies.5. You will have to use the .merge() function to get the movie titles.Note: This question will need you to research about groupby and apply your findings. You can find more on groupby on https://realpython.com/pandas-groupby/.
###Code
#Clone copy of orginal data
df9 = data.copy()
#Calculate count of ratings for every movie
df9_rating_count = df9.groupby('movie id').agg({'rating': 'count'})
df9_rating_count.rename(columns={'rating': 'rating_count'}, inplace=True)
#Calculate average ratings for every movie
df9_rating_mean = df9.groupby('movie id').mean()
df9_rating_mean.rename(columns={'rating': 'rating_avg'}, inplace=True)
del df9_rating_mean['user id']
del df9_rating_mean['timestamp']
df9_pivot = pd.concat([df9_rating_count, df9_rating_mean], axis = 1)
df9_pivot
#Slice the movies which have ratings more than 100.
filtered = df9_pivot[df9_pivot['rating_count'] > 100]
#Sort values according to average rating such that movie which highest rating is on top.
sort = filtered.sort_values('rating_avg',ascending=False)
#Select top 25 movies.
top_25 = sort.head(25)
#You will have to use the .merge() function to get the movie titles.
df9_item = item_data.copy()
merged = pd.merge(df9_item, top_25, how="inner", on = 'movie id')
merged[['movie id', 'movie title', 'rating_count', 'rating_avg']]
###Output
_____no_output_____
###Markdown
10. See gender distribution across different genres check for the validity of the below statements - 10 marks* Men watch more drama than women* Women watch more Sci-Fi than men* Men watch more Romance than women**compare the percentages** 1. Merge all the datasets2. There is no need to conduct statistical tests around this. Just **compare the percentages** and comment on the validity of the above statements.3. you might want ot use the .sum(), .div() function here.4. Use number of ratings to validate the numbers. For example, if out of 4000 ratings received by women, 3000 are for drama, we will assume that 75% of the women watch drama.
###Code
#Copy orginal data into a new data frame
df10_item = item_data.copy()
df10_data = data.copy()
df10_user = user_data.copy()
#Merge items from user with items from data
df10_user_data = pd.merge(df10_user, df10_data, how="inner", on='user id')
df10_user_data
#Merge all data
df10_all = pd.merge(df10_user_data, df10_item, how="inner", on='movie id')
df10_all
def get_gender_with_high_rating(merged_df, genre):
temp = merged_df.groupby([genre, 'gender']).sum()
temp.reset_index(inplace=True)
filterMale = temp['gender'] == 'M'
filterGenre = temp[genre] == 1
filterFemale = temp['gender'] == 'F'
total_rating = temp[filterGenre]['rating'].sum()
male_rating = temp[filterMale & filterGenre]['rating'].sum()
female_rating = temp[filterFemale & filterGenre]['rating'].sum()
ret_val = 'F'
print('Men rated ', (male_rating/total_rating) * 100, ' % and women rated ', female_rating / total_rating * 100, '% for the genre ', genre)
if (male_rating/total_rating) * 100 > (female_rating / total_rating) * 100:
ret_val = 'M'
return ret_val
print('The statement "Men watch more drama than women" is', get_gender_with_high_rating(df10_all, 'Drama') == 'M')
print('The statement "Women watch more Sci-Fi than men" is', get_gender_with_high_rating(df10_all, 'Sci-Fi') == 'F')
print('The statement "Men watch more Romance than women"', get_gender_with_high_rating(df10_all, 'Romance') == 'M')
###Output
Men rated 69.61635594903663 % and women rated 30.383644050963365 % for the genre Romance
The statement "Men watch more Romance than women" True
|
mlxtend/docs/sources/user_guide/frequent_patterns/fpmax.ipynb | ###Markdown
Maximal Itemsets via the FP-Max Algorithm Function implementing FP-Max to extract maximal itemsets for association rule mining > from mlxtend.frequent_patterns import fpmax Overview The [Apriori algorithm](./apriori.md) is among the first and most popular algorithms for frequent itemset generation (frequent itemsets are then used for association rule mining). However, the runtime of Apriori can be quite large, especially for datasets with a large number of unique items, as the runtime grows exponentially depending on the number of unique items. In contrast to Apriori, [FP-Growth](./fpgrowth.md) is a frequent pattern generation algorithm that inserts items into a pattern search tree, which allows it to have a linear increase in runtime with respect to the number of unique items or entries.FP-Max is a variant of FP-Growth, which focuses on obtaining maximal itemsets.**An itemset X is said to be maximal if X is frequent and there exists no frequent super-pattern containing X.** In other words, a frequent pattern X cannot be a sub-pattern of a larger frequent pattern to qualify for the definition *maximal itemset*. References- [1] Grahne, G., & Zhu, J. (2003, November). Efficiently using prefix-trees in mining frequent itemsets. In FIMI (Vol. 90). Related- [FP-Growth](./fpgrowth.md)- [Apriori](./apriori.md) Example 1 -- Maximal Itemsets The `fpmax` function expects data in a one-hot encoded pandas DataFrame.Suppose we have the following transaction data:
###Code
dataset = [['Milk', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'],
['Dill', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'],
['Milk', 'Apple', 'Kidney Beans', 'Eggs'],
['Milk', 'Unicorn', 'Corn', 'Kidney Beans', 'Yogurt'],
['Corn', 'Onion', 'Onion', 'Kidney Beans', 'Ice cream', 'Eggs']]
###Output
_____no_output_____
###Markdown
We can transform it into the right format via the `TransactionEncoder` as follows:
###Code
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
te = TransactionEncoder()
te_ary = te.fit(dataset).transform(dataset)
df = pd.DataFrame(te_ary, columns=te.columns_)
df
###Output
_____no_output_____
###Markdown
Now, let us return the items and itemsets with at least 60% support:
###Code
from mlxtend.frequent_patterns import fpmax
fpmax(df, min_support=0.6)
###Output
_____no_output_____
###Markdown
By default, `fpmax` returns the column indices of the items, which may be useful in downstream operations such as association rule mining. For better readability, we can set `use_colnames=True` to convert these integer values into the respective item names:
###Code
fpmax(df, min_support=0.6, use_colnames=True)
###Output
_____no_output_____
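###Markdown
Since maximal itemsets are a subset of all frequent itemsets, it can be instructive to compare `fpmax` with `fpgrowth` on the same data. The cell below is an added illustrative sketch (not part of the original documentation); it reuses the `df` and `min_support` values from the cells above.
###Code
# Added sketch: every itemset returned by fpmax is frequent, but fpgrowth also
# returns all of their frequent sub-itemsets, so it yields at least as many rows
from mlxtend.frequent_patterns import fpgrowth

all_frequent = fpgrowth(df, min_support=0.6, use_colnames=True)
maximal_only = fpmax(df, min_support=0.6, use_colnames=True)
len(all_frequent), len(maximal_only)
###Output
_____no_output_____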
###Markdown
More Examples Please note that since the `fpmax` function is a drop-in replacement for `fpgrowth` and `apriori`, it comes with the same set of function arguments and return arguments. Thus, for more examples, please see the [`apriori`](./apriori.md) documentation. API
###Code
with open('../../api_modules/mlxtend.frequent_patterns/fpmax.md', 'r') as f:
print(f.read())
###Output
## fpmax
*fpmax(df, min_support=0.5, use_colnames=False, max_len=None, verbose=0)*
Get maximal frequent itemsets from a one-hot DataFrame
**Parameters**
- `df` : pandas DataFrame
pandas DataFrame the encoded format. Also supports
DataFrames with sparse data; for more info, please
see (https://pandas.pydata.org/pandas-docs/stable/
user_guide/sparse.html#sparse-data-structures)
Please note that the old pandas SparseDataFrame format
is no longer supported in mlxtend >= 0.17.2.
The allowed values are either 0/1 or True/False.
For example,
```
Apple Bananas Beer Chicken Milk Rice
0 True False True True False True
1 True False True False False True
2 True False True False False False
3 True True False False False False
4 False False True True True True
5 False False True False True True
6 False False True False True False
7 True True False False False False
```
- `min_support` : float (default: 0.5)
A float between 0 and 1 for minimum support of the itemsets returned.
The support is computed as the fraction
transactions_where_item(s)_occur / total_transactions.
- `use_colnames` : bool (default: False)
If true, uses the DataFrames' column names in the returned DataFrame
instead of column indices.
- `max_len` : int (default: None)
Given the set of all maximal itemsets,
return those that are less than `max_len`. If `None` (default) all
possible itemsets lengths are evaluated.
- `verbose` : int (default: 0)
Shows the stages of conditional tree generation.
**Returns**
pandas DataFrame with columns ['support', 'itemsets'] of all maximal
itemsets that are >= `min_support` and < than `max_len`
(if `max_len` is not None).
Each itemset in the 'itemsets' column is of type `frozenset`,
which is a Python built-in type that behaves similarly to
sets except that it is immutable
(For more info, see
https://docs.python.org/3.6/library/stdtypes.html#frozenset).
|
FreeCodeCamp Machine Learning/Scikit Learn/scikit_learn_freecodecamp.ipynb | ###Markdown
###Code
from sklearn import datasets
import numpy as np
iris = datasets.load_iris()
#split it in features and labels
X = iris.data
y = iris.target
print(X.shape)
print(y.shape)
print("----------------")
from sklearn.model_selection import train_test_split
#Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
print("Training set shape")
print(X_train.shape)
print(y_train.shape)
print("-----------------")
print("Testing set shape")
print(X_test.shape)
print(y_test.shape)
#K Nearest Neighbor
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import neighbors, metrics, svm
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from google.colab import files
uploaded = files.upload()
import io
#iris = pd.read_csv(io.BytesIO(uploaded['iris.csv']))
data= pd.read_csv(io.BytesIO(uploaded['car.data']))
print(data.head())
print("-----------------------")
X = data[['buying', 'maint', 'safety']].values
y = data[['class']]
print(X, y)
print("-----------------------")
#conversion
Le = LabelEncoder()
for i in range(len(X[0])):
X[:,i] = Le.fit_transform(X[:, i])
print(X)
print("-------------------------")
#conversion y
label_mapping = {
'unacc':0,
'acc':1,
'good':2,
'vgood':3
}
y['class'] = y['class'].map(label_mapping)
y = np.array(y)
print(y)
print("----------------------")
#Create a model
# Knn Classifier
knn = neighbors.KNeighborsClassifier(n_neighbors=25, weights='uniform')
#Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
knn.fit(X_train, y_train)
prediction = knn.predict(X_test)
accuracy = metrics.accuracy_score(y_test, prediction)
print("prediction: ", prediction)
print("accuracy: ", accuracy)
print("---------------------------")
print("Actual value: ", y[20])
print("predicted value: ", knn.predict(X)[20])
#Support Vector Machine
from sklearn import datasets
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.metrics import accuracy_score
iris = datasets.load_iris()
X = iris.data
y = iris.target
classes = ['Iris Setosa', 'Iris Versicolor', 'Iris Virginica']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = svm.SVC()
model.fit(X_train, y_train)
print(model)
predictions = model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
print("Predictions: ", predictions)
print("Actual: ", y_test)
print("Accuracy: ", accuracy)
for i in range(len(predictions)):
print(classes[predictions[i]])
#Linear Regression
from sklearn import datasets
import numpy as np
from sklearn.model_selection import train_test_split
#from sklearn import svm
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from sklearn import linear_model
boston = datasets.load_boston()
#Features / labels
X = boston.data
y = boston.target
# Algorithm
l_reg = linear_model.LinearRegression()
plt.scatter(X.T[5], y)
plt.show()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = l_reg.fit(X_train, y_train)
predictions = model.predict(X_test)
print("Predictions: ", predictions)
#print("Actual: ", y_test)
print("R ^ 2 value: ", l_reg.score(X, y))
print("coedd: ", l_reg.coef_)
print('intercept: ', l_reg.intercept_)
from sklearn.datasets import load_breast_cancer
from sklearn.cluster import KMeans
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import scale
import pandas as pd
bc = load_breast_cancer()
#print(bc)
X = scale(bc.data)
#print(X)
y = bc.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = KMeans(n_clusters=2, random_state=0)
model.fit(X_train)
predictions = model.predict(X_test)
labels = model.labels_
print("labels: ", labels)
print("Predictions: ", predictions)
print("accuracy: ", accuracy_score(y_test, predictions))
print("Actual: ", y_test)
print(pd.crosstab(y_train, labels))
###Output
_____no_output_____ |
machine_learning/hw_training_01.ipynb | ###Markdown
Heatwave Training 01
+ Train on images to predict Heatwave
+ add day of the year to input Imports and Initial Variables
###Code
from hw_training_prep import load_images, normalize_images, build_CNN_model, download_dataset_of_images
from hw_training_01 import Heatwave_Model_Training, LayerDetails
import numpy as np
from azureml.core import Experiment
from azureml.core import Environment
from azureml.core import Workspace, Datastore, Dataset, Run
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from azureml.core.dataset import Dataset
from matplotlib import pyplot as plt
%matplotlib inline
ws = Workspace.from_config()
print('workspace:\n', ws)
sub_folder_name = 'aoi_107_2021_11_11'
datastore_name = 'workspaceblobstore'
###Output
workspace:
Workspace.create(name='hes', subscription_id='0e150cbb-ad2f-47a1-849c-c5d0527afd2b', resource_group='hes-nasa-msft')
###Markdown
Download images from Dataset -- if the parent folder does not already exist locally
###Code
dataset_prefix = 'aoi_107_2021_11_11'
download_dataset_of_images(dataset_prefix)
###Output
The folder aoi_107_2021_11_11 already exists. Will not proceed with the download.
###Markdown
Load images using Heatwave Model Training class
###Code
hmt = Heatwave_Model_Training(images_folder_name=dataset_prefix)
hmt.load_subsets_shuffle_and_normalize(limit=0)
hmt.print_dataset_summary()
###Output
label folder aoi_107_2021_11_11/train/0
label folder aoi_107_2021_11_11/train/1
label folder aoi_107_2021_11_11/validate/0
label folder aoi_107_2021_11_11/validate/1
label folder aoi_107_2021_11_11/test/0
label folder aoi_107_2021_11_11/test/1
Count of images = 6568
Label 0 -- count 4655
Label 1 -- count 1913
len(train_X) 6568 len(train_Y) 6568 len(train_X_names) 6568
(231, 349, 3) 1 Img_hw_area_pct_30__1983__d_228__var_tasmax.png
(231, 349, 3) 0 Img_hw_area_pct_30__2029__d_250__var_tasmax.png
len(validate_X) 1406
len(test_X) 1410
###Markdown
Prepare a CNN model using Heatwave Model Training, LayerDetails classes
###Code
print('Preparing model for training 01 -- images only...')
# layers
build_with_layers = []
build_with_layers.append(LayerDetails(layerType='C', filters=16, kernel_size= (3,3), pool_size=None, strides=None))
build_with_layers.append(LayerDetails(layerType='M', filters=None, kernel_size= None, pool_size=(2,2), strides=(2,2)))
build_with_layers.append(LayerDetails(layerType='C', filters=32, kernel_size= (3,3), pool_size=None, strides=None))
model = hmt.prepare_model(build_with_layers)
learning_rate = 0.01
epochs = 35
patience=5
print(model, '\n')
hmt.train_binary_classification_model(model, learning_rate, epochs, patience)
###Output
Preparing model for training 01 -- images only...
Preparing model for training...
Creating model...
input_shape = [231, 349, 3] num_classes = 1
Model: "model_1639037569"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 231, 349, 3)] 0
_________________________________________________________________
conv2d (Conv2D) (None, 231, 349, 16) 448
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 115, 174, 16) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 115, 174, 32) 4640
_________________________________________________________________
flatten (Flatten) (None, 640320) 0
_________________________________________________________________
dense (Dense) (None, 1) 640321
=================================================================
Total params: 645,409
Trainable params: 645,409
Non-trainable params: 0
_________________________________________________________________
None
<tensorflow.python.keras.engine.training.Model object at 0x7fc665b7c978>
Train on 6568 samples, validate on 1406 samples
Epoch 1/35
6568/6568 [==============================] - 96s 15ms/sample - loss: 0.5362 - accuracy: 0.7782 - val_loss: 0.3965 - val_accuracy: 0.8222
Epoch 2/35
6568/6568 [==============================] - 96s 15ms/sample - loss: 0.4053 - accuracy: 0.8182 - val_loss: 0.3608 - val_accuracy: 0.8478
Epoch 3/35
6568/6568 [==============================] - 96s 15ms/sample - loss: 0.3778 - accuracy: 0.8331 - val_loss: 0.3281 - val_accuracy: 0.8642
Epoch 4/35
6568/6568 [==============================] - 97s 15ms/sample - loss: 0.3822 - accuracy: 0.8283 - val_loss: 0.3461 - val_accuracy: 0.8563
Epoch 5/35
6568/6568 [==============================] - 95s 15ms/sample - loss: 0.3596 - accuracy: 0.8386 - val_loss: 0.3380 - val_accuracy: 0.8400
Epoch 6/35
6568/6568 [==============================] - 95s 14ms/sample - loss: 0.3493 - accuracy: 0.8436 - val_loss: 0.3209 - val_accuracy: 0.8535
Epoch 7/35
6568/6568 [==============================] - 96s 15ms/sample - loss: 0.3328 - accuracy: 0.8502 - val_loss: 0.3149 - val_accuracy: 0.8585
Epoch 8/35
6568/6568 [==============================] - 97s 15ms/sample - loss: 0.3377 - accuracy: 0.8452 - val_loss: 0.5242 - val_accuracy: 0.7788
Epoch 9/35
6568/6568 [==============================] - 95s 15ms/sample - loss: 0.3238 - accuracy: 0.8555 - val_loss: 0.2921 - val_accuracy: 0.8642
Epoch 10/35
6568/6568 [==============================] - 96s 15ms/sample - loss: 0.3226 - accuracy: 0.8576 - val_loss: 0.4198 - val_accuracy: 0.8414
Epoch 11/35
6568/6568 [==============================] - 95s 15ms/sample - loss: 0.3178 - accuracy: 0.8607 - val_loss: 0.5134 - val_accuracy: 0.7603
Epoch 12/35
6568/6568 [==============================] - 96s 15ms/sample - loss: 0.3180 - accuracy: 0.8589 - val_loss: 0.3410 - val_accuracy: 0.8492
Epoch 13/35
6568/6568 [==============================] - 95s 15ms/sample - loss: 0.3146 - accuracy: 0.8595 - val_loss: 0.3866 - val_accuracy: 0.8421
Epoch 14/35
6568/6568 [==============================] - 96s 15ms/sample - loss: 0.3066 - accuracy: 0.8618 - val_loss: 0.2991 - val_accuracy: 0.8748
Epoch 00014: early stopping
Training execution time (mins) 22.46
1410/1410 [==============================] - 6s 4ms/sample - loss: 0.3207 - accuracy: 0.8638
test loss = 0.32, test acc = 0.8600000143051147
|
zero-to-mastery-ml-master/section-2-appendix-video-code/introduction-to-numpy.ipynb | ###Markdown
A Quick Introduction to Numerical Data Manipulation with Python and NumPy What is NumPy?[NumPy](https://docs.scipy.org/doc/numpy/index.html) stands for numerical Python. It's the backbone of all kinds of scientific and numerical computing in Python.And since machine learning is all about turning data into numbers and then figuring out the patterns, NumPy often comes into play. Why NumPy?You can do numerical calculations using pure Python. In the beginning, you might think Python is fast but once your data gets large, you'll start to notice slow downs.One of the main reasons you use NumPy is because it's fast. Behind the scenes, the code has been optimized to run using C, which is another programming language that can do things much faster than Python.The benefit of this being behind the scenes is you don't need to know any C to take advantage of it. You can write your numerical computations in Python using NumPy and get the added speed benefits.If you're curious as to what causes this speed benefit, it's a process called vectorization. [Vectorization](https://en.wikipedia.org/wiki/Vectorization) aims to do calculations by avoiding loops as loops can create potential bottlenecks.NumPy achieves vectorization through a process called [broadcasting](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html#module-numpy.doc.broadcasting). What does this notebook cover?The NumPy library is very capable. However, learning everything off by heart isn't necessary. Instead, this notebook focuses on the main concepts of NumPy and the `ndarray` datatype.You can think of the `ndarray` datatype as a very flexible array of numbers.More specifically, we'll look at:* NumPy datatypes & attributes* Creating arrays* Viewing arrays & matrices (indexing)* Manipulating & comparing arrays* Sorting arrays* Use cases (examples of turning things into numbers)After going through it, you'll have the base knowledge of NumPy you need to keep moving forward. Where can I get help?If you get stuck or think of something you'd like to do which this notebook doesn't cover, don't fear!The recommended steps you take are:1. **Try it** - Since NumPy is very friendly, your first step should be to use what you know and try to figure out the answer to your own question (getting it wrong is part of the process). If in doubt, run your code.2. **Search for it** - If trying it on your own doesn't work, since someone else has probably tried to do something similar, try searching for your problem. You'll likely end up in 1 of 2 places: * [NumPy documentation](https://docs.scipy.org/doc/numpy/index.html) - the ground truth for everything NumPy, this resource covers all of the NumPy functionality. * [Stack Overflow](https://stackoverflow.com/) - this is the developers Q&A hub, it's full of questions and answers of different problems across a wide range of software development topics and chances are, there's one related to your problem. An example of searching for a NumPy function might be:> "how to find unique elements in a numpy array"Searching this on Google leads to the NumPy documentation for the `np.unique()` function: https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.htmlThe next steps here are to read through the documentation, check the examples and see if they line up to the problem you're trying to solve. If they do, **rewrite the code** to suit your needs, run it, and see what the outcomes are.3.
**Ask for help** - If you've been through the above 2 steps and you're still stuck, you might want to ask your question on [Stack Overflow](https://www.stackoverflow.com). Be as specific as possible and provide details on what you've tried.Remember, you don't have to learn all of the functions off by heart to begin with. What's most important is continually asking yourself, "what am I trying to do with the data?".Start by answering that question and then practicing finding the code which does it.Let's get started. 0. Importing NumPyTo get started using NumPy, the first step is to import it. The most common way (and method you should use) is to import NumPy as the abbreviation `np`.If you see the letters `np` used anywhere in machine learning or data science, it's probably referring to the NumPy library.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
1. DataTypes and attributes**NOTE:** Important to remember the main type in NumPy is `ndarray`, even seemingly different kinds of arrays are still `ndarray`'s. This means an operation you do on one array, will work on another.
###Code
# 1-dimensonal array, also referred to as a vector
a1 = np.array([1, 2, 3])
# 2-dimensional array, also referred to as matrix
a2 = np.array([[1, 2.0, 3.3],
[4, 5, 6.5]])
# 3-dimensional array, also referred to as a matrix
a3 = np.array([[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]],
[[10, 11, 12],
[13, 14, 15],
[16, 17, 18]]])
a1.shape, a1.ndim, a1.dtype, a1.size, type(a1)
a2.shape, a2.ndim, a2.dtype, a2.size, type(a2)
a3.shape, a3.ndim, a3.dtype, a3.size, type(a3)
a1
a2
a3
###Output
_____no_output_____
###Markdown
Anatomy of an arrayKey terms:* **Array** - A list of numbers, can be multi-dimensional.* **Scalar** - A single number (e.g. `7`).* **Vector** - A list of numbers with 1-dimension (e.g. `np.array([1, 2, 3])`).* **Matrix** - A (usually) multi-dimensional list of numbers (e.g. `np.array([[1, 2, 3], [4, 5, 6]])`). pandas DataFrame out of NumPy arraysThis is to exemplify how NumPy is the backbone of many other libraries.
###Code
import pandas as pd
df = pd.DataFrame(np.random.randint(10, size=(5, 3)),
columns=['a', 'b', 'c'])
df
a2
df2 = pd.DataFrame(a2)
df2
###Output
_____no_output_____
###Markdown
2. Creating arrays* `np.array()`* `np.ones()`* `np.zeros()`* `np.random.rand(5, 3)`* `np.random.randint(10, size=5)`* `np.random.seed()` - pseudo random numbers* Searching the documentation example (finding `np.unique()` and using it)
###Code
# Create a simple array
simple_array = np.array([1, 2, 3])
simple_array
simple_array = np.array((1, 2, 3))
simple_array, simple_array.dtype
# Create an array of ones
ones = np.ones((10, 2))
ones
# The default datatype is 'float64'
ones.dtype
# You can change the datatype with .astype()
ones.astype(int)
# Create an array of zeros
zeros = np.zeros((5, 3, 3))
zeros
zeros.dtype
# Create an array within a range of values
range_array = np.arange(0, 10, 2)
range_array
# Random array
random_array = np.random.randint(10, size=(5, 3))
random_array
# Random array of floats (between 0 & 1)
np.random.random((5, 3))
np.random.random((5, 3))
# Random 5x3 array of floats (between 0 & 1), similar to above
np.random.rand(5, 3)
np.random.rand(5, 3)
###Output
_____no_output_____
###Markdown
NumPy uses pseudo-random numbers, which means, the numbers look random but aren't really, they're predetermined.For consistency, you might want to keep the random numbers you generate similar throughout experiments.To do this, you can use [`np.random.seed()`](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.seed.html).What this does is it tells NumPy, "Hey, I want you to create random numbers but keep them aligned with the seed."Let's see it.
###Code
# Set random seed to 0
np.random.seed(0)
# Make 'random' numbers
np.random.randint(10, size=(5, 3))
###Output
_____no_output_____
###Markdown
With `np.random.seed()` set, every time you run the cell above, the same random numbers will be generated.What if `np.random.seed()` wasn't set?Every time you run the cell below, a new set of numbers will appear.
###Code
# Make more random numbers
np.random.randint(10, size=(5, 3))
###Output
_____no_output_____
###Markdown
Let's see it in action again, we'll stay consistent and set the random seed to 0.
###Code
# Set random seed to same number as above
np.random.seed(0)
# The same random numbers come out
np.random.randint(10, size=(5, 3))
###Output
_____no_output_____
###Markdown
Because `np.random.seed()` is set to 0, the random numbers are the same as the cell with `np.random.seed()` set to 0 as well.Setting `np.random.seed()` is not 100% necessary but it's helpful to keep numbers the same throughout your experiments.For example, say you wanted to split your data randomly into training and test sets.Every time you randomly split, you might get different rows in each set.If you shared your work with someone else, they'd get different rows in each set too.Setting `np.random.seed()` ensures there's still randomness, it just makes the randomness repeatable. Hence the 'pseudo-random' numbers.
###Code
np.random.seed(0)
df = pd.DataFrame(np.random.randint(10, size=(5, 3)))
df
###Output
_____no_output_____
###Markdown
What unique values are in the array a3?Now you've seen a few different ways to create arrays, as an exercise, try to find out what NumPy function you could use to find the unique values within the `a3` array.You might want to search something like, "how to find the unique values in a numpy array".
###Code
# Your code here
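# One possible answer (left commented so you can try it yourself first):
# np.unique(a3)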
###Output
_____no_output_____
###Markdown
3. Viewing arrays and matrices (indexing)Remember, because arrays and matrices are both `ndarray`'s, they can be viewed in similar ways.Let's check out our 3 arrays again.
###Code
a1
a2
a3
###Output
_____no_output_____
###Markdown
Array shapes are always listed in the format `(row, column, n, n, n...)` where `n` is optional extra dimensions.
###Code
a1[0]
a2[0]
a3[0]
# Get 2nd row (index 1) of a2
a2[1]
# Get the first 2 values of the first 2 rows of both arrays
a3[:2, :2, :2]
###Output
_____no_output_____
###Markdown
This takes a bit of practice, especially when the dimensions get higher. Usually, it takes me a little trial and error of trying to get certain values, viewing the output in the notebook and trying again.NumPy arrays get printed from outside to inside. This means the number at the end of the shape comes first, and the number at the start of the shape comes last.
###Code
a4 = np.random.randint(10, size=(2, 3, 4, 5))
a4
a4.shape
# Get only the first 4 numbers of each single vector
a4[:, :, :, :4]
###Output
_____no_output_____
###Markdown
`a4`'s shape is (2, 3, 4, 5), this means it gets displayed like so:* Inner most array = size 5* Next array = size 4* Next array = size 3* Outer most array = size 2 4. Manipulating and comparying arrays* Arithmetic * `+`, `-`, `*`, `/`, `//`, `**`, `%` * `np.exp()` * `np.log()` * [Dot product](https://www.mathsisfun.com/algebra/matrix-multiplying.html) - `np.dot()` * Broadcasting* Aggregation * `np.sum()` - faster than `.sum()`, make demo, np is really fast * `np.mean()` * `np.std()` * `np.var()` * `np.min()` * `np.max()` * `np.argmin()` - find index of minimum value * `np.argmax()` - find index of maximum value * These work on all `ndarray`'s * `a4.min(axis=0)` -- you can use axis as well* Reshaping * `np.reshape()`* Transposing * `a3.T` * Comparison operators * `>` * `<` * `<=` * `>=` * `x != 3` * `x == 3` * `np.sum(x > 3)` Arithmetic
###Code
a1
ones = np.ones(3)
ones
# Add two arrays
a1 + ones
# Subtract two arrays
a1 - ones
# Multiply two arrays
a1 * ones
# Multiply two arrays
a1 * a2
a1.shape, a2.shape
a2 * a3
a3
###Output
_____no_output_____
###Markdown
Broadcasting- What is broadcasting? - Broadcasting is a feature of NumPy which performs an operation across multiple dimensions of data without replicating the data. This saves time and space. For example, if you have a 3x3 array (A) and want to add a 1x3 array (B), NumPy will add the row of (B) to every row of (A).- Rules of Broadcasting 1. If the two arrays differ in their number of dimensions, the shape of the one with fewer dimensions is padded with ones on its leading (left) side. 2. If the shape of the two arrays does not match in any dimension, the array with shape equal to 1 in that dimension is stretched to match the other shape. 3. If in any dimension the sizes disagree and neither is equal to 1, an error is raised. **The broadcasting rule:**In order to broadcast, the size of the trailing axes for both arrays in an operation must be either the same size or one of them must be one.
###Code
a1
a1.shape
a2.shape
a2
a1 + a2
a2 + 2
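# Added check of the rules above: a1 (shape (3,)) is padded to (1, 3) and stretched
# along the first axis to match a2 (shape (2, 3)), so the broadcast shape is (2, 3)
np.broadcast(a1, a2).shape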
# Raises an error because there's a shape mismatch
a2 + a3
# Divide two arrays
a1 / ones
# Divide using floor division
a2 // a1
# Take an array to a power
a1 ** 2
# You can also use np.square()
np.square(a1)
# Modulus divide (what's the remainder)
a1 % 2
###Output
_____no_output_____
###Markdown
You can also find the log or exponential of an array using `np.log()` and `np.exp()`.
###Code
# Find the log of an array
np.log(a1)
# Find the exponential of an array
np.exp(a1)
###Output
_____no_output_____
###Markdown
AggregationAggregation - bringing things together, doing a similar thing on a number of things.
###Code
sum(a1)
np.sum(a1)
###Output
_____no_output_____
###Markdown
Use NumPy's `np.sum()` on NumPy arrays and Python's `sum()` on Python lists.
###Code
massive_array = np.random.random(100000)
massive_array.size
%timeit sum(massive_array) # Python sum()
%timeit np.sum(massive_array) # NumPy np.sum()
import random
massive_list = [random.randint(0, 10) for i in range(100000)]
len(massive_list)
massive_list[:10]
%timeit sum(massive_list)
%timeit np.sum(massive_list)
a2
# Find the mean
np.mean(a2)
# Find the max
np.max(a2)
# Find the min
np.min(a2)
# Find the standard deviation
np.std(a2)
# Find the variance
np.var(a2)
# The standard deviation is the square root of the variance
np.sqrt(np.var(a2))
###Output
_____no_output_____
###Markdown
**What's mean?**Mean is the same as average. You can find the average of a set of numbers by adding them up and dividing them by how many there are.**What's standard deviation?**[Standard deviation](https://www.mathsisfun.com/data/standard-deviation.html) is a measure of how spread out numbers are.**What's variance?**The [variance](https://www.mathsisfun.com/data/standard-deviation.html) is the average of the squared differences from the mean.To work it out, you:1. Work out the mean2. For each number, subtract the mean and square the result3. Find the average of the squared differences
###Code
# Demo of variance
high_var_array = np.array([1, 100, 200, 300, 4000, 5000])
low_var_array = np.array([2, 4, 6, 8, 10])
np.var(high_var_array), np.var(low_var_array)
np.std(high_var_array), np.std(low_var_array)
# The standard deviation is the square root of the variance
np.sqrt(np.var(high_var_array))
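# Added sketch: the three steps described above, done by hand for low_var_array,
# agree with np.var()
manual_mean = np.sum(low_var_array) / low_var_array.size   # step 1: work out the mean
squared_diffs = (low_var_array - manual_mean) ** 2          # step 2: squared differences from the mean
np.mean(squared_diffs), np.var(low_var_array)               # step 3: average them -- both values match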
%matplotlib inline
import matplotlib.pyplot as plt
plt.hist(high_var_array)
plt.show()
plt.hist(low_var_array)
plt.show()
###Output
_____no_output_____
###Markdown
Reshaping
###Code
a2
a2.shape
a2 + a3
a2.reshape(2, 3, 1)
a2.reshape(2, 3, 1) + a3
###Output
_____no_output_____
###Markdown
Transpose
###Code
a2.shape
a2.T
a2.T.shape
matrix = np.random.random(size=(5,3,3))
matrix
matrix.shape
matrix.T
matrix.T.shape
###Output
_____no_output_____
###Markdown
Dot* TODO - create graphic for dot versus element-wise also known as Hadamard product* TODO - why would someone use dot versus element-wise?* A dot product models real world problems well, it's a method of finding patterns between data
###Code
np.random.seed(0)
mat1 = np.random.randint(10, size=(3, 3))
mat2 = np.random.randint(10, size=(3, 2))
mat1.shape, mat2.shape
mat1
mat2
np.dot(mat1, mat2)
np.random.seed(0)
mat3 = np.random.randint(10, size=(4,3))
mat4 = np.random.randint(10, size=(4,3))
mat3
mat4
np.dot(mat3, mat4)
mat3.T.shape
# Dot product
np.dot(mat3.T, mat4)
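# Added check: each entry of a dot product is a sum of element-wise products,
# e.g. the (0, 0) entry is mat3's first column times mat4's first column, summed
np.sum(mat3[:, 0] * mat4[:, 0]), np.dot(mat3.T, mat4)[0, 0]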
# Element-wise multiplication, also known as Hadamard product
mat3 * mat4
###Output
_____no_output_____
###Markdown
Dot product practical example, nut butter sales
###Code
np.random.seed(0)
sales_amounts = np.random.randint(20, size=(5, 3))
sales_amounts
weekly_sales = pd.DataFrame(sales_amounts,
index=["Mon", "Tues", "Wed", "Thurs", "Fri"],
columns=["Almond butter", "Peanut butter", "Cashew butter"])
weekly_sales
prices = np.array([10, 8, 12])
prices
butter_prices = pd.DataFrame(prices.reshape(1, 3),
index=["Price"],
columns=["Almond butter", "Peanut butter", "Cashew butter"])
butter_prices.shape
weekly_sales.shape
# Find the total amount of sales for a whole day
total_sales = prices.dot(sales_amounts)
total_sales
###Output
_____no_output_____
###Markdown
The shapes aren't aligned, we need the middle two numbers to be the same.
###Code
prices
sales_amounts.T.shape
# To make the middle numbers the same, we can transpose
total_sales = prices.dot(sales_amounts.T)
total_sales
butter_prices.shape, weekly_sales.shape
daily_sales = butter_prices.dot(weekly_sales.T)
daily_sales
# Need to transpose again
weekly_sales["Total"] = daily_sales.T
weekly_sales
###Output
_____no_output_____
###Markdown
Comparison operators
###Code
a1
a2
a1 > a2
a1 >= a2
a1 > 5
a1 == a1
a1 == a2
###Output
_____no_output_____
###Markdown
5. Sorting arrays* `np.sort()`* `np.argsort()`* `np.argmax()`* `np.argmin()`
###Code
random_array
np.sort(random_array)
np.argsort(random_array)
a1
# Return the indices that would sort an array
np.argsort(a1)
# No axis
np.argmin(a1)
random_array
# Along axis=1: index of the max value in each row
np.argmax(random_array, axis=1)
# Along axis=0: index of the min value in each column
np.argmin(random_array, axis=0)
###Output
_____no_output_____
###Markdown
6. Use caseTurning an image of a panda into numbers.
###Code
from matplotlib.image import imread
panda = imread('../images/numpy-panda.png')
print(type(panda))
panda.shape
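# Added note: the shape is (height, width, colour channels) -- each pixel is just a few numbers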
panda
###Output
_____no_output_____
###Markdown
###Code
car = imread("../images/numpy-car-photo.png")
car.shape
car[:,:,:3].shape
###Output
_____no_output_____
###Markdown
###Code
dog = imread("../images/numpy-dog-photo.png")
dog.shape
dog
###Output
_____no_output_____ |
BloodData.ipynb | ###Markdown
Random Forest Classifier
###Code
#Use RandomForestClassifier to predict Clinical
x = df_final_select
y = train["Clinical"]
# y = np.array(y,dtype=int)
X_train,X_test,y_train,y_test = train_test_split(x,y,test_size=0.1,random_state=0)
#RandomForest
rfc = RandomForestClassifier()
#rfc=RandomForestClassifier(n_estimators=100,n_jobs = -1,random_state =50, min_samples_leaf = 10)
rfc.fit(X_train,y_train)
y_predict = rfc.predict(X_train)
score_rfc = rfc.score(X_test,y_test)
print("Random Forest Accuracy = ",score_rfc*100," %")
from sklearn.model_selection import KFold
x = df_final_select
y = train["Clinical"]
kf = KFold(n_splits=5)
best_accuracy = 0
for train_index , test_index in kf.split(x):
X_train, X_test, y_train, y_test = x.iloc[train_index], x.iloc[test_index], y.iloc[train_index], y.iloc[test_index]
rfc = RandomForestClassifier()
rfc.fit(X_train,y_train)
y_predict = rfc.predict(X_train)
accuracy = rfc.score(X_test,y_test)
print("Random Forest Accuracy = ",accuracy*100,"%")
if accuracy > best_accuracy:
best_accuracy = accuracy
best_rfc = rfc
print("Best Accuracy = ",best_accuracy*100,"%")
#random forest final accuracy
x_train_new = x
y_train_new = train["Clinical"]
y_train_new = y_train_new.reset_index(drop=True)
y_pred = best_rfc.predict(x_train_new)
count = 0
for i in range(y_pred.shape[0]):
if y_pred[i] != y_train_new[i]:
count += 1
print(y_pred[i],y_train_new[i])
rfc_accuracy = 1-count/y_pred.shape[0]
print("Random Forest Accuracy = ",rfc_accuracy*100,"%")
###Output
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
control control
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
control control
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
control control
dep_M3 dep_M3
dep_M0 dep_M0
control control
control control
control control
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
control control
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
dep_M3 dep_M3
dep_M3 dep_M0
dep_M3 dep_M3
dep_M3 dep_M0
dep_M0 dep_M3
dep_M0 dep_M0
dep_M0 dep_M3
dep_M0 dep_M0
control control
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
control control
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
control control
control control
dep_M3 dep_M3
dep_M0 dep_M0
control control
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
control control
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
control control
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
control control
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
dep_M3 dep_M3
dep_M0 dep_M0
control control
control control
control control
Random Forest Accuracy = 97.61904761904762 %
###Markdown
SVM
###Code
from sklearn import svm
#Use SVM to predict Clinical
x = df_final_select
y = train["Clinical"]
# y = np.array(y,dtype=int)
X_train,X_test,y_train,y_test = train_test_split(x,y,test_size=0.1,random_state=0)
clf = svm.SVC()
clf.fit(X_train,y_train)
y_predict = clf.predict(X_train)
score_clf = clf.score(X_test,y_test)
print("SVM Accuracy = ",score_clf*100," %")
from sklearn.model_selection import KFold
x = df_final_select
y = train["Clinical"]
kf = KFold(n_splits=5)
best_accuracy = 0
for train_index , test_index in kf.split(x):
X_train, X_test, y_train, y_test = x.iloc[train_index], x.iloc[test_index], y.iloc[train_index], y.iloc[test_index]
clf = svm.SVC()
clf.fit(X_train,y_train)
y_predict = clf.predict(X_train)
accuracy = clf.score(X_test,y_test)
print("SVM Accuracy = ",accuracy*100,"%")
if accuracy > best_accuracy:
best_accuracy = accuracy
best_clf = clf
print("Best Accuracy = ",best_accuracy*100,"%")
#SVM final accuracy
x_train_new = x
y_train_new = train["Clinical"]
y_train_new = y_train_new.reset_index(drop=True)
y_pred = best_clf.predict(x_train_new)
count = 0
for i in range(y_pred.shape[0]):
print(y_pred[i],y_train_new[i])
if y_pred[i] != y_train_new[i]:
count += 1
clf_accuracy = 1-count/y_pred.shape[0]
print("SVM Accuracy = ",clf_accuracy*100,"%")
###Output
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
dep_M0 dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control control
control control
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control control
control dep_M0
control dep_M3
control dep_M0
control dep_M3
dep_M0 dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control control
control control
control control
control control
dep_M0 control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
dep_M0 control
control control
control control
control control
control control
control control
control control
control control
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
dep_M0 dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
dep_M0 dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
dep_M0 dep_M0
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control control
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control dep_M3
dep_M0 dep_M0
control dep_M3
dep_M0 dep_M0
dep_M0 dep_M3
dep_M0 dep_M0
control dep_M3
dep_M0 dep_M0
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control control
control control
SVM Accuracy = 35.71428571428571 %
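###Markdown
A possible follow-up (added note, not part of the original analysis): SVMs are sensitive to feature scale, so the low accuracy above may partly reflect unscaled inputs. The sketch below wraps the same classifier in a standardization pipeline; it reuses the `X_train`/`X_test`/`y_train`/`y_test` split from the last fold above and is only an illustrative suggestion.
###Code
# Added sketch: standardize features before fitting the SVM (assumes the variables defined above)
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
scaled_svm = make_pipeline(StandardScaler(), svm.SVC())
scaled_svm.fit(X_train, y_train)
print("Scaled SVM Accuracy = ", scaled_svm.score(X_test, y_test) * 100, "%")
###Output
_____no_output_____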
###Markdown
Neural network MLPClassifier
###Code
from sklearn.neural_network import MLPClassifier
#Use Neural Network MLPClassifier to predict Clinical
x = df_final_select
y = train["Clinical"]
# y = np.array(y,dtype=int)
X_train,X_test,y_train,y_test = train_test_split(x,y,test_size=0.1,random_state=0)
nnclf = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(50, 30), random_state=1, max_iter=2000)
nnclf.fit(X_train,y_train)
y_predict = nnclf.predict(X_train)
score_nnclf = nnclf.score(X_test,y_test)
print("Neural Network Accuracy = ",score_nnclf*100," %")
from sklearn.model_selection import KFold
x = df_final_select
y = train["Clinical"]
kf = KFold(n_splits=5)
best_accuracy = 0
for train_index , test_index in kf.split(x):
X_train, X_test, y_train, y_test = x.iloc[train_index], x.iloc[test_index], y.iloc[train_index], y.iloc[test_index]
nnclf = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(50, 30), random_state=1, max_iter=2000)
nnclf.fit(X_train,y_train)
y_predict = nnclf.predict(X_train)
accuracy = nnclf.score(X_test,y_test)
print("NN Accuracy = ",accuracy*100,"%")
if accuracy > best_accuracy:
best_accuracy = accuracy
best_nnclf = nnclf
print("NN Accuracy = ",best_accuracy*100,"%")
#NN final accuracy
x_train_new = x
y_train_new = train["Clinical"]
y_train_new = y_train_new.reset_index(drop=True)
y_pred = best_nnclf.predict(x_train_new)
count = 0
for i in range(y_pred.shape[0]):
print(y_pred[i],y_train_new[i])
if y_pred[i] != y_train_new[i]:
count += 1
nnclf_accuracy = 1-count/y_pred.shape[0]
print("NN Accuracy = ",nnclf_accuracy*100,"%")
###Output
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control control
control control
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control control
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control control
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control dep_M3
dep_M3 dep_M0
control dep_M3
dep_M3 dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control dep_M3
control dep_M0
control control
control control
control control
NN Accuracy = 33.333333333333336 %
###Markdown
Logistic Regression
###Code
from sklearn.linear_model import LogisticRegression
#Use Logistic Regression to predict Clinical
x = df_final_select
y = train["Clinical"]
# y = np.array(y,dtype=int)
X_train,X_test,y_train,y_test = train_test_split(x,y,test_size=0.1,random_state=0)
logclf = LogisticRegression(random_state=0).fit(X_train,y_train)
logclf.predict(X_train)
logclf.predict_proba(X_train)
score_logclf = logclf.score(X_test,y_test)
print("Logistic Regression Accuracy = ",score_logclf*100," %")
from sklearn.model_selection import KFold
x = df_final_select
y = train["Clinical"]
kf = KFold(n_splits=5)
best_accuracy = 0
for train_index , test_index in kf.split(x):
X_train, X_test, y_train, y_test = x.iloc[train_index], x.iloc[test_index], y.iloc[train_index], y.iloc[test_index]
logclf = LogisticRegression(random_state=0).fit(X_train,y_train)
y_predict = logclf.predict(X_train)
accuracy = logclf.score(X_test,y_test)
print("Logistic Regression Accuracy = ",accuracy*100,"%")
if accuracy > best_accuracy:
best_accuracy = accuracy
best_logclf = logclf
print("Logistic Regression Accuracy = ",best_accuracy*100,"%")
#Logistic final accuracy
x_train_new = x
y_train_new = train["Clinical"]
y_train_new = y_train_new.reset_index(drop=True)
y_pred = best_logclf.predict(x_train_new)
count = 0
for i in range(y_pred.shape[0]):
print(y_pred[i],y_train_new[i])
if y_pred[i] != y_train_new[i]:
count += 1
logclf_accuracy = 1-count/y_pred.shape[0]
print("Logistic Regression Accuracy = ",logclf_accuracy*100,"%")
print(X_train.shape,y_train.shape)
print(X_test.shape,y_test.shape)
###Output
(151, 186) (151,)
(17, 186) (17,)
|
3 - Core Learning Algorithms/3-Core_Learning_Algorithms.ipynb | ###Markdown
TensorFlow Core Learning AlgorithmsIn this notebook we will walk through 4 fundamental machine learning algorithms. We will apply each of these algorithms to unique problems and datasets before highlighting the use cases of each.The algorithms we will focus on include:- Linear Regression- Classification- Clustering- Hidden Markov ModelsIt is worth noting that there are many tools within TensorFlow that could be used to solve the problems we will see below. I have chosen the tools that I believe give the most variety and are easiest to use. Linear RegressionLinear regression is one of the most basic forms of machine learning and is used to predict numeric values. In this tutorial we will use a linear model to predict the survival rate of passengers from the titanic dataset.*This section is based on the following documentation: https://www.tensorflow.org/tutorials/estimator/linear* How it WorksBefore we dive in, I will provide a very surface level explanation of the linear regression algorithm.Linear regression follows a very simple concept. If data points are related linearly, we can generate a line of best fit for these points and use it to predict future values.Let's take an example of a data set with one feature and one label.
###Code
import matplotlib.pyplot as plt
import numpy as np
x = [1, 2, 2.5, 3, 4]
y = [1, 4, 7, 9, 15]
plt.plot(x, y, 'ro')
plt.axis([0, 6, 0, 20])
###Output
_____no_output_____
###Markdown
We can see that this data has a linear correspondence. When the x value increases, so does the y. Because of this relation we can create a line of best fit for this dataset. In this example our line will only use one input variable, as we are working with two dimensions. In larger datasets with more features our line will have more features and inputs."Line of best fit refers to a line through a scatter plot of data points that best expresses the relationship between those points." (https://www.investopedia.com/terms/l/line-of-best-fit.asp)Here's a refresher on the equation of a line in 2D.$ y = mx + b $Here's an example of a line of best fit for this graph.
###Code
plt.plot(x, y, 'ro')
plt.axis([0, 6, 0, 20])
plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)))
plt.show()
###Output
_____no_output_____
###Markdown
Once we've generated this line for our dataset, we can use its equation to predict future values. We just pass the features of the data point we would like to predict into the equation of the line and use the output as our prediction. Setup and ImportsBefore we get started we must install *sklearn* and import the following modules.
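(A tiny aside before the setup: the prediction step described above can be sketched with the same `x`, `y` lists and `np.polyfit` used in the earlier plotting cell; this sketch is not part of the original tutorial.)

```python
# fit the line of best fit and plug a new x value into y = mx + b
m, b = np.polyfit(x, y, 1)   # slope and intercept
new_x = 5
predicted_y = m * new_x + b
print(predicted_y)
```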
###Code
!pip install -q sklearn
%tensorflow_version 2.x # this line is not required unless you are in a notebook
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import clear_output
from six.moves import urllib
import tensorflow.compat.v2.feature_column as fc
import tensorflow as tf
###Output
_____no_output_____
###Markdown
DataSo, if you haven't realized by now a major part of machine learning is data! In fact, it's so important that most of what we do in this tutorial will focus on exploring, cleaning and selecting appropriate data.The dataset we will be focusing on here is the titanic dataset. It has tons of information about each passenger on the ship. Our first step is always to understand the data and explore it. So, let's do that!**Below we will load a dataset and learn how we can explore it using some built-in tools. **
###Code
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv') # training data
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv') # testing data
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
###Output
_____no_output_____
###Markdown
The ```pd.read_csv()``` method will return to us a new pandas *dataframe*. You can think of a dataframe like a table. In fact, we can actually have a look at the table representation.We've decided to pop the "survived" column from our dataset and store it in a new variable. This column simply tells us if the person survived or not.To look at the data we'll use the ```.head()``` method from pandas. This will show us the first 5 items in our dataframe.
###Code
dftrain.head()
###Output
_____no_output_____
###Markdown
And if we want a more statistical analysis of our data we can use the ```.describe()``` method.
###Code
dftrain.describe()
###Output
_____no_output_____
###Markdown
And since we talked so much about shapes in the previous tutorial let's have a look at that too!
###Code
dftrain.shape
###Output
_____no_output_____
###Markdown
So we have 627 entries and 9 features, nice!Now let's have a look at our survival information.
###Code
y_train.head()
###Output
_____no_output_____
###Markdown
Notice that each entry is either a 0 or 1. Can you guess which stands for survival? **And now because visuals are always valuable let's generate a few graphs of the data.**
###Code
dftrain.age.hist(bins=20)
dftrain.sex.value_counts().plot(kind='barh')
dftrain['class'].value_counts().plot(kind='barh')
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive')
###Output
_____no_output_____
###Markdown
After analyzing this information, we should notice the following:- Most passengers are in their 20's or 30's - Most passengers are male- Most passengers are in "Third" class- Females have a much higher chance of survival Training vs Testing DataYou may have noticed that we loaded **two different datasets** above. This is because when we train models, we need two sets of data: **training and testing**. The **training** data is what we feed to the model so that it can develop and learn. It is usually a much larger size than the testing data.The **testing** data is what we use to evaluate the model and see how well it is performing. We must use a separate set of data that the model has not been trained on to evaluate it. Can you think of why this is?Well, the point of our model is to be able to make predictions on NEW data, data that we have never seen before. If we simply test the model on the data that it has already seen we cannot measure its accuracy accurately. We can't be sure that the model hasn't simply memorized our training data. This is why we need our testing and training data to be separate. Feature ColumnsIn our dataset we have two different kinds of information: **Categorical and Numeric**Our **categorical data** is anything that is not numeric! For example, the sex column does not use numbers, it uses the words "male" and "female".Before we continue and create/train a model we must convert our categorical data into numeric data. We can do this by encoding each category with an integer (ex. male = 1, female = 2). Fortunately for us TensorFlow has some tools to help!
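As a quick aside (not from the original tutorial), here is a tiny hand-rolled sketch of that integer-encoding idea using pandas; the TensorFlow feature columns in the next cell handle this for us:

```python
import pandas as pd

sex = pd.Series(["male", "female", "female", "male"])   # a made-up categorical column
codes, categories = pd.factorize(sex)
print(categories.tolist())   # ['male', 'female']
print(codes.tolist())        # [0, 1, 1, 0]  (male -> 0, female -> 1)
```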
###Code
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
vocabulary = dftrain[feature_name].unique() # gets a list of all unique values from given feature column
feature_columns.append(tf.feature_column.categorical_column_with_vocabulary_list(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name, dtype=tf.float32))
print(feature_columns)
###Output
[VocabularyListCategoricalColumn(key='sex', vocabulary_list=('male', 'female'), dtype=tf.string, default_value=-1, num_oov_buckets=0), VocabularyListCategoricalColumn(key='n_siblings_spouses', vocabulary_list=(1, 0, 3, 4, 2, 5, 8), dtype=tf.int64, default_value=-1, num_oov_buckets=0), VocabularyListCategoricalColumn(key='parch', vocabulary_list=(0, 1, 2, 5, 3, 4), dtype=tf.int64, default_value=-1, num_oov_buckets=0), VocabularyListCategoricalColumn(key='class', vocabulary_list=('Third', 'First', 'Second'), dtype=tf.string, default_value=-1, num_oov_buckets=0), VocabularyListCategoricalColumn(key='deck', vocabulary_list=('unknown', 'C', 'G', 'A', 'B', 'D', 'F', 'E'), dtype=tf.string, default_value=-1, num_oov_buckets=0), VocabularyListCategoricalColumn(key='embark_town', vocabulary_list=('Southampton', 'Cherbourg', 'Queenstown', 'unknown'), dtype=tf.string, default_value=-1, num_oov_buckets=0), VocabularyListCategoricalColumn(key='alone', vocabulary_list=('n', 'y'), dtype=tf.string, default_value=-1, num_oov_buckets=0), NumericColumn(key='age', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), NumericColumn(key='fare', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None)]
###Markdown
Let's break this code down a little bit...Essentially what we are doing here is creating a list of features that are used in our dataset. The cryptic lines of code inside the ```append()``` create an object that our model can use to map string values like "male" and "female" to integers. This allows us to avoid manually having to encode our dataframes.*And here is some relevant documentation*https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list?version=stable The Training ProcessSo, we are almost done preparing our dataset and I feel as though it's a good time to explain how our model is trained. Specifically, how input data is fed to our model. For this specific model data is going to be streamed into it in small batches of 32. This means we will not feed the entire dataset to our model at once, but simply small batches of entries. We will feed these batches to our model multiple times according to the number of **epochs**. An **epoch** is simply one stream of our entire dataset. The number of epochs we define is the amount of times our model will see the entire dataset. We use multiple epochs in hope that after seeing the same data multiple times the model will better determine how to estimate it.Ex. if we have 10 epochs, our model will see the same dataset 10 times. Since we need to feed our data in batches and multiple times, we need to create something called an **input function**. The input function simply defines how our dataset will be converted into batches at each epoch. Input FunctionThe TensorFlow model we are going to use requires that the data we pass it comes in as a ```tf.data.Dataset``` object. This means we must create an *input function* that can convert our current pandas dataframe into that object. Below you'll see a seemingly complicated input function; this is straight from the TensorFlow documentation (https://www.tensorflow.org/tutorials/estimator/linear). I've commented as much as I can to make it understandable, but you may want to refer to the documentation for a detailed explanation of each method.
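As a toy illustration of batches and epochs (an aside, not from the original tutorial; the notebook's real input function follows in the next cell):

```python
import tensorflow as tf  # already imported above

toy_ds = tf.data.Dataset.range(8)        # pretend this is a dataset with 8 rows
for batch in toy_ds.batch(3).repeat(2):  # batches of 3 elements, streamed for 2 epochs
    print(batch.numpy())
```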
###Code
def make_input_fn(data_df, label_df, num_epochs=10, shuffle=True, batch_size=32):
def input_function(): # inner function, this will be returned
ds = tf.data.Dataset.from_tensor_slices((dict(data_df), label_df)) # create tf.data.Dataset object with data and its label
if shuffle:
ds = ds.shuffle(1000) # randomize order of data
ds = ds.batch(batch_size).repeat(num_epochs) # split dataset into batches of 32 and repeat process for number of epochs
return ds # return a batch of the dataset
return input_function # return the function above to use it outside
train_input_fn = make_input_fn(dftrain, y_train) # here we will call the input_function that was returned to us to get a dataset object we can feed to the model
eval_input_fn = make_input_fn(dfeval, y_eval, num_epochs=1, shuffle=False)
###Output
_____no_output_____
###Markdown
Creating the ModelIn this tutorial we are going to use a linear estimator to utilize the linear regression algorithm. Creating one is pretty easy! Have a look below.
###Code
linear_est = tf.estimator.LinearClassifier(feature_columns=feature_columns)
# We create a linear estimator by passing the feature columns we created earlier
###Output
INFO:tensorflow:Using default config.
WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmp948t2b_4
INFO:tensorflow:Using config: {'_model_dir': '/tmp/tmp948t2b_4', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true
graph_options {
rewrite_options {
meta_optimizer_iterations: ONE
}
}
, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_service': None, '_cluster_spec': ClusterSpec({}), '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}
###Markdown
Training the ModelTraining the model is as easy as passing the input functions that we created earlier.
###Code
linear_est.train(train_input_fn) # train
result = linear_est.evaluate(eval_input_fn) # get model metrics/stats by testing on test data
clear_output() # clears console output
print(str(round(result['accuracy'] * 100, 2)) + "% accuracy") # the result variable is simply a dict of stats about our model
###Output
74.24% accuracy
###Markdown
And we now have a model with a 74% accuracy (this will change each time)! Not crazy impressive but decent for our first try.Now let's see how we can actually use this model to make predictions.We can use the ```.predict()``` method to get survival probabilities from the model. This method will return a list of dicts that store a prediction for each of the entries in our testing data set. Below we've used some pandas magic to plot a nice graph of the predictions.As you can see the survival rate is not very high :/
###Code
pred_dicts = list(linear_est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities')
###Output
INFO:tensorflow:Calling model_fn.
WARNING:tensorflow:Layer linear/linear_model is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because its dtype defaults to floatx.
If you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.
To change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Restoring parameters from /tmp/tmp948t2b_4/model.ckpt-200
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
###Markdown
Here is some more fiddling around with the prediction data:
###Code
result_predictions = list(linear_est.predict(eval_input_fn)) # store a list of the predictions by the model for the evaluation data
person_index = 5 # we can choose any person index to see if the model was right
const_did_survive = 1 # we represented that a person survived with a 1
clear_output() # clears console output
print("Person data:")
print(dfeval.loc[person_index]) # show the data for a certain person, like sex, age, etc.
print()
print("Model estimated chances of survival:")
print(str(round(result_predictions[person_index]["probabilities"][const_did_survive] * 100, 2)) + "% chance of survival") # show the model prediction
print()
print("This person DID " + ("NOT " if not y_eval.loc[person_index] else "") + "survive.") # show the expected result (did the person really survive?)
###Output
Person data:
sex female
age 15
n_siblings_spouses 0
parch 0
fare 8.0292
class Third
deck unknown
embark_town Queenstown
alone y
Name: 5, dtype: object
Model estimated chances of survival:
79.03% chance of survival
This person DID survive.
###Markdown
In this case, the model thinks the person has a 81% chance of survival and the person did survive! :) That's it for linear regression! Now onto classification. ClassificationNow that we've covered linear regression it is time to talk about classification. Where regression was used to predict a numeric value, classification is used to seperate data points into classes of different labels. In this example we will use a TensorFlow estimator to classify flowers.Since we've touched on how estimators work earlier, I'll go a bit quicker through this example. This section is based on the following guide from the TensorFlow website.https://www.tensorflow.org/tutorials/estimator/premade Imports and Setup
###Code
%tensorflow_version 2.x # this line is not required unless you are in a notebook
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import pandas as pd
###Output
_____no_output_____
###Markdown
DatasetThis specific dataset separates flowers into 3 different classes of species.- Setosa- Versicolor- VirginicaThe information about each flower is the following.- sepal length- sepal width- petal length- petal width
###Code
CSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Species']
SPECIES = ['Setosa', 'Versicolor', 'Virginica']
# Lets define some constants to help us later on
train_path = tf.keras.utils.get_file(
"iris_training.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv")
test_path = tf.keras.utils.get_file(
"iris_test.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv")
train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)
test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)
# Here we use keras (a module inside of TensorFlow) to grab our datasets and read them into a pandas dataframe
###Output
Downloading data from https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv
8192/2194 [================================================================================================================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv
8192/573 [============================================================================================================================================================================================================================================================================================================================================================================================================================================] - 0s 0us/step
###Markdown
Let's have a look at our data.
###Code
train.head()
###Output
_____no_output_____
###Markdown
Now we can pop the species column off and use that as our label.
###Code
train_y = train.pop('Species')
test_y = test.pop('Species')
train.head() # the species column is now gone
train.shape # we have 120 entries with 4 features
###Output
_____no_output_____
###Markdown
Input FunctionRemember that nasty input function we created earlier. Well we need to make another one here! Fortunately for us this one is a little easier to digest.
###Code
def input_fn(features, labels, training=True, batch_size=256):
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
# Shuffle and repeat if you are in training mode.
if training:
dataset = dataset.shuffle(1000).repeat()
return dataset.batch(batch_size)
###Output
_____no_output_____
###Markdown
Feature ColumnsAnd you didn't think we forgot about the feature columns, did you?
###Code
# Feature columns describe how to use the input.
my_feature_columns = []
for key in train.keys():
my_feature_columns.append(tf.feature_column.numeric_column(key=key))
print(my_feature_columns)
# This time we don't need to find unique values, since they are already encoded for us in this dataset.
###Output
[NumericColumn(key='SepalLength', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), NumericColumn(key='SepalWidth', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), NumericColumn(key='PetalLength', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), NumericColumn(key='PetalWidth', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None)]
###Markdown
Building the ModelAnd now we are ready to choose a model. For classification tasks there are a variety of different estimators/models that we can pick from. Some options are listed below.- ```DNNClassifier``` (Deep Neural Network)- ```LinearClassifier```We can choose either model but the DNN seems to be the best choice. This is because we may not be able to find a linear correspondence in our data. So let's build a model!
###Code
# Build a DNN with 2 hidden layers of 30 and 10 hidden nodes respectively.
classifier = tf.estimator.DNNClassifier( # tf.estimator contains a variety of models ready to train.
feature_columns=my_feature_columns,
# Two hidden layers of 30 and 10 nodes respectively.
hidden_units=[30, 10],
# The model must choose between 3 classes.
n_classes=3)
###Output
INFO:tensorflow:Using default config.
WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmp5k5hhxfj
INFO:tensorflow:Using config: {'_model_dir': '/tmp/tmp5k5hhxfj', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true
graph_options {
rewrite_options {
meta_optimizer_iterations: ONE
}
}
, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_service': None, '_cluster_spec': ClusterSpec({}), '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}
###Markdown
What we've just done is created a deep neural network that has two hidden layers. These layers have 30 and 10 neurons respectively. This is the number of neurons the TensorFlow official tutorial uses so we'll stick with it. However, it is worth mentioning that the number of hidden neurons is an arbitrary number and many experiments and tests are usually done to determine the best choice for these values. Try playing around with the number of hidden neurons and see if your results change. TrainingNow it's time to train the model!
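(As an experiment suggested above, here is a hypothetical variant you could build and compare; the notebook continues below with training the original classifier. `my_feature_columns` is the list created earlier.)

```python
# an arbitrary alternative architecture, purely for experimentation
alt_classifier = tf.estimator.DNNClassifier(
    feature_columns=my_feature_columns,
    hidden_units=[64, 32],   # try different widths/depths and compare accuracy
    n_classes=3)
```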
###Code
# We include a lambda to avoid having to create an inner function as we did previously
classifier.train(
    input_fn=lambda: input_fn(train, train_y, training=True),
    steps=5000)
# Steps are similar to epochs, but instead of specifying how many times to look at the whole dataset,
# we specify how many batches of data the model should train on.
###Output
INFO:tensorflow:Calling model_fn.
WARNING:tensorflow:Layer dnn is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because its dtype defaults to floatx.
If you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.
To change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Restoring parameters from /tmp/tmp5k5hhxfj/model.ckpt-5000
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/saver.py:1077: get_checkpoint_mtimes (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.
Instructions for updating:
Use standard file utilities to get mtimes.
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
INFO:tensorflow:Calling checkpoint listeners before saving checkpoint 5000...
INFO:tensorflow:Saving checkpoints for 5000 into /tmp/tmp5k5hhxfj/model.ckpt.
INFO:tensorflow:Calling checkpoint listeners after saving checkpoint 5000...
INFO:tensorflow:loss = 0.6217097, step = 5000
INFO:tensorflow:global_step/sec: 447.462
INFO:tensorflow:loss = 0.6055616, step = 5100 (0.227 sec)
INFO:tensorflow:global_step/sec: 621.449
INFO:tensorflow:loss = 0.6096381, step = 5200 (0.159 sec)
INFO:tensorflow:global_step/sec: 616.389
INFO:tensorflow:loss = 0.60297173, step = 5300 (0.163 sec)
INFO:tensorflow:global_step/sec: 600.222
INFO:tensorflow:loss = 0.596534, step = 5400 (0.167 sec)
INFO:tensorflow:global_step/sec: 563.882
INFO:tensorflow:loss = 0.5977715, step = 5500 (0.177 sec)
INFO:tensorflow:global_step/sec: 563.23
INFO:tensorflow:loss = 0.587742, step = 5600 (0.177 sec)
INFO:tensorflow:global_step/sec: 588.229
INFO:tensorflow:loss = 0.5938575, step = 5700 (0.173 sec)
INFO:tensorflow:global_step/sec: 590.042
INFO:tensorflow:loss = 0.5843288, step = 5800 (0.167 sec)
INFO:tensorflow:global_step/sec: 596.197
INFO:tensorflow:loss = 0.57950556, step = 5900 (0.167 sec)
INFO:tensorflow:global_step/sec: 558.41
INFO:tensorflow:loss = 0.58131254, step = 6000 (0.183 sec)
WARNING:tensorflow:It seems that global step (tf.train.get_global_step) has not been increased. Current value (could be stable): 6067 vs previous value: 6067. You could increase the global step by passing tf.train.get_global_step() to Optimizer.apply_gradients or Optimizer.minimize.
INFO:tensorflow:global_step/sec: 538.261
INFO:tensorflow:loss = 0.57366914, step = 6100 (0.182 sec)
INFO:tensorflow:global_step/sec: 544.382
INFO:tensorflow:loss = 0.5637631, step = 6200 (0.187 sec)
INFO:tensorflow:global_step/sec: 563.782
INFO:tensorflow:loss = 0.56694406, step = 6300 (0.178 sec)
INFO:tensorflow:global_step/sec: 580.514
INFO:tensorflow:loss = 0.5670054, step = 6400 (0.169 sec)
INFO:tensorflow:global_step/sec: 578.964
INFO:tensorflow:loss = 0.55458224, step = 6500 (0.175 sec)
INFO:tensorflow:global_step/sec: 574.592
INFO:tensorflow:loss = 0.54680526, step = 6600 (0.171 sec)
INFO:tensorflow:global_step/sec: 491.422
INFO:tensorflow:loss = 0.5486492, step = 6700 (0.207 sec)
INFO:tensorflow:global_step/sec: 569.601
INFO:tensorflow:loss = 0.54900765, step = 6800 (0.175 sec)
INFO:tensorflow:global_step/sec: 503.669
INFO:tensorflow:loss = 0.5337447, step = 6900 (0.196 sec)
INFO:tensorflow:global_step/sec: 497.073
INFO:tensorflow:loss = 0.53282267, step = 7000 (0.202 sec)
INFO:tensorflow:global_step/sec: 488.161
INFO:tensorflow:loss = 0.53346014, step = 7100 (0.205 sec)
INFO:tensorflow:global_step/sec: 531.804
INFO:tensorflow:loss = 0.5290585, step = 7200 (0.190 sec)
INFO:tensorflow:global_step/sec: 557.273
INFO:tensorflow:loss = 0.533105, step = 7300 (0.180 sec)
INFO:tensorflow:global_step/sec: 548.274
INFO:tensorflow:loss = 0.5212065, step = 7400 (0.179 sec)
INFO:tensorflow:global_step/sec: 602.054
INFO:tensorflow:loss = 0.52324986, step = 7500 (0.169 sec)
INFO:tensorflow:global_step/sec: 552.763
INFO:tensorflow:loss = 0.5164593, step = 7600 (0.181 sec)
INFO:tensorflow:global_step/sec: 543.238
INFO:tensorflow:loss = 0.5083279, step = 7700 (0.181 sec)
INFO:tensorflow:global_step/sec: 582.673
INFO:tensorflow:loss = 0.5085105, step = 7800 (0.174 sec)
INFO:tensorflow:global_step/sec: 475.231
INFO:tensorflow:loss = 0.50720966, step = 7900 (0.211 sec)
INFO:tensorflow:global_step/sec: 567.593
INFO:tensorflow:loss = 0.5043426, step = 8000 (0.173 sec)
INFO:tensorflow:global_step/sec: 536.309
INFO:tensorflow:loss = 0.4963996, step = 8100 (0.186 sec)
INFO:tensorflow:global_step/sec: 536.08
INFO:tensorflow:loss = 0.49940473, step = 8200 (0.187 sec)
INFO:tensorflow:global_step/sec: 557.507
INFO:tensorflow:loss = 0.49950457, step = 8300 (0.183 sec)
INFO:tensorflow:global_step/sec: 507.683
INFO:tensorflow:loss = 0.5037663, step = 8400 (0.194 sec)
INFO:tensorflow:global_step/sec: 568.523
INFO:tensorflow:loss = 0.5007417, step = 8500 (0.175 sec)
INFO:tensorflow:global_step/sec: 553.176
INFO:tensorflow:loss = 0.48826316, step = 8600 (0.184 sec)
INFO:tensorflow:global_step/sec: 619.357
INFO:tensorflow:loss = 0.482177, step = 8700 (0.162 sec)
INFO:tensorflow:global_step/sec: 521.854
INFO:tensorflow:loss = 0.4848107, step = 8800 (0.188 sec)
INFO:tensorflow:global_step/sec: 629.14
INFO:tensorflow:loss = 0.47875693, step = 8900 (0.159 sec)
INFO:tensorflow:global_step/sec: 601.835
INFO:tensorflow:loss = 0.5038421, step = 9000 (0.166 sec)
INFO:tensorflow:global_step/sec: 542.922
INFO:tensorflow:loss = 0.47670552, step = 9100 (0.184 sec)
INFO:tensorflow:global_step/sec: 628.124
INFO:tensorflow:loss = 0.47172242, step = 9200 (0.159 sec)
INFO:tensorflow:global_step/sec: 538.124
INFO:tensorflow:loss = 0.46971238, step = 9300 (0.189 sec)
INFO:tensorflow:global_step/sec: 574.384
INFO:tensorflow:loss = 0.47382265, step = 9400 (0.172 sec)
INFO:tensorflow:global_step/sec: 593.273
INFO:tensorflow:loss = 0.466937, step = 9500 (0.168 sec)
INFO:tensorflow:global_step/sec: 592.101
INFO:tensorflow:loss = 0.44865286, step = 9600 (0.172 sec)
INFO:tensorflow:global_step/sec: 583.18
INFO:tensorflow:loss = 0.46331257, step = 9700 (0.169 sec)
INFO:tensorflow:global_step/sec: 587.609
INFO:tensorflow:loss = 0.46025106, step = 9800 (0.172 sec)
INFO:tensorflow:global_step/sec: 612.517
INFO:tensorflow:loss = 0.44838548, step = 9900 (0.163 sec)
INFO:tensorflow:Calling checkpoint listeners before saving checkpoint 10000...
INFO:tensorflow:Saving checkpoints for 10000 into /tmp/tmp5k5hhxfj/model.ckpt.
INFO:tensorflow:Calling checkpoint listeners after saving checkpoint 10000...
INFO:tensorflow:Loss for final step: 0.45682693.
###Markdown
The only thing to explain here is the **steps** argument. This simply tells the classifier to run for 5000 steps. Try modifying this and seeing if your results change. Keep in mind that more is not always better. EvaluationNow let's see how this trained model does!
###Code
eval_result = classifier.evaluate(
input_fn=lambda: input_fn(test, test_y, training=False))
print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
###Output
INFO:tensorflow:Calling model_fn.
WARNING:tensorflow:Layer dnn is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because its dtype defaults to floatx.
If you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.
To change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Starting evaluation at 2020-12-14T20:50:40Z
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Restoring parameters from /tmp/tmp5k5hhxfj/model.ckpt-10000
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
INFO:tensorflow:Inference Time : 0.22641s
INFO:tensorflow:Finished evaluation at 2020-12-14-20:50:40
INFO:tensorflow:Saving dict for global step 10000: accuracy = 0.8, average_loss = 0.5315775, global_step = 10000, loss = 0.5315775
INFO:tensorflow:Saving 'checkpoint_path' summary for global step 10000: /tmp/tmp5k5hhxfj/model.ckpt-10000
Test set accuracy: 0.800
###Markdown
Notice this time we didn't specify the number of steps. This is because during evaluation the model will only look at the testing data one time. PredictionsNow that we have a trained model it's time to use it to make predictions. I've written a little script below that allows you to type the features of a flower and see a prediction for its class.
###Code
def input_fn(features, batch_size=256):
# Convert the inputs to a Dataset without labels.
return tf.data.Dataset.from_tensor_slices(dict(features)).batch(batch_size)
features = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth']
predict = {}
print("Please type numeric values as prompted.")
for feature in features:
  valid = False
  while not valid:  # keep asking until the user types a numeric value
    val = input(feature + ": ")
    try:
      predict[feature] = [float(val)]
      valid = True
    except ValueError:
      print("Please enter a number.")
predictions = classifier.predict(input_fn=lambda: input_fn(predict)) # Predict is usually a list, not just a single element.
for pred_dict in predictions: # The predictions variable contains the predictions made. Each prediction is a dictionary.
class_id = pred_dict['class_ids'][0] # The key "class_ids" contains the elements the model thinks are the correct ones, it is a list.
probability = pred_dict['probabilities'][class_id] # The key "probabilities" contains the probabilities for each species in a list.
print('Prediction is "{}" ({:.1f}%)'.format(
SPECIES[class_id], 100 * probability))
# Here is some example input and expected classes you can try above
expected = ['Setosa', 'Versicolor', 'Virginica']
predict_x = {
'SepalLength': [5.1, 5.9, 6.9],
'SepalWidth': [3.3, 3.0, 3.1],
'PetalLength': [1.7, 4.2, 5.4],
'PetalWidth': [0.5, 1.5, 2.1],
}
###Output
_____no_output_____
###Markdown
And that's pretty much it for classification! ClusteringNow that we've covered regression and classification it's time to talk about clustering data! Clustering is a Machine Learning technique that involves the grouping of data points. In theory, data points that are in the same group should have similar properties and/or features, while data points in different groups should have highly dissimilar properties and/or features. (https://towardsdatascience.com/the-5-clustering-algorithms-data-scientists-need-to-know-a36d136ef68)Unfortunately there are issues with the current version of TensorFlow and the implementation for KMeans. This means we cannot use KMeans without writing the algorithm from scratch. We aren't quite at that level yet, so we'll just explain the basics of clustering for now.Basic Algorithm for K-Means.- Step 1: Randomly pick K points to place K centroids- Step 2: Assign all the data points to the centroids by distance. The closest centroid to a point is the one it is assigned to.- Step 3: Average all the points belonging to each centroid to find the middle of those clusters (center of mass). Place the corresponding centroids into that position.- Step 4: Reassign every point once again to the closest centroid.- Step 5: Repeat steps 3-4 until no point changes which centroid it belongs to.*Please refer to the video for an explanation of KMeans clustering.* (A small plain-NumPy sketch of these steps is included just below.) Hidden Markov Models"The Hidden Markov Model is a finite set of states, each of which is associated with a (generally multidimensional) probability distribution []. Transitions among the states are governed by a set of probabilities called transition probabilities." (http://jedlik.phy.bme.hu/~gerjanos/HMM/node4.html)A hidden markov model works with probabilities to predict future events or states. In this section we will learn how to create a hidden markov model that can predict the weather.*This section is based on the following TensorFlow tutorial.* https://www.tensorflow.org/probability/api_docs/python/tfp/distributions/HiddenMarkovModel DataLet's start by discussing the type of data we use when we work with a hidden markov model. In the previous sections we worked with large datasets of 100's of different entries. For a markov model we are only interested in probability distributions that have to do with states. We can find these probabilities from large datasets or may already have these values. We'll run through an example in a second that should clear some things up, but let's discuss the components of a markov model.**States:** In each markov model we have a finite set of states. These states could be something like "warm" and "cold" or "high" and "low" or even "red", "green" and "blue". These states are "hidden" within the model, which means we do not directly observe them.**Observations:** Each state has a particular outcome or observation associated with it based on a probability distribution. An example of this is the following: *On a hot day Tim has an 80% chance of being happy and a 20% chance of being sad.***Transitions:** Each state will have a probability defining the likelihood of transitioning to a different state. An example is the following: *a cold day has a 30% chance of being followed by a hot day and a 70% chance of being followed by another cold day.*To create a hidden markov model we need:- States- Observation Distribution- Transition DistributionFor our purpose we will assume we already have this information available as we attempt to predict the weather on a given day. Imports and Setup
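Before the setup cells, here is a minimal plain-NumPy sketch of the five K-Means steps listed above (purely illustrative; the function name and toy data are made up, and this is not part of the original tutorial):

```python
import numpy as np

def kmeans(points, k, iters=100, seed=0):
    rng = np.random.default_rng(seed)
    # Step 1: randomly pick K data points as the initial centroids
    centroids = points[rng.choice(len(points), size=k, replace=False)]
    for _ in range(iters):
        # Steps 2 and 4: assign every point to its closest centroid
        dists = np.linalg.norm(points[:, None, :] - centroids[None, :, :], axis=2)
        labels = dists.argmin(axis=1)
        # Step 3: move each centroid to the mean (center of mass) of its points
        new_centroids = np.array([points[labels == i].mean(axis=0) if np.any(labels == i)
                                  else centroids[i] for i in range(k)])
        # Step 5: stop once the assignments (and hence the centroids) no longer change
        if np.allclose(new_centroids, centroids):
            break
        centroids = new_centroids
    return centroids, labels

toy_points = np.array([[1.0, 1.0], [1.2, 0.8], [0.9, 1.1], [5.0, 5.0], [5.2, 4.9]])
print(kmeans(toy_points, k=2))
```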
###Code
%tensorflow_version 2.x # this line is not required unless you are in a notebook
###Output
`%tensorflow_version` only switches the major version: 1.x or 2.x.
You set: `2.x # this line is not required unless you are in a notebook`. This will be interpreted as: `2.x`.
TensorFlow 2.x selected.
###Markdown
Due to a version mismatch with tensorflow v2 and tensorflow_probability we need to install the most recent version of tensorflow_probability (see below).
###Code
!pip install tensorflow_probability==0.8.0rc0 --user --upgrade
import tensorflow_probability as tfp # We are using a different module from tensorflow this time
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Weather ModelTaken directly from the TensorFlow documentation (https://www.tensorflow.org/probability/api_docs/python/tfp/distributions/HiddenMarkovModel). We will model a simple weather system and try to predict the temperature on each day given the following information.1. Cold days are encoded by a 0 and hot days are encoded by a 1.2. The first day in our sequence has an 80% chance of being cold.3. A cold day has a 30% chance of being followed by a hot day.4. A hot day has a 20% chance of being followed by a cold day.5. On each day the temperature is normally distributed with mean and standard deviation 0 and 5 on a cold day and mean and standard deviation 15 and 10 on a hot day.If you're unfamiliar with **standard deviation** it can be put simply as the range of expected values. In this example, on a hot day the average temperature is 15 and ranges from 5 to 25.To model this in TensorFlow we will do the following.
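(A quick aside before the TensorFlow cell, not from the tutorial: the "mean 15, standard deviation 10" statement can be sanity-checked by sampling; about 68% of values drawn from that distribution fall between 5 and 25.)

```python
import numpy as np

samples = np.random.default_rng(0).normal(loc=15.0, scale=10.0, size=100_000)
print(samples.mean(), samples.std())             # roughly 15 and 10
print(((samples > 5) & (samples < 25)).mean())   # roughly 0.68
```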
###Code
tfd = tfp.distributions # making a shortcut for later on
initial_distribution = tfd.Categorical(probs=[0.2, 0.8])  # Refer to point 2 above (note: with cold encoded as 0, point 2's "80% chance of cold" corresponds to [0.8, 0.2]; as coded, the first day is 80% likely to be hot, which matches the printed means below)
transition_distribution = tfd.Categorical(probs=[[0.5, 0.5],
                                                 [0.2, 0.8]])  # refer to points 3 and 4 above (the cold-day row uses 0.5/0.5 rather than the 0.7/0.3 described in point 3)
observation_distribution = tfd.Normal(loc=[0., 15.], scale=[5., 10.]) # refer to point 5 above
# the loc argument represents the mean and the scale is the standard deviation
###Output
_____no_output_____
###Markdown
We've now created distribution variables to model our system and it's time to create the hidden markov model.
###Code
model = tfd.HiddenMarkovModel(
initial_distribution=initial_distribution,
transition_distribution=transition_distribution,
observation_distribution=observation_distribution,
num_steps=7)
###Output
_____no_output_____
###Markdown
The number of steps represents the number of days that we would like to predict information for. In this case we've chosen 7, an entire week.To get the **expected temperatures** on each day we can do the following.
###Code
mean = model.mean()
# due to the way TensorFlow works on a lower level we need to evaluate part of the graph
# from within a session to see the value of this tensor
# in the new version of tensorflow we need to use tf.compat.v1.Session() rather than just tf.Session()
with tf.compat.v1.Session() as sess:
print(mean.numpy())
###Output
[12. 11.1 10.83 10.748999 10.724699 10.71741 10.715222]
|
mongodb_csv_hashtag_location.ipynb | ###Markdown
1. connect to database
###Code
# connect to database and get collections' names
db_twitter = client["Twitter"]
collections_twitter = db_twitter.collection_names()
# get current year and current week number
current_timestamp = int(time.time() * 1000)
current_year = int(datetime.datetime.now().year)
print("current year : " + str(current_year))
current_week = int((current_timestamp - 1546214400000)/1000/604800)+1
print("current week : " + str(current_week))
# list all collections and the number of records in each collection
dic_collection = {}
for i in collections_twitter:
if i.startswith("20") and contain_string in i:
year = i[0:4]
week = re.search('_(.+?)_', i).group(1)[1:]
if int(year) < current_year:
dic_collection[i] = "{:}".format(db_twitter[i].find({}).count())
else:
try:
if int(week) < current_week:
dic_collection[i] = "{:}".format(db_twitter[i].find({}).count())
except: pass
for key in sorted(dic_collection):
print("%s: %s" % (key, dic_collection[key]))
###Output
2019_W1_Twitter_Australia: 40880
###Markdown
2. create csv for each collection based on hashtag and user location
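Note: the cell below strips commas out of the user location and builds each CSV row by hand; as an aside, Python's built-in `csv` module handles quoting automatically (a minimal sketch with a hypothetical file name):

```python
import csv

with open("output/hashtag_user_location/example.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["hashtag", "user_location"])
    writer.writerow(["auspol", "Sydney, Australia"])  # the embedded comma is quoted, not lost
```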
###Code
# write into csv file
def write_csv(file_name,hashtag,user_location):
# avoid user location splitted by comma
try:
user_location = ''.join(user_location.split(','))
except:
pass
row = "{},{}\n".format(hashtag,user_location)
with open(file_name, 'a') as f:
f.write(row)
# calculate running time
def calculate_time(start_time, t):
current_time = time.time()
duration = current_time - start_time
if (duration/60) >= (t+10):
t += 10
print("The program is still running, already run for about "+ str(t) + " minutes.")
return t
# create the folder if it does not exist
def create_folder():
folder = "output/hashtag_user_location/"
if not os.path.exists(folder):
os.makedirs(folder)
return folder
# delete existed collection from the list dic_collection
def delete_collection(folder,dic_collection):
for input_file in glob.glob(os.path.join(folder,'*.csv')):
collection_name = re.search('_location/(.+?)_hashtag', input_file).group(1)
print("Existed collection: " + collection_name)
if collection_name in dic_collection:
del dic_collection[collection_name]
return dic_collection
#create folder if not exist
folder = create_folder()
dic_collection = delete_collection(folder,dic_collection)
for collection in sorted(dic_collection):
print("-----------------------")
print("processing on collection " + str(collection))
    start = time.time()
    t = 0
file_name = folder + str(collection) + "_hashtag_user_location.csv"
with open(file_name, 'a') as f:
f.write('hashtag,user_location\n')
for document in db_twitter[collection].find():
# twitter_id = document['id']
user_location = document['user']['location']
if len(document['entities']['hashtags']) == 0:
hashtag = None
write_csv(file_name,hashtag,user_location)
t = calculate_time(start, t)
elif len(document['entities']['hashtags']) == 1:
hashtag = document['entities']['hashtags'][0]['text']
write_csv(file_name,hashtag,user_location)
t = calculate_time(start, t)
else:
for i in range(len(document['entities']['hashtags'])):
hashtag = document['entities']['hashtags'][i]['text']
write_csv(file_name,hashtag,user_location)
t = calculate_time(start, t)
print("csv file for collection " + collection + " is done.")
print("-----------------------")
###Output
Existed collection: 2019_W1_Twitter_Australia
|
airport/ABCP por meses.ipynb | ###Markdown
ABCP Cluster by Month
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from matplotlib import pyplot as plt
from sklearn.ensemble import RandomForestRegressor
# Read the csv files for the chosen month
j = 3 # Month
frames = []
for i in range(1,32):
day = str(i)
month = str(j)
if (i<10):
day = '0' + str(i)
if (j<10):
month = '0' + str(j)
file_name = 'D:\\Usuarios\\mdbrenes\\Documents\\Projects\\airport\\Documentos TT\\Datos de entrada\\ABPC\\abpc_2018-' + month + '-' + day + '.csv'
abpc_ = pd.read_csv(file_name, delimiter=';', names=['UPDATE','ID','FLIGHTID','LECTOR ID','CHECK DATE','STATUS','REASON','CHECKIN SEQUENCE NUMBER','PNR','DEPARTURE FLIGHT DESTINATION AIRPORT IATA CODE','DEPARTURE FLIGHT AIRLINE IATA CODE','DEPARTURE FLIGHT NUMBER','DEPARTURE FLIGHT SOBT','EXTRA 1','EXTRA 2'])
abpc_2 = abpc_.drop([0],axis=0)
frames.append(abpc_2)
abpc = pd.concat(frames)
cols = ['ID','FLIGHTID','LECTOR ID','CHECK DATE','STATUS','REASON','CHECKIN SEQUENCE NUMBER','PNR','DEPARTURE FLIGHT DESTINATION AIRPORT IATA CODE','DEPARTURE FLIGHT AIRLINE IATA CODE','DEPARTURE FLIGHT NUMBER','DEPARTURE FLIGHT SOBT','UPDATE','EXTRA 1','EXTRA 2']
abpc = abpc[cols]
abpc.reset_index(inplace=True,drop=True)
abpc.info()
abpc.head()
abpc['STATUS'].unique()
for reason in abpc['STATUS'].unique():
num = abpc['ID'].loc[abpc['STATUS']==reason].count()
print('Num passengers, STATUS:',reason)
print(num)
abpc = abpc.loc[abpc['STATUS']=='PASSED']
abpc.info()
# There may be some rows with an extra field due to an error
abpc.loc[pd.isnull(abpc['EXTRA 1'])!=True]
# There may be some rows with an extra field due to an error
abpc.loc[pd.isnull(abpc['EXTRA 2'])!=True]
abpc.loc[(pd.isnull(abpc['EXTRA 1'])!=True)&(pd.isnull(abpc['EXTRA 2'])!=True)]
# Drop the rows that cannot be handled (many erroneous, unintelligible fields)
for index in abpc.loc[(pd.isnull(abpc['EXTRA 1'])!=True)&(pd.isnull(abpc['EXTRA 2'])!=True)].index.values:
abpc = abpc.drop([index],axis=0)
abpc.reset_index(inplace=True,drop=True)
for index in abpc.loc[pd.isnull(abpc['EXTRA 1'])!=True].index.values:
reason_1 = abpc['REASON'].iloc[index]
reason_2 = abpc['CHECKIN SEQUENCE NUMBER'].iloc[index]
if ((reason_1 != None)&(reason_2 != None)):
reason = [reason_1 + '_&_' + reason_2]
new_row = list(abpc.iloc[index,0:5].values) + reason + list(abpc.iloc[index,7:12].values) + [abpc.iloc[index,13]] + [abpc.iloc[index,12]] + [None,None]
abpc.iloc[index,:] = new_row
abpc.reset_index(inplace=True,drop=True)
abpc.loc[pd.isnull(abpc['EXTRA 1'])!=True]
abpc.loc[pd.isnull(abpc['EXTRA 2'])!=True]
###Output
_____no_output_____
###Markdown
Add the LOCAL field (international or domestic flight)
###Code
file_name = 'D:\\Usuarios\\mdbrenes\\Documents\\Projects\\airport\\Documentos TT\\DIC_AIRP_Athenas.csv'
ap = pd.read_csv(file_name,delimiter=';',header=0)
ap_sheng = ap.loc[(ap['SWITCH_SCHENGEN']==1)]
ap_sheng_loc = ap_sheng.loc[ap_sheng['SWITCH_LOCAL']==1]
ap_sheng_int = ap_sheng.loc[ap_sheng['SWITCH_LOCAL']==0]
abpc['LOCAL'] = list(map(lambda x: x in ap_sheng_loc['CODE'].values,abpc['DEPARTURE FLIGHT DESTINATION AIRPORT IATA CODE']))
abpc['SCHENGEN'] = list(map(lambda x: x in ap_sheng['CODE'].values,abpc['DEPARTURE FLIGHT DESTINATION AIRPORT IATA CODE']))
abpc['LOCAL'].sum()
abpc['SCHENGEN'].sum()
###Output
_____no_output_____
###Markdown
Data preprocessing
###Code
# Encode Categorical Variables
# STATUS
labelencoder_X = LabelEncoder()
abpc['STATUS'] = labelencoder_X.fit_transform(abpc['STATUS'])
# 0 PASSED
# 1 NOT PASSED
abpc.head()
# Are there any nulls in the dates?
nulls_sobt = abpc['DEPARTURE FLIGHT SOBT'].isnull().sum()
nulls_check = abpc['CHECK DATE'].isnull().sum()
abpc.reset_index(inplace=True,drop=True)
to_drop_ds = []
if (nulls_sobt!=0):
for ind in range(0,len(abpc['DEPARTURE FLIGHT SOBT'].values)):
date = abpc['DEPARTURE FLIGHT SOBT'].iloc[ind]
if (pd.isnull(date)):
print('DEPARTURE FLIGHT SOBT:',date,'---')
to_drop_ds.append(ind)
to_drop_cd = []
if (nulls_check!=0):
for ind in range(0,len(abpc['CHECK DATE'].values)):
date = abpc['CHECK DATE'].iloc[ind]
if (pd.isnull(date)):
print('CHECK DATE:',date,'---')
to_drop_cd.append(ind)
to_drop = list(set(to_drop_ds) | set(to_drop_cd))
to_drop_ds
to_drop_cd
to_drop
abpc.drop(to_drop,inplace=True) # Run this only once!!
abpc.reset_index(inplace=True,drop=True)
# Dates to Datetime
abpc.reset_index(inplace=True,drop=True)
# Check-Date
date_check = pd.to_datetime(abpc['CHECK DATE'],format = '%d/%m/%Y %H:%M')
# Departure Date
date_dep = pd.to_datetime(abpc['DEPARTURE FLIGHT SOBT'],format = '%d/%m/%Y %H:%M')
# Check whether any date failed to convert properly or is null
nulls_check_ = date_check.isnull().sum()
nulls_sobt_ = date_dep.isnull().sum()
if (nulls_check_!=0):
for ind in range(0,len(date_check)):
if (pd.isnull(date_check[ind])):
print(ind)
print('CHECK DATE:',date_check[ind],'---')
if (nulls_sobt_!=0):
for ind in range(0,len(date_dep)):
if (pd.isnull(date_dep[ind])):
print(ind)
print('DEPARTURE FLIGHT SOBT:',date_dep[ind],'---')
# Dates to String Format %Y-%m-%d %H:%M:%S
abpc['CHECK DATE']=date_check.map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
abpc['DEPARTURE FLIGHT SOBT']=date_dep.map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
abpc.head()
# Adding a variable: Dwell Time (Check - Departure)
# Dwell Time
dwell_time = date_dep - date_check
abpc['DWELL TIME'] = dwell_time.map(lambda x: x.total_seconds()/3600)
# Adding a variable: DEPARTURE SOBT HOUR , DEPARTURE SOBT WEEKDAY
abpc['DEPARTURE SOBT HOUR'] = date_dep.map(lambda x: x.hour)
abpc['DEPARTURE SOBT WEEKDAY'] = date_dep.map(lambda x: x.weekday())
abpc.head()
# DEPARTURE SOBT CLUSTER
uno = [13,14,15,16,17,18,19] # Good Cluster
tres = [10,11,12] # Good cluster
dos = [2,4,5,6,7] # Good Cluster
# The remaining individual hours are left out: 20,21,22,23,0,1,3 (one cluster for each)
def clust_day_zone(x):
if (x in uno):
return 'A'
elif (x in tres):
return 'MD'
elif (x in dos):
return 'NM'
else:
return str(x)
abpc['DEPARTURE SOBT CLUSTER'] = abpc['DEPARTURE SOBT HOUR'].map(lambda x: clust_day_zone(x))
###Output
_____no_output_____
###Markdown
Plot the pattern curves
###Code
abpc.reset_index(inplace=True,drop=True)
def clust_wd_sobt(wd,sobt,local):
dw_t_h = abpc['DWELL TIME'].loc[(abpc['DEPARTURE SOBT CLUSTER']==sobt)&(abpc['DEPARTURE SOBT WEEKDAY']==wd)&(abpc['LOCAL']==local)]
return dw_t_h
cluster_prueba = clust_wd_sobt(2,'A',False)
fig = plt.figure(num=4, figsize=(14, 9), dpi=80, facecolor='w', edgecolor='k')
plt.hist(cluster_prueba,30,range=(0,4),density=True,histtype='stepfilled', alpha=0.4)
###Output
_____no_output_____
###Markdown
Predictions
###Code
data_X = cluster_prueba
num_train = int(data_X.count()/5*4)
data_X_train_ = data_X[0:num_train] #Train
data_X_test_ = data_X[num_train:] #Test
n_train_, bins_train, patchs_train = plt.hist(data_X_train_,30,range=[0,4],alpha=0.8)
n_test_, bins_test, patchs_test = plt.hist(data_X_test_,30,range=[0,4],alpha=0.8)
tot_train = sum(n_train_)
n_train = list(map(lambda x: x/tot_train,n_train_))
tot_test = sum(n_test_)
n_test = list(map(lambda x: x/tot_test,n_test_))
bins_train_ = []
for i in range(0,len(bins_train)-1):
bins_train_.append((bins_train[i]+bins_train[i+1])/2)
bins_test_ = []
for i in range(0,len(bins_test)-1):
bins_test_.append((bins_test[i]+bins_test[i+1])/2)
plt.bar(bins_train_,n_train,width=0.15,color='m',alpha=0.5)
plt.bar(bins_test_,n_test,width=0.15,color='c',alpha=0.5)
bins_train_M = np.array([bins_train_]).transpose()
regressor = RandomForestRegressor(n_estimators=150)
regressor.fit(bins_train_M,n_train)
bins_test_M = np.array([bins_test_]).transpose()
plt.scatter(bins_test_M,regressor.predict(bins_test_M),color='m',zorder=5)
plt.bar(bins_test_,n_test,width=0.15,color='b',alpha=0.5)
plt.bar(bins_train_,n_train,width=0.15,color='c',alpha=0.5)
regressor.score(bins_test_M,n_test)
###Output
_____no_output_____ |
013-Python 直播课/Class_02_20200325_Python基础.ipynb | ###Markdown
A small example of Python basics
###Code
import pandas as pd
import pandas
a = [1, 2, 3, 8, 'javascript', 'julia', 'perl', 'php', 'python', 'ruby']
a.pop(0)     # removes (and returns) the element at index 0, i.e. the value 1
a.remove(2)  # removes the first occurrence of the value 2
a
###Output
_____no_output_____ |
2021/sem2/lecture_1_warmup/lecture.ipynb | ###Markdown
Разминка Рассказать про материал второго курса, домашние задания и систему выставления оценок. План на лекцию: освежить в памяти основы из 1-й части курса. **Разминка 1**: обсудить код программы по выводу top-n слов песни "Yellow Submarine" ```c++// обсудить: что это и как оно работаетinclude include include include include include include include include include include // обсудить:// * что это// * что значит const// * что значит static// * где живут данные// * когда объект создаётся и когда уничтожаетсяstatic const std::string song = "\ Yellow Submarine\n\ \n\ In the town where I was born\n\ Lived a man who sailed to sea\n\ And he told us of his life\n\ In the land of submarines\n\ So we sailed up to the sun\n\ Till we found a sea of green\n\ And we lived beneath the waves\n\ in our yellow submarine\n\ \n\ We all live in a yellow submarine\n\ Yellow submarine, yellow submarine\n\ We all live in a yellow submarine\n\ Yellow submarine, yellow submarine\n\ \n\ And our friends are all aboard\n\ Many more of them live next door\n\ And the band begins to play\n\ \n\ We all live in a yellow submarine\n\ Yellow submarine, yellow submarine\n\ We all live in a yellow submarine\n\ Yellow submarine, yellow submarine\n\ \n\ (Full speed ahead Mr. Parker, full speed ahead\n\ Full speed ahead it is, Sergeant\n\ Action station, action station\n\ Aye, aye, sir, fire\n\ Captain, captain)\n\ \n\ As we live a life of ease\n\ Every one of us has all we need\n\ Sky of blue and sea of green\n\ In our yellow submarine\n\ \n\ We all live in a yellow submarine\n\ Yellow submarine, yellow submarine\n\ We all live in a yellow submarine\n\ Yellow submarine, yellow submarine\n\ We all live in a yellow submarine\n\ Yellow submarine, yellow submarine";// обсудить:// * что это// * какой код ассемблера генерирует эта строчка// * что такое unordered_map, внутреннее устройство, разница с std::mapusing WordsCounter = std::unordered_map;// обсудить:// * что значит static// * что значит &static void to_lower_inplace(std::string& s){ for (char& c : s) c = static_cast(std::tolower(static_cast(c)));}// обсудить:// * зачем erasestatic void remove_non_alpha_inplace(std::string& s){ s.erase( std::remove_if( s.begin(), s.end(), [](unsigned char c) { return !std::isalpha(c); }), s.end());}// обсудить:// * что значит nodiscard// * почему const&? правила передачи аргументов// * что даёт наличие return только от одной локальной переменной в функции// * как устроен в памяти std::vector. 
если сложно - рисовать// * какие ещё есть контейнеры-последовательности кроме std::vector, как они устроены[[nodiscard]] static std::vector split_by_words(const std::string& text){ std::vector words; std::istringstream ss{text}; std::for_each( std::istream_iterator(ss), std::istream_iterator(), [&](std::string s){ remove_non_alpha_inplace(s); to_lower_inplace(s); if (!s.empty()) words.emplace_back(std::move(s)); }); return words;}[[nodiscard]] static WordsCounter make_words_counter(const std::string& text){ const std::vector words = split_by_words(text); WordsCounter counter; for (const std::string& word : words) counter[word] += 1; return counter;}// обсудить:// * почему namespace// * почему struct// * разница class/structnamespace {struct CountAndWord{ int count; std::string word;};} // namespace// обсудить:// * reserve// * partial_sort (+begin/end)// * lambda (+captures)static void print_top_n_words(const WordsCounter& counter, const int topn){ if (topn <= 0) return; std::vector caws; caws.reserve(counter.size()); for (const auto& [word, count] : counter) caws.emplace_back(CountAndWord{ count, word }); const int top_ix = std::min(topn, caws.size()); std::partial_sort( caws.begin(), caws.begin() + top_ix, caws.end(), [](const CountAndWord& l, const CountAndWord& r) { return std::tie(r.count, r.word) < std::tie(l.count, l.word); }); for (int i = 0; i < top_ix; ++i) std::cout " << caws[i].count << '\n';}// обсудить:// * что такое main// * что такое argc, argvint main(int argc, char **argv){ // обсудить: // * почему 2 // * что в argv[0] // * что такое endl // * почему return 1 if (argc != 2) { std::cout << "Usage: " << argv[0] << " top_n" << std::endl; return 1; } // обсудить: // * что такое try-catch try { const int top_n = std::stoi(argv[1]); const WordsCounter words_counter = make_words_counter(song); print_top_n_words(words_counter, top_n); } catch (const std::exception& e) { // обсудить: // * что такое cerr // * почему return 1 // * что такое std::exception и почему он здесь std::cerr << "ERROR: failed to find top n words: " << e.what() << std::endl; return 1; } // просмотреть всю программу, где могут быть брошены исключения return 0;}``` **Разминка 2**: обсудить класс `RoundRobinQueue` ```c++include include include include include include // обсудить:// * шаблоны (что такое, зачем, как)// * шаблонные параметрыtemplateclass RoundRobinQueue{private: // обсудить: инварианты std::array, N> data; int start_ix = 0; // индекс первого элемента в очереди int final_ix = 0; // индекс следующего за последним элементом в очереди public: // обсудить: // * конструкторы, деструкторы, когда вызываются // * какие есть ещё спец. 
методы // * какие есть правила RoundRobinQueue() = default; RoundRobinQueue(std::initializer_list lst) { for (const T& t : lst) push(t); } RoundRobinQueue(const RoundRobinQueue&) = default; RoundRobinQueue& operator=(const RoundRobinQueue&) = default; // обсудить: // * что это такое // * что делает std::move // * зачем присваивать rhs // * зачем noexcept RoundRobinQueue(RoundRobinQueue&& rhs) noexcept : data(std::move(rhs.data)) , start_ix(rhs.start_ix) , final_ix(rhs.final_ix) { rhs = RoundRobinQueue(); } // обсудить: // * зачем это нужно если есть move-конструктор // * зачем проверка на this RoundRobinQueue& operator=(RoundRobinQueue&& rhs) noexcept { if (this != &rhs) { data = std::move(rhs); start_ix = rhs.start_ix; final_ix = rhs.final_ix; rhs = RoundRobinQueue(); } return *this; } // обсудить: // * что это и что здесь происходит ~RoundRobinQueue() = default; // обсудить: почему const bool empty() const { return start_ix == final_ix && !data[start_ix].has_value(); } bool full() const { return start_ix == final_ix && data[start_ix].has_value(); } T pop() { if (empty()) throw std::runtime_error("pop from empty queue"); T res = std::move(data[start_ix].value()); data[start_ix].reset(); start_ix = next_ix(start_ix); return res; } void push(T item) { if (full()) throw std::runtime_error("push to full queue"); data[final_ix].emplace(std::move(item)); final_ix = next_ix(final_ix); }private: // обсудить: // * что значит static static int next_ix(const int ix) { return (ix + 1) % N; }};int main(){ try { RoundRobinQueue q; q.push("alesha"); q.push("dobrynia"); q.push("ilya"); while (true) std::cout << q.pop() << std::endl; } catch (const std::exception& e) { std::cout << e.what() << std::endl; } // обсудить: // * что будет выведено // * будет ли скомпилирован конструктор RoundRobinQueue(std::initializer_list lst) // * сколько классов RoundRobinQueue будет скомпилировано // * как будут линковаться несколько RoundRobinQueue, если они компилируются // в разных cpp-файлах return 0;}``` **Разминка 3**: обсудить программу, печатающую животных в зоопарке ```c++include include include include include include include using namespace std;namespace {// замечание://// организуем животных в иерархию://// Animal// |// Turtle// |// NinjaTurtle class Animal{ public: Animal(const string& name, int age) : name_(name) , age_(age) {} // обсудить: // * что это такое // * зачем так делают virtual ~Animal() = default; // обсудить: // * что это такое // * = 0 virtual string greeting() const = 0; const string& name() const { return name_; } int age() const { return age_; } private: string name_; int age_;};class Turtle : public Animal{public: Turtle(const string& name, int age) : Animal(name, age) {} // обсудить: // * override // * final string greeting() const override { return "hello"; }};// обсудить:// * порядок вызова конструкторов-деструкторов// * layout класса (не трогаем что это non-standard layout)// * sizeof класса// * alignmentclass NinjaTurtle : public Turtle{public: NinjaTurtle(const string& name, const string& short_name) : Turtle(name, 12) , short_name_(short_name) {} string greeting() const override { return "camabanga!"; } private: string short_name_;};} // namespace// обсудить:// * что такое unique_ptr и что вы про него знаете// * какие ещё есть умные указатели, внутреннее устройство// * зачем нужен make_unique// * зачем нужен make_shared[[nodiscard]]static vector> make_zoo(){ vector> rv; rv.reserve(7); rv.emplace_back(make_unique("Tortilla", 100)); rv.emplace_back(make_unique("Big Turtle", 100)); 
rv.emplace_back(make_unique("Aunt Motya", 200)); rv.emplace_back(make_unique("Donatello")); rv.emplace_back(make_unique("Rafael")); return move(rv);}// обсудить:// * где в коде main происходит вызов виртуальных функций (подсказка: 2 места)//int main(){ for (const unique_ptr& a : make_zoo()) { printf("I'm %10s. My age is %3i. %s\n", a->name().c_str(), a->age(), a->greeting().c_str()); } return 0;}``` Forward declaration: традиционный пример с iostream Рассмотрим компиляцию cpp-файла: ```c++// example_fwd_declaration_1.cppinclude extern int read_int(std::istream& is);extern int read_float(std::istream& is);struct Person{ int age; float weight;};Person read_person(std::istream& is){ Person p; p.age = read_int(is); p.weight = read_float(is); return p;}```
###Code
# compile and measure the time
!time --format "real time %E\nuser time %U\nsys time %S" clang++-8 -c example_fwd_declaration_1.cpp
###Output
real time 0:00.24
user time 0.20
sys time 0.02
###Markdown
Note that when compiling `example_fwd_declaration_1.cpp` the compiler does not need to see the implementation of the `std::istream` class, because within this file:

* none of the class's methods are called
* the size of the class does not need to be known, since only a pointer to the object is passed, and the size of a pointer is known

_(walk through the code once more and show where and how the `std::istream` class is used)_

It is enough for the compiler to know that such a class exists.

Let's apply the **forward declaration** technique - replace the definition of the class with a declaration.

The class `std::istream` is a template, so its forward declaration looks a little scary:

```c++
// forward-declare std::istream
namespace std {
    template<class CharT> struct char_traits;
    template<class CharT, class Traits = char_traits<CharT>> class basic_istream;
    using istream = basic_istream<char>;
}

extern int read_int(std::istream& is);
extern int read_float(std::istream& is);

struct Person
{
    int age;
    float weight;
};

Person read_person(std::istream& is)
{
    Person p;
    p.age = read_int(is);
    p.weight = read_float(is);
    return p;
}
```
###Code
# compile and measure the time
!time --format "real time %E\nuser time %U\nsys time %S" clang++-8 -c example_fwd_declaration_2.cpp
###Output
real time 0:00.02
user time 0.01
sys time 0.00
###Markdown
**We made compilation ~10x faster**

_(explain why this effect occurs)_

Forward declaration: iosfwd

Since the compilation-time cost of the `iostream` header is a well-known problem, the standard has long provided `#include <iosfwd>` - forward declarations for the objects from `iostream`:

https://en.cppreference.com/w/cpp/header/iosfwd

So our example can be simplified:

```c++
#include <iosfwd>

extern int read_int(std::istream& is);
extern int read_float(std::istream& is);

struct Person
{
    int age;
    float weight;
};

Person read_person(std::istream& is)
{
    Person p;
    p.age = read_int(is);
    p.weight = read_float(is);
    return p;
}
```
###Code
# compile and measure the time
!time --format "real time %E\nuser time %U\nsys time %S" clang++-8 -c example_fwd_declaration_3.cpp
###Output
real time 0:00.03
user time 0.02
sys time 0.00
|
supervised_learning/Classification .ipynb | ###Markdown
The Iris dataset in scikit-learn
###Code
dir (datasets)
# loading the iris dataset using the scikit-learn library
df = datasets.load_iris()
type(df)
df.keys()
print(df.DESCR)
print (df.data)
print (df.target)
print (df.feature_names)
print (df.target_names)
type (df.data)
type (df.target)
df.data.shape
df.target_names.shape
###Output
_____no_output_____
###Markdown
Exploratory data analysis (EDA)
###Code
x = df.data
y = df.target
iris = pd.DataFrame(x, columns=df.feature_names)
iris.head()
###Output
_____no_output_____
###Markdown
Visual EDA with Scatter_matrix
###Code
_ = pd.plotting.scatter_matrix(iris, c = y, figsize = [8, 8], s=150, marker = 'D')
###Output
_____no_output_____
###Markdown
Using scikit-learn to fit a classifier
###Code
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=6)
knn.fit(df['data'], df['target'])
df['data'].shape
df['target'].shape
###Output
_____no_output_____
###Markdown
Predicting on unlabeled data
###Code
X_new = np.array([[5.6, 2.8, 3.9, 1.1],
[5.7, 2.6, 3.8, 1.3],
[4.7, 3.2, 1.3, 0.2]])
prediction = knn.predict(X_new)
X_new.shape
print('Prediction: {}'.format(prediction))
###Output
Prediction: [1 1 0]
###Markdown
Train/test split
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3,
random_state=21, stratify=y)
knn = KNeighborsClassifier(n_neighbors=8)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
knn.score(X_test, y_test)
###Output
_____no_output_____ |
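As a small, optional extension of this train/test workflow (a sketch, not part of the original exercise; it reuses the `X_train`/`X_test` split and the `KNeighborsClassifier` import from above), one might compare test accuracy for a few values of `n_neighbors`:

```python
# Sketch: test accuracy for several k values, reusing the split created above.
for k in [1, 3, 5, 8, 15]:
    knn_k = KNeighborsClassifier(n_neighbors=k)
    knn_k.fit(X_train, y_train)
    print(k, knn_k.score(X_test, y_test))
```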
TensorFlow_10.ipynb | ###Markdown
[Problem] When x is [[1, 11, 7, 9], [1, 3, 4, 3], [1, 1, 0, 1]], print the classification result.

- x, y = placeholder
- number of classes = 3
- w, b = variables (initialized with random values)
- learning_rate = 0.1
- number of training steps = 2001
###Code
import numpy as np
import tensorflow as tf
import math
x_data = [[1, 2, 1, 1],
[2, 1, 3, 2],
[3, 1, 3, 4],
[4, 1, 5, 5],
[1, 7, 5, 5],
[1, 2, 5, 6],
[1, 6, 6, 6],
[1, 7, 7, 7]]
y_data = [[0, 0, 1],#2
[0, 0, 1],#2
[0, 0, 1],#2
[0, 1, 0],#1
[0, 1, 0],#1
[0, 1, 0],#1
[1, 0, 0],#0
[1, 0, 0]]#0
tf.set_random_seed(777)
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
w = tf.Variable(tf.random_normal([4,3]))
b = tf.Variable(tf.random_normal([1]))
z = tf.matmul(x,w)+b
hf = tf.nn.softmax(z)
cost = tf.reduce_mean(tf.reduce_sum(y*-tf.log(hf), axis = 1 ))
optimizer = tf.train.GradientDescentOptimizer(0.1)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(2001) :
sess.run(train, feed_dict = {x:x_data, y:y_data})
if i % 200 == 0 :
print(i, sess.run(cost, feed_dict = {x:x_data, y:y_data}))
yhat = sess.run(hf, feed_dict = {x:[[1,11,7,9], [1,3,4,3], [1,1,0,1]]})
print("\nyhat\n",yhat)
yhat2 = sess.run(tf.argmax(yhat, axis = 1))
print("예측값 : ", yhat2)
y2 = sess.run(tf.argmax(y_data, axis = 1))
print("실제값 : ",y2)
###Output
예측값 : [1 0 2]
실제값 : [2 2 2 1 1 1 0 0]
###Markdown
* * * Animal classification
###Code
xy = np.loadtxt('DataSet/zoo.csv', delimiter = ',', dtype = np.float32)
xdata = xy[:, 0:-1]
ydata = xy[:, [-1]]
print(xdata.shape, ydata.shape)
nb_classes = 7
x = tf.placeholder(tf.float32, [None, 16])
y = tf.placeholder(tf.int32, [None, 1]) # y holds an integer label in 0..6 (one-hot encoding required)
y_one_hot = tf.one_hot(y, nb_classes) # 0 -> 100000, 3 -> 0001000
print("one hot 상태 : ", y_one_hot)
'''
One-hot encoding adds one dimension (y: (None, 1) -> (None, 1, 7)),
so a reshape to (None, 7) is needed.
'''
y_one_hot = tf.reshape(y_one_hot, [-1, nb_classes])
print("reshape 결과 : ", y_one_hot)
w = tf.Variable(tf.random_normal([16,nb_classes]))
b = tf.Variable(tf.random_normal([nb_classes]))
logits = tf.matmul(x,w) + b
hf = tf.nn.softmax(logits)
cost_i = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y_one_hot)
cost = tf.reduce_mean(cost_i)
optimizer = tf.train.GradientDescentOptimizer(0.1).minimize(cost)
prediction = tf.argmax(hf, 1) # axis = 1
correct_prediction = tf.equal(prediction, tf.argmax(y_one_hot,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess :
sess.run(tf.global_variables_initializer())
for step in range(2001):
sess.run(optimizer, feed_dict={x:xdata, y:ydata})
if step%200==0:
cv, av = sess.run([cost, accuracy], feed_dict={x:xdata, y:ydata})
print(step, cv, av)
print("예측 동물 : ", sess.run(prediction, feed_dict={x:[[0.,0.,1.,0.,0.,
1.,1.,1.,1.,0.,
0.,1.,0.,1.,0.,0.]]}))
###Output
0 10.077507 0.02970297
200 0.5486735 0.8910891
400 0.2960701 0.9009901
600 0.18900605 0.980198
800 0.13623165 0.990099
1000 0.106259316 1.0
1200 0.08720473 1.0
1400 0.07406866 1.0
1600 0.06447227 1.0
1800 0.05715497 1.0
2000 0.051389027 1.0
예측 동물 : [3]
|
projects/python/Predicting Credit Card Approvals/notebook.ipynb | ###Markdown
1. Credit card applicationsCommercial banks receive a lot of applications for credit cards. Many of them get rejected for many reasons, like high loan balances, low income levels, or too many inquiries on an individual's credit report, for example. Manually analyzing these applications is mundane, error-prone, and time-consuming (and time is money!). Luckily, this task can be automated with the power of machine learning and pretty much every commercial bank does so nowadays. In this notebook, we will build an automatic credit card approval predictor using machine learning techniques, just like the real banks do.We'll use the Credit Card Approval dataset from the UCI Machine Learning Repository. The structure of this notebook is as follows:First, we will start off by loading and viewing the dataset.We will see that the dataset has a mixture of both numerical and non-numerical features, that it contains values from different ranges, plus that it contains a number of missing entries.We will have to preprocess the dataset to ensure the machine learning model we choose can make good predictions.After our data is in good shape, we will do some exploratory data analysis to build our intuitions.Finally, we will build a machine learning model that can predict if an individual's application for a credit card will be accepted.First, loading and viewing the dataset. We find that since this data is confidential, the contributor of the dataset has anonymized the feature names.
###Code
# Import pandas
import pandas as pd
# Load dataset
cc_apps = pd.read_csv('datasets/cc_approvals.data', header = None)
# Inspect data
cc_apps.head()
###Output
_____no_output_____
###Markdown
2. Inspecting the applicationsThe output may appear a bit confusing at its first sight, but let's try to figure out the most important features of a credit card application. The features of this dataset have been anonymized to protect the privacy, but this blog gives us a pretty good overview of the probable features. The probable features in a typical credit card application are Gender, Age, Debt, Married, BankCustomer, EducationLevel, Ethnicity, YearsEmployed, PriorDefault, Employed, CreditScore, DriversLicense, Citizen, ZipCode, Income and finally the ApprovalStatus. This gives us a pretty good starting point, and we can map these features with respect to the columns in the output. As we can see from our first glance at the data, the dataset has a mixture of numerical and non-numerical features. This can be fixed with some preprocessing, but before we do that, let's learn about the dataset a bit more to see if there are other dataset issues that need to be fixed.
###Code
# Print summary statistics
cc_apps_description = cc_apps.describe()
print(cc_apps_description)
print("\n")
# Print DataFrame information
cc_apps_info = cc_apps.info()
print(cc_apps_info)
print("\n")
# Inspect missing values in the dataset
cc_apps.tail(17)
###Output
2 7 10 14
count 690.000000 690.000000 690.00000 690.000000
mean 4.758725 2.223406 2.40000 1017.385507
std 4.978163 3.346513 4.86294 5210.102598
min 0.000000 0.000000 0.00000 0.000000
25% 1.000000 0.165000 0.00000 0.000000
50% 2.750000 1.000000 0.00000 5.000000
75% 7.207500 2.625000 3.00000 395.500000
max 28.000000 28.500000 67.00000 100000.000000
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 690 entries, 0 to 689
Data columns (total 16 columns):
0 690 non-null object
1 690 non-null object
2 690 non-null float64
3 690 non-null object
4 690 non-null object
5 690 non-null object
6 690 non-null object
7 690 non-null float64
8 690 non-null object
9 690 non-null object
10 690 non-null int64
11 690 non-null object
12 690 non-null object
13 690 non-null object
14 690 non-null int64
15 690 non-null object
dtypes: float64(2), int64(2), object(12)
memory usage: 86.3+ KB
None
###Markdown
3. Handling the missing values (part i)We've uncovered some issues that will affect the performance of our machine learning model(s) if they go unchanged:Our dataset contains both numeric and non-numeric data (specifically data that are of float64, int64 and object types). Specifically, the features 2, 7, 10 and 14 contain numeric values (of types float64, float64, int64 and int64 respectively) and all the other features contain non-numeric values.The dataset also contains values from several ranges. Some features have a value range of 0 - 28, some have a range of 2 - 67, and some have a range of 1017 - 100000. Apart from these, we can get useful statistical information (like mean, max, and min) about the features that have numerical values. Finally, the dataset has missing values, which we'll take care of in this task. The missing values in the dataset are labeled with '?', which can be seen in the last cell's output.Now, let's temporarily replace these missing value question marks with NaN.
###Code
# Import numpy
import numpy as np
# Inspect missing values in the dataset
print(cc_apps.tail(17))
# Replace the '?'s with NaN
cc_apps = cc_apps.replace('?', np.nan)
# Inspect the missing values again
print(cc_apps.tail(17))
###Output
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
673 ? 29.50 2.000 y p e h 2.000 f f 0 f g 00256 17 -
674 a 37.33 2.500 u g i h 0.210 f f 0 f g 00260 246 -
675 a 41.58 1.040 u g aa v 0.665 f f 0 f g 00240 237 -
676 a 30.58 10.665 u g q h 0.085 f t 12 t g 00129 3 -
677 b 19.42 7.250 u g m v 0.040 f t 1 f g 00100 1 -
678 a 17.92 10.210 u g ff ff 0.000 f f 0 f g 00000 50 -
679 a 20.08 1.250 u g c v 0.000 f f 0 f g 00000 0 -
680 b 19.50 0.290 u g k v 0.290 f f 0 f g 00280 364 -
681 b 27.83 1.000 y p d h 3.000 f f 0 f g 00176 537 -
682 b 17.08 3.290 u g i v 0.335 f f 0 t g 00140 2 -
683 b 36.42 0.750 y p d v 0.585 f f 0 f g 00240 3 -
684 b 40.58 3.290 u g m v 3.500 f f 0 t s 00400 0 -
685 b 21.08 10.085 y p e h 1.250 f f 0 f g 00260 0 -
686 a 22.67 0.750 u g c v 2.000 f t 2 t g 00200 394 -
687 a 25.25 13.500 y p ff ff 2.000 f t 1 t g 00200 1 -
688 b 17.92 0.205 u g aa v 0.040 f f 0 f g 00280 750 -
689 b 35.00 3.375 u g c h 8.290 f f 0 t g 00000 0 -
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
673 NaN 29.50 2.000 y p e h 2.000 f f 0 f g 00256 17 -
674 a 37.33 2.500 u g i h 0.210 f f 0 f g 00260 246 -
675 a 41.58 1.040 u g aa v 0.665 f f 0 f g 00240 237 -
676 a 30.58 10.665 u g q h 0.085 f t 12 t g 00129 3 -
677 b 19.42 7.250 u g m v 0.040 f t 1 f g 00100 1 -
678 a 17.92 10.210 u g ff ff 0.000 f f 0 f g 00000 50 -
679 a 20.08 1.250 u g c v 0.000 f f 0 f g 00000 0 -
680 b 19.50 0.290 u g k v 0.290 f f 0 f g 00280 364 -
681 b 27.83 1.000 y p d h 3.000 f f 0 f g 00176 537 -
682 b 17.08 3.290 u g i v 0.335 f f 0 t g 00140 2 -
683 b 36.42 0.750 y p d v 0.585 f f 0 f g 00240 3 -
684 b 40.58 3.290 u g m v 3.500 f f 0 t s 00400 0 -
685 b 21.08 10.085 y p e h 1.250 f f 0 f g 00260 0 -
686 a 22.67 0.750 u g c v 2.000 f t 2 t g 00200 394 -
687 a 25.25 13.500 y p ff ff 2.000 f t 1 t g 00200 1 -
688 b 17.92 0.205 u g aa v 0.040 f f 0 f g 00280 750 -
689 b 35.00 3.375 u g c h 8.290 f f 0 t g 00000 0 -
###Markdown
4. Handling the missing values (part ii)We replaced all the question marks with NaNs. This is going to help us in the next missing value treatment that we are going to perform.An important question that gets raised here is why are we giving so much importance to missing values? Can't they be just ignored? Ignoring missing values can affect the performance of a machine learning model heavily. While ignoring the missing values our machine learning model may miss out on information about the dataset that may be useful for its training. Then, there are many models which cannot handle missing values implicitly such as LDA. So, to avoid this problem, we are going to impute the missing values with a strategy called mean imputation.
###Code
# Impute the missing values with mean imputation
cc_apps.fillna(cc_apps.mean(), inplace=True)
# Count the number of NaNs in the dataset to verify
print(cc_apps.isnull().sum())
###Output
0 12
1 12
2 0
3 6
4 6
5 9
6 9
7 0
8 0
9 0
10 0
11 0
12 0
13 13
14 0
15 0
dtype: int64
###Markdown
5. Handling the missing values (part iii) We have successfully taken care of the missing values present in the numeric columns. There are still some missing values to be imputed for columns 0, 1, 3, 4, 5, 6 and 13. All of these columns contain non-numeric data, and this is why the mean imputation strategy would not work here. This needs a different treatment. We are going to impute these missing values with the most frequent values as present in the respective columns. This is good practice when it comes to imputing missing values for categorical data in general.
###Code
# Iterate over each column of cc_apps
for col in cc_apps.columns:
# Check if the column is of object type
if cc_apps[col].dtypes == 'object':
# Impute with the most frequent value
        cc_apps[col] = cc_apps[col].fillna(cc_apps[col].value_counts().index[0])
# Count the number of NaNs in the dataset and print the counts to verify
cc_apps.isnull().sum()
###Output
_____no_output_____
###Markdown
6. Preprocessing the data (part i)The missing values are now successfully handled.There is still some minor but essential data preprocessing needed before we proceed towards building our machine learning model. We are going to divide these remaining preprocessing steps into three main tasks:Convert the non-numeric data into numeric.Split the data into train and test sets. Scale the feature values to a uniform range.First, we will be converting all the non-numeric values into numeric ones. We do this because not only it results in a faster computation but also many machine learning models (like XGBoost) (and especially the ones developed using scikit-learn) require the data to be in a strictly numeric format. We will do this by using a technique called label encoding.
###Code
# Import LabelEncoder
from sklearn.preprocessing import LabelEncoder
# Instantiate LabelEncoder
le = LabelEncoder()
# Iterate over all the values of each column and extract their dtypes
for col in cc_apps.columns:
# Compare if the dtype is object
if cc_apps[col].dtypes == 'object':
# Use LabelEncoder to do the numeric transformation
cc_apps[col] = le.fit_transform(cc_apps[col].values)
###Output
_____no_output_____
###Markdown
7. Splitting the dataset into train and test setsWe have successfully converted all the non-numeric values to numeric ones.Now, we will split our data into train set and test set to prepare our data for two different phases of machine learning modeling: training and testing. Ideally, no information from the test data should be used to scale the training data or should be used to direct the training process of a machine learning model. Hence, we first split the data and then apply the scaling.Also, features like DriversLicense and ZipCode are not as important as the other features in the dataset for predicting credit card approvals. We should drop them to design our machine learning model with the best set of features. In Data Science literature, this is often referred to as feature selection.
###Code
# Import train_test_split
from sklearn.model_selection import train_test_split
# Drop the features 11 and 13 and convert the DataFrame to a NumPy array
cc_apps = cc_apps.drop([11, 13], axis=1)
cc_apps = cc_apps.values
# Segregate features and labels into separate variables
X,y = cc_apps[:,0:13] , cc_apps[:,13]
# Split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size = 0.33,
random_state = 42)
###Output
_____no_output_____
###Markdown
8. Preprocessing the data (part ii)The data is now split into two separate sets - train and test sets respectively. We are only left with one final preprocessing step of scaling before we can fit a machine learning model to the data. Now, let's try to understand what these scaled values mean in the real world. Let's use CreditScore as an example. The credit score of a person is their creditworthiness based on their credit history. The higher this number, the more financially trustworthy a person is considered to be. So, a CreditScore of 1 is the highest since we're rescaling all the values to the range of 0-1.
###Code
# Import MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
# Instantiate MinMaxScaler and use it to rescale X_train and X_test
scaler = MinMaxScaler(feature_range=(0, 1))
rescaledX_train = scaler.fit_transform(X_train)
rescaledX_test = scaler.transform(X_test)  # transform only, so no information from the test set leaks into the scaling
###Output
_____no_output_____
###Markdown
9. Fitting a logistic regression model to the train setEssentially, predicting if a credit card application will be approved or not is a classification task. According to UCI, our dataset contains more instances that correspond to "Denied" status than instances corresponding to "Approved" status. Specifically, out of 690 instances, there are 383 (55.5%) applications that got denied and 307 (44.5%) applications that got approved. This gives us a benchmark. A good machine learning model should be able to accurately predict the status of the applications with respect to these statistics.Which model should we pick? A question to ask is: are the features that affect the credit card approval decision process correlated with each other? Although we can measure correlation, that is outside the scope of this notebook, so we'll rely on our intuition that they indeed are correlated for now. Because of this correlation, we'll take advantage of the fact that generalized linear models perform well in these cases. Let's start our machine learning modeling with a Logistic Regression model (a generalized linear model).
###Code
# Import LogisticRegression
from sklearn.linear_model import LogisticRegression
# Instantiate a LogisticRegression classifier with default parameter values
logreg = LogisticRegression()
# Fit logreg to the train set
logreg.fit(rescaledX_train, y_train)
###Output
_____no_output_____
###Markdown
10. Making predictions and evaluating performance But how well does our model perform? We will now evaluate our model on the test set with respect to classification accuracy. But we will also take a look at the model's confusion matrix. In the case of predicting credit card applications, it is equally important to see whether our machine learning model predicts as denied the applications that originally got denied. If our model does not perform well in this respect, it might end up approving applications that should have been denied. The confusion matrix helps us to view our model's performance from these aspects.
###Code
# Import confusion_matrix
from sklearn.metrics import confusion_matrix
# Use logreg to predict instances from the test set and store it
y_pred = logreg.predict(rescaledX_test)
# Get the accuracy score of logreg model and print it
print("Accuracy of logistic regression classifier: ", logreg.score(rescaledX_test, y_test))
# Print the confusion matrix of the logreg model
print(confusion_matrix(y_test, y_pred))
###Output
Accuracy of logistic regression classifier: 0.8377192982456141
[[92 11]
[26 99]]
###Markdown
11. Grid searching and making the model perform better Our model was pretty good! It was able to yield an accuracy score of almost 84%. For the confusion matrix, the first element of the first row of the confusion matrix denotes the true negatives, meaning the number of negative instances (denied applications) predicted by the model correctly. And the last element of the second row of the confusion matrix denotes the true positives, meaning the number of positive instances (approved applications) predicted by the model correctly. Let's see if we can do better. We can perform a grid search of the model parameters to improve the model's ability to predict credit card approvals. scikit-learn's implementation of logistic regression consists of different hyperparameters, but we will grid search over the following two: tol and max_iter.
###Code
# Import GridSearchCV
from sklearn.model_selection import GridSearchCV
# Define the grid of values for tol and max_iter
tol = [0.01, 0.001, 0.0001]
max_iter = [100, 150, 200]
# Create a dictionary where tol and max_iter are keys and the lists of their values are corresponding values
param_grid = dict(tol = tol, max_iter = max_iter)
###Output
_____no_output_____
###Markdown
12. Finding the best performing modelWe have defined the grid of hyperparameter values and converted them into a single dictionary format which GridSearchCV() expects as one of its parameters. Now, we will begin the grid search to see which values perform best.We will instantiate GridSearchCV() with our earlier logreg model with all the data we have. Instead of passing train and test sets separately, we will supply X (scaled version) and y. We will also instruct GridSearchCV() to perform a cross-validation of five folds.We'll end the notebook by storing the best-achieved score and the respective best parameters.While building this credit card predictor, we tackled some of the most widely-known preprocessing steps such as scaling, label encoding, and missing value imputation. We finished with some machine learning to predict if a person's application for a credit card would get approved or not given some information about that person.
###Code
# Instantiate GridSearchCV with the required parameters
grid_model = GridSearchCV(estimator = logreg, param_grid = param_grid, cv=5)
# Use scaler to rescale X and assign it to rescaledX
rescaledX = scaler.fit_transform(X)
# Fit data to grid_model
grid_model_result = grid_model.fit(rescaledX, y)
# Summarize results
best_score, best_params = grid_model_result.best_score_, grid_model_result.best_params_
print("Best: %f using %s" % (best_score, best_params))
###Output
Best: 0.853623 using {'tol': 0.01, 'max_iter': 100}
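As a possible follow-up (not part of the original project steps), the refitted best model stored by `GridSearchCV` could be scored on the rescaled test split created earlier; note that because the grid search above was fit on all of the data, such a score would be optimistic. A sketch:

```python
# Sketch: GridSearchCV refits the best parameter combination on the data it was given
# (refit=True by default), so best_estimator_ is a ready-to-use classifier.
best_model = grid_model_result.best_estimator_
print("Accuracy of the tuned model on the earlier test split: ",
      best_model.score(rescaledX_test, y_test))
```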
|
covid19-internazionale.ipynb | ###Markdown
Dati Coronavirus Internazionali
###Code
import pandas as pd
import numpy as np
from datetime import datetime,timedelta
from dateutil import relativedelta
from IPython.display import Markdown
import plotly.express as px
import plotly.io as pio
import dateutil.relativedelta
pio.renderers.default = 'notebook_connected'
pio.templates.default = "simple_white+gridon"
plt_config = {'scrollZoom':False}
who_url = "https://covid19.who.int/WHO-COVID-19-global-data.csv"
who = pd.read_csv(who_url, parse_dates=['Date_reported'],
index_col='Date_reported')
who = who.rename(columns={'Country_code':'country_code',
'Country':'country',
'WHO_region':'region',
'New_cases':'new_cases',
'Cumulative_cases':'cases',
'New_deaths':'new_deaths',
'Cumulative_deaths':'deaths'})
updated_at = who.index.max()
display(Markdown(f"### *Aggiornamento al {updated_at:%d/%m/%Y}*"))
cases = who.new_cases.resample('D').sum()
deaths = who.new_deaths.resample('D').sum()
display(Markdown(f"""
*Casi totali*: **{cases.sum():,d}**, *Decessi totali*: **{deaths.sum():,d}**
"""))
totals = pd.DataFrame(data={'cases': cases, 'deaths': deaths})
fig = px.bar(totals, title="andamento totale casi e decessi")
fig.update_xaxes(rangeslider_visible=True, title='Data')
fig.update_yaxes(title='Numero persone')
fig.show(config = plt_config)
cases_by_country = who.groupby("country").last().nlargest(20, columns=['cases']).cases
deaths_by_country = who.groupby("country").last().nlargest(20, columns=['deaths']).deaths
fig = px.bar(cases_by_country, title='casi per nazione')
fig.update_layout(showlegend=False, yaxis_fixedrange = True )
fig.update_xaxes(title='Nazione')
fig.update_yaxes(title='Numero casi')
fig.show(config=plt_config)
fig = px.bar(deaths_by_country, title='decessi per nazione')
fig.update_layout(showlegend=False, yaxis_fixedrange = True )
fig.update_xaxes(title='Nazione')
fig.update_yaxes(title='Numero decessi')
fig.show(config = plt_config)
today = datetime.now()
last_month = today - dateutil.relativedelta.relativedelta(months=1)
last = who[who.index > last_month].query('new_cases>1000').sort_values('new_cases', ascending=False)
fig = px.bar(last, x=last.index, y=last.new_cases, color='country',
title='casi > 1.000 per giorno (ultimo mese)')
fig.update_xaxes(title='Data')
fig.update_yaxes(title='Numero casi')
fig.show(config = plt_config)
last = who[who.index > last_month].query('new_deaths>300').sort_values('new_deaths', ascending=False)
fig = px.bar(last, x=last.index, y=last.new_deaths, color='country',
title='decessi > 300 per giorno (ultimo mese)')
fig.update_xaxes(title='Data')
fig.update_yaxes(title='Numero decessi')
fig.show(config = plt_config)
###Output
_____no_output_____ |
001-Jupyter/001-Tutorials/002-IPython-Cookbook/chapter04_optimization/04_memprof.ipynb | ###Markdown
4.4. Profiling the memory usage of your code with memory_profiler
###Code
%load_ext memory_profiler
%%writefile memscript.py
def my_func():
a = [1] * 1000000
b = [2] * 9000000
del b
return a
from memscript import my_func
%mprun -T mprof0 -f my_func my_func()
print(open('mprof0', 'r').read())
%%memit import numpy as np
np.random.randn(1000000)
###Output
_____no_output_____
###Markdown
Cleanup
###Code
!rm -f memscript.py
!rm -f mprof0
###Output
_____no_output_____ |
RNN_Lab_GPU.ipynb | ###Markdown
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly
Enter your authorization code:
··········
Mounted at /content/drive
###Markdown
Part-of-Speech Tagging with Recurrent Neural Networks Your task in this assignment is to implement a simple part-of-speech tagger based on recurrent neural networks. Get a graphics card
###Code
import os
import warnings
# Ignore FutureWarning from numpy
warnings.simplefilter(action='ignore', category=FutureWarning)
import keras.backend as K
import tensorflow as tf
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID";
# The GPU id to use, usually either "0" or "1";
os.environ["CUDA_VISIBLE_DEVICES"]="0";
# Allow growth of GPU memory, otherwise it will always look like all the memory is being used
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
###Output
Using TensorFlow backend.
###Markdown
Problem specification

Your task in this assignment is

1. to build a part-of-speech tagger based on a recurrent neural network architecture
2. to train this tagger on the provided training data and identify a good model
3. to evaluate the performance of this model on the provided test data

To identify a good model, you can use the provided development (validation) data.

Part-of-speech tagging

Part-of-speech (POS) tagging is the task of labelling words (tokens) with [parts of speech](https://en.wikipedia.org/wiki/Part_of_speech). To give an example, consider the sentence *Parker hates parsnips*. In this sentence, the word *Parker* should be labelled as a proper noun (a noun that is the name of a person), *hates* should be labelled as a verb, and *parsnips* should be labelled as a (common) noun. Part-of-speech tagging is an essential ingredient of many state-of-the-art natural language understanding systems.

Part-of-speech tagging can be cast as a supervised machine learning problem where the gold-standard data consists of sentences whose words have been manually annotated with parts of speech. For the present assignment you will be using a corpus built over the source material of the [English Web Treebank](https://catalog.ldc.upenn.edu/ldc2012t13), consisting of approximately 16,000 sentences with 254,000 tokens. The corpus has been released by the [Universal Dependencies Project](http://universaldependencies.org).

To make it easier to compare systems, the gold-standard data has been split into three parts: training, development (validation), and test. The following cell provides a function that can be used to load the data.
###Code
def read_data(path):
with open(path, encoding='utf-8') as fp:
result = []
for line in fp:
line = line.rstrip()
if len(line) == 0:
yield result
result = []
elif not line.startswith('#'):
columns = line.split()
if columns[0].isdigit():
result.append((columns[1], columns[3]))
###Output
_____no_output_____
###Markdown
The next cell loads the data:
###Code
train_data = list(read_data('/content/drive/My Drive/Colab Notebooks/RNN/en_ewt-ud-train.conllu'))
print('Number of sentences in the training data: {}'.format(len(train_data)))
dev_data = list(read_data('/content/drive/My Drive/Colab Notebooks/RNN/en_ewt-ud-dev.conllu'))
print('Number of sentences in the development data: {}'.format(len(dev_data)))
test_data = list(read_data('/content/drive/My Drive/Colab Notebooks/RNN/en_ewt-ud-test.conllu'))
print('Number of sentences in the test data: {}'.format(len(test_data)))
###Output
Number of sentences in the training data: 12543
Number of sentences in the development data: 2002
Number of sentences in the test data: 2077
###Markdown
From a Python perspective, each of the data sets is a list of what we shall refer to as *tagged sentences*. A tagged sentence, in turn, is a list of pairs $(w,t)$, where $w$ is a word token and $t$ is the word’s POS tag. Here is an example from the training data to show you how this looks like:
###Code
train_data[42]
###Output
_____no_output_____
###Markdown
You will see part-of-speech tags such as `VERB` for verb, `NOUN` for noun, and `ADV` for adverb. If you are interested in learning more about the tag set used in the gold-standard data, you can have a look at the documentation of the [Universal POS tags](http://universaldependencies.org/u/pos/all.html). However, you do not need to understand the meaning of the POS tags to solve this assignment; you can simply treat them as labels drawn from a finite set of alternatives.

Network architecture

The proposed network architecture for your tagger is a sequential model with three layers, illustrated below: an embedding, a bidirectional LSTM, and a softmax layer. The embedding turns word indexes (integers representing words) into fixed-size dense vectors which are then fed into the bidirectional LSTM. The output of the LSTM at each position of the sentence is passed to a softmax layer which predicts the POS tag for the word at that position.

To implement the network architecture, you will use [Keras](https://keras.io/). Keras comes with extensive online documentation, and reading the relevant parts of this documentation will be essential when working on this assignment. We suggest starting with the tutorial [Getting started with the Keras Sequential model](https://keras.io/getting-started/sequential-model-guide/). After that, you should have a look at some of the examples mentioned in that tutorial, and in particular the [Bidirectional LSTM](https://keras.io/examples/imdb_bidirectional_lstm/) example.

Evaluation

The most widely-used evaluation measure for part-of-speech tagging is per-word accuracy, which is the percentage of words to which the tagger assigns the correct tag (according to the gold standard). This is one of the default metrics in Keras.

One problem that you will encounter during evaluation is that the evaluation data contains words that you did not see (and did not add to your index) during training. The simplest solution to this problem is to introduce a special ‘word’ `<unk>` and replace each unknown word with this pseudoword.

Part 1: Pre-process the data

Before you can start to implement the network architecture as such, you will have to bring the tagged sentences from the gold-standard data into a form that can be used with the network. One important step in this is to map the words and tags (strings) to integers. Here is code that illustrates the idea:
###Code
word_to_index = {}
for tagged_sentence in train_data:
for word, tag in tagged_sentence:
if word not in word_to_index:
word_to_index[word] = len(word_to_index)
print('Number of unique words in the training data: {}'.format(len(word_to_index)))
print('Index of the word "hates": {}'.format(word_to_index['hates']))
###Output
Number of unique words in the training data: 19672
Index of the word "hates": 4579
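One practical detail before moving on: at evaluation time the tagger will meet words that are not in `word_to_index`, and these should be mapped to the index of the `<unk>` pseudoword mentioned above. A minimal sketch of that lookup (the helper name is ours, not part of the provided skeleton; it assumes `<unk>` has been added to the index):

```python
# Sketch: look a word up in the index, falling back to the <unk> index
# for words never seen during training.
def token_to_index(word, word_to_index):
    return word_to_index.get(word, word_to_index['<unk>'])
```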
###Markdown
Once you have indexes for the words and the tags, you can construct the input and the gold-standard output tensor required to train the network.

Constructing the input tensor

The input tensor should be of shape $(N, n)$ where $N$ is the total number of sentences in the training data and $n$ is the length of the longest sentence. Note that Keras requires all sequences in an input tensor to have the same length, which means that you will have to pad all sequences to that length. You can use the helper function [`pad_sequences`](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences) for this, which by default will front-pad sequences with the value 0. It is essential then that you do not use this special padding value as the index of actual words.

Constructing the target output tensor

The target output tensor should be of shape $(N, n, T)$ where $T$ is the number of unique tags in the training data, plus one to cater for the special padding value. The additional dimension corresponds to the fact that the softmax layer of the network will output one $T$-dimensional vector for each position of an input sentence. To construct this vector, you can use the helper function [`to_categorical`](https://www.tensorflow.org/api_docs/python/tf/keras/utils/to_categorical).
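To make the expected shapes concrete, here is a tiny, self-contained illustration of the two helper functions on made-up index sequences (the numbers are arbitrary and not taken from the corpus):

```python
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical

toy_X = [[3, 7, 2], [5, 1]]        # two 'sentences' of word indices, different lengths
toy_Y = [[1, 2, 1], [3, 2]]        # the corresponding tag indices

X_pad = pad_sequences(toy_X)                   # shape (2, 3), front-padded with 0
Y_pad = pad_sequences(toy_Y)                   # shape (2, 3)
Y_cat = to_categorical(Y_pad, num_classes=4)   # shape (2, 3, 4): one vector per position

print(X_pad.shape, Y_pad.shape, Y_cat.shape)
```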
###Code
# Define a help function to build index from a list of words or tags, each word / tag will have a unique number
def build_index(strings, init=[]):
string_to_index = {s: i for i, s in enumerate(init)}
# Loop over strings in 'strings'
for string in strings:
# Check if string exists in variable 'string_to_index',
# if string does not exist, add a new element to 'string_to_index': the current length of 'string_to_index'
if string not in string_to_index:
string_to_index[string]=len(string_to_index)
return string_to_index
# Convert all words and tags in train_data to lists, start with empty lists and use '.append()'
# to add one word / tag at a time, similar to the cell below 'pre-process the data'
words, tags = [], []
for tagged_sentence in train_data:
for word,tag in tagged_sentence:
words.append(word)
tags.append(tag)
# Call the help function you made, to build an index for words (word_to_index), and one index for tags (tag_to_index)
word_to_index=build_index(words,['<pad>','<unk>'])
tag_to_index=build_index(tags,['<pad>'])
# Check number of words and tags
num_words = len(word_to_index)
num_tags = len(tag_to_index)
print(f'Number of unique words in the training data: {num_words}')
print(f'Number of unique tags in the training_data: {num_tags}')
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
# Make a function that converts the tagged sentences, word indices and tag indices to
# X and Y, that can be used when training the RNN
def encode(tagged_sentences, word_to_index, tag_to_index):
# Start with empty lists that will contain all training examples and corresponding output
X, Y = [], []
# Loop over tagged sentences
for current_tagged_sentence in tagged_sentences:
Xcurrent, Ycurrent = [], []
for word,tag in current_tagged_sentence:# Loop over words and tags in current sentence
if word not in word_to_index:
Xcurrent.append(word_to_index.get('<unk>'))#adding an unkown word index
else:
Xcurrent.append(word_to_index.get(word))#adding the index of the word
if tag not in tag_to_index:
Ycurrent.append(tag_to_index.get('<unk>'))#adding an unkown tag index
else:
Ycurrent.append(tag_to_index.get(tag))#adding the index of an exitsing tag
# Append X with Xcurrent, and Y with Ycurrent
X.append(Xcurrent)
Y.append(Ycurrent)
# Pad the sequences, so that all have the same length
X=pad_sequences(sequences=X,padding='post')
Y=pad_sequences(sequences=Y,padding='post')
# Convert labels to categorical, as you did in the CNN lab
Y=to_categorical(Y,num_classes=num_tags,dtype= 'float32')
return X, Y
# Use your 'encode' function to create X and Y from train_data, word_to_index, tag_to_index
X,Y=encode(train_data,word_to_index,tag_to_index)
# Print the shape of X and Y
print('Shape of X:',X.shape)
print('Shape of Y:',Y.shape)
###Output
Shape of X: (12543, 159)
Shape of Y: (12543, 159, 18)
###Markdown
Part 2: Construct the model To implement the network architecture, you need to find and instantiate the relevant building blocks from the Keras library. Note that Keras layers support a large number of optional parameters; use the default values unless you have a good reason not to. Two mandatory parameters that you will have to specify are the dimensionality of the embedding and the dimensionality of the output of the LSTM layer. The following values are reasonable starting points, but do try a number of different settings.* dimensionality of the embedding: 100* dimensionality of the output of the bidirectional LSTM layer: 100You will also have to choose an appropriate loss function. For training we recommend the Adam optimiser.
###Code
# Import necessary layers
from keras import Sequential
from keras.layers import Dense, Dropout, Embedding, LSTM, Bidirectional
from keras.optimizers import Adam
from keras.losses import categorical_crossentropy
embedding_dim = 150
hidden_dim = 150
model = Sequential()
model.add(Embedding(input_dim=num_words,output_dim=embedding_dim,mask_zero=True))
model.add(Bidirectional(LSTM(units=hidden_dim,return_sequences=True)))
model.add(Dropout(0.5))
model.add(Dense(num_tags, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
# Print a summary of the model
model.summary()
###Output
Model: "sequential_16"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_16 (Embedding) (None, None, 150) 2951100
_________________________________________________________________
bidirectional_16 (Bidirectio (None, None, 90) 70560
_________________________________________________________________
dropout_3 (Dropout) (None, None, 90) 0
_________________________________________________________________
dense_16 (Dense) (None, None, 18) 1638
=================================================================
Total params: 3,023,298
Trainable params: 3,023,298
Non-trainable params: 0
_________________________________________________________________
###Markdown
Part 3: Train the network The next step is to train the network. Use the following parameters:* number of epochs: 10* batch size: 32Training will print the average running loss on the training data after each minibatch. In addition to that, we ask you to also print the loss and accuracy on the development data after each epoch. You can do so by providing the `validation_data` argument to the `fit` method.Note that the `fit` method returns a [`History`](https://keras.io/callbacks/history) object that contains useful information about the training. We will use that information in the next step.
###Code
# Encode the development (validation data) using the 'encode' function you created before
batch_size=128
epochs=10
#splitting the dev data into Xval and Yval to train the network
Xval,Yval=encode(dev_data,word_to_index,tag_to_index)
# Train the model and save the history, as you did in the DNN and CNN labs, provide validation data
history=model.fit(X,Y,validation_data = (Xval, Yval),batch_size = batch_size, epochs = epochs)
###Output
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/indexed_slices.py:434: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
###Markdown
Part 4: Identify a good model The following code will plot the loss on the training data and the loss on the validation data after each epoch:
###Code
# Lets define a help function for plotting the training results
import matplotlib.pyplot as plt
def plot_results(history):
val_loss = history.history['val_loss']
acc = history.history['accuracy']
loss = history.history['loss']
val_acc = history.history['val_accuracy']
plt.figure(figsize=(10,4))
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.plot(loss)
plt.plot(val_loss)
plt.legend(['Training','Validation'])
plt.figure(figsize=(10,4))
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.plot(acc)
plt.plot(val_acc)
plt.legend(['Training','Validation'])
plt.show()
plot_results(history)
###Output
_____no_output_____
###Markdown
Look at the plot and determine the epoch after which the model starts to overfit. Then, re-train your model using that many epochs and compute the accuracy of the tagger on the test data.
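A minimal sketch of that re-training step, reusing the variables and layer imports defined above (the epoch count here is an assumption -- read the actual value off your own validation curves):

```python
# Sketch: rebuild the model and train it only up to the epoch chosen from the plots.
best_epochs = 3  # hypothetical value, to be replaced by what the curves suggest

model = Sequential()
model.add(Embedding(input_dim=num_words, output_dim=embedding_dim, mask_zero=True))
model.add(Bidirectional(LSTM(units=hidden_dim, return_sequences=True)))
model.add(Dropout(0.5))
model.add(Dense(num_tags, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])

model.fit(X, Y, validation_data=(Xval, Yval), batch_size=batch_size, epochs=best_epochs)
```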
###Code
# Encode the test_data using the 'encode' function you created before
Xtest,Ytest = encode(test_data, word_to_index, tag_to_index)
# Evaluate the model on test data, as you did in the DNN and CNN lab
score = model.evaluate(Xtest, Ytest)
print('Test loss: %.4f' % score[0])
print('Test accuracy: %.4f' % score[1])
###Output
2077/2077 [==============================] - 3s 2ms/step
Test loss: 0.0477
Test accuracy: 0.9083
###Markdown
Part-of-Speech Tagging with Recurrent Neural Networks Your task in this assignment is to implement a simple part-of-speech tagger based on recurrent neural networks. Get a graphics card
###Code
import os
import warnings
# Ignore FutureWarning from numpy
warnings.simplefilter(action='ignore', category=FutureWarning)
import keras.backend as K
import tensorflow as tf
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID";
# The GPU id to use, usually either "0" or "1";
os.environ["CUDA_VISIBLE_DEVICES"]="0";
# Allow growth of GPU memory, otherwise it will always look like all the memory is being used
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
###Output
Using TensorFlow backend.
###Markdown
Problem specification Your task in this assignment is1. to build a part-of-speech tagger based on a recurrent neural network architecture2. to train this tagger on the provided training data and identify a good model2. to evaluate the performance of this model on the provided test dataTo identify a good model, you can use the provided development (validation) data. Part-of-speech tagging Part-of-speech (POS) tagging is the task of labelling words (tokens) with [parts of speech](https://en.wikipedia.org/wiki/Part_of_speech). To give an example, consider the sentence *Parker hates parsnips*. In this sentence, the word *Parker* should be labelled as a proper noun (a noun that is the name of a person), *hates* should be labelled as a verb, and *parsnips* should be labelled as a (common) noun. Part-of-speech tagging is an essential ingredient of many state-of-the-art natural language understanding systems.Part-of-speech tagging can be cast as a supervised machine learning problem where the gold-standard data consists of sentences whose words have been manually annotated with parts of speech. For the present assignment you will be using a corpus built over the source material of the [English Web Treebank](https://catalog.ldc.upenn.edu/ldc2012t13), consisting of approximately 16,000 sentences with 254,000 tokens. The corpus has been released by the [Universal Dependencies Project](http://universaldependencies.org).To make it easier to compare systems, the gold-standard data has been split into three parts: training, development (validation), and test. The following cell provides a function that can be used to load the data.
###Code
def read_data(path):
with open(path, encoding='utf-8') as fp:
result = []
for line in fp:
line = line.rstrip()
if len(line) == 0:
yield result
result = []
elif not line.startswith('#'):
columns = line.split()
if columns[0].isdigit():
result.append((columns[1], columns[3]))
###Output
_____no_output_____
###Markdown
The next cell loads the data:
###Code
train_data = list(read_data('en_ewt-ud-train.conllu'))
print('Number of sentences in the training data: {}'.format(len(train_data)))
dev_data = list(read_data('en_ewt-ud-dev.conllu'))
print('Number of sentences in the development data: {}'.format(len(dev_data)))
test_data = list(read_data('en_ewt-ud-test.conllu'))
print('Number of sentences in the test data: {}'.format(len(test_data)))
###Output
Number of sentences in the training data: 9897
Number of sentences in the development data: 2002
Number of sentences in the test data: 2077
###Markdown
From a Python perspective, each of the data sets is a list of what we shall refer to as *tagged sentences*. A tagged sentence, in turn, is a list of pairs $(w,t)$, where $w$ is a word token and $t$ is the word’s POS tag. Here is an example from the training data to show you how this looks like:
###Code
train_data[42]
###Output
_____no_output_____
###Markdown
You will see part-of-speech tags such as `VERB` for verb, `NOUN` for noun, and `ADV` for adverb. If you are interested in learning more about the tag set used in the gold-standard data, you can have a look at the documentation of the [Universal POS tags](http://universaldependencies.org/u/pos/all.html). However, you do not need to understand the meaning of the POS tags to solve this assignment; you can simply treat them as labels drawn from a finite set of alternatives. Network architecture The proposed network architecture for your tagger is a sequential model with three layers, illustrated below: an embedding, a bidirectional LSTM, and a softmax layer. The embedding turns word indexes (integers representing words) into fixed-size dense vectors which are then fed into the bidirectional LSTM. The output of the LSTM at each position of the sentence is passed to a softmax layer which predicts the POS tag for the word at that position.To implement the network architecture, you will use [Keras](https://keras.io/). Keras comes with an extensive online documentation, and reading the relevant parts of this documentation will be essential when working on this assignment. We suggest to start with the tutorial [Getting started with the Keras Sequential model](https://keras.io/getting-started/sequential-model-guide/). After that, you should have a look at some of the examples mentioned in that tutorial, and in particular the [Bidirectional LSTM](https://keras.io/examples/imdb_bidirectional_lstm/) example. Evaluation The most widely-used evaluation measure for part-of-speech tagging is per-word accuracy, which is the percentage of words to which the tagger assigns the correct tag (according to the gold standard). This is one of the default metrics in Keras.One problem that you will encounter during evaluation is that the evaluation data contains words that you did not see (and did not add to your index) during training. The simplest solution to this problem is to introduce a special ‘word’ `` and replace each unknown word with this pseudoword. Part 1: Pre-process the data Before you can start to implement the network architecture as such, you will have to bring the tagged sentences from the gold-standard data into a form that can be used with the network. One important step in this is to map the words and tags (strings) to integers. Here is code that illustrates the idea:
###Code
word_to_index = {}
for tagged_sentence in train_data:
for word, tag in tagged_sentence:
if word not in word_to_index:
word_to_index[word] = len(word_to_index)
print('Number of unique words in the training data: {}'.format(len(word_to_index)))
print('Index of the word "hates": {}'.format(word_to_index['hates']))
###Output
Number of unique words in the training data: 17231
Index of the word "hates": 4579
###Markdown
Once you have indexes for the words and the tags, you can construct the input and the gold-standard output tensor required to train the network. Constructing the input tensorThe input tensor should be of shape $(N, n)$ where $N$ is the total number of sentences in the training data and $n$ is the length of the longest sentence. Note that Keras requires all sequences in an input tensor to have the same length, which means that you will have to pad all sequences to that length. You can use the helper function [`pad_sequences`](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences) for this, which by default will front-pad sequences with the value 0. It is essential then that you do not use this special padding value as the index of actual words. Constructing the target output tensorThe target output tensor should be of shape $(N, n, T)$ where $T$ is the number of unique tags in the training data, plus one to cater for the special padding value. The additional dimension corresponds to the fact that the softmax layer of the network will output one $T$-dimensional vector for each position of an input sentence. To construct this vector, you can use the helper function [`to_categorical`](https://www.tensorflow.org/api_docs/python/tf/keras/utils/to_categorical).
###Code
# Define a help function to build index from a list of words or tags, each word / tag will have a unique number
def build_index(strings, init=[]):
string_to_index = {s: i for i, s in enumerate(init)}
for word_tag in strings:
if word_tag not in string_to_index.keys():
string_to_index[word_tag] = len(string_to_index)
return string_to_index
words, tags = [], []
for tagged_sentence in train_data:
for word, tag in tagged_sentence:
words.append(word)
tags.append(tag)
# Call the help function you made, to build an index for words (word_to_index), and one index for tags (tag_to_index)
word_to_index = build_index(strings = words , init=["<unk>"])
tag_to_index = build_index(strings = tags , init=["<unk>"] )
# Check number of words and tags
num_words = len(word_to_index)
num_tags = len(tag_to_index)
print(f'Number of unique words in the training data: {num_words}')
print(f'Number of unique tags in the training_data: {num_tags}')
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
# Make a function that converts the tagged sentences, word indices and tag indices to
# X and Y, that can be used when training the RNN
def encode(tagged_sentences, word_to_index, tag_to_index):
# Start with empty lists that will contain all training examples and corresponding output
X, Y = [], []
for tagSent in tagged_sentences:
Xcurrent, Ycurrent = [], []
for currWord , currTag in tagSent:
#Word
if word_to_index.get(currWord) is None:
Xcurrent.append(word_to_index.get("<unk>"))
else:
Xcurrent.append(word_to_index.get(currWord))
#Tags
if tag_to_index.get(currTag) is None:
#Ycurrent.append("<unk>")
Ycurrent.append(tag_to_index.get("<unk>"))
else:
Ycurrent.append(tag_to_index.get(currTag))
#End of Inner for loop
X.append(Xcurrent)
Y.append(Ycurrent)
#End of outer for loop
X = pad_sequences(X)
Y = pad_sequences(Y)
Y = to_categorical(Y , num_classes=len(tag_to_index.keys()) , dtype= 'float32')
return X, Y
X,Y = encode(train_data, word_to_index, tag_to_index)
# Print the shape of X and Y
print("\n")
print(f"Shape of X is : {X.shape}")
print("\n")
print(f"Shape of Y is : {Y.shape}")
#Xval , Yval = encode(dev_data, word_to_index, tag_to_index)
###Output
_____no_output_____
###Markdown
Part 2: Construct the model To implement the network architecture, you need to find and instantiate the relevant building blocks from the Keras library. Note that Keras layers support a large number of optional parameters; use the default values unless you have a good reason not to. Two mandatory parameters that you will have to specify are the dimensionality of the embedding and the dimensionality of the output of the LSTM layer. The following values are reasonable starting points, but do try a number of different settings.* dimensionality of the embedding: 100* dimensionality of the output of the bidirectional LSTM layer: 100You will also have to choose an appropriate loss function. For training we recommend the Adam optimiser.
###Code
from tensorflow.keras import Sequential
# Import necessary layers
from tensorflow.keras.layers import Dense, Dropout, Embedding, LSTM, Bidirectional
from keras.optimizers import Adam
from keras.losses import categorical_crossentropy
#from keras.losses import binary_crossentropy
embedding_dim = 100
hidden_dim = 100
maxFeatures = len(word_to_index.keys())
maxSequence = max(word_to_index.values())
#Model Creation
model = Sequential()
# The model should have an embedding layer, a bidirectional LSTM, and a dense softmax layer
model.add(Embedding(maxFeatures , embedding_dim))
model.add(Bidirectional(LSTM(hidden_dim , return_sequences=True)))
model.add(Dropout(0.5))
#model.add(Dense(units = len(Y) , activation = 'softmax'))
model.add(Dense(18 , activation = 'softmax'))
# (see the network architecture image)
# Compile model
model.compile(loss = categorical_crossentropy , optimizer = "Adam", metrics=['accuracy'] )
#model.compile(loss = binary_crossentropy , optimizer = "Adam", metrics=['accuracy'] )
# Print a summary of the model
print(model.summary())
###Output
Model: "sequential_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_2 (Embedding) (None, None, 100) 1723200
_________________________________________________________________
bidirectional_2 (Bidirection (None, None, 200) 160800
_________________________________________________________________
dropout_2 (Dropout) (None, None, 200) 0
_________________________________________________________________
dense_2 (Dense) (None, None, 18) 3618
=================================================================
Total params: 1,887,618
Trainable params: 1,887,618
Non-trainable params: 0
_________________________________________________________________
None
###Markdown
Part 3: Train the network The next step is to train the network. Use the following parameters:* number of epochs: 10* batch size: 32Training will print the average running loss on the training data after each minibatch. In addition to that, we ask you to also print the loss and accuracy on the development data after each epoch. You can do so by providing the `validation_data` argument to the `fit` method.Note that the `fit` method returns a [`History`](https://keras.io/callbacks/history) object that contains useful information about the training. We will use that information in the next step.
###Code
# Encode the development (validation data) using the 'encode' function you created before
Xval , Yval = encode(dev_data , word_to_index , tag_to_index )
# Train the model and save the history, as you did in the DNN and CNN labs, provide validation data
history = model.fit(X, Y , batch_size = 32 , epochs = 10 , verbose = 1 , validation_data = (Xval,Yval))
###Output
Epoch 1/10
310/310 [==============================] - 11s 36ms/step - loss: 0.3409 - accuracy: 0.9119 - val_loss: 0.3688 - val_accuracy: 0.8979
Epoch 2/10
310/310 [==============================] - 10s 34ms/step - loss: 0.1182 - accuracy: 0.9658 - val_loss: 0.1293 - val_accuracy: 0.9657
Epoch 3/10
310/310 [==============================] - 10s 34ms/step - loss: 0.0406 - accuracy: 0.9890 - val_loss: 0.1022 - val_accuracy: 0.9739
Epoch 4/10
310/310 [==============================] - 10s 33ms/step - loss: 0.0232 - accuracy: 0.9938 - val_loss: 0.1061 - val_accuracy: 0.9742
Epoch 5/10
310/310 [==============================] - 11s 34ms/step - loss: 0.0170 - accuracy: 0.9953 - val_loss: 0.1080 - val_accuracy: 0.9745
Epoch 6/10
310/310 [==============================] - 10s 33ms/step - loss: 0.0137 - accuracy: 0.9961 - val_loss: 0.1163 - val_accuracy: 0.9730
Epoch 7/10
310/310 [==============================] - 10s 34ms/step - loss: 0.0116 - accuracy: 0.9967 - val_loss: 0.1198 - val_accuracy: 0.9733
Epoch 8/10
310/310 [==============================] - 10s 34ms/step - loss: 0.0098 - accuracy: 0.9971 - val_loss: 0.1192 - val_accuracy: 0.9736
Epoch 9/10
310/310 [==============================] - 10s 33ms/step - loss: 0.0083 - accuracy: 0.9976 - val_loss: 0.1315 - val_accuracy: 0.9716
Epoch 10/10
310/310 [==============================] - 10s 34ms/step - loss: 0.0072 - accuracy: 0.9979 - val_loss: 0.1295 - val_accuracy: 0.9726
###Markdown
Part 4: Identify a good model The following code will plot the loss on the training data and the loss on the validation data after each epoch:
###Code
# Lets define a help function for plotting the training results
import matplotlib.pyplot as plt
def plot_results(history):
val_loss = history.history['val_loss']
acc = history.history['accuracy']
loss = history.history['loss']
val_acc = history.history['val_accuracy']
plt.figure(figsize=(10,4))
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.plot(loss)
plt.plot(val_loss)
plt.legend(['Training','Validation'])
plt.figure(figsize=(10,4))
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.plot(acc)
plt.plot(val_acc)
plt.legend(['Training','Validation'])
plt.show()
plot_results(history)
###Output
_____no_output_____
###Markdown
Look at the plot and determine the epoch after which the model starts to overfit. Then, re-train your model using that many epochs and compute the accuracy of the tagger on the test data.
###Code
# Encode the test_data using the 'encode' function you created before
Xtest , Ytest = encode(test_data , word_to_index , tag_to_index )
# Evaluate the model on test data, as you did in the DNN and CNN lab
score = model.evaluate(Xtest , Ytest , verbose = 1)
print('Test loss: %.4f' % score[0])
print('Test accuracy: %.4f' % score[1])
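# --- added sketch (not in the original lab): accuracy that ignores padded time-steps ---
# The Keras accuracy above also counts padding positions, which inflates the score.
# Assumption: index 0 in X marks the padding inserted by pad_sequences.
import numpy as np
pred_ids = np.argmax(model.predict(Xtest), axis=-1)
true_ids = np.argmax(Ytest, axis=-1)
mask = Xtest != 0
print('Test accuracy excluding padding: %.4f' % (pred_ids[mask] == true_ids[mask]).mean())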
###Output
65/65 [==============================] - 0s 6ms/step - loss: 0.1219 - accuracy: 0.9752
Test loss: 0.1219
Test accuracy: 0.9752
|
Coca_Rating_Ensemble.ipynb | ###Markdown
**Bagging**
###Code
df['Cocoa_Percent'] = df['Cocoa_Percent'] *100
df['Cocoa_Percent'] = df['Cocoa_Percent'].astype(int)
df['Cocoa_Percent']
# Input and Output Split
predictors =df.loc[:, df.columns!='Cocoa_Percent']
type(predictors)
target = df['Cocoa_Percent']
type(target)
# Train Test partition of the data
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(predictors, target, test_size = 0.2, random_state=0)
from sklearn import tree
clftree = tree.DecisionTreeClassifier()
from sklearn.ensemble import BaggingClassifier
bag_clf = BaggingClassifier(base_estimator = clftree, n_estimators = 500,
bootstrap = True, n_jobs = 1, random_state = 42)
bag_clf.fit(x_train, y_train)
from sklearn.metrics import accuracy_score, confusion_matrix
# Evaluation on Testing Data
confusion_matrix(y_test, bag_clf.predict(x_test))
acc_test = accuracy_score(y_test, bag_clf.predict(x_test))
# Evaluation on Training Data
acc_train = accuracy_score(y_train, bag_clf.predict(x_train))
confusion_matrix(y_train, bag_clf.predict(x_train))
results = pd.DataFrame([['BaggingClassifier', acc_test, acc_train]], columns = ['Model', 'Accuracy test','Accuracy train'])
results
###Output
_____no_output_____
###Markdown
**Gradient Boosting**
###Code
from sklearn.ensemble import GradientBoostingClassifier
boost_clf = GradientBoostingClassifier()
boost_clf.fit(x_train, y_train)
confusion_matrix(y_test, boost_clf.predict(x_test))
accuracy_score(y_test, boost_clf.predict(x_test))
# Hyperparameters
boost_clf2 = GradientBoostingClassifier(learning_rate = 0.02, n_estimators = 1000, max_depth = 1)
boost_clf2.fit(x_train, y_train)
# Evaluation on Testing Data
confusion_matrix(y_test, boost_clf2.predict(x_test))
acc_test =accuracy_score(y_test, boost_clf2.predict(x_test))
# Evaluation on Training Data
acc_train = accuracy_score(y_train, boost_clf2.predict(x_train))
model_results = pd.DataFrame([['GradientBoostingClassifier', acc_test, acc_train]],
columns = ['Model', 'Accuracy test','Accuracy train'])
results = results.append(model_results, ignore_index = True)
results
###Output
_____no_output_____
###Markdown
**XGBoosting**
###Code
import xgboost as xgb
xgb_clf = xgb.XGBClassifier(max_depth = 5, n_estimators = 10000, learning_rate = 0.3, n_jobs = -1)
xgb_clf.fit(x_train, y_train)
# Evaluation on Testing Data
confusion_matrix(y_test, xgb_clf.predict(x_test))
accuracy_score(y_test, xgb_clf.predict(x_test))
xgb.plot_importance(xgb_clf)
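# --- added sketch (not in the original notebook): cross-validated accuracy as a sanity check ---
# A single train/test split can be optimistic; 5-fold CV gives a more stable estimate for the boosted models above.
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import GradientBoostingClassifier
cv_scores = cross_val_score(GradientBoostingClassifier(learning_rate=0.02, n_estimators=200, max_depth=1),
                            predictors, target, cv=5, scoring='accuracy')
print("5-fold CV accuracy: %.3f (+/- %.3f)" % (cv_scores.mean(), cv_scores.std()))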
###Output
_____no_output_____
###Markdown
**Adaboosting**
###Code
from sklearn.ensemble import AdaBoostClassifier
ada_clf = AdaBoostClassifier(learning_rate = 0.02, n_estimators = 5000)
ada_clf.fit(x_train, y_train)
# Evaluation on Testing Data
confusion_matrix(y_test, ada_clf.predict(x_test))
acc_test = accuracy_score(y_test, ada_clf.predict(x_test))
# Evaluation on Training Data
acc_train = accuracy_score(y_train, ada_clf.predict(x_train))
model_results1 = pd.DataFrame([['Adaboosting', acc_test,acc_train]],
columns = ['Model', 'Accuracy test','Accuracy train'])
results = results.append(model_results1, ignore_index = True)
results
###Output
_____no_output_____ |
elitedatascience/python-seaborn-tutorial/python-seaborn-tutorial.ipynb | ###Markdown
The Ultimate Python Seaborn TutorialThis code was developed following the tutorial [The Ultimate Python Seaborn Tutorial](https://elitedatascience.com/python-seaborn-tutorial), with slight modifications. All work must be credited to the [EliteDataScience](https://elitedatascience.com) team. Step 1: Installing Seaborn If you are reading this in a Jupyter notebook environment, you probably already have all the tools you need to follow this tutorial. Otherwise, the [Anaconda suite](https://anaconda.org/) is the recommended way to install all of these libraries, including the Python programming language. Step 2: Importing libraries and dataset.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
df = pd.read_csv('Pokemon.csv', index_col=0)
df.head()
###Output
_____no_output_____
###Markdown
Step 3: Seaborn's plotting functions.
###Code
# recommended way
_ = sns.lmplot(data=df, x='Attack', y='Defense')
# alternative way
#sns.lmplot(x=df.Attack, y=df.Defense)
###Output
_____no_output_____
###Markdown
Step 4: Customizing with Matplotlib.
###Code
sns.lmplot(data=df, x='Attack', y='Defense', fit_reg=False, hue='Stage')
_ = plt.xlim(0, None)
_ = plt.ylim(0, None)
###Output
_____no_output_____
###Markdown
Step 5: The role of Pandas.
###Code
_ = sns.boxplot(data=df)
stats_df = df.drop(columns=['Total', 'Stage', 'Legendary'])
_ = sns.boxplot(data=stats_df)
###Output
_____no_output_____
###Markdown
Step 6: Seaborn themes
###Code
# set theme
sns.set_style('whitegrid')
# violin plot
fig = plt.figure(figsize=(10, 10))
_ = sns.violinplot(x='Type 1' , y='Attack', data=df)
###Output
_____no_output_____
###Markdown
Step 7: Color palletes
###Code
pkmn_type_colors = ['#78C850', # Grass
'#F08030', # Fire
'#6890F0', # Water
'#A8B820', # Bug
'#A8A878', # Normal
'#A040A0', # Poison
'#F8D030', # Electric
'#E0C068', # Ground
'#EE99AC', # Fairy
'#C03028', # Fighting
'#F85888', # Psychic
'#B8A038', # Rock
'#705898', # Ghost
'#98D8D8', # Ice
'#7038F8', # Dragon
]
fig = plt.figure(figsize=(10, 10))
_ = sns.violinplot(x='Type 1' , y='Attack', data=df, palette=pkmn_type_colors)
# Swarm plot with Pokémon color pallete
fig = plt.figure(figsize=(10, 6))
_ = sns.swarmplot(x='Type 1', y='Attack', data=df, palette=pkmn_type_colors)
###Output
_____no_output_____
###Markdown
Step 8: Overlaying plots
###Code
# Set figure size with matplotlib
plt.figure(figsize=(10, 6))
# create plot
sns.violinplot(x='Type 1', y='Attack', data=df, inner=None, palette=pkmn_type_colors)
sns.swarmplot(x='Type 1', y='Attack', data=df, color='k', alpha=0.7)
# Set title with matplotlib
_ = plt.title('Attack by type')
###Output
_____no_output_____
###Markdown
Step 9: Putting all together
###Code
stats_df.head()
melted_df = pd.melt(stats_df, id_vars=['Name', 'Type 1', 'Type 2'], var_name='Stats')
melted_df.head()
# Swarmplot with melted_df
_ = sns.swarmplot(data=melted_df, x='Stats', y='value', hue='Type 1')
###Output
_____no_output_____
###Markdown
Next, we are going to apply some tweaks to make our plot more readable:
###Code
# 1. Enlarge the plot
plt.figure(figsize=(10, 6))
sns.swarmplot(data=melted_df,
x='Stats',
y='value',
hue='Type 1', # 2. Separate point by hue
palette=pkmn_type_colors) # 3. Use Pokémon pallete
# 4. Adjust the y-axis
plt.ylim(0, 260)
# 5. Place the legend to the right
_ = plt.legend(bbox_to_anchor=(1, 1), loc='upper left')
###Output
_____no_output_____
###Markdown
Step 10: Pokédex (mini-gallery) 10.1 - Heatmap
###Code
# Calculate the correlation matrix
corr = stats_df.corr()
# Heatmap
sns.set_style('whitegrid')
_ = sns.heatmap(corr)
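# --- added variant (styling only, not from the tutorial): annotate cells and fix the colour range ---
# Writing the correlation value in each cell often makes the heatmap easier to read.
plt.figure(figsize=(6, 5))
_ = sns.heatmap(corr, annot=True, fmt='.2f', cmap='coolwarm', vmin=-1, vmax=1)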
###Output
_____no_output_____
###Markdown
10.2 Histograms
###Code
# Distribution plot (a.k.a Histogram)
_ = sns.distplot(df.Attack)
###Output
C:\ProgramData\Anaconda3\lib\site-packages\matplotlib\axes\_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.
warnings.warn("The 'normed' kwarg is deprecated, and has been "
###Markdown
10.3 Bar Plot
###Code
# Count plot (a.k.a Bar Plot)
_ = plt.figure(figsize=(10, 6))
_ = sns.countplot(data=df, x='Type 1', palette=pkmn_type_colors)
# Rotate x-labels
_ = plt.xticks(rotation=-45)
###Output
_____no_output_____
###Markdown
10.4 Factor plotFactor plots make it easy separate plots by categorical values
###Code
# Factor plot
g = sns.factorplot(data=df,
x='Type 1',
y='Attack',
hue='Stage', # color by stage
col='Stage', # Separate plot by stage
kind='swarm') # Swarmplot
# Rotate x-axis labels
_ = g.set_xticklabels(rotation=-45)
# Doesn't work because only rotates last plot
# plt.xticks(rotation=-45)
###Output
_____no_output_____
###Markdown
10.5 Density plot
###Code
# Density plot
plt.figure(figsize=(8, 8))
_ = sns.kdeplot(df['Attack'], df['Defense'])
###Output
_____no_output_____
###Markdown
10.6: Joint Distribution plot
###Code
# Joint Distribution plot
sns.jointplot(data=df, x='Attack', y='Defense')
###Output
C:\ProgramData\Anaconda3\lib\site-packages\matplotlib\axes\_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.
warnings.warn("The 'normed' kwarg is deprecated, and has been "
C:\ProgramData\Anaconda3\lib\site-packages\matplotlib\axes\_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.
warnings.warn("The 'normed' kwarg is deprecated, and has been "
|
FinMath/Models and Pricing of Financial Derivatives/Pro/Pro_Volatility Smile_final.ipynb | ###Markdown
$$c = S_0\mathcal N(d_1) - Ke^{-rT}\mathcal N(d_2)\\d_{1,2} = \frac{1}{\sigma\sqrt{T}}\left( \log\left(\frac{S_0}{K}\right) + \left(r\pm\frac{1}{2}\sigma^2\right)T \right)\\{\large\nu} = \frac{\partial c}{\partial \sigma} = S_0 \sqrt T \mathcal N'(d_1)>0$$
###Code
import math
import numpy as np
import pandas as pd
import datetime as dt
import scipy.stats as scs
import matplotlib.pyplot as plt
import sys
import time
# data available at Tonghuashun
# data retrived at 2018-06-19
# 上证50ETF购9月
raw_data = pd.read_csv('rrawData.csv')
raw_data
# calculate T
end_date = dt.date(2018,9,26)
start_date = dt.date(2018,6,19)
T = (end_date - start_date).days/365
# S_0, the close price of 上证50ETF, available at Tonghuashun
S_0 = 2.612
# r, the interest rate of 3-year bond, available at
# http://www.cmbchina.com/CmbWebPubInfo/SaveBondInfo.aspx?chnl=savebond&keyword=&page=6
r = 0.04
print('T (in years):\t\t',T)
print('S_0 (initial price):\t',S_0)
print('r (interest rate):\t',r)
call_price = raw_data['currentPrice']
K = raw_data['K']
def get_c(t,S_t,T,K,r,sigma):
d_1 = (math.log(S_t/K) + (r+0.5*sigma**2)*(T-t))/(sigma*math.sqrt(T-t))
d_2 = (math.log(S_t/K) + (r-0.5*sigma**2)*(T-t))/(sigma*math.sqrt(T-t))
c = S_t*scs.norm.cdf(d_1)-K*math.exp(-r*T)*scs.norm.cdf(d_2)
return c
imp_vol = []
delta = 0.000001
for i in range(len(raw_data)):
sigma_up = 4.0001
sigma_down = 0.0001
while True:
sigma_mid = (sigma_up + sigma_down)/2
c_mid = c_down = get_c(0,S_0,T,K[i],r,sigma_mid)
c_price = call_price[i]
if c_price <= c_mid:
sigma_up = sigma_mid
else:
sigma_down = sigma_mid
d = c_mid - c_price
if abs(d)<delta:
imp_vol.append(sigma_mid)
print('impVol at K=:'+ str(K[i]) + '\t' ,sigma_mid)
break
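# --- added sketch (assumption, not in the original notebook): Newton-Raphson using vega ---
# The vega formula quoted above (nu = S_0*sqrt(T)*N'(d_1) > 0) gives a faster alternative to bisection.
def get_vega(t, S_t, T, K, r, sigma):
    d_1 = (math.log(S_t/K) + (r + 0.5*sigma**2)*(T - t))/(sigma*math.sqrt(T - t))
    return S_t*math.sqrt(T - t)*scs.norm.pdf(d_1)

def implied_vol_newton(c_price, S_t, T, K, r, sigma0=0.5, tol=1e-6, max_iter=100):
    sigma = sigma0
    for _ in range(max_iter):
        diff = get_c(0, S_t, T, K, r, sigma) - c_price
        if abs(diff) < tol:
            break
        sigma -= diff/get_vega(0, S_t, T, K, r, sigma)   # vega > 0, so the update is well defined
    return sigma

# quick cross-check against the bisection result for the first strike
print('Newton impVol at K=' + str(K[0]) + '\t', implied_vol_newton(call_price[0], S_0, T, K[0], r))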
fig = plt.figure()
fig.set_size_inches(10,5)
ax = fig.add_subplot(111)
A = ax.plot(K,imp_vol,label='calculted')
B = ax.plot(K,raw_data['impVola'],label='given')
ax.legend()
ax.set_xlabel('K')
ax.set_ylabel('sigma_vol')
###Output
_____no_output_____ |
Chapter13/74_Image_super_resolution_using_SRGAN.ipynb | ###Markdown
###Code
import os
if not os.path.exists('srgan.pth.tar'):
!pip install -q torch_snippets
!wget -q https://raw.githubusercontent.com/sizhky/a-PyTorch-Tutorial-to-Super-Resolution/master/models.py -O models.py
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
downloaded = drive.CreateFile({'id': '1_PJ1Uimbr0xrPjE8U3Q_bG7XycGgsbVo'})
downloaded.GetContentFile('srgan.pth.tar')
from torch_snippets import *
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = torch.load('srgan.pth.tar', map_location='cpu')['generator'].to(device)
model.eval()
!wget https://www.dropbox.com/s/nmzwu68nrl9j0lf/Hema6.JPG
preprocess = T.Compose([
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225]),
T.Lambda(lambda x: x.to(device))
])
postprocess = T.Compose([
T.Lambda(lambda x: (x.cpu().detach()+1)/2),
T.ToPILImage()
])
image = readPIL('Hema6.JPG')
image.size
# (260,181)
image = image.resize((130,90))
im = preprocess(image)
sr = model(im[None])[0]
sr = postprocess(sr)
subplots([image, sr], nc=2, figsize=(10,10), titles=['Original image','High resolution image'])
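# --- added sketch (assumption, not in the original notebook): keep the result ---
# `sr` is a PIL image after postprocess, so it can simply be written to disk.
sr.save('Hema6_superres.jpg')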
###Output
_____no_output_____ |
scripts/python_scripts/Ophio_cflo_orthologs.ipynb | ###Markdown
Find orthologsThis script looks up the Ophio_cflo (and Beauveria) orthologs of the major N. crassa clock genes via their Ophio_kim IDs, and then checks whether those orthologs are rhythmically expressed.Major clock genes in N. crassa with Ophio_kim orthologs* frq = Ophio5|6064* wc-1 = Ophio5|4975* wc-2 = Ophio5|889* vvd = Ophio5|6595* nik-2 = Ophio5|6786* dcc-1 = Ophio5|7010* luxQ = Ophio5|7293* phy-1 = Ophio5|4324
###Code
### Housekeeping
import pandas as pd
import sqlite3
import numpy as np
path = '/Users/roos_brouns/Dropbox/Ant-fungus/02_scripts/Git_Das_folder2/Das_et_al_2022a'
species = 'ophio_cflo'
# load in file with ophio cflo and kim orthologs
orthos = pd.read_csv(f'{path}/data/will_et_al_2020/FullBlast_EC05_RNAseq_orignal_copy_26Aug19.csv')
### Get the orthologs of clock genes in ophio_cflo
orthos_genes = 'Ophio5|6064 Ophio5|4975 Ophio5|889 Ophio5|6595 Ophio5|6786 Ophio5|7010 Ophio5|7293 Ophio5|4324'
list_w_orthos = orthos_genes.split()
df = orthos.loc[orthos['sc16a_gene'] == list_w_orthos[0]]
for no in range(1,len(list_w_orthos)):
df_line = orthos.loc[orthos['sc16a_gene'] == list_w_orthos[no]]
# print(df_line)
df = df.append(df_line, ignore_index=True)
kim_cflo = df[['arb2_gene','sc16a_gene']].drop_duplicates()
kim_cflo.reset_index(inplace=True, drop=True)
list(kim_cflo['arb2_gene'])
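# --- added sketch (readability only): explicit N. crassa name -> Ophio_kim ID mapping, taken from the notes above ---
ncrassa_to_kim = {'frq': 'Ophio5|6064', 'wc-1': 'Ophio5|4975', 'wc-2': 'Ophio5|889',
                  'vvd': 'Ophio5|6595', 'nik-2': 'Ophio5|6786', 'dcc-1': 'Ophio5|7010',
                  'luxQ': 'Ophio5|7293', 'phy-1': 'Ophio5|4324'}
kim_to_ncrassa = {v: k for k, v in ncrassa_to_kim.items()}
# cross-check: which clock gene each retrieved ortholog pair corresponds to
print(kim_cflo['sc16a_gene'].map(kim_to_ncrassa).tolist())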
### Are these orthologs also rhythmic?
species='ophio_cflo'
# put orthologs of clock genes in a list
cflo_orhts = df['arb2_gene']
cflo_orhts = list(cflo_orhts)
cflo_orhts[0]
# load in data with info on whether each gene is rhythmic or not
# Load in the whole csv
data = pd.read_csv(f'{path}/data/{species}_TC6_data.csv')
# select the expression values
# data.columns  # inspect the available column names
rhytmic_genes = data.filter(['gene_ID_ncbi','gene_ID_robin', 'rhythmic_24h', 'GammaP_24h'])
## make df with the genes that are rhytmic
rhy_df = rhytmic_genes.loc[rhytmic_genes['gene_ID_robin'] == cflo_orhts[0]]
for i in range(1,len(cflo_orhts)):
rhy_df2 = rhytmic_genes.loc[rhytmic_genes['gene_ID_robin'] == cflo_orhts[i]]
rhy_df = rhy_df.append(rhy_df2, ignore_index = True)
rhy_df
### Are these orthologs also rhythmic in Beau?
species='beau'
# put orthologs of clock genes in a list
orhts = ['BBA_01528', 'BBA_10271', 'BBA_01403', 'BBA_02876', 'BBA_08737', 'BBA_00328', 'BBA_07925', 'BBA_02816']
# load in data with info if gene is rhytmic or not
#
# Load in the whole csv
data = pd.read_csv(f'{path}/data/{species}_TC6_data.csv')
# select the expression values
# data.columns  # inspect the available column names
b_rhytmic_genes = data.filter(['gene_ID_ncbi', 'rhythmic_24h', 'GammaP_24h'])
## make df with the genes that are rhytmic
b_rhy_df = b_rhytmic_genes.loc[b_rhytmic_genes['gene_ID_ncbi'] == orhts[0]]
for i in range(1,len(orhts)):
b_rhy_df2 = b_rhytmic_genes.loc[b_rhytmic_genes['gene_ID_ncbi'] == orhts[i]]
b_rhy_df = b_rhy_df.append(b_rhy_df2, ignore_index = True)
b_rhy_df
### Rename columns and make df pretty
rhy_df.drop_duplicates(inplace=True)
rhy_df.reset_index(inplace=True, drop=True)
rhy_df.columns = ['ophio_cflo_ncbi','ophio_cflo_robin_ID', 'rhythmic_cflo', 'GammaP_cflo']
# Add ophio_kim orthologs
n_df = pd.concat([rhy_df, kim_cflo], join='outer', axis=1)
# Ad beau orthologs
n_df = pd.concat([n_df, b_rhy_df], join='outer', axis=1)
# Ad N crassa orthologs
n_df['N_crassa_ortho']= ['frq','wc-1','wc-2','vvd','nik-2','dcc-1','luxQ','phy-1']
## Rename columns and make df pretty
n_df.drop('arb2_gene', axis=1, inplace=True)
n_df.columns = ['ophio_cflo_ncbi','ophio_cflo_robin_ID', 'rhythmic_cflo', 'GammaP_cflo', 'ophio_kim_ortho', 'beau_ortho','rhythmic_beau', 'GammaP_beau', 'N_crassa_ortho']
# round decimals
n_df.round(3)
# Ad N crassa orthologs
n_df['N_crassa_ortho']= ['frq','wc-1','wc-2','vvd','nik-2','dcc-1','luxQ','phy-1']
n_df = n_df.round(2)
n_df
n_df.to_excel('orthos_table.xlsx')
n_df.columns = ['ophio_cflo_ncbi','ophio_cflo_robin_ID', 'rhythmic_cflo', 'GammaP_cflo', 'ophio_kim_ortho']
n_df['Beau_ortho']=['BBA_01528', 'BBA_10271', 'BBA_01403', 'BBA_02876', 'BBA_08737', 'BBA_00328', 'BBA_07925', 'BBA_02816']
n_df.round(2)
###Output
_____no_output_____
###Markdown
Are my genes rhythmic?
###Code
### CBD genes in Beauveria
# B. bas genes
ID_input = ['BBA_04942', 'BBA_07544', 'BBA_00300', 'BBA_06126']
# = ['fluG','brlA','abaA', 'wetA']
# Ophio_cflo orthos
# ???
### Rhythmic in Ophio_kim
# Ophio_cflo orthos
ID_input =
#
name_input =
# Beau orthos
# ???
### Are these orthologs also rhythmic in Beau?
species='beau'
gene_ID = 'gene_ID_ncbi'
# input list of genes you want to test
orhts = ID_input
# load in data with info if gene is rhytmic or not
#
# Load in the whole csv
data = pd.read_csv(f'{path}/data/{species}_TC6_data.csv')
# select the expression values
# data.columndata.column's
t_rhytmic_genes = data.filter([gene_ID, 'rhythmic_24h', 'GammaP_24h'])
## make df with the genes that are rhytmic
t_rhy_df = t_rhytmic_genes.loc[t_rhytmic_genes[gene_ID] == orhts[0]]
for i in range(1,len(orhts)):
t_rhy_df2 = t_rhytmic_genes.loc[t_rhytmic_genes[gene_ID] == orhts[i]]
t_rhy_df = t_rhy_df.append(t_rhy_df2, ignore_index = True)
# ad gene names
# t_rhy_df['gene_name']= name_input
t_rhy_df
###Output
_____no_output_____ |
Diabetes Dataset/Improvements/Features Improvements with Mean/10_Pregnancies, Glucose, BloodPressure, SkinThickness, BMI and Age.ipynb | ###Markdown
We can infer that even though we do not have NaN values, there are a lot of wrong values present in our data, like:- Glucose Level cannot be above 150 or below 70- Blood Pressure cannot be below 55- Skin thickness cannot be 0- BMI index cannot be 0
###Code
# Data Cleaning
df_improv = diabetesDF.copy()
# Taking mean of valid data in the dataset
mean_Glucose = diabetesDF.loc[(diabetesDF.Glucose > 70) & (diabetesDF.Glucose < 150), 'Glucose'].mean()
mean_BloodPressure = diabetesDF.loc[diabetesDF.BloodPressure > 55, 'BloodPressure'].mean()
mean_SkinThickness = diabetesDF.loc[diabetesDF.SkinThickness != 0, 'SkinThickness'].mean()
mean_Insulin = diabetesDF.loc[(diabetesDF.Insulin > 35) & (diabetesDF.Insulin < 150), 'Insulin'].mean()
mean_BMI = diabetesDF.loc[diabetesDF.BMI != 0, 'BMI'].mean()
df_improv.head()
df_improv.describe()
# Replacing all the wrong values
# df_improv.loc[(diabetesDF.Glucose < 70) | (df_improv.Glucose > 150), 'Glucose'] = np.ceil(np.random.rand() + mean_Glucose)
df_improv.Glucose.replace(0, np.ceil(np.random.rand() + mean_Glucose), inplace = True)
# df_improv.loc[df_improv.BloodPressure < 55, 'BloodPressure'] = np.ceil(np.random.rand() + mean_BloodPressure)
df_improv.BloodPressure.replace(0, np.ceil(np.random.rand() + mean_BloodPressure), inplace = True)
df_improv.SkinThickness.replace(0, np.ceil(np.random.rand() + mean_SkinThickness), inplace = True)
df_improv.Insulin.replace(0, np.ceil(np.random.rand() + mean_Insulin), inplace = True)
df_improv.BMI.replace(0, np.ceil(np.random.rand() + mean_BMI), inplace = True)
df_improv.head()
df_improv.describe()
df_improv.drop([ 'Insulin', 'DiabetesPedigreeFunction'], axis=1, inplace=True)
df_improv.head()
# Total 768 patients record
# Using 650 data for training
# Using 100 data for testing
# Using 18 data for validation
dfTrain = df_improv[:650]
dfTest = df_improv[650:750]
dfCheck = df_improv[750:]
# Separating label and features and converting to numpy array to feed into our model
trainLabel = np.asarray(dfTrain['Outcome'])
trainData = np.asarray(dfTrain.drop('Outcome',1))
testLabel = np.asarray(dfTest['Outcome'])
testData = np.asarray(dfTest.drop('Outcome',1))
# Normalize the data
means = np.mean(trainData, axis=0)
stds = np.std(trainData, axis=0)
trainData = (trainData - means)/stds
testData = (testData - means)/stds
# models target t as sigmoid(w0 + w1*x1 + w2*x2 + ... + wd*xd)
diabetesCheck = LogisticRegression()
diabetesCheck.fit(trainData,trainLabel)
accuracy = diabetesCheck.score(testData,testLabel)
print("accuracy = ",accuracy * 100,"%")
###Output
accuracy = 78.0 %
|
Day_011_Regular_Expression_HW.ipynb | ###Markdown
Regular expression practice In web crawlers, regular expressions are often used to filter and search for strings that match a specific pattern. Today we will practice filtering IP addresses and URLs.
###Code
import re # load the re module
# Define a helper function that tests whether the input text matches a regular expression
def RegexMatchingTest(regex, input_text):
    # compile the regular expression string into a pattern object
pattern = re.compile(regex)
print('ppppp',pattern)
    # use the compiled pattern to test whether the input text matches
result = re.search(pattern, input_text)
if result:
        # the matched result is stored in the group() attribute; print it out
print("Matched: %s" % (result.group()))
if result.lastindex is not None:
            # group(0) is the whole matched string; group(1), group(2), ... are the contents matched by each group
for i in range(0, result.lastindex+1):
print(" group(%d): %s" % (i, result.group(i)))
else:
print("Not matched.")
###Output
_____no_output_____
###Markdown
Filtering IP addresses with a regular expression. A valid network IP address has the format X.X.X.X, where each X is a number between 0 and 255. We can write a regex that describes the content of an IP address.
###Code
test_string = "Google IP address is 216.58.200.227"
# regex pattern for matching an IP address
regex = '(\d{1,3}).(\d{1,3}).(\d{1,3}).(\d{1,3})'
RegexMatchingTest(regex, test_string)
###Output
ppppp re.compile('(\\d{1,3}).(\\d{1,3}).(\\d{1,3}).(\\d{1,3})')
Matched: 216.58.200.227
group(0): 216.58.200.227
group(1): 216
group(2): 58
group(3): 200
group(4): 227
###Markdown
The regex above is the simplest way to write it. Thinking more carefully, however, it also matches invalid IP addresses such as 444.555.666.777. We have to refine the regex so that it only accepts valid addresses of the form [0~255].[0~255].[0~255].[0~255] and filters out invalid IPs.
###Code
'''
Your code here.
hint: think about the ranges of numbers each octet of the IP can take, split into cases
1. 000 ~ 199
2. 200 ~ 249
3. 250 ~ 255
([01]\d\d)|(2[01234]\d)|(25[012345])
'''
regex = '([01]?\d?\d|2[0-4]\d|25[0-5])\.([01]?\d?\d|2[0-4]\d|25[0-5])\.([01]?\d?\d|2[0-4]\d|25[0-5])\.([01]?\d?\d|2[0-4]\d|25[0-5])'
test_string1 = "Test IP 216.58.200.227"
RegexMatchingTest(regex, test_string1) # test that the pattern matches this valid IP
test_string2 = "Test IP 999.888.777.666"
RegexMatchingTest(regex, test_string2) # test that the pattern does not match this invalid IP
###Output
ppppp re.compile('([01]?\\d?\\d|2[0-4]\\d|25[0-5])\\.([01]?\\d?\\d|2[0-4]\\d|25[0-5])\\.([01]?\\d?\\d|2[0-4]\\d|25[0-5])\\.([01]?\\d?\\d|2[0-4]\\d|25[0-5])')
Matched: 216.58.200.22
group(0): 216.58.200.22
group(1): 216
group(2): 58
group(3): 200
group(4): 22
ppppp re.compile('([01]?\\d?\\d|2[0-4]\\d|25[0-5])\\.([01]?\\d?\\d|2[0-4]\\d|25[0-5])\\.([01]?\\d?\\d|2[0-4]\\d|25[0-5])\\.([01]?\\d?\\d|2[0-4]\\d|25[0-5])')
Not matched.
###Markdown
Extracting URLs with a regular expression. In web scraping, pages often contain A tags with external links, for example: 時刻表 . We want to extract the URL that follows "href=" for further processing.
###Code
html_a_tag = "<a href=https://movies.yahoo.com.tw/movietime_result.html/id=9467> 時刻表 </a>"
'''
Your code here.
regex pattern for extracting the URL
'''
regex = '((http|https|ftp):\/\/)([a-z0-9]\.|[a-z0-9][-a-z0-9]*[a-z0-9]\.)+(tw|cn|com|edu|gov|net|org|biz|info|name)[-a-z0-9_:@&?=+,.!\/~*\'%$]*'
RegexMatchingTest(regex, html_a_tag)
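# --- added follow-up sketch: pulling every URL out of a larger HTML snippet with the same regex ---
# (the snippet below is made up for illustration)
html_page = '<a href=https://movies.yahoo.com.tw/movietime_result.html/id=9467>A</a> <a href=https://tw.yahoo.com/index.html>B</a>'
url_pattern = re.compile(regex)
all_urls = [m.group(0) for m in url_pattern.finditer(html_page)]
print(all_urls)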
###Output
ppppp re.compile("((http|https|ftp):\\/\\/)([a-z0-9]\\.|[a-z0-9][-a-z0-9]*[a-z0-9]\\.)+(tw|cn|com|edu|gov|net|org|biz|info|name)[-a-z0-9_:@&?=+,.!\\/~*'%$]*")
Matched: https://movies.yahoo.com.tw/movietime_result.html/id=9467
group(0): https://movies.yahoo.com.tw/movietime_result.html/id=9467
group(1): https://
group(2): https
group(3): com.
group(4): tw
|
Deep Learning Specialisation/Sequence Models/Building a Recurrent Neural Network Step by Step.ipynb | ###Markdown
Building your Recurrent Neural Network - Step by StepWelcome to Course 5's first assignment! In this assignment, you will implement your first Recurrent Neural Network in numpy.Recurrent Neural Networks (RNN) are very effective for Natural Language Processing and other sequence tasks because they have "memory". They can read inputs $x^{\langle t \rangle}$ (such as words) one at a time, and remember some information/context through the hidden layer activations that get passed from one time-step to the next. This allows a uni-directional RNN to take information from the past to process later inputs. A bidirection RNN can take context from both the past and the future. **Notation**:- Superscript $[l]$ denotes an object associated with the $l^{th}$ layer. - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.- Superscript $(i)$ denotes an object associated with the $i^{th}$ example. - Example: $x^{(i)}$ is the $i^{th}$ training example input.- Superscript $\langle t \rangle$ denotes an object at the $t^{th}$ time-step. - Example: $x^{\langle t \rangle}$ is the input x at the $t^{th}$ time-step. $x^{(i)\langle t \rangle}$ is the input at the $t^{th}$ timestep of example $i$. - Lowerscript $i$ denotes the $i^{th}$ entry of a vector. - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$.We assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started! Let's first import all the packages that you will need during this assignment.
###Code
import numpy as np
from rnn_utils import *
###Output
_____no_output_____
###Markdown
1 - Forward propagation for the basic Recurrent Neural NetworkLater this week, you will generate music using an RNN. The basic RNN that you will implement has the structure below. In this example, $T_x = T_y$. **Figure 1**: Basic RNN model Here's how you can implement an RNN: **Steps**:1. Implement the calculations needed for one time-step of the RNN.2. Implement a loop over $T_x$ time-steps in order to process all the inputs, one at a time. Let's go! 1.1 - RNN cellA Recurrent neural network can be seen as the repetition of a single cell. You are first going to implement the computations for a single time-step. The following figure describes the operations for a single time-step of an RNN cell. **Figure 2**: Basic RNN cell. Takes as input $x^{\langle t \rangle}$ (current input) and $a^{\langle t - 1\rangle}$ (previous hidden state containing information from the past), and outputs $a^{\langle t \rangle}$ which is given to the next RNN cell and also used to predict $y^{\langle t \rangle}$ **Exercise**: Implement the RNN-cell described in Figure (2).**Instructions**:1. Compute the hidden state with tanh activation: $a^{\langle t \rangle} = \tanh(W_{aa} a^{\langle t-1 \rangle} + W_{ax} x^{\langle t \rangle} + b_a)$.2. Using your new hidden state $a^{\langle t \rangle}$, compute the prediction $\hat{y}^{\langle t \rangle} = softmax(W_{ya} a^{\langle t \rangle} + b_y)$. We provided you a function: `softmax`.3. Store $(a^{\langle t \rangle}, a^{\langle t-1 \rangle}, x^{\langle t \rangle}, parameters)$ in cache4. Return $a^{\langle t \rangle}$ , $y^{\langle t \rangle}$ and cacheWe will vectorize over $m$ examples. Thus, $x^{\langle t \rangle}$ will have dimension $(n_x,m)$, and $a^{\langle t \rangle}$ will have dimension $(n_a,m)$.
###Code
# GRADED FUNCTION: rnn_cell_forward
def rnn_cell_forward(xt, a_prev, parameters):
"""
Implements a single forward step of the RNN-cell as described in Figure (2)
Arguments:
xt -- your input data at timestep "t", numpy array of shape (n_x, m).
a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m)
parameters -- python dictionary containing:
Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
ba -- Bias, numpy array of shape (n_a, 1)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a_next -- next hidden state, of shape (n_a, m)
yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
cache -- tuple of values needed for the backward pass, contains (a_next, a_prev, xt, parameters)
"""
# Retrieve parameters from "parameters"
Wax = parameters["Wax"]
Waa = parameters["Waa"]
Wya = parameters["Wya"]
ba = parameters["ba"]
by = parameters["by"]
### START CODE HERE ### (≈2 lines)
# compute next activation state using the formula given above
a_next = np.tanh(np.dot(Waa,a_prev) + np.dot(Wax,xt) + ba)
# compute output of the current cell using the formula given above
yt_pred = softmax(np.dot(Wya,a_next) + by)
### END CODE HERE ###
# store values you need for backward propagation in cache
cache = (a_next, a_prev, xt, parameters)
return a_next, yt_pred, cache
np.random.seed(1)
xt = np.random.randn(3,10)
a_prev = np.random.randn(5,10)
Waa = np.random.randn(5,5)
Wax = np.random.randn(5,3)
Wya = np.random.randn(2,5)
ba = np.random.randn(5,1)
by = np.random.randn(2,1)
parameters = {"Waa": Waa, "Wax": Wax, "Wya": Wya, "ba": ba, "by": by}
a_next, yt_pred, cache = rnn_cell_forward(xt, a_prev, parameters)
print("a_next[4] = ", a_next[4])
print("a_next.shape = ", a_next.shape)
print("yt_pred[1] =", yt_pred[1])
print("yt_pred.shape = ", yt_pred.shape)
###Output
a_next[4] = [ 0.59584544 0.18141802 0.61311866 0.99808218 0.85016201 0.99980978
-0.18887155 0.99815551 0.6531151 0.82872037]
a_next.shape = (5, 10)
yt_pred[1] = [ 0.9888161 0.01682021 0.21140899 0.36817467 0.98988387 0.88945212
0.36920224 0.9966312 0.9982559 0.17746526]
yt_pred.shape = (2, 10)
###Markdown
**Expected Output**: **a_next[4]**: [ 0.59584544 0.18141802 0.61311866 0.99808218 0.85016201 0.99980978 -0.18887155 0.99815551 0.6531151 0.82872037] **a_next.shape**: (5, 10) **yt[1]**: [ 0.9888161 0.01682021 0.21140899 0.36817467 0.98988387 0.88945212 0.36920224 0.9966312 0.9982559 0.17746526] **yt.shape**: (2, 10) 1.2 - RNN forward pass You can see an RNN as the repetition of the cell you've just built. If your input sequence of data is carried over 10 time steps, then you will copy the RNN cell 10 times. Each cell takes as input the hidden state from the previous cell ($a^{\langle t-1 \rangle}$) and the current time-step's input data ($x^{\langle t \rangle}$). It outputs a hidden state ($a^{\langle t \rangle}$) and a prediction ($y^{\langle t \rangle}$) for this time-step. **Figure 3**: Basic RNN. The input sequence $x = (x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, ..., x^{\langle T_x \rangle})$ is carried over $T_x$ time steps. The network outputs $y = (y^{\langle 1 \rangle}, y^{\langle 2 \rangle}, ..., y^{\langle T_x \rangle})$. **Exercise**: Code the forward propagation of the RNN described in Figure (3).**Instructions**:1. Create a vector of zeros ($a$) that will store all the hidden states computed by the RNN.2. Initialize the "next" hidden state as $a_0$ (initial hidden state).3. Start looping over each time step, your incremental index is $t$ : - Update the "next" hidden state and the cache by running `rnn_cell_forward` - Store the "next" hidden state in $a$ ($t^{th}$ position) - Store the prediction in y - Add the cache to the list of caches4. Return $a$, $y$ and caches
###Code
# GRADED FUNCTION: rnn_forward
def rnn_forward(x, a0, parameters):
"""
Implement the forward propagation of the recurrent neural network described in Figure (3).
Arguments:
x -- Input data for every time-step, of shape (n_x, m, T_x).
a0 -- Initial hidden state, of shape (n_a, m)
parameters -- python dictionary containing:
Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
ba -- Bias numpy array of shape (n_a, 1)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a -- Hidden states for every time-step, numpy array of shape (n_a, m, T_x)
y_pred -- Predictions for every time-step, numpy array of shape (n_y, m, T_x)
caches -- tuple of values needed for the backward pass, contains (list of caches, x)
"""
# Initialize "caches" which will contain the list of all caches
caches = []
# Retrieve dimensions from shapes of x and parameters["Wya"]
n_x, m, T_x = x.shape
n_y, n_a = parameters["Wya"].shape
### START CODE HERE ###
# initialize "a" and "y" with zeros (≈2 lines)
a = np.zeros((n_a,m,T_x))
y_pred = np.zeros((n_y,m,T_x))
# Initialize a_next (≈1 line)
a_next = a0
# loop over all time-steps
for t in range(T_x):
# Update next hidden state, compute the prediction, get the cache (≈1 line)
a_next, yt_pred, cache = rnn_cell_forward(x[:,:,t],a_next,parameters)
# Save the value of the new "next" hidden state in a (≈1 line)
a[:,:,t] = a_next
# Save the value of the prediction in y (≈1 line)
y_pred[:,:,t] = yt_pred
# Append "cache" to "caches" (≈1 line)
caches.append(cache)
### END CODE HERE ###
# store values needed for backward propagation in cache
caches = (caches, x)
return a, y_pred, caches
np.random.seed(1)
x = np.random.randn(3,10,4)
a0 = np.random.randn(5,10)
Waa = np.random.randn(5,5)
Wax = np.random.randn(5,3)
Wya = np.random.randn(2,5)
ba = np.random.randn(5,1)
by = np.random.randn(2,1)
parameters = {"Waa": Waa, "Wax": Wax, "Wya": Wya, "ba": ba, "by": by}
a, y_pred, caches = rnn_forward(x, a0, parameters)
print("a[4][1] = ", a[4][1])
print("a.shape = ", a.shape)
print("y_pred[1][3] =", y_pred[1][3])
print("y_pred.shape = ", y_pred.shape)
print("caches[1][1][3] =", caches[1][1][3])
print("len(caches) = ", len(caches))
###Output
a[4][1] = [-0.99999375 0.77911235 -0.99861469 -0.99833267]
a.shape = (5, 10, 4)
y_pred[1][3] = [ 0.79560373 0.86224861 0.11118257 0.81515947]
y_pred.shape = (2, 10, 4)
caches[1][1][3] = [-1.1425182 -0.34934272 -0.20889423 0.58662319]
len(caches) = 2
###Markdown
**Expected Output**: **a[4][1]**: [-0.99999375 0.77911235 -0.99861469 -0.99833267] **a.shape**: (5, 10, 4) **y[1][3]**: [ 0.79560373 0.86224861 0.11118257 0.81515947] **y.shape**: (2, 10, 4) **cache[1][1][3]**: [-1.1425182 -0.34934272 -0.20889423 0.58662319] **len(cache)**: 2 Congratulations! You've successfully built the forward propagation of a recurrent neural network from scratch. This will work well enough for some applications, but it suffers from vanishing gradient problems. So it works best when each output $y^{\langle t \rangle}$ can be estimated using mainly "local" context (meaning information from inputs $x^{\langle t' \rangle}$ where $t'$ is not too far from $t$). In the next part, you will build a more complex LSTM model, which is better at addressing vanishing gradients. The LSTM will be better able to remember a piece of information and keep it saved for many timesteps. 2 - Long Short-Term Memory (LSTM) networkThis following figure shows the operations of an LSTM-cell. **Figure 4**: LSTM-cell. This tracks and updates a "cell state" or memory variable $c^{\langle t \rangle}$ at every time-step, which can be different from $a^{\langle t \rangle}$. Similar to the RNN example above, you will start by implementing the LSTM cell for a single time-step. Then you can iteratively call it from inside a for-loop to have it process an input with $T_x$ time-steps. About the gates - Forget gateFor the sake of this illustration, lets assume we are reading words in a piece of text, and want use an LSTM to keep track of grammatical structures, such as whether the subject is singular or plural. If the subject changes from a singular word to a plural word, we need to find a way to get rid of our previously stored memory value of the singular/plural state. In an LSTM, the forget gate lets us do this: $$\Gamma_f^{\langle t \rangle} = \sigma(W_f[a^{\langle t-1 \rangle}, x^{\langle t \rangle}] + b_f)\tag{1} $$Here, $W_f$ are weights that govern the forget gate's behavior. We concatenate $[a^{\langle t-1 \rangle}, x^{\langle t \rangle}]$ and multiply by $W_f$. The equation above results in a vector $\Gamma_f^{\langle t \rangle}$ with values between 0 and 1. This forget gate vector will be multiplied element-wise by the previous cell state $c^{\langle t-1 \rangle}$. So if one of the values of $\Gamma_f^{\langle t \rangle}$ is 0 (or close to 0) then it means that the LSTM should remove that piece of information (e.g. the singular subject) in the corresponding component of $c^{\langle t-1 \rangle}$. If one of the values is 1, then it will keep the information. - Update gateOnce we forget that the subject being discussed is singular, we need to find a way to update it to reflect that the new subject is now plural. Here is the formulat for the update gate: $$\Gamma_u^{\langle t \rangle} = \sigma(W_u[a^{\langle t-1 \rangle}, x^{\{t\}}] + b_u)\tag{2} $$ Similar to the forget gate, here $\Gamma_u^{\langle t \rangle}$ is again a vector of values between 0 and 1. This will be multiplied element-wise with $\tilde{c}^{\langle t \rangle}$, in order to compute $c^{\langle t \rangle}$. - Updating the cell To update the new subject we need to create a new vector of numbers that we can add to our previous cell state. 
The equation we use is: $$ \tilde{c}^{\langle t \rangle} = \tanh(W_c[a^{\langle t-1 \rangle}, x^{\langle t \rangle}] + b_c)\tag{3} $$Finally, the new cell state is: $$ c^{\langle t \rangle} = \Gamma_f^{\langle t \rangle}* c^{\langle t-1 \rangle} + \Gamma_u^{\langle t \rangle} *\tilde{c}^{\langle t \rangle} \tag{4} $$ - Output gateTo decide which outputs we will use, we will use the following two formulas: $$ \Gamma_o^{\langle t \rangle}= \sigma(W_o[a^{\langle t-1 \rangle}, x^{\langle t \rangle}] + b_o)\tag{5}$$ $$ a^{\langle t \rangle} = \Gamma_o^{\langle t \rangle}* \tanh(c^{\langle t \rangle})\tag{6} $$Where in equation 5 you decide what to output using a sigmoid function and in equation 6 you multiply that by the $\tanh$ of the previous state. 2.1 - LSTM cell**Exercise**: Implement the LSTM cell described in the Figure (3).**Instructions**:1. Concatenate $a^{\langle t-1 \rangle}$ and $x^{\langle t \rangle}$ in a single matrix: $concat = \begin{bmatrix} a^{\langle t-1 \rangle} \\ x^{\langle t \rangle} \end{bmatrix}$2. Compute all the formulas 1-6. You can use `sigmoid()` (provided) and `np.tanh()`.3. Compute the prediction $y^{\langle t \rangle}$. You can use `softmax()` (provided).
###Code
# GRADED FUNCTION: lstm_cell_forward
def lstm_cell_forward(xt, a_prev, c_prev, parameters):
"""
Implement a single forward step of the LSTM-cell as described in Figure (4)
Arguments:
xt -- your input data at timestep "t", numpy array of shape (n_x, m).
a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m)
c_prev -- Memory state at timestep "t-1", numpy array of shape (n_a, m)
parameters -- python dictionary containing:
Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
bf -- Bias of the forget gate, numpy array of shape (n_a, 1)
Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
bi -- Bias of the update gate, numpy array of shape (n_a, 1)
Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x)
bc -- Bias of the first "tanh", numpy array of shape (n_a, 1)
Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
bo -- Bias of the output gate, numpy array of shape (n_a, 1)
Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a_next -- next hidden state, of shape (n_a, m)
c_next -- next memory state, of shape (n_a, m)
yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
cache -- tuple of values needed for the backward pass, contains (a_next, c_next, a_prev, c_prev, xt, parameters)
Note: ft/it/ot stand for the forget/update/output gates, cct stands for the candidate value (c tilde),
c stands for the memory value
"""
# Retrieve parameters from "parameters"
Wf = parameters["Wf"]
bf = parameters["bf"]
Wi = parameters["Wi"]
bi = parameters["bi"]
Wc = parameters["Wc"]
bc = parameters["bc"]
Wo = parameters["Wo"]
bo = parameters["bo"]
Wy = parameters["Wy"]
by = parameters["by"]
# Retrieve dimensions from shapes of xt and Wy
n_x, m = xt.shape
n_y, n_a = Wy.shape
### START CODE HERE ###
# Concatenate a_prev and xt (≈3 lines)
concat = np.zeros(((n_a + n_x),m))
concat[: n_a, :] = a_prev
concat[n_a :, :] = xt
# Compute values for ft, it, cct, c_next, ot, a_next using the formulas given figure (4) (≈6 lines)
ft = sigmoid((np.dot(Wf,concat)) + bf)
it = sigmoid((np.dot(Wi,concat))+ bi)
cct = np.tanh((np.dot(Wc,concat)) + bc)
c_next = np.multiply(ft,c_prev) + np.multiply(it, cct)
ot = sigmoid((np.dot(Wo,concat)) + bo)
a_next = np.multiply(ot,np.tanh(c_next))
# Compute prediction of the LSTM cell (≈1 line)
yt_pred = softmax(np.dot(Wy, a_next) + by)
### END CODE HERE ###
# store values needed for backward propagation in cache
cache = (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)
return a_next, c_next, yt_pred, cache
np.random.seed(1)
xt = np.random.randn(3,10)
a_prev = np.random.randn(5,10)
c_prev = np.random.randn(5,10)
Wf = np.random.randn(5, 5+3)
bf = np.random.randn(5,1)
Wi = np.random.randn(5, 5+3)
bi = np.random.randn(5,1)
Wo = np.random.randn(5, 5+3)
bo = np.random.randn(5,1)
Wc = np.random.randn(5, 5+3)
bc = np.random.randn(5,1)
Wy = np.random.randn(2,5)
by = np.random.randn(2,1)
parameters = {"Wf": Wf, "Wi": Wi, "Wo": Wo, "Wc": Wc, "Wy": Wy, "bf": bf, "bi": bi, "bo": bo, "bc": bc, "by": by}
a_next, c_next, yt, cache = lstm_cell_forward(xt, a_prev, c_prev, parameters)
print("a_next[4] = ", a_next[4])
print("a_next.shape = ", c_next.shape)
print("c_next[2] = ", c_next[2])
print("c_next.shape = ", c_next.shape)
print("yt[1] =", yt[1])
print("yt.shape = ", yt.shape)
print("cache[1][3] =", cache[1][3])
print("len(cache) = ", len(cache))
###Output
a_next[4] = [-0.66408471 0.0036921 0.02088357 0.22834167 -0.85575339 0.00138482
0.76566531 0.34631421 -0.00215674 0.43827275]
a_next.shape = (5, 10)
c_next[2] = [ 0.63267805 1.00570849 0.35504474 0.20690913 -1.64566718 0.11832942
0.76449811 -0.0981561 -0.74348425 -0.26810932]
c_next.shape = (5, 10)
yt[1] = [ 0.79913913 0.15986619 0.22412122 0.15606108 0.97057211 0.31146381
0.00943007 0.12666353 0.39380172 0.07828381]
yt.shape = (2, 10)
cache[1][3] = [-0.16263996 1.03729328 0.72938082 -0.54101719 0.02752074 -0.30821874
0.07651101 -1.03752894 1.41219977 -0.37647422]
len(cache) = 10
###Markdown
**Expected Output**: **a_next[4]**: [-0.66408471 0.0036921 0.02088357 0.22834167 -0.85575339 0.00138482 0.76566531 0.34631421 -0.00215674 0.43827275] **a_next.shape**: (5, 10) **c_next[2]**: [ 0.63267805 1.00570849 0.35504474 0.20690913 -1.64566718 0.11832942 0.76449811 -0.0981561 -0.74348425 -0.26810932] **c_next.shape**: (5, 10) **yt[1]**: [ 0.79913913 0.15986619 0.22412122 0.15606108 0.97057211 0.31146381 0.00943007 0.12666353 0.39380172 0.07828381] **yt.shape**: (2, 10) **cache[1][3]**: [-0.16263996 1.03729328 0.72938082 -0.54101719 0.02752074 -0.30821874 0.07651101 -1.03752894 1.41219977 -0.37647422] **len(cache)**: 10 2.2 - Forward pass for LSTMNow that you have implemented one step of an LSTM, you can now iterate this over this using a for-loop to process a sequence of $T_x$ inputs. **Figure 4**: LSTM over multiple time-steps. **Exercise:** Implement `lstm_forward()` to run an LSTM over $T_x$ time-steps. **Note**: $c^{\langle 0 \rangle}$ is initialized with zeros.
###Code
# GRADED FUNCTION: lstm_forward
def lstm_forward(x, a0, parameters):
"""
Implement the forward propagation of the recurrent neural network using an LSTM-cell described in Figure (3).
Arguments:
x -- Input data for every time-step, of shape (n_x, m, T_x).
a0 -- Initial hidden state, of shape (n_a, m)
parameters -- python dictionary containing:
Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
bf -- Bias of the forget gate, numpy array of shape (n_a, 1)
Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
bi -- Bias of the update gate, numpy array of shape (n_a, 1)
Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x)
bc -- Bias of the first "tanh", numpy array of shape (n_a, 1)
Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
bo -- Bias of the output gate, numpy array of shape (n_a, 1)
Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a -- Hidden states for every time-step, numpy array of shape (n_a, m, T_x)
y -- Predictions for every time-step, numpy array of shape (n_y, m, T_x)
caches -- tuple of values needed for the backward pass, contains (list of all the caches, x)
"""
# Initialize "caches", which will track the list of all the caches
caches = []
### START CODE HERE ###
# Retrieve dimensions from shapes of x and parameters['Wy'] (≈2 lines)
n_x, m, T_x = x.shape
n_y, n_a = parameters["Wy"].shape
# initialize "a", "c" and "y" with zeros (≈3 lines)
a = np.zeros((n_a, m, T_x))
c = a
y = np.zeros((n_y, m, T_x))
# Initialize a_next and c_next (≈2 lines)
a_next = a0
c_next = np.zeros(a_next.shape)
# loop over all time-steps
for t in range(T_x):
# Update next hidden state, next memory state, compute the prediction, get the cache (≈1 line)
a_next, c_next, yt, cache = lstm_cell_forward(x[:,:,t], a_next, c_next, parameters)
# Save the value of the new "next" hidden state in a (≈1 line)
a[:,:,t] = a_next
# Save the value of the prediction in y (≈1 line)
y[:,:,t] = yt
# Save the value of the next cell state (≈1 line)
c[:,:,t] = c_next
# Append the cache into caches (≈1 line)
caches.append(cache)
### END CODE HERE ###
# store values needed for backward propagation in cache
caches = (caches, x)
return a, y, c, caches
np.random.seed(1)
x = np.random.randn(3,10,7)
a0 = np.random.randn(5,10)
Wf = np.random.randn(5, 5+3)
bf = np.random.randn(5,1)
Wi = np.random.randn(5, 5+3)
bi = np.random.randn(5,1)
Wo = np.random.randn(5, 5+3)
bo = np.random.randn(5,1)
Wc = np.random.randn(5, 5+3)
bc = np.random.randn(5,1)
Wy = np.random.randn(2,5)
by = np.random.randn(2,1)
parameters = {"Wf": Wf, "Wi": Wi, "Wo": Wo, "Wc": Wc, "Wy": Wy, "bf": bf, "bi": bi, "bo": bo, "bc": bc, "by": by}
a, y, c, caches = lstm_forward(x, a0, parameters)
print("a[4][3][6] = ", a[4][3][6])
print("a.shape = ", a.shape)
print("y[1][4][3] =", y[1][4][3])
print("y.shape = ", y.shape)
print("caches[1][1[1]] =", caches[1][1][1])
print("c[1][2][1]", c[1][2][1])
print("len(caches) = ", len(caches))
###Output
a[4][3][6] = 0.73162451027
a.shape = (5, 10, 7)
y[1][4][3] = 0.95087346185
y.shape = (2, 10, 7)
caches[1][1[1]] = [ 0.82797464 0.23009474 0.76201118 -0.22232814 -0.20075807 0.18656139
0.41005165]
c[1][2][1] -0.855544916718
len(caches) = 2
###Markdown
**Expected Output**: **a[4][3][6]** = 0.172117767533 **a.shape** = (5, 10, 7) **y[1][4][3]** = 0.95087346185 **y.shape** = (2, 10, 7) **caches[1][1][1]** = [ 0.82797464 0.23009474 0.76201118 -0.22232814 -0.20075807 0.18656139 0.41005165] **c[1][2][1]** = -0.855544916718 **len(caches)** = 2 Congratulations! You have now implemented the forward passes for the basic RNN and the LSTM. When using a deep learning framework, implementing the forward pass is sufficient to build systems that achieve great performance. The rest of this notebook is optional, and will not be graded. 3 - Backpropagation in recurrent neural networks (OPTIONAL / UNGRADED)In modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers do not need to bother with the details of the backward pass. If however you are an expert in calculus and want to see the details of backprop in RNNs, you can work through this optional portion of the notebook. When in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in recurrent neural networks you can to calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are quite complicated and we did not derive them in lecture. However, we will briefly present them below. 3.1 - Basic RNN backward passWe will start by computing the backward pass for the basic RNN-cell. **Figure 5**: RNN-cell's backward pass. Just like in a fully-connected neural network, the derivative of the cost function $J$ backpropagates through the RNN by following the chain-rule from calculas. The chain-rule is also used to calculate $(\frac{\partial J}{\partial W_{ax}},\frac{\partial J}{\partial W_{aa}},\frac{\partial J}{\partial b})$ to update the parameters $(W_{ax}, W_{aa}, b_a)$. Deriving the one step backward functions: To compute the `rnn_cell_backward` you need to compute the following equations. It is a good exercise to derive them by hand. The derivative of $\tanh$ is $1-\tanh(x)^2$. You can find the complete proof [here](https://www.wyzant.com/resources/lessons/math/calculus/derivative_proofs/tanx). Note that: $ \text{sech}(x)^2 = 1 - \tanh(x)^2$Similarly for $\frac{ \partial a^{\langle t \rangle} } {\partial W_{ax}}, \frac{ \partial a^{\langle t \rangle} } {\partial W_{aa}}, \frac{ \partial a^{\langle t \rangle} } {\partial b}$, the derivative of $\tanh(u)$ is $(1-\tanh(u)^2)du$. The final two equations also follow same rule and are derived using the $\tanh$ derivative. Note that the arrangement is done in a way to get the same dimensions to match.
###Code
def rnn_cell_backward(da_next, cache):
"""
Implements the backward pass for the RNN-cell (single time-step).
Arguments:
da_next -- Gradient of loss with respect to next hidden state
cache -- python dictionary containing useful values (output of rnn_cell_forward())
Returns:
gradients -- python dictionary containing:
dx -- Gradients of input data, of shape (n_x, m)
da_prev -- Gradients of previous hidden state, of shape (n_a, m)
dWax -- Gradients of input-to-hidden weights, of shape (n_a, n_x)
dWaa -- Gradients of hidden-to-hidden weights, of shape (n_a, n_a)
dba -- Gradients of bias vector, of shape (n_a, 1)
"""
# Retrieve values from cache
(a_next, a_prev, xt, parameters) = cache
# Retrieve values from parameters
Wax = parameters["Wax"]
Waa = parameters["Waa"]
Wya = parameters["Wya"]
ba = parameters["ba"]
by = parameters["by"]
### START CODE HERE ###
    # compute the gradient of tanh with respect to a_next (≈1 line)
    dtanh = (1 - a_next ** 2) * da_next
    # compute the gradient of the loss with respect to Wax (≈2 lines)
    dxt = np.dot(Wax.T, dtanh)
    dWax = np.dot(dtanh, xt.T)
    # compute the gradient with respect to Waa (≈2 lines)
    da_prev = np.dot(Waa.T, dtanh)
    dWaa = np.dot(dtanh, a_prev.T)
    # compute the gradient with respect to b (≈1 line)
    dba = np.sum(dtanh, axis=1, keepdims=True)
### END CODE HERE ###
# Store the gradients in a python dictionary
gradients = {"dxt": dxt, "da_prev": da_prev, "dWax": dWax, "dWaa": dWaa, "dba": dba}
return gradients
np.random.seed(1)
xt = np.random.randn(3,10)
a_prev = np.random.randn(5,10)
Wax = np.random.randn(5,3)
Waa = np.random.randn(5,5)
Wya = np.random.randn(2,5)
b = np.random.randn(5,1)
by = np.random.randn(2,1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "ba": ba, "by": by}
a_next, yt, cache = rnn_cell_forward(xt, a_prev, parameters)
da_next = np.random.randn(5,10)
gradients = rnn_cell_backward(da_next, cache)
print("gradients[\"dxt\"][1][2] =", gradients["dxt"][1][2])
print("gradients[\"dxt\"].shape =", gradients["dxt"].shape)
print("gradients[\"da_prev\"][2][3] =", gradients["da_prev"][2][3])
print("gradients[\"da_prev\"].shape =", gradients["da_prev"].shape)
print("gradients[\"dWax\"][3][1] =", gradients["dWax"][3][1])
print("gradients[\"dWax\"].shape =", gradients["dWax"].shape)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("gradients[\"dWaa\"].shape =", gradients["dWaa"].shape)
print("gradients[\"dba\"][4] =", gradients["dba"][4])
print("gradients[\"dba\"].shape =", gradients["dba"].shape)
###Output
_____no_output_____
###Markdown
**Expected Output**: **gradients["dxt"][1][2]** = -0.460564103059 **gradients["dxt"].shape** = (3, 10) **gradients["da_prev"][2][3]** = 0.0842968653807 **gradients["da_prev"].shape** = (5, 10) **gradients["dWax"][3][1]** = 0.393081873922 **gradients["dWax"].shape** = (5, 3) **gradients["dWaa"][1][2]** = -0.28483955787 **gradients["dWaa"].shape** = (5, 5) **gradients["dba"][4]** = [ 0.80517166] **gradients["dba"].shape** = (5, 1) Backward pass through the RNNComputing the gradients of the cost with respect to $a^{\langle t \rangle}$ at every time-step $t$ is useful because it is what helps the gradient backpropagate to the previous RNN-cell. To do so, you need to iterate through all the time steps starting at the end, and at each step, you increment the overall $db_a$, $dW_{aa}$, $dW_{ax}$ and you store $dx$.**Instructions**:Implement the `rnn_backward` function. Initialize the return variables with zeros first and then loop through all the time steps while calling the `rnn_cell_backward` at each time timestep, update the other variables accordingly.
###Code
def rnn_backward(da, caches):
"""
Implement the backward pass for a RNN over an entire sequence of input data.
Arguments:
da -- Upstream gradients of all hidden states, of shape (n_a, m, T_x)
caches -- tuple containing information from the forward pass (rnn_forward)
Returns:
gradients -- python dictionary containing:
dx -- Gradient w.r.t. the input data, numpy-array of shape (n_x, m, T_x)
da0 -- Gradient w.r.t the initial hidden state, numpy-array of shape (n_a, m)
dWax -- Gradient w.r.t the input's weight matrix, numpy-array of shape (n_a, n_x)
dWaa -- Gradient w.r.t the hidden state's weight matrix, numpy-arrayof shape (n_a, n_a)
dba -- Gradient w.r.t the bias, of shape (n_a, 1)
"""
### START CODE HERE ###
# Retrieve values from the first cache (t=1) of caches (≈2 lines)
(caches, x) = None
(a1, a0, x1, parameters) = None
# Retrieve dimensions from da's and x1's shapes (≈2 lines)
n_a, m, T_x = None
n_x, m = None
# initialize the gradients with the right sizes (≈6 lines)
dx = None
dWax = None
dWaa = None
dba = None
da0 = None
da_prevt = None
# Loop through all the time steps
for t in reversed(range(None)):
# Compute gradients at time step t. Choose wisely the "da_next" and the "cache" to use in the backward propagation step. (≈1 line)
gradients = None
# Retrieve derivatives from gradients (≈ 1 line)
dxt, da_prevt, dWaxt, dWaat, dbat = gradients["dxt"], gradients["da_prev"], gradients["dWax"], gradients["dWaa"], gradients["dba"]
# Increment global derivatives w.r.t parameters by adding their derivative at time-step t (≈4 lines)
dx[:, :, t] = None
dWax += None
dWaa += None
dba += None
# Set da0 to the gradient of a which has been backpropagated through all time-steps (≈1 line)
da0 = None
### END CODE HERE ###
# Store the gradients in a python dictionary
gradients = {"dx": dx, "da0": da0, "dWax": dWax, "dWaa": dWaa,"dba": dba}
return gradients
np.random.seed(1)
x = np.random.randn(3,10,4)
a0 = np.random.randn(5,10)
Wax = np.random.randn(5,3)
Waa = np.random.randn(5,5)
Wya = np.random.randn(2,5)
ba = np.random.randn(5,1)
by = np.random.randn(2,1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "ba": ba, "by": by}
a, y, caches = rnn_forward(x, a0, parameters)
da = np.random.randn(5, 10, 4)
gradients = rnn_backward(da, caches)
print("gradients[\"dx\"][1][2] =", gradients["dx"][1][2])
print("gradients[\"dx\"].shape =", gradients["dx"].shape)
print("gradients[\"da0\"][2][3] =", gradients["da0"][2][3])
print("gradients[\"da0\"].shape =", gradients["da0"].shape)
print("gradients[\"dWax\"][3][1] =", gradients["dWax"][3][1])
print("gradients[\"dWax\"].shape =", gradients["dWax"].shape)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("gradients[\"dWaa\"].shape =", gradients["dWaa"].shape)
print("gradients[\"dba\"][4] =", gradients["dba"][4])
print("gradients[\"dba\"].shape =", gradients["dba"].shape)
###Output
_____no_output_____
###Markdown
**Expected Output**: **gradients["dx"][1][2]** = [-2.07101689 -0.59255627 0.02466855 0.01483317] **gradients["dx"].shape** = (3, 10, 4) **gradients["da0"][2][3]** = -0.314942375127 **gradients["da0"].shape** = (5, 10) **gradients["dWax"][3][1]** = 11.2641044965 **gradients["dWax"].shape** = (5, 3) **gradients["dWaa"][1][2]** = 2.30333312658 **gradients["dWaa"].shape** = (5, 5) **gradients["dba"][4]** = [-0.74747722] **gradients["dba"].shape** = (5, 1) 3.2 - LSTM backward pass 3.2.1 One Step backwardThe LSTM backward pass is slighltly more complicated than the forward one. We have provided you with all the equations for the LSTM backward pass below. (If you enjoy calculus exercises feel free to try deriving these from scratch yourself.) 3.2.2 gate derivatives$$d \Gamma_o^{\langle t \rangle} = da_{next}*\tanh(c_{next}) * \Gamma_o^{\langle t \rangle}*(1-\Gamma_o^{\langle t \rangle})\tag{7}$$$$d\tilde c^{\langle t \rangle} = dc_{next}*\Gamma_u^{\langle t \rangle}+ \Gamma_o^{\langle t \rangle} (1-\tanh(c_{next})^2) * i_t * da_{next} * \tilde c^{\langle t \rangle} * (1-\tanh(\tilde c)^2) \tag{8}$$$$d\Gamma_u^{\langle t \rangle} = dc_{next}*\tilde c^{\langle t \rangle} + \Gamma_o^{\langle t \rangle} (1-\tanh(c_{next})^2) * \tilde c^{\langle t \rangle} * da_{next}*\Gamma_u^{\langle t \rangle}*(1-\Gamma_u^{\langle t \rangle})\tag{9}$$$$d\Gamma_f^{\langle t \rangle} = dc_{next}*\tilde c_{prev} + \Gamma_o^{\langle t \rangle} (1-\tanh(c_{next})^2) * c_{prev} * da_{next}*\Gamma_f^{\langle t \rangle}*(1-\Gamma_f^{\langle t \rangle})\tag{10}$$ 3.2.3 parameter derivatives $$ dW_f = d\Gamma_f^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t\end{pmatrix}^T \tag{11} $$$$ dW_u = d\Gamma_u^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t\end{pmatrix}^T \tag{12} $$$$ dW_c = d\tilde c^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t\end{pmatrix}^T \tag{13} $$$$ dW_o = d\Gamma_o^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t\end{pmatrix}^T \tag{14}$$To calculate $db_f, db_u, db_c, db_o$ you just need to sum across the horizontal (axis= 1) axis on $d\Gamma_f^{\langle t \rangle}, d\Gamma_u^{\langle t \rangle}, d\tilde c^{\langle t \rangle}, d\Gamma_o^{\langle t \rangle}$ respectively. Note that you should have the `keep_dims = True` option.Finally, you will compute the derivative with respect to the previous hidden state, previous memory state, and input.$$ da_{prev} = W_f^T*d\Gamma_f^{\langle t \rangle} + W_u^T * d\Gamma_u^{\langle t \rangle}+ W_c^T * d\tilde c^{\langle t \rangle} + W_o^T * d\Gamma_o^{\langle t \rangle} \tag{15}$$Here, the weights for equations 13 are the first n_a, (i.e. $W_f = W_f[:n_a,:]$ etc...)$$ dc_{prev} = dc_{next}\Gamma_f^{\langle t \rangle} + \Gamma_o^{\langle t \rangle} * (1- \tanh(c_{next})^2)*\Gamma_f^{\langle t \rangle}*da_{next} \tag{16}$$$$ dx^{\langle t \rangle} = W_f^T*d\Gamma_f^{\langle t \rangle} + W_u^T * d\Gamma_u^{\langle t \rangle}+ W_c^T * d\tilde c_t + W_o^T * d\Gamma_o^{\langle t \rangle}\tag{17} $$where the weights for equation 15 are from n_a to the end, (i.e. $W_f = W_f[n_a:,:]$ etc...)**Exercise:** Implement `lstm_cell_backward` by implementing equations $7-17$ below. Good luck! :)
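One implementation detail worth a tiny illustration (an editorial aside): each bias gradient is a sum over the batch (horizontal) axis, and `keepdims=True` is what preserves the `(n_a, 1)` column shape that the parameter update expects:

```python
import numpy as np

d_gamma_o = np.random.randn(5, 10)              # stand-in for a gate gradient, shape (n_a, m)
dbo = np.sum(d_gamma_o, axis=1, keepdims=True)  # sum across the batch axis
print(dbo.shape)                                # (5, 1); without keepdims it would be (5,)
```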
###Code
def lstm_cell_backward(da_next, dc_next, cache):
"""
Implement the backward pass for the LSTM-cell (single time-step).
Arguments:
da_next -- Gradients of next hidden state, of shape (n_a, m)
dc_next -- Gradients of next cell state, of shape (n_a, m)
cache -- cache storing information from the forward pass
Returns:
gradients -- python dictionary containing:
dxt -- Gradient of input data at time-step t, of shape (n_x, m)
da_prev -- Gradient w.r.t. the previous hidden state, numpy array of shape (n_a, m)
dc_prev -- Gradient w.r.t. the previous memory state, of shape (n_a, m, T_x)
dWf -- Gradient w.r.t. the weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
dWi -- Gradient w.r.t. the weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
dWc -- Gradient w.r.t. the weight matrix of the memory gate, numpy array of shape (n_a, n_a + n_x)
dWo -- Gradient w.r.t. the weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
dbf -- Gradient w.r.t. biases of the forget gate, of shape (n_a, 1)
dbi -- Gradient w.r.t. biases of the update gate, of shape (n_a, 1)
dbc -- Gradient w.r.t. biases of the memory gate, of shape (n_a, 1)
dbo -- Gradient w.r.t. biases of the output gate, of shape (n_a, 1)
"""
# Retrieve information from "cache"
(a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters) = cache
### START CODE HERE ###
# Retrieve dimensions from xt's and a_next's shape (≈2 lines)
n_x, m = None
n_a, m = None
# Compute gates related derivatives, you can find their values can be found by looking carefully at equations (7) to (10) (≈4 lines)
dot = None
dcct = None
dit = None
dft = None
# Code equations (7) to (10) (≈4 lines)
dit = None
dft = None
dot = None
dcct = None
# Compute parameters related derivatives. Use equations (11)-(14) (≈8 lines)
dWf = None
dWi = None
dWc = None
dWo = None
dbf = None
dbi = None
dbc = None
dbo = None
# Compute derivatives w.r.t previous hidden state, previous memory state and input. Use equations (15)-(17). (≈3 lines)
da_prev = None
dc_prev = None
dxt = None
### END CODE HERE ###
# Save gradients in dictionary
gradients = {"dxt": dxt, "da_prev": da_prev, "dc_prev": dc_prev, "dWf": dWf,"dbf": dbf, "dWi": dWi,"dbi": dbi,
"dWc": dWc,"dbc": dbc, "dWo": dWo,"dbo": dbo}
return gradients
np.random.seed(1)
xt = np.random.randn(3,10)
a_prev = np.random.randn(5,10)
c_prev = np.random.randn(5,10)
Wf = np.random.randn(5, 5+3)
bf = np.random.randn(5,1)
Wi = np.random.randn(5, 5+3)
bi = np.random.randn(5,1)
Wo = np.random.randn(5, 5+3)
bo = np.random.randn(5,1)
Wc = np.random.randn(5, 5+3)
bc = np.random.randn(5,1)
Wy = np.random.randn(2,5)
by = np.random.randn(2,1)
parameters = {"Wf": Wf, "Wi": Wi, "Wo": Wo, "Wc": Wc, "Wy": Wy, "bf": bf, "bi": bi, "bo": bo, "bc": bc, "by": by}
a_next, c_next, yt, cache = lstm_cell_forward(xt, a_prev, c_prev, parameters)
da_next = np.random.randn(5,10)
dc_next = np.random.randn(5,10)
gradients = lstm_cell_backward(da_next, dc_next, cache)
print("gradients[\"dxt\"][1][2] =", gradients["dxt"][1][2])
print("gradients[\"dxt\"].shape =", gradients["dxt"].shape)
print("gradients[\"da_prev\"][2][3] =", gradients["da_prev"][2][3])
print("gradients[\"da_prev\"].shape =", gradients["da_prev"].shape)
print("gradients[\"dc_prev\"][2][3] =", gradients["dc_prev"][2][3])
print("gradients[\"dc_prev\"].shape =", gradients["dc_prev"].shape)
print("gradients[\"dWf\"][3][1] =", gradients["dWf"][3][1])
print("gradients[\"dWf\"].shape =", gradients["dWf"].shape)
print("gradients[\"dWi\"][1][2] =", gradients["dWi"][1][2])
print("gradients[\"dWi\"].shape =", gradients["dWi"].shape)
print("gradients[\"dWc\"][3][1] =", gradients["dWc"][3][1])
print("gradients[\"dWc\"].shape =", gradients["dWc"].shape)
print("gradients[\"dWo\"][1][2] =", gradients["dWo"][1][2])
print("gradients[\"dWo\"].shape =", gradients["dWo"].shape)
print("gradients[\"dbf\"][4] =", gradients["dbf"][4])
print("gradients[\"dbf\"].shape =", gradients["dbf"].shape)
print("gradients[\"dbi\"][4] =", gradients["dbi"][4])
print("gradients[\"dbi\"].shape =", gradients["dbi"].shape)
print("gradients[\"dbc\"][4] =", gradients["dbc"][4])
print("gradients[\"dbc\"].shape =", gradients["dbc"].shape)
print("gradients[\"dbo\"][4] =", gradients["dbo"][4])
print("gradients[\"dbo\"].shape =", gradients["dbo"].shape)
###Output
_____no_output_____
###Markdown
**Expected Output**: **gradients["dxt"][1][2]** = 3.23055911511 **gradients["dxt"].shape** = (3, 10) **gradients["da_prev"][2][3]** = -0.0639621419711 **gradients["da_prev"].shape** = (5, 10) **gradients["dc_prev"][2][3]** = 0.797522038797 **gradients["dc_prev"].shape** = (5, 10) **gradients["dWf"][3][1]** = -0.147954838164 **gradients["dWf"].shape** = (5, 8) **gradients["dWi"][1][2]** = 1.05749805523 **gradients["dWi"].shape** = (5, 8) **gradients["dWc"][3][1]** = 2.30456216369 **gradients["dWc"].shape** = (5, 8) **gradients["dWo"][1][2]** = 0.331311595289 **gradients["dWo"].shape** = (5, 8) **gradients["dbf"][4]** = [ 0.18864637] **gradients["dbf"].shape** = (5, 1) **gradients["dbi"][4]** = [-0.40142491] **gradients["dbi"].shape** = (5, 1) **gradients["dbc"][4]** = [ 0.25587763] **gradients["dbc"].shape** = (5, 1) **gradients["dbo"][4]** = [ 0.13893342] **gradients["dbo"].shape** = (5, 1) 3.3 Backward pass through the LSTM RNNThis part is very similar to the `rnn_backward` function you implemented above. You will first create variables of the same dimension as your return variables. You will then iterate over all the time steps starting from the end and call the one step function you implemented for LSTM at each iteration. You will then update the parameters by summing them individually. Finally return a dictionary with the new gradients. **Instructions**: Implement the `lstm_backward` function. Create a for loop starting from $T_x$ and going backward. For each step call `lstm_cell_backward` and update the your old gradients by adding the new gradients to them. Note that `dxt` is not updated but is stored.
###Code
def lstm_backward(da, caches):
"""
Implement the backward pass for the RNN with LSTM-cell (over a whole sequence).
Arguments:
da -- Gradients w.r.t the hidden states, numpy-array of shape (n_a, m, T_x)
dc -- Gradients w.r.t the memory states, numpy-array of shape (n_a, m, T_x)
caches -- cache storing information from the forward pass (lstm_forward)
Returns:
gradients -- python dictionary containing:
dx -- Gradient of inputs, of shape (n_x, m, T_x)
da0 -- Gradient w.r.t. the previous hidden state, numpy array of shape (n_a, m)
dWf -- Gradient w.r.t. the weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
dWi -- Gradient w.r.t. the weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
dWc -- Gradient w.r.t. the weight matrix of the memory gate, numpy array of shape (n_a, n_a + n_x)
dWo -- Gradient w.r.t. the weight matrix of the save gate, numpy array of shape (n_a, n_a + n_x)
dbf -- Gradient w.r.t. biases of the forget gate, of shape (n_a, 1)
dbi -- Gradient w.r.t. biases of the update gate, of shape (n_a, 1)
dbc -- Gradient w.r.t. biases of the memory gate, of shape (n_a, 1)
dbo -- Gradient w.r.t. biases of the save gate, of shape (n_a, 1)
"""
# Retrieve values from the first cache (t=1) of caches.
(caches, x) = caches
(a1, c1, a0, c0, f1, i1, cc1, o1, x1, parameters) = caches[0]
### START CODE HERE ###
# Retrieve dimensions from da's and x1's shapes (≈2 lines)
n_a, m, T_x = None
n_x, m = None
# initialize the gradients with the right sizes (≈12 lines)
dx = None
da0 = None
da_prevt = None
dc_prevt = None
dWf = None
dWi = None
dWc = None
dWo = None
dbf = None
dbi = None
dbc = None
dbo = None
# loop back over the whole sequence
for t in reversed(range(None)):
# Compute all gradients using lstm_cell_backward
gradients = None
# Store or add the gradient to the parameters' previous step's gradient
dx[:,:,t] = None
dWf = None
dWi = None
dWc = None
dWo = None
dbf = None
dbi = None
dbc = None
dbo = None
# Set the first activation's gradient to the backpropagated gradient da_prev.
da0 = None
### END CODE HERE ###
# Store the gradients in a python dictionary
gradients = {"dx": dx, "da0": da0, "dWf": dWf,"dbf": dbf, "dWi": dWi,"dbi": dbi,
"dWc": dWc,"dbc": dbc, "dWo": dWo,"dbo": dbo}
return gradients
np.random.seed(1)
x = np.random.randn(3,10,7)
a0 = np.random.randn(5,10)
Wf = np.random.randn(5, 5+3)
bf = np.random.randn(5,1)
Wi = np.random.randn(5, 5+3)
bi = np.random.randn(5,1)
Wo = np.random.randn(5, 5+3)
bo = np.random.randn(5,1)
Wc = np.random.randn(5, 5+3)
bc = np.random.randn(5,1)
parameters = {"Wf": Wf, "Wi": Wi, "Wo": Wo, "Wc": Wc, "Wy": Wy, "bf": bf, "bi": bi, "bo": bo, "bc": bc, "by": by}
a, y, c, caches = lstm_forward(x, a0, parameters)
da = np.random.randn(5, 10, 4)
gradients = lstm_backward(da, caches)
print("gradients[\"dx\"][1][2] =", gradients["dx"][1][2])
print("gradients[\"dx\"].shape =", gradients["dx"].shape)
print("gradients[\"da0\"][2][3] =", gradients["da0"][2][3])
print("gradients[\"da0\"].shape =", gradients["da0"].shape)
print("gradients[\"dWf\"][3][1] =", gradients["dWf"][3][1])
print("gradients[\"dWf\"].shape =", gradients["dWf"].shape)
print("gradients[\"dWi\"][1][2] =", gradients["dWi"][1][2])
print("gradients[\"dWi\"].shape =", gradients["dWi"].shape)
print("gradients[\"dWc\"][3][1] =", gradients["dWc"][3][1])
print("gradients[\"dWc\"].shape =", gradients["dWc"].shape)
print("gradients[\"dWo\"][1][2] =", gradients["dWo"][1][2])
print("gradients[\"dWo\"].shape =", gradients["dWo"].shape)
print("gradients[\"dbf\"][4] =", gradients["dbf"][4])
print("gradients[\"dbf\"].shape =", gradients["dbf"].shape)
print("gradients[\"dbi\"][4] =", gradients["dbi"][4])
print("gradients[\"dbi\"].shape =", gradients["dbi"].shape)
print("gradients[\"dbc\"][4] =", gradients["dbc"][4])
print("gradients[\"dbc\"].shape =", gradients["dbc"].shape)
print("gradients[\"dbo\"][4] =", gradients["dbo"][4])
print("gradients[\"dbo\"].shape =", gradients["dbo"].shape)
###Output
_____no_output_____ |
CH2 Basic Plotting.ipynb | ###Markdown
Plot types. What were the different sports in the first Olympics? Plot them using different kinds of graphs.
###Code
fo = oo[oo.Edition == 1896]
fo.head()
fo.Sport.value_counts()
###Output
_____no_output_____
###Markdown
Line Plot
###Code
fo.Sport.value_counts().plot(kind="line")
###Output
_____no_output_____
###Markdown
Bar Plot
###Code
fo.Sport.value_counts().plot(kind="bar")
###Output
_____no_output_____
###Markdown
Horizontal Bar Plot
###Code
fo.Sport.value_counts().plot(kind="barh")
###Output
_____no_output_____
###Markdown
Pie Chart
###Code
fo.Sport.value_counts().plot(kind="pie")
###Output
_____no_output_____
###Markdown
Plot Color
###Code
fo.Sport.value_counts().plot(kind="line", color="red")
fo.Sport.value_counts().plot(kind="bar", color="plum")
###Output
_____no_output_____
###Markdown
Figsize()
###Code
fo.Sport.value_counts().plot(kind="line", color="red", figsize=(10,3))
###Output
_____no_output_____
###Markdown
Colormaps. Sequential: represents information that has an inherent order; lightness changes gradually, often within a single hue. Diverging: used when the information being plotted deviates around a middle value. Qualitative: represents information that has no ordering or relationships.
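For instance, each family suits a different kind of plot; the sketch below reuses the `fo` value counts from the cells above, and the specific colormap names (Blues, coolwarm, Set3) are just illustrative choices:

```python
import matplotlib.pyplot as plt

# one pie chart per colormap family
for cmap, family in [("Blues", "sequential"), ("coolwarm", "diverging"), ("Set3", "qualitative")]:
    plt.figure()
    fo.Sport.value_counts().plot(kind="pie", colormap=cmap, title=family)
plt.show()
```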
###Code
fo.Sport.value_counts().plot(kind="pie", colormap="Reds")
###Output
_____no_output_____
###Markdown
Seaborn basic plotting. Why Seaborn? It produces attractive statistical plots, it is a complement to (not a substitute for) Matplotlib, and it integrates well with pandas.
###Code
import seaborn as sns
###Output
_____no_output_____
###Markdown
How many medals have been won by men and women in the history of the Olympics? How many gold, silver, and bronze medals were won by each gender?
###Code
sns.countplot(x='Medal', data=oo, hue="Gender")
###Output
_____no_output_____ |
Chapter03/SortingRanking.ipynb | ###Markdown
Managing Your Data by Sorting and Ranking Working with pandas*Curtis Miller*We will see methods for sorting and ranking data.Let's first go through initialization steps.
###Code
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
df = DataFrame(np.round(np.random.randn(7, 3) * 10),
columns=["AAA", "BBB", "CCC"],
index=list("defcabg"))
df
###Output
_____no_output_____
###Markdown
Here I sort the index.
###Code
df.sort_index()
df.sort_index(axis=1, ascending=False) # Sorting columns by
# index, opposite
# order
###Output
_____no_output_____
###Markdown
Now I sort according to the values of the `DataFrame`.
###Code
df.sort_values(by='AAA') # According to contents of AAA
df.sort_values(by=['BBB', 'CCC']) # Arrange first by BBB,
# breaking ties with CCC
###Output
_____no_output_____
###Markdown
Ranking isn't much different from sorting.
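The main thing `rank()` adds on top of sorting is a policy for ties; a quick illustration with a throwaway Series (the `method` values are standard pandas options):

```python
s = Series([7, 7, 2, 9])
print(s.rank())                # default: ties share the average rank -> 2.5, 2.5, 1.0, 4.0
print(s.rank(method="max"))    # ties take the highest rank in the group -> 3.0, 3.0, 1.0, 4.0
print(s.rank(method="first"))  # ties broken by order of appearance -> 2.0, 3.0, 1.0, 4.0
```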
###Code
df.rank()
df.rank(method="max")
###Output
_____no_output_____ |
part2/day4/cifar10_GPU.ipynb | ###Markdown
###Code
"""Import libraries"""
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.utils import to_categorical
import numpy as np
np.random.seed(2018)
import matplotlib.pyplot as plt
%matplotlib inline
"""Load the CIFAR-10 dataset"""
(X_train, y_train), (X_test,y_test) = cifar10.load_data()
# train / test split
# 50,000 / 10,000 images
# 32 pixels high
# 32 pixels wide
# 3 channels (color images)
X_train.shape, X_test.shape
plt.figure(figsize=(10,10))
for idx in range(25):
plt.subplot(5,5, idx+1)
plt.imshow(X_train[idx], cmap ='gray')
plt.title('Class : {}'.format(y_train[idx]))
plt.tight_layout()
img_rows, img_cols = X_train.shape[1],X_train.shape[2]
num_channels = 3  # 3 channels: color (RGB) images
X_train = X_train.reshape(-1,img_rows,img_cols,num_channels)
X_test = X_test.reshape(-1,img_rows,img_cols,num_channels)
# input size + channel count
input_shape = (img_rows, img_cols,num_channels)
X_train.shape,X_test.shape
# normalization
if np.max(X_train)>1: X_train = X_train /255
if np.max(X_test)>1: X_test = X_test /255
# verify the normalization for:
# train
# test
X_train.max(),X_train.min(),X_test.max(),X_test.min()
if len(y_train.shape) == 2:
y_train = y_train.reshape(-1)
y_test = y_test.reshape(-1)
if len(y_train.shape) == 1:
num_classes = len(set(y_train))
y_train = to_categorical(y_train ,num_classes)
y_test = to_categorical(y_test ,num_classes)
"""Verify shapes"""
# train: 50,000 samples, 10 classes
# test: 10,000 samples, 10 classes
# num_classes: 10 classes
y_train.shape,y_test.shape,num_classes
"""Model architecture"""
model = Sequential([
Conv2D(filters = 32, kernel_size= (3,3),input_shape = input_shape),
MaxPool2D(pool_size = (2,2)),
Dropout(0.25),
Conv2D(filters = 64, kernel_size= (3,3)),
MaxPool2D(pool_size = (2,2)),
Dropout(0.25),
Flatten(),
Dense(512,activation = 'relu'),
Dropout(0.5),
Dense(num_classes,activation = 'softmax')
])
model.summary()
"""Compile the model"""
# optimizer: adam
# metric: accuracy
# loss: categorical crossentropy (multi-class classification in Keras)
model.compile(loss = 'categorical_crossentropy',optimizer='adam', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Training
###Code
model.fit(
X_train,y_train,
batch_size = 256,
epochs = 2,verbose=2,
validation_data=(X_test,y_test)
)
model.evaluate(X_test,y_test)
###Output
10000/10000 [==============================] - 1s 86us/step
###Markdown
One epoch took just over 5 seconds; val_acc: 0.5933. The ONLY change made was switching to the GPU runtime.
###Code
"""Model architecture"""
model = Sequential([
Conv2D(filters = 32, kernel_size= (3,3),input_shape = input_shape),
Conv2D(filters = 32, kernel_size= (3,3)),
MaxPool2D(pool_size = (2,2)),
Dropout(0.25),
Conv2D(filters = 64, kernel_size= (3,3)),
Conv2D(filters = 64, kernel_size= (3,3)),
MaxPool2D(pool_size = (2,2)),
Dropout(0.25),
Conv2D(filters = 128, kernel_size= (3,3)),
MaxPool2D(pool_size = (2,2)),
Dropout(0.25),
Flatten(),
Dense(512,activation = 'relu'),
Dropout(0.5),
Dense(num_classes,activation = 'softmax')
])
model.summary()
"""Compile the model"""
# optimizer: adam
# metric: accuracy
# loss: categorical crossentropy (multi-class classification in Keras)
model.compile(loss = 'categorical_crossentropy',optimizer='adam', metrics=['accuracy'])
model.fit(
X_train,y_train,
batch_size = 256,
epochs = 20,verbose=2,
validation_data=(X_test,y_test)
)
###Output
Train on 50000 samples, validate on 10000 samples
Epoch 1/20
- 6s - loss: 1.7711 - acc: 0.3467 - val_loss: 1.4823 - val_acc: 0.4604
Epoch 2/20
- 5s - loss: 1.4387 - acc: 0.4834 - val_loss: 1.2984 - val_acc: 0.5344
Epoch 3/20
- 5s - loss: 1.3053 - acc: 0.5373 - val_loss: 1.1839 - val_acc: 0.5805
Epoch 4/20
- 5s - loss: 1.2323 - acc: 0.5631 - val_loss: 1.1350 - val_acc: 0.5972
Epoch 5/20
- 5s - loss: 1.1824 - acc: 0.5848 - val_loss: 1.0918 - val_acc: 0.6150
Epoch 6/20
- 5s - loss: 1.1397 - acc: 0.6020 - val_loss: 1.0595 - val_acc: 0.6301
Epoch 7/20
- 5s - loss: 1.1161 - acc: 0.6102 - val_loss: 1.1454 - val_acc: 0.5976
Epoch 8/20
- 5s - loss: 1.0849 - acc: 0.6227 - val_loss: 1.0216 - val_acc: 0.6463
Epoch 9/20
- 5s - loss: 1.0618 - acc: 0.6310 - val_loss: 1.0010 - val_acc: 0.6526
Epoch 10/20
- 5s - loss: 1.0423 - acc: 0.6374 - val_loss: 0.9572 - val_acc: 0.6664
Epoch 11/20
- 5s - loss: 1.0358 - acc: 0.6372 - val_loss: 0.9507 - val_acc: 0.6695
Epoch 12/20
- 5s - loss: 1.0168 - acc: 0.6465 - val_loss: 0.9405 - val_acc: 0.6774
Epoch 13/20
- 5s - loss: 1.0044 - acc: 0.6505 - val_loss: 0.9375 - val_acc: 0.6750
Epoch 14/20
- 5s - loss: 0.9936 - acc: 0.6564 - val_loss: 0.9018 - val_acc: 0.6901
Epoch 15/20
- 5s - loss: 0.9767 - acc: 0.6617 - val_loss: 0.9475 - val_acc: 0.6731
Epoch 16/20
- 5s - loss: 0.9759 - acc: 0.6614 - val_loss: 0.9202 - val_acc: 0.6880
Epoch 17/20
- 5s - loss: 0.9629 - acc: 0.6630 - val_loss: 0.8985 - val_acc: 0.6895
Epoch 18/20
- 5s - loss: 0.9476 - acc: 0.6720 - val_loss: 0.9071 - val_acc: 0.6880
Epoch 19/20
- 5s - loss: 0.9432 - acc: 0.6727 - val_loss: 0.8832 - val_acc: 0.6958
Epoch 20/20
- 5s - loss: 0.9356 - acc: 0.6747 - val_loss: 0.9163 - val_acc: 0.6911
###Markdown
The GPU was used and training ran quickly, around 5 seconds per epoch. The architecture was made more complex and trained for 20 epochs; val_acc = 0.6911.
###Code
model.evaluate(X_test,y_test)
###Output
10000/10000 [==============================] - 1s 96us/step
|
projects/Data_Science_in_Telco_Data_Cleansing.ipynb | ###Markdown
---
Title: "Data Science in Telco: Data Cleansing"
Author: "Joseph Armando Carvallo"
Date: "21/03/2021"
---
DQLab Telco is a telco company that already has many branches spread everywhere. Since its establishment in 2019, DQLab Telco has consistently paid attention to its customer experience so that customers will not leave. Even though the company is only a little over a year old, DQLab Telco already has many customers who have switched subscriptions to competitors. The management wants to reduce the number of churned customers by using machine learning.
Therefore, the Data Scientist team was asked to prepare the data as well as to build the right prediction model to determine whether a customer will churn or not. As a Data Scientist, I was asked to prepare the data before the modeling is done.
I will do the data preprocessing (data cleansing) for last month, June 2020. The steps to be taken are:
1. Looking for valid customer IDs (telephone numbers)
2. Handling data that is still empty (missing values)
3. Handling outlier values in each variable
4. Standardizing the values of the variables
The Python packages that I will be using in the analysis are:
* Pandas (Python for Data Analysis), a Python library that focuses on data-analysis processes such as data manipulation, data preparation, and data cleaning.
* Matplotlib, a Python library that focuses on data visualization such as plotting graphs. Matplotlib can be used in Python scripts, the Python and IPython shells, web application servers, and several other graphical user interface (GUI) toolkits.
* Seaborn, which builds on Matplotlib and introduces additional plot types. It also makes traditional Matplotlib plots look more attractive.
The dataset used is provided in csv format and can be downloaded at https://storage.googleapis.com/dqlab-dataset/dqlab_telco.csv
The detailed columns are as follows:
* `UpdatedAt`: the period in which the data was taken
* `customerID`: the phone number of the customer
* `gender`: the gender of the customer (Male or Female)
* `SeniorCitizen`: the seniority status of the customer (1 = yes, 0 = no)
* `Partner`: the marital status of the customer (Yes or No)
* `Dependents`: whether the customer has dependents (Yes or No)
* `tenure`: the number of months the customer has stayed with the company
* `PhoneService`: whether the customer has phone service (Yes or No)
* `MultipleLines`: whether the customer has multiple lines (Yes, No, or No phone service)
* `InternetService`: the internet service provider of the customer (DSL, Fiber optic, or No)
* `OnlineSecurity`: whether the customer has online security (Yes, No, or No internet service)
* `OnlineBackup`: whether the customer has online backup (Yes, No, or No internet service)
* `DeviceProtection`: whether the customer has device protection (Yes, No, or No internet service)
* `TechSupport`: whether the customer has tech support (Yes, No, or No internet service)
* `StreamingTV`: whether the customer has streaming TV (Yes, No, or No internet service)
* `StreamingMovies`: whether the customer has streaming movies (Yes, No, or No internet service)
* `Contract`: the contract term of the customer (Month-to-month, One year, or Two year)
* `PaperlessBilling`: whether the customer has paperless billing (Yes or No)
* `PaymentMethod`: the payment method of the customer (Electronic check, Mailed check, Bank transfer (automatic), or Credit card (automatic))
* `MonthlyCharges`: the amount charged to the customer monthly
* `TotalCharges`: the total amount charged to the customer
* `Churn`: whether the customer churned (Yes or No)
**Import Libraries and Datasets** Based on the explanation of the libraries and dataset above, the first thing I am going to do is import them into the workspace. After the dataset is imported, I display the number of rows and columns of the data set using `.shape`, print the first five rows using `.head()`, and find out how many unique customerID values there are using `.nunique()`.
###Code
import pandas as pd
# Facilitate the appearance of row data
pd.options.display.max_columns = 50
# Importing Data Source
df_load = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/dqlab_telco.csv')
print("Size of dataset", df_load.shape)
df_load.head(5)
print(df_load.customerID.nunique())
###Output
7017
###Markdown
**Filtering `customerID` with certain formats** Look for the correct `customerID` (customer phone number) format, with the following criteria:
* the character length is 11 to 12,
* it consists of digits only; no other characters are allowed,
* it starts with 45 as its first two digits.
I use the `.count()` function to count the number of `customerID` rows, `.str.match()` with a regex to check the criteria above, and `astype(str)` to cast the column to string before matching.
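A quick illustration of how the pattern used in the next cell maps onto those rules, on made-up toy IDs: `45` anchors the first two digits and `\d{9,10}` requires 9 to 10 more digits, giving 11 to 12 characters in total.

```python
import pandas as pd

sample = pd.Series(['45759018157', '459999999999', '41234567890', '45abc123'])
print(sample.str.match(r'(45\d{9,10})'))
# 45759018157  -> True   (11 digits, starts with 45)
# 459999999999 -> True   (12 digits, starts with 45)
# 41234567890  -> False  (does not start with 45)
# 45abc123     -> False  (contains non-digit characters)
```
Note that `str.match` only anchors the pattern at the start of the string, so an over-long all-digit ID would still slip through; appending `$` to the pattern would make the length check strict.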
###Code
df_load['valid_id'] = df_load['customerID'].astype(str).str.match(r'(45\d{9,10})')
df_load = (df_load[df_load['valid_id'] == True]).drop('valid_id', axis = 1)
print('The result of the filtered number of Customer IDs is',df_load['customerID'].count())
###Output
The result of the filtered number of Customer IDs is 7006
###Markdown
**Filtering duplicate `customerID`** I will make sure that there are no duplicate `customerID` values. The usual kinds of duplicates are:
* duplication because the same record was inserted more than once,
* duplication because the same customer was inserted for different data-retrieval periods.
I use the output of the previous step as the input here, the `drop_duplicates()` function to remove duplicate rows, and `sort_values()` to keep the most recently recorded data.
###Code
# Drop Duplicate Rows
df_load.drop_duplicates()
# Drop duplicate ID sorted by Periode
df_load = df_load.sort_values('UpdatedAt', ascending=False).drop_duplicates(['customerID'])
print('The number of `CustomerID` that has been removed (distinct) are',df_load['customerID'].count())
###Output
The number of `CustomerID` that has been removed (distinct) are 6993
###Markdown
Validating `customerID` is needed to ensure that the data I retrieve is correct. Based on those results, the number of rows changes from the first load to the final result: the data initially had 7113 rows and 22 columns with 7017 unique `customerID` numbers, and after the validity checks 6993 rows of data remain. **Handle missing values by deleting rows** As the next step, I delete rows for which it is not recorded whether the customer churned or not; it is assumed that the data modeller only accepts rows with a churn flag (Yes or No). I use `isnull()` to detect missing values and `dropna()` to remove the affected rows.
###Code
print('Total missing values data from the Churn column are',df_load['Churn'].isnull().sum())
# Dropping all Rows with spesific column
df_load.dropna(subset=['Churn'],inplace=True)
print('Total rows and columns of the data after missing values deleted are',df_load.shape)
###Output
Total missing values data from the Churn column are 43
Total rows and columns of the data after missing values deleted are (6950, 22)
###Markdown
**Handle missing values by filling in certain values** Besides removing rows, missing values can also be imputed with specific values. It is assumed that the data modeller requires the missing values to be filled using the following criteria:
* `tenure`: each row with a missing subscription length is filled with 11,
* the other numeric variables are filled with the median of each variable.
###Code
print('Status of Missing Values is',df_load.isnull().values.any())
print('\nThe number of missing values for each column is')
print(df_load.isnull().sum().sort_values(ascending=False))
# Handling missing values Tenure fill with 11
df_load['tenure'].fillna(11, inplace=True)
# Handling missing values num vars (except Tenure)
for col_name in list(['MonthlyCharges','TotalCharges']):
median = df_load[col_name].median()
df_load[col_name].fillna(median, inplace=True)
print('\nThe number of missing values after imputering the data is')
print(df_load.isnull().sum().sort_values(ascending=False))
###Output
Status of Missing Values is False
The number of missing values for each column is
Churn 0
TotalCharges 0
customerID 0
gender 0
SeniorCitizen 0
Partner 0
Dependents 0
tenure 0
PhoneService 0
MultipleLines 0
InternetService 0
OnlineSecurity 0
OnlineBackup 0
DeviceProtection 0
TechSupport 0
StreamingTV 0
StreamingMovies 0
Contract 0
PaperlessBilling 0
PaymentMethod 0
MonthlyCharges 0
UpdatedAt 0
dtype: int64
The number of missing values after imputering the data is
Churn 0
TotalCharges 0
customerID 0
gender 0
SeniorCitizen 0
Partner 0
Dependents 0
tenure 0
PhoneService 0
MultipleLines 0
InternetService 0
OnlineSecurity 0
OnlineBackup 0
DeviceProtection 0
TechSupport 0
StreamingTV 0
StreamingMovies 0
Contract 0
PaperlessBilling 0
PaymentMethod 0
MonthlyCharges 0
UpdatedAt 0
dtype: int64
###Markdown
After further analysis, it turns out that there were still missing values in the data whose `customerID` I had validated. Missing values were found in the `Churn`, `tenure`, `MonthlyCharges`, and `TotalCharges` columns. After handling them by deleting rows and filling in specific values, there are no more missing values in the data, as shown by the missing-value count of 0 for every variable. **Boxplot: detect outliers** One way to detect outliers is to look at a boxplot of the data: a graphical summary of the sample distribution that shows the shape of the distribution (skewness), a measure of central tendency, and a measure of spread. I use the matplotlib and seaborn packages to draw the boxplots, and the `describe()` function to view summary statistics.
###Code
print('Distribution of data before outliers are handled')
print(df_load[['tenure','MonthlyCharges','TotalCharges']].describe())
# Creating Box Plot
import matplotlib.pyplot as plt
import seaborn as sns
# Insert variable
plt.figure() # make a new figure
sns.boxplot(x=df_load['tenure'])
plt.show()
plt.figure() # make a new figure
sns.boxplot(x=df_load['MonthlyCharges'])
plt.show()
plt.figure() # make a new figure
sns.boxplot(x=df_load['TotalCharges'])
plt.show()
###Output
Distribution of data before outliers are handled
tenure MonthlyCharges TotalCharges
count 6950.000000 6950.000000 6950.000000
mean 32.477266 65.783741 2305.083460
std 25.188910 50.457871 2578.651143
min 0.000000 0.000000 19.000000
25% 9.000000 36.462500 406.975000
50% 29.000000 70.450000 1400.850000
75% 55.000000 89.850000 3799.837500
max 500.000000 2311.000000 80000.000000
###Markdown
**Handle outliers** After identifying which variables have outliers, I handle them using the interquartile range (IQR) method: values outside Q1 - 1.5*IQR and Q3 + 1.5*IQR are capped at those bounds. I use the output of the previous step as the input here, the `quantile()` function to compute the quartiles, and `mask()` to replace the offending values.
###Code
# Handle with IQR
Q1 = (df_load[['tenure','MonthlyCharges','TotalCharges']]).quantile(0.25)
Q3 = (df_load[['tenure','MonthlyCharges','TotalCharges']]).quantile(0.75)
IQR = Q3 - Q1
maximum = Q3 + (1.5*IQR)
print('The maximum value of each variable')
print(maximum)
minimum = Q1 - (1.5*IQR)
print('\nThe minimum value of each variable')
print(minimum)
more_than = (df_load > maximum)
lower_than = (df_load < minimum)
df_load = df_load.mask(more_than, maximum, axis=1)
df_load = df_load.mask(lower_than, minimum, axis=1)
print('\nDistribution of data after outliers handled')
print(df_load[['tenure','MonthlyCharges','TotalCharges']].describe())
###Output
The maximum value of each variable
tenure 124.00000
MonthlyCharges 169.93125
TotalCharges 8889.13125
dtype: float64
The minimum value of each variable
tenure -60.00000
MonthlyCharges -43.61875
TotalCharges -4682.31875
dtype: float64
Distribution of data after outliers handled
tenure MonthlyCharges TotalCharges
count 6950.000000 6950.000000 6950.000000
mean 32.423165 64.992201 2286.058750
std 24.581073 30.032040 2265.702553
min 0.000000 0.000000 19.000000
25% 9.000000 36.462500 406.975000
50% 29.000000 70.450000 1400.850000
75% 55.000000 89.850000 3799.837500
max 124.000000 169.931250 8889.131250
###Markdown
The three boxplots of `tenure`, `MonthlyCharges`, and `TotalCharges` clearly indicate the presence of outliers, visible as points lying far outside the boxes; the `max` row of the summary also shows some very high maximum values. The outliers are then handled by capping them at the upper and lower bounds derived from the interquartile range (IQR). After handling them, the summary of the data shows no more outlier values. **Detect non-standard values** Next, detect whether there are non-standard values in the categorical variables. These usually arise from data-entry errors, for example different terms used for the same meaning, so the inputted data needs to be standardized. I use the `value_counts()` function to see the counts of unique values per variable.
###Code
# Input variables
for col_name in list(['gender','SeniorCitizen','Partner','Dependents','PhoneService','MultipleLines','InternetService','OnlineSecurity','OnlineBackup','DeviceProtection','TechSupport','StreamingTV','StreamingMovies','Contract','PaperlessBilling','PaymentMethod','Churn']):
print('\nUnique Values Count \033[1m' + 'Before Standardized \033[0m Variable',col_name)
print(df_load[col_name].value_counts())
###Output
Unique Values Count [1mBefore Standardized [0m Variable gender
Male 3499
Female 3431
Wanita 14
Laki-Laki 6
Name: gender, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable SeniorCitizen
0 5822
1 1128
Name: SeniorCitizen, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable Partner
No 3591
Yes 3359
Name: Partner, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable Dependents
No 4870
Yes 2060
Iya 20
Name: Dependents, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable PhoneService
Yes 6281
No 669
Name: PhoneService, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable MultipleLines
No 3346
Yes 2935
No phone service 669
Name: MultipleLines, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable InternetService
Fiber optic 3057
DSL 2388
No 1505
Name: InternetService, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable OnlineSecurity
No 3454
Yes 1991
No internet service 1505
Name: OnlineSecurity, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable OnlineBackup
No 3045
Yes 2400
No internet service 1505
Name: OnlineBackup, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable DeviceProtection
No 3054
Yes 2391
No internet service 1505
Name: DeviceProtection, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable TechSupport
No 3431
Yes 2014
No internet service 1505
Name: TechSupport, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable StreamingTV
No 2774
Yes 2671
No internet service 1505
Name: StreamingTV, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable StreamingMovies
No 2747
Yes 2698
No internet service 1505
Name: StreamingMovies, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable Contract
Month-to-month 3823
Two year 1670
One year 1457
Name: Contract, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable PaperlessBilling
Yes 4114
No 2836
Name: PaperlessBilling, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable PaymentMethod
Electronic check 2337
Mailed check 1594
Bank transfer (automatic) 1519
Credit card (automatic) 1500
Name: PaymentMethod, dtype: int64
Unique Values Count [1mBefore Standardized [0m Variable Churn
No 5114
Yes 1827
Churn 9
Name: Churn, dtype: int64
###Markdown
Standardize categorical variables. After identifying which variables contain non-standard values, I standardize them to the most frequent spelling, provided the meaning does not change (for example, as in the code below, Iya -> Yes and Wanita -> Female). Then I check the unique values of each variable that was changed. I use the `replace()` function to standardize the values.
###Code
df_load = df_load.replace(['Wanita','Laki-Laki','Churn','Iya'],['Female','Male','Yes','Yes'])
# Input variable
for col_name in list(['gender','Dependents','Churn']):
print('\nUnique Values Count \033[1m' + 'After Standardized \033[0mVariable',col_name)
print(df_load[col_name].value_counts())
###Output
Unique Values Count [1mAfter Standardized [0mVariable gender
Male 3505
Female 3445
Name: gender, dtype: int64
Unique Values Count [1mAfter Standardized [0mVariable Dependents
No 4870
Yes 2080
Name: Dependents, dtype: int64
Unique Values Count [1mAfter Standardized [0mVariable Churn
No 5114
Yes 1836
Name: Churn, dtype: int64
|
notebooks/01_basic_training.ipynb | ###Markdown
This notebook was put together by [Jake Vanderplas](http://www.vanderplas.com) for UW's [Astro 599](http://www.astro.washington.edu/users/vanderplas/Astr599_2014/) course. Source and licensing info is on [GitHub](https://github.com/jakevdp/2014_fall_ASTR599/).
###Code
%run talktools.py
###Output
_____no_output_____
###Markdown
Basic Training=============Much of this material thanks to http://www.pythonbootcamp.info/ Outline- Python is easy! Hello world revisited- Calculator / basic math- Strings- Variables- Basic control statements (indentation!) Hello World!We saw before how easy a hello world script is to create in Python:
###Code
print("Hello World!")
###Output
Hello World!
###Markdown
Some other languages...[http://www.roesler-ac.de/wolfram/hello.htm](http://www.roesler-ac.de/wolfram/hello.htm) Pythonfile: ``hello.py`````print "Hello World!"``````[~]> python hello.py``` Javafile: ``hello.java`````class HelloWorld { static public void main( String args[] ) { System.out.println( "Hello World!" ); }}``````[~]> javac hello.java[~]> java HelloWorldHello World!``` C++file: ``hello.cpp`````include int main(){ std::cout << "Hello World!" << std::endl;}``````[~]> g++ -o hello hello.cpp[~]> ./helloHello World!``` Fortranfile: ``hello.f````` PROGRAM HELLO WRITE (*,100) STOP100 FORMAT (' Hello World! ' /) END``````[~]> g77 -o hello hello.f[~]> ./helloHello World!``` Two Points- Python provides interaction in both development and execution- Executing interactively is basically the same thing as executing a script, line-by-line Types and Operations: Python as a Calculator We'll talk about a few types here:- ``int``: integer- ``float``: floating point (decimal)- ``long``: long integer- ``complex``: complex number (decimal, not integer) We'll also introduce the basic arithmetic operations- ``+`` : addition- ``-``: subtraction- ``/``: division- ``*``: multiplication- ``%``: modulus (remainder)- ``**``: exponentiationAs we go through this, note carefully how these operations interact with various types
###Code
print(2 + 2)
2 + 2
print(2.1 + 2)
###Output
4.1
###Markdown
Careful: floats are limited by their 16-bit representation (same as in other languages)
###Code
print(4.0999999999999995)
2.1 + 2 == 4.0999999999999995
4 * 2
4 / 2
5 / 2 # Note this is different in Python 2.x!!
5 // 2
###Output
_____no_output_____
###Markdown
Division of two integers returns an integer (floor division) in Python 2.x but a float in Python 3.x; use `//` when you explicitly want floor division.
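(A related convenience worth a one-line aside: `divmod` returns the floor quotient and the remainder together, pairing naturally with the `//` and `%` operators used here.)

```python
print(divmod(7, 2))   # (3, 1): floor quotient and remainder in one call
print(7 // 2, 7 % 2)  # 3 1
```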
###Code
5 % 2 # modulus (remainder after division)
5 ** 2
# or you can use the pow() function
pow(5, 2)
###Output
_____no_output_____
###Markdown
Indentation Matters!
###Code
print(2 + 2)
3 + 3
###Output
_____no_output_____
###Markdown
Use `#` for comments - everything after the `#` on a line will be ignored
###Code
print(1 + 1) # easy arithmetic
###Output
2
###Markdown
Complex types
###Code
complex(1,2)
1+2j
1 + 2j - 2j
(3.0*10.0 - 25.0)/5.0
print(3.085e18 * 1e6) # this is a Megaparsec in units of cm!
###Output
3.085e+24
###Markdown
Assigning variables
###Code
t = 1.0 # declare a variable t (time)
accel = 9.8 # acceleration in units of m/s^2
# distance travelled in time t seconds is 1/2 a*t**2
dist = 0.5 * accel * t * t
print(dist) # this is the distance in meters
dist1 = accel * (t**2)/2
print(dist1)
dist2 = 0.5 * accel * pow(t,2)
print(dist2)
###Output
4.9
###Markdown
A nice trick that other languages can't do:
###Code
x, y = 4, 50
print(x)
print(y)
x, y = y, x # easy swap!
print(x)
print(y)
###Output
50
4
###Markdown
Each operator has an operate-and-assign version
###Code
x = 4
x += 8 # same as x = x + 8
print(x)
x *= 0.2 # x is upgraded to a float!
print(x)
x %= 1
print(x)
###Output
0.40000000000000036
###Markdown
Bitwise OperatorsYou might also come across bitwise operators:- ``&``: bitwise and- ``|``: bitwise or- ``^``: bitwise xor- ``<<``: bit-shift left- ``>>``: bit-shift rightAll these make more sense if you think about binary representations:
###Code
bin(14) # print binary representation
bin(13)
14 & 13
bin(14 & 13)
14 | 13
bin(14 | 13)
###Output
_____no_output_____
###Markdown
Comparison operators- ``==``, ``!=``: equal, not equal- ``<``, ``<=``: less than, less than or equal- ``>``, ``>=``: greater than, greater than or equal
###Code
2 < 4
3 >= 3
5 == 4
5 != 4
5 < 2 + 4j
###Output
_____no_output_____
###Markdown
Comparisons can also be strung together and behave as expected:
###Code
x = 4
y = 6
print(2 < x <= 4)
print(2 < y <= 4)
# This allows strange/confusion expressions
# don't do things like this!
8 > x <= 5
###Output
_____no_output_____
###Markdown
Warning about Floating Point EqualityPrecision issues can lead to seemingly strange results (again, this is the same in any modern programming language)
###Code
0.1 + 0.2 == 0.3
# this is a string formatting command (we'll cover this later)
# it says to print 20 places after the decimal
print("{0:.20f}".format(0.1 + 0.2))
print("{0:.20f}".format(0.3))
###Output
0.30000000000000004441
0.29999999999999998890
###Markdown
Moral of the story: in any language you use, be careful using equality comparisons on floating point! Boolean variables and logical operationsPython has two built-in boolean values, ``True`` and ``False`` which we've seen above.There are also built-in logical operations to test these- ``A or B`` : ``True`` if either ``A`` or ``B`` or both are ``True``- ``A and B`` : ``True`` only if both ``A`` and ``B`` are ``True``- ``not A``: ``True`` only if ``A`` is ``False``
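(Returning briefly to the floating-point moral above: when you do need to compare floats, compare within a tolerance rather than with `==`; in Python 3.5+ the standard library provides `math.isclose` for exactly this.)

```python
import math
print(math.isclose(0.1 + 0.2, 0.3))    # True: equal within a relative tolerance
print(abs((0.1 + 0.2) - 0.3) < 1e-9)   # the manual version of the same idea
```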
###Code
x = 4
(x > 2) and (x < 10)
(x <= 4) or not (x > 10)
###Output
_____no_output_____
###Markdown
Built-in types can be coerced to booleans.
###Code
0 == False
not False
not 0
not -1
###Output
_____no_output_____
###Markdown
zero is evaluated to ``False``, every other number to ``True``
###Code
print(None) # None is a special object
print(None == True)
print(None == False)
print(None == None)
print(bool(None))
###Output
False
False
True
False
###Markdown
Special comparators: ``is`` and ``is not``
###Code
x = 1
y = 1
x is y
x = 1111
y = 1111
print(x is y)
###Output
False
###Markdown
Takeaway: "``is``" and "``is not``" refer to the *memory* being used by the object.If ``x`` and ``y`` point to the same location in memory, then ``x is y`` will be True.Probably their most common use is in comparisons to ``None``. All variables equal to ``None``are guaranteed to point to the same memory location (i.e. it acts as a Singleton).
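(A small, CPython-specific illustration of the point: `id()` exposes the object identity that `is` compares, small integers happen to be cached by the interpreter, and `None` really is a single shared object.)

```python
print(id(1) == id(1))          # True: CPython caches small integers (roughly -5..256)
x = None
y = None
print(x is y, id(x) == id(y))  # True True: there is exactly one None object
```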
###Code
x = None
print(x is None)
###Output
True
###Markdown
You don't need to fully understand this, but just be aware that the ``is`` and ``is not`` operators should generally not be used unless you do! More on Variables & Types
###Code
print(type(1))
x = 2
print(type(x))
type(2) == type(1)
print(type(True))
print(type(None))
print(type(type(1)))
print(type(pow))
###Output
<class 'builtin_function_or_method'>
###Markdown
we can test whether something is a certain type with `isinstance()`
###Code
print(isinstance(1, int))
print(isinstance("spam", str))
print(isinstance(1.212, int))
###Output
True
True
False
###Markdown
built-in types: `int`, `bool`, `str`, `float`, `complex` (plus `long`, which exists only in Python 2)... Strings
###Code
x = "spam"
type(x)
print("hello!\n...my sire.")
"hello!\n...my sire."
"wah?!" == 'wah?!'
print("'wah?!' said the student")
print("\"wah?!\" said the student")
###Output
"wah?!" said the student
###Markdown
backslashes (`\`) start special (escape) characters: \n = newline (\r = carriage return), \t = tab, \a = bell. String literals are defined with double quotes or single quotes; the outermost quote type cannot be used inside the string (unless it's escaped with a backslash)
###Code
# raw strings (marked with r) don't escape characters
print(r'This is a raw string...newlines \r\n are ignored.')
# Triple quotes are real useful for multiple line strings
y = """For score and seven minutes ago,
you folks all learned some basic mathy stuff with Python
and boy were you blown away!"""
print(y)
###Output
For score and seven minutes ago,
you folks all learned some basic mathy stuff with Python
and boy were you blown away!
###Markdown
* prepending `r` makes that string "raw"* triple quotes allow you to compose long strings* prepending `u` makes that string "unicode"http://docs.python.org/reference/lexical_analysis.htmlstring-literals Arithmetic with Strings
###Code
s = "spam"
e = "eggs"
print(s + e)
print(s + " and " + e)
print("green " + e + " and\n " + s)
print(3*s + e)
print("*" * 50)
print("spam" == "good")
print("spam" == "spam")
"spam" < "zoo"
"s" < "spam"
###Output
_____no_output_____
###Markdown
* you can concatenate strings with the `+` sign* you can repeat a string with the `*` sign* strings can be compared But you can't add strings and integers:
###Code
print('I want' + 3 + ' eggs and no ' + s)
print('I want ' + str(3) + ' eggs and no ' + s)
pi = 3.14159
print('I want ' + str(pi) + ' eggs and no ' + s)
print(str(True) + ":" + ' I want ' + str(pi) + ' eggs and no ' + s)
###Output
True: I want 3.14159 eggs and no spam
###Markdown
you must concatenate only strings, coercing ("casting") other variable types to `str` Getting input from the user
###Code
# Note that raw_input does not work in IPython notebook version < 1.0
# You can always do this from a file or from the command line, though
faren = input("enter a temperature (in Fahrenheit): ")
print(faren)
###Output
enter a temperature (in Fahrenheit): 280
280
###Markdown
(Note that in Python 2.x you should use ``raw_input`` rather than ``input``.) Remember that the input comes as a string:
###Code
cent = (faren - 32) / 1.8
cent = (float(faren) - 32) / 1.8
print(cent)
# Or in one line:
faren = float(input("enter a temperature (in Fahrenheit): "))
(faren - 32) / 1.8
###Output
enter a temperature (in Fahrenheit): 232
###Markdown
Strings as arrays We can think of strings as arrays (although, unlike in C, you never really need to deal with directly addressing character locations in memory)
###Code
s = 'spam'
len(s)
len("eggs\n")
len("")
###Output
_____no_output_____
###Markdown
Indexing in Python is zero-based
###Code
print(s[0])
print(s[-1])
s[0:1]
s[1:4]
s[-2:]
s[0:100] # if the slice goes past the end, no complaints!
s[0:4:2]
s[::2]
s[::-1]
###Output
_____no_output_____
###Markdown
* `len()` gives us the length of an array * strings are zero indexed * we can also count backwards Flow control: conditionals and loops
###Code
x = 1
if x > 0:
print("yo")
else:
print("dude")
###Output
yo
###Markdown
One liners
###Code
"yo" if x > 0 else "dude"
x = 1
y = 0
while y < 10:
print("yo" if x > 0 else "dude")
x *= -1
y += 1
# Could also do this with a break statement
x = 1
y = 0
while True:
print("yo" if x > 0 else "dude")
x *= -1
y += 1
if y >= 10:
break
###Output
yo
dude
yo
dude
yo
dude
yo
dude
yo
dude
###Markdown
case statements can be constructed with just a bunch of `if`, `elif`,...`else`
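(An editorial aside: this version of Python has no switch statement, and when the branches simply map specific values to results, a dictionary lookup is a common alternative to the `if`/`elif` chain shown in the next cell.)

```python
responses = {42: "bingo"}
x = 42
print(responses.get(x, "dude"))   # the .get default plays the role of the final else
```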
###Code
if x < 1:
print("t")
elif x > 100:
print("yo")
elif x == 42:
print("bingo")
else:
print("dude")
###Output
dude
###Markdown
Note: ordering matters. The first `if`/`elif` branch whose condition is `True` gets executed; the remaining branches are skipped. Blocks cannot be empty!
###Code
x = "fried goldfish"
if x == "spam for dinner":
print("I will destroy the universe")
else:
# I'm fine with that. I'll do nothing
x = "fried goldfish"
if x == "spam for dinner":
print("I will destroy the universe")
else:
# I'm fine with that. I'll do nothing
pass
###Output
_____no_output_____
###Markdown
`pass` is a "do nothing"/NOP statement Example: putting it all together
###Code
%%file number_game.py
# The above "magic" command saves the contents
# of the current cell to file. We'll see more of these later
x = 0
max_tries = 10
count = 0
while True:
x_new = int(input("Enter a new number: "))
if x_new > x:
print(" -> it's bigger than the last!")
elif x_new < x:
print(" -> it's smaller than the last!")
else:
print(" -> no change! I'll exit now")
break
x = x_new
count += 1
if count > max_tries:
print("too many tries...")
break
%run number_game.py
# this magic command runs the given file
###Output
Enter a new number: 200
-> it's bigger than the last!
Enter a new number: 200
-> no change! I'll exit now
|
1.Relational.Databases/postgreSQL_RelationalDataModel.ipynb | ###Markdown
POSTGRESQL - DATA MODEL First exercise of the PostgreSQL topic. During this exercise we will create a normalized data model in a PostgreSQL database, following the Udacity Data Engineer course. Resources used:
- PostgreSQL database: https://www.postgresql.org/
- pgAdmin, a PostgreSQL GUI: https://www.pgadmin.org/
- the Python psycopg2 library: https://pypi.org/project/psycopg2/
###Code
# Installing library in Python console --> pip install psycopg2
import psycopg2
try:
conn = psycopg2.connect(
host="localhost",
user="dantencv",
database="postgres",
password="20Masa20"
)
except psycopg2.Error as e:
print("Error could not make connection to postgres database")
print(e)
try:
cur= conn.cursor()
except psycopg2.Error as e:
print("Error could not get curser to postgres database")
print(e)
conn.set_session(autocommit=True)
###Output
_____no_output_____
###Markdown
The following code creates a new table in the database/schema postgres.public:
###Code
try:
cur.execute("CREATE TABLE IF NOT EXISTS music_library (album_id int, \
albun_name varchar, artist_name varchar,\
year int, songs text[]);")
except psycopg2.Error as e:
print("Error Issue creating a table")
print(e)
try:
cur.execute("INSERT INTO music_library (album_id , \
albun_name , artist_name ,\
year , songs) VALUES (%s,%s,%s,%s,%s)",
(1, "Rubber Soul", "The Beatles", 1965, ["Michelle", "Think Yourself", "In my life"]))
except psycopg2.Error as e:
print("Error Inserting rows")
print(e)
try:
cur.execute("INSERT INTO music_library (album_id , \
albun_name , artist_name ,\
year , songs) VALUES (%s,%s,%s,%s,%s)",
(2, "Let it be", "The Beatles", 1970, ["Let it be", "Across the universe"]))
except psycopg2.Error as e:
print("Error Inserting rows")
print(e)
try:
cur.execute("SELECT * FROM music_library;")
except psycopg2.Error as e:
print("Error reading rows")
print(e)
row = cur. fetchone()
while row:
print(row)
row=cur.fetchone()
###Output
(1, 'Rubber Soul', 'The Beatles', 1965, ['Michelle', 'Think Yourself', 'In my life'])
(2, 'Let it be', 'The Beatles', 1970, ['Let it be', 'Across the universe'])
###Markdown
Moving to 1st Normal Form (1NF). The data is not normalized: first we need to remove any collections or lists of data (the songs column) by breaking the list of songs up into individual rows.
###Code
try:
cur.execute("CREATE TABLE IF NOT EXISTS music_library2 (album_id int, \
albun_name varchar, artist_name varchar,\
year int, songs varchar);")
except psycopg2.Error as e:
print("Error Issue creating a table")
print(e)
# 1st Album
try:
cur.execute("INSERT INTO music_library2 (album_id , \
albun_name , artist_name ,\
year , songs) VALUES (%s,%s,%s,%s,%s)",
(1, "Rubber Soul", "The Beatles", 1965, "Michelle"))
cur.execute("INSERT INTO music_library2 (album_id , \
albun_name , artist_name ,\
year , songs) VALUES (%s,%s,%s,%s,%s)",
(1, "Rubber Soul", "The Beatles", 1965, "Think Yourself"))
cur.execute("INSERT INTO music_library2 (album_id , \
albun_name , artist_name ,\
year , songs) VALUES (%s,%s,%s,%s,%s)",
(1, "Rubber Soul", "The Beatles", 1965, "In my life"))
except psycopg2.Error as e:
print("Error Inserting rows")
print(e)
# 2nd Album
try:
cur.execute("INSERT INTO music_library2 (album_id , \
albun_name , artist_name ,\
year , songs) VALUES (%s,%s,%s,%s,%s)",
(2, "Let it be", "The Beatles", 1970, "Let it be"))
cur.execute("INSERT INTO music_library2 (album_id , \
albun_name , artist_name ,\
year , songs) VALUES (%s,%s,%s,%s,%s)",
(2, "Let it be", "The Beatles", 1970, "Across the universe"))
except psycopg2.Error as e:
print("Error Inserting rows")
print(e)
try:
cur.execute("SELECT * FROM music_library2;")
except psycopg2.Error as e:
print("Error reading rows")
print(e)
row = cur.fetchone()
while row:
print(row)
row=cur.fetchone()
###Output
(1, 'Rubber Soul', 'The Beatles', 1965, 'Michelle')
(1, 'Rubber Soul', 'The Beatles', 1965, 'Think Yourself')
(1, 'Rubber Soul', 'The Beatles', 1965, 'In my life')
(2, 'Let it be', 'The Beatles', 1970, 'Let it be')
(2, 'Let it be', 'The Beatles', 1970, 'Across the universe')
###Markdown
Moving to 2nd Normal Form (2NF) We have moved our data to 1NF. While records are unique in 1NF, our primary key (album_id) is not unique. We need to break the table up into two tables: album_library and song_library.
###Code
try:
cur.execute("CREATE TABLE IF NOT EXISTS album_library (album_id int, \
albun_name varchar, artist_name varchar,\
year int);")
except psycopg2.Error as e:
print("Error Issue creating a table")
print(e)
# 1st Album
try:
cur.execute("INSERT INTO album_library (album_id , \
albun_name , artist_name ,\
year ) VALUES (%s,%s,%s,%s)",
(1, "Rubber Soul", "The Beatles", 1965))
except psycopg2.Error as e:
print("Error Inserting rows")
print(e)
# 2nd Album
try:
cur.execute("INSERT INTO album_library (album_id , \
albun_name , artist_name ,\
year ) VALUES (%s,%s,%s,%s)",
(2, "Let it be", "The Beatles", 1970))
except psycopg2.Error as e:
print("Error Inserting rows")
print(e)
try:
cur.execute("CREATE TABLE IF NOT EXISTS song_library (album_id int, \
song varchar);")
except psycopg2.Error as e:
print("Error Issue creating a table")
print(e)
try:
cur.execute("INSERT INTO song_library (album_id , \
song) VALUES (%s,%s)",
(1, "Michelle"))
cur.execute("INSERT INTO song_library (album_id , \
song) VALUES (%s,%s)",
(1,"Think Yourself"))
cur.execute("INSERT INTO song_library (album_id , \
song) VALUES (%s,%s)",
(1,"In my life"))
except psycopg2.Error as e:
print("Error Inserting rows")
print(e)
# 2nd Album
try:
cur.execute("INSERT INTO song_library (album_id , \
song) VALUES (%s,%s)",
(2,"Let it be"))
cur.execute("INSERT INTO song_library (album_id , \
song) VALUES (%s,%s)",
(2,"Across the universe"))
except psycopg2.Error as e:
print("Error Inserting rows")
print(e)
try:
cur.execute("SELECT * FROM album_library as a join song_library as s on s.album_id=a.album_id;")
except psycopg2.Error as e:
print("Error reading rows")
print(e)
row = cur.fetchone()
while row:
print(row)
row=cur.fetchone()
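# (Extra illustration, not in the original notebook.) With the 2NF schema we can
# aggregate songs per album in SQL instead of handling arrays in Python.
try:
    cur.execute("SELECT a.albun_name, COUNT(s.song) AS n_songs \
                 FROM album_library a JOIN song_library s ON s.album_id = a.album_id \
                 GROUP BY a.albun_name;")
    for row in cur.fetchall():
        print(row)
except psycopg2.Error as e:
    print("Error reading rows")
    print(e)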
###Output
(1, 'Rubber Soul', 'The Beatles', 1965, 1, 'Michelle')
(1, 'Rubber Soul', 'The Beatles', 1965, 1, 'Think Yourself')
(1, 'Rubber Soul', 'The Beatles', 1965, 1, 'In my life')
(2, 'Let it be', 'The Beatles', 1970, 2, 'Let it be')
(2, 'Let it be', 'The Beatles', 1970, 2, 'Across the universe')
###Markdown
Moving to 3rd Normal Form (3NF) Check for transitive dependencies between fields. album_library can move artist_name into its own table, called artist, which will leave us with three tables.
###Code
##TBD
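# (Hedged sketch for the TBD step above, not in the original notebook.)
# Split the artist out of album_library so artist_name is stored once and is
# referenced by an artist_id. Assumes the cursor `cur` created at the top.
try:
    cur.execute("CREATE TABLE IF NOT EXISTS artist_library (artist_id int, artist_name varchar);")
    cur.execute("CREATE TABLE IF NOT EXISTS album_library2 (album_id int, albun_name varchar, artist_id int, year int);")
except psycopg2.Error as e:
    print("Error Issue creating a table")
    print(e)

try:
    cur.execute("INSERT INTO artist_library (artist_id, artist_name) VALUES (%s, %s)", (1, "The Beatles"))
    cur.execute("INSERT INTO album_library2 (album_id, albun_name, artist_id, year) VALUES (%s, %s, %s, %s)",
                (1, "Rubber Soul", 1, 1965))
    cur.execute("INSERT INTO album_library2 (album_id, albun_name, artist_id, year) VALUES (%s, %s, %s, %s)",
                (2, "Let it be", 1, 1970))
except psycopg2.Error as e:
    print("Error Inserting rows")
    print(e)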
###Output
_____no_output_____
###Markdown
Dropping all tables
###Code
cur.execute("DROP table music_library")
cur.execute("DROP table music_library2")
cur.execute("DROP table song_library")
cur.execute("DROP table album_library")
cur.close()
conn.close()
###Output
_____no_output_____ |
15-Decision-Trees-and-Random-Forests/01-Decision Trees and Random Forests in Python.ipynb | ###Markdown
___ ___ Decision Trees and Random Forests in Python This is the code for the lecture video which goes over tree methods in Python. Reference the video lecture for the full explanation of the code! I also wrote a [blog post](https://medium.com/@josemarcialportilla/enchanted-random-forest-b08d418cb411.hh7n1co54) explaining the general logic of decision trees and random forests which you can check out. Import Libraries
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
###Output
_____no_output_____
###Markdown
Get the Data
###Code
df = pd.read_csv('kyphosis.csv')
df.head()
###Output
_____no_output_____
###Markdown
EDAWe'll just check out a simple pairplot for this small dataset.
###Code
sns.pairplot(df,hue='Kyphosis',palette='Set1')
###Output
_____no_output_____
###Markdown
Train Test SplitLet's split up the data into a training set and a test set!
###Code
from sklearn.model_selection import train_test_split
X = df.drop('Kyphosis',axis=1)
y = df['Kyphosis']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
###Output
_____no_output_____
###Markdown
Decision TreesWe'll start just by training a single decision tree.
###Code
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier()
dtree.fit(X_train,y_train)
###Output
_____no_output_____
###Markdown
Prediction and Evaluation Let's evaluate our decision tree.
###Code
predictions = dtree.predict(X_test)
from sklearn.metrics import classification_report,confusion_matrix
print(classification_report(y_test,predictions))
print(confusion_matrix(y_test,predictions))
###Output
[[17 3]
[ 3 2]]
###Markdown
Tree VisualizationScikit learn actually has some built-in visualization capabilities for decision trees, you won't use this often and it requires you to install the pydot library, but here is an example of what it looks like and the code to execute this:
###Code
from IPython.display import Image
from sklearn.externals.six import StringIO
from sklearn.tree import export_graphviz
import pydot
features = list(df.columns[1:])
features
dot_data = StringIO()
export_graphviz(dtree, out_file=dot_data,feature_names=features,filled=True,rounded=True)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
Image(graph[0].create_png())
###Output
_____no_output_____
###Markdown
Random ForestsNow let's compare the decision tree model to a random forest.
###Code
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=100)
rfc.fit(X_train, y_train)
rfc_pred = rfc.predict(X_test)
print(confusion_matrix(y_test,rfc_pred))
print(classification_report(y_test,rfc_pred))
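# (Optional extra, not part of the original lecture.) Inspect which features the
# forest relied on most; assumes X (the feature DataFrame) from the split above.
feat_imp = pd.Series(rfc.feature_importances_, index=X.columns).sort_values(ascending=False)
print(feat_imp)
feat_imp.plot(kind='bar')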
###Output
precision recall f1-score support
absent 0.86 0.90 0.88 20
present 0.50 0.40 0.44 5
avg / total 0.79 0.80 0.79 25
|
ARIMAX-Addition of a new variable + SARIMAX.ipynb | ###Markdown
Kwiatkowski-Phillips-Schmidt-Shin (KPSS) Test The Kwiatkowski–Phillips–Schmidt–Shin (KPSS) test checks whether a time series is stationary around a mean or a linear trend, or is non-stationary due to a unit root. A stationary time series is one whose statistical properties (such as the mean and variance) are constant over time. For the KPSS test, the null hypothesis is that the series is stationary (we fail to reject it when the p-value > 0.05); the alternative hypothesis is that the series is not stationary (we reject the null when the p-value <= 0.05).
###Code
#loading kpss from statsmodel
from statsmodels.tsa.stattools import kpss
result = kpss(data['meantemp'])
print(f'KPSS Statistic: {result[0]}')
print(f'p-value: {result[1]}')
print(f'num lags: {result[2]}')
print('Critical Values:')
for key, value in result[3].items():
    print(f'   {key}: {value}')
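# (Optional helper, not in the original notebook.) Wrap the KPSS test in a small
# function so the stationarity verdict is explicit; assumes the usual 0.05 level.
def kpss_is_stationary(series, alpha=0.05):
    """True if KPSS fails to reject the null of stationarity at the given level."""
    stat, p_value, _, _ = kpss(series)
    return p_value > alpha

print('Is meantemp stationary (KPSS)?', kpss_is_stationary(data['meantemp']))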
#Loading and plotting acf
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(data['meantemp'], ax=plt.gca(), lags=10)
plt.show()
###Output
_____no_output_____
###Markdown
Partial autocorrelation function
###Code
#Loading and plottin pacf
from statsmodels.graphics.tsaplots import plot_pacf
plot_pacf(data['meantemp'], ax=plt.gca(), lags=30)
plt.show()
length_train = 1046
train = data.iloc[:length_train, :]
test = data.iloc[length_train:, :]
train.head()
###Output
_____no_output_____
###Markdown
ARIMAX model
###Code
from statsmodels.tsa.arima_model import ARIMA
model = ARIMA(train['meantemp'], order=(1,0,3), exog= train['humidity'])
model_fit = model.fit()
print(model_fit.params)
y_arimax = data.copy()
y_arimax['arimax_forecast'] = model_fit.predict(test['meantemp'].index.min(),
test['meantemp'].index.max(),
exog= test['humidity'])
plot_smoothing(title = 'Auto regressive Integrated Moving Average with external variable model',
data = y_arimax['arimax_forecast'][test['meantemp'].index.min():],
label_value = 'ARiMAX model')
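# (Optional, hedged addition, not in the original notebook.) Quantify the hold-out
# forecast error; assumes the forecast index aligns with the test index.
import numpy as np
from sklearn.metrics import mean_squared_error
rmse = np.sqrt(mean_squared_error(test['meantemp'],
                                  y_arimax.loc[test.index, 'arimax_forecast']))
print(f'ARIMAX (humidity) RMSE on the test set: {rmse:.2f}')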
from statsmodels.tsa.arima_model import ARIMA
model = ARIMA(train['meantemp'], order=(1,0,3), exog= train['wind_speed'])
model_fit = model.fit()
print(model_fit.params)
y_arimax = data.copy()
y_arimax['arimax_forecast'] = model_fit.predict(test['meantemp'].index.min(),
test['meantemp'].index.max(),
exog= test['wind_speed'])
plot_smoothing(title = 'Auto regressive Integrated Moving Average with external variable model',
data = y_arimax['arimax_forecast'][test['meantemp'].index.min():],
label_value = 'ARiMAX model')
from statsmodels.tsa.arima_model import ARIMA
model = ARIMA(train['meantemp'], order=(1,1,2), exog= train['meanpressure'])
model_fit = model.fit()
print(model_fit.params)
y_arimax = data.copy()
y_arimax['arimax_forecast'] = model_fit.predict(test['meantemp'].index.min(),
test['meantemp'].index.max(),
exog= test['meanpressure'])
plot_smoothing(title = 'Auto regressive Integrated Moving Average with external variable model',
data = y_arimax['arimax_forecast'][test['meantemp'].index.min():],
label_value = 'ARiMAX model')
###Output
C:\Users\parve\anaconda3\lib\site-packages\statsmodels\tsa\base\tsa_model.py:524: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
warnings.warn('No frequency information was'
C:\Users\parve\anaconda3\lib\site-packages\statsmodels\tsa\base\tsa_model.py:524: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
warnings.warn('No frequency information was'
###Markdown
SARIMAX
###Code
from statsmodels.tsa.statespace.sarimax import SARIMAX
model = SARIMAX(train['meantemp'], order=(7,0,3), seasonal_order=(2,1,1,12), exog =train['humidity'])
model_fit = model.fit()
print(model_fit.params)
y_sarimax = data.copy()
y_sarimax['sarimax_forecast'] = model_fit.predict(test['meantemp'].index.min(),
test['meantemp'].index.max(),
exog= test['humidity'])
plot_smoothing(title = 'Seasonal Auto regressive Integrated Moving Average with external variable model',
data = y_sarimax['sarimax_forecast'][test['meantemp'].index.min():],
label_value = 'SARIMAX model')
###Output
C:\Users\parve\anaconda3\lib\site-packages\statsmodels\tsa\base\tsa_model.py:524: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
warnings.warn('No frequency information was'
C:\Users\parve\anaconda3\lib\site-packages\statsmodels\tsa\base\tsa_model.py:524: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
warnings.warn('No frequency information was'
C:\Users\parve\anaconda3\lib\site-packages\statsmodels\base\model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
warnings.warn("Maximum Likelihood optimization failed to "
|
Daily/Linked List Question.ipynb | ###Markdown
Merge Linked Lists Given k sorted singly linked lists, write a function that merges all the lists into one sorted singly linked list.
###Code
# sort of a hack:
# gather all the values of the linked lists into one large array,
# sort that array,
# then recreate a single linked list from the sorted values
class Node:
    # minimal singly linked list node (assumed by the original snippet)
    def __init__(self, val, next=None):
        self.val = val
        self.next = next

def merge(lists):
    arr = []
    for head in lists:
        current = head
        while current:
            arr.append(current.val)
            current = current.next

    new_head = current = Node(-1) # dummy head
    for val in sorted(arr):
        current.next = Node(val)
        current = current.next
    return new_head.next
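# (Alternative sketch, not part of the original solution.) An O(N log k) merge that
# keeps a min-heap of the current head of each list instead of sorting everything.
# Assumes the same Node class (with .val and .next) defined above.
import heapq

def merge_heap(lists):
    heap = [(node.val, i, node) for i, node in enumerate(lists) if node]
    heapq.heapify(heap)
    new_head = current = Node(-1)  # dummy head
    while heap:
        val, i, node = heapq.heappop(heap)
        current.next = Node(val)
        current = current.next
        if node.next:
            heapq.heappush(heap, (node.next.val, i, node.next))
    return new_head.next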
###Output
_____no_output_____ |
pandas/Plotting and Visualization.ipynb | ###Markdown
Lecture 11 - Plotting and Visualization 1. A Brief matplotlib API Primer
###Code
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
1.1 Figures and Subplots
###Code
fig = plt.figure()
ax1 = plt.subplot(2, 2, 1)
ax2 = plt.subplot(2, 2, 2)
ax3 = plt.subplot(2, 2, 3)
from numpy.random import randn
plt.plot(randn(50).cumsum(), 'k--')
plt.show()
ax1 = plt.subplot(2, 2, 1)
ax2 = plt.subplot(2, 2, 2)
ax3 = plt.subplot(2, 2, 3)
from numpy.random import randn
ax2.plot(randn(50).cumsum(), 'k--')
_ = ax1.hist(randn(100), bins=20, color='k', alpha=0.3)
ax1 = plt.subplot(2, 2, 1)
ax2 = plt.subplot(2, 2, 2)
ax3 = plt.subplot(2, 2, 3)
import numpy as np
from numpy.random import randn
plt.plot(randn(50).cumsum(), 'k--')
ax2.scatter(np.arange(30), np.arange(30) + 3*randn(30))
fig, axes = plt.subplots(2, 3)
###Output
_____no_output_____
###Markdown
1.2 Color, Markers, and Line Styles
###Code
plt.plot(randn(30).cumsum(), 'r*-')
data = randn(30).cumsum()
plt.plot(data, 'r-', label='Default')
plt.plot(data, 'b.', drawstyle='steps-post', label='steps-post')
plt.legend(loc='best')
###Output
_____no_output_____
###Markdown
1.3 Ticks, Labels, and Legends
###Code
fig = plt.figure()
ax = plt.subplot(2, 1, 1)
plt.plot(randn(1000).cumsum())
ticks = ax.set_xticks([0, 250, 500, 750, 1000])
labels = ax.set_xticklabels(['one', 'two', 'three', 'four', 'five'],
rotation=30, fontsize='small')
ax.set_title('My first matplotlib plot')
ax.set_xlabel('Stages')
###Output
_____no_output_____
###Markdown
1.4 Annotations and Drawing on a Subplot
###Code
from datetime import datetime
ax = plt.subplot(1, 1, 1)
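# (Illustrative sketch, not in the original notes.) The lines above only create an
# empty subplot; below is one hedged example of annotating a plot with a labelled
# arrow and a shaded rectangle. Assumes np and plt are already imported in this notebook.
data = np.random.randn(100).cumsum()
ax.plot(data, 'k-')
peak = int(data.argmax())
ax.annotate('highest point', xy=(peak, data[peak]),
            xytext=(peak, data[peak] - 3),
            arrowprops=dict(facecolor='black', shrink=0.05))
rect = plt.Rectangle((20, data.min()), 15, 2, color='r', alpha=0.3)  # (x, y), width, height
ax.add_patch(rect)
ax.set_title('Example annotations and shapes')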
###Output
_____no_output_____
###Markdown
2. Plotting Functions in pandas 2.1 Line Plots
###Code
import numpy as np
from pandas import Series
s = Series(np.random.randn(10).cumsum(), index=np.arange(0, 100, 10))
s.plot()
from pandas import DataFrame
df = DataFrame(np.random.randn(10, 4).cumsum(0),
columns=['A', 'B', 'C', 'D'],
index=np.arange(0, 100, 10))
df.plot()
A = np.arange(10)
A
A.sum()
A.cumsum()
###Output
_____no_output_____
###Markdown
2.2 Bar Plot
###Code
fig, axes = plt.subplots(2, 1)
data = Series(np.random.rand(16), index=list('abcdefghijklmnop'))
print(data)
data.plot(kind='bar', ax=axes[0], color='k', alpha=0.7)
data.plot(kind='barh', ax=axes[1], color='k', alpha=0.7)
import pandas as pd
tips = pd.read_csv('tips.csv')
party_counts = pd.crosstab(tips.day, tips['size'])  # use ['size']: .size is a DataFrame attribute, not this column
party_counts
party_counts = party_counts.loc[:, 2:5]  # .ix is removed in modern pandas; label-based .loc keeps party sizes 2-5
party_pcts = party_counts.div(party_counts.sum(1).astype(float), axis=0)
party_pcts
party_pcts.plot(kind='bar', stacked=True)
###Output
_____no_output_____
###Markdown
2.3 Histograms and Density Plots
###Code
tips['tip_pct'] = tips['tip'] / tips['total_bill']
tips['tip_pct'].hist(bins=50)
tips['tip_pct'].plot(kind='kde')
comp1 = np.random.normal(0, 1, size=200)
comp2 = np.random.normal(10, 3, size=200)
values = Series(np.concatenate([comp1, comp2]))
values
values.hist(bins=100, alpha=0.3, color='k', density=True)  # 'normed' was renamed to 'density'
values.plot(kind='kde', style='k--')
###Output
_____no_output_____
###Markdown
2.4 Scatter Plots
###Code
macro = pd.read_csv('macrodata.csv')
data = macro[['cpi', 'm1', 'tbilrate', 'unemp']]
trans_data = np.log(data).diff().dropna()
trans_data[-5:]
A = np.arange(10)
A
A = [0, 1]
np.log(A)
plt.scatter(trans_data['m1'], trans_data['unemp'])
plt.title('Changes in log %s vs. log %s' % ('m1', 'unemp'))
pd.plotting.scatter_matrix(trans_data, diagonal='kde', color='r', alpha=0.3)  # pd.scatter_matrix moved to pd.plotting
###Output
_____no_output_____
###Markdown
3. Exercise
###Code
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import numpy as np
from scipy.stats import norm
T = 1.0
N = 252
dt = T / N
delta = 0.25
x = 0.0
pos = [x]
for k in range(N-1):
x = x + norm.rvs(scale=delta**2*dt)
pos.append(x)
t = np.linspace(0.0, N*dt, N)
# Plot the Time Series with Dates 2014-01-01 to 2014-12-31
# Use: pd.date_range(start='2014-01-01', periods=252, freq='D')
dates = pd.date_range(start='2014-01-01', periods=252, freq='D')
plt.plot(dates, pos)
!type rw.py
###Output
(np.abs(walk) >= 10).argmax()
|
2016/tutorial_final/188/R_ggplot.ipynb | ###Markdown
Introduction: R vs Python
Both Python and R are popular programming languages for statistics. While R’s functionality is developed with statisticians in mind (such as R's strong data visualization capabilities), Python is often praised for its easy-to-understand syntax.
The purpose of R was to develop a language that focused on delivering a better and more user-friendly way to do data analysis, statistics and graphical models. The main difference between R and Python is that you will find R only in a data science environment; as a general purpose language, Python, on the other hand, is widely used in many fields, such as web development.

When to use R?
R is mainly used when the data analysis task requires standalone computing or analysis on individual servers. It’s great for exploratory work, and it's handy for almost any type of data analysis because of the huge number of packages and readily usable tests that often provide you with the necessary tools to get up and running quickly. R can even be part of a big data solution.

When to use Python?
You can use Python when your data analysis tasks need to be integrated with web apps or if statistics code needs to be incorporated into a production database. Being a fully fledged programming language, it’s a great tool to implement algorithms for production use.
(Cited from http://www.kdnuggets.com/2015/05/r-vs-python-data-science.html)

Tutorial Content
- [R essentials](R-essentials)
- [Data visualization with R and ggplot 2](Data-visualization-with-R-and-ggplot-2)
- [Example Application: R and ggplot based on land-data](Example-Application:-R-and-ggplot-based-on-land-data)
  - [GGplot Structures and Syntax](GGplot-Structures-and-Syntax)
  - [Comparison of R essentials and ggplot on graphs generation](Comparison-of-R-essentials-and-ggplot-on-graphs-generation)
- [Example Application: Geo-spatial analysis based on R libraries](Example-Application:-Geo-spatial-analysis-based-on-R-libraries)

R essentials
“R Essentials” setup
The Anaconda team has created an “R Essentials” bundle with the IRKernel and over 80 of the most used R packages for data science, including dplyr, shiny, ggplot2, tidyr, caret and nnet.
Downloading “R Essentials” requires conda. Once you have conda, you may install “R Essentials” into the current environment:

    conda install -c r r-essentials

Then you could create a new environment just for “R essentials”:

    conda create -n my-r-env -c r r-essentials

Open a shell and run this command to start the Jupyter notebook interface in your browser:

    jupyter notebook

Start a new R notebook by selecting New -> R notebook at the right side of the interface. Check if the R essentials are installed correctly:
- Run the dplyr library.
- Run the native R dataset iris.
###Code
library(dplyr)
head(iris)
###Output
_____no_output_____
###Markdown
Data processing experiments with basic R Use group_by, summarise, arrange and mean to do some simple aggregation on the data in R.
###Code
iris %>%
group_by(Species) %>%
summarise(Sepal.Width.Avg = mean(Sepal.Width),Sepal.Length.Avg = mean(Sepal.Length)) %>%
arrange(Sepal.Width.Avg,Sepal.Length.Avg)
iris %>%
group_by(Species) %>%
summarise(Petal.Width.Max = max(Petal.Width),Petal.Length.Max = max(Petal.Length)) %>%
arrange(Petal.Width.Max,Petal.Length.Max)
###Output
_____no_output_____
###Markdown
Data visualization with R and ggplot 2
(figure: the graphic from the Economist)
Why use ggplot2?
- consistent underlying `grammar of graphics` (Wilkinson, 2005)
- plot specification at a high level of abstraction
- very flexible
- theme system for polishing plot appearance
- mature and complete graphics system
- many users, active mailing list

That said, there are some things you cannot (or should not) do with ggplot2:
- 3-dimensional graphics (see the rgl package)
- Graph-theory type graphs (nodes/edges layout; see the igraph package)
- Interactive graphics (see the ggvis package)

(Cited from: http://tutorials.iq.harvard.edu/R/Rgraphics/Rgraphics.html)
###Code
library(ggplot2)
###Output
_____no_output_____
###Markdown
Simply plot the Petal.Width vs. Petal.Length
###Code
ggplot(data=iris, aes(x=Petal.Length, y=Petal.Width, color=Species)) + geom_point(size=3)
###Output
_____no_output_____
###Markdown
Using the built-in diamonds dataset (shipped with ggplot2) to make some simple graphs
###Code
head(diamonds)
ggplot(data=diamonds, aes(x=carat, y=price)) + geom_point() +
ggtitle("Simple diamonds ggplot")
###Output
_____no_output_____
###Markdown
Color the points by a continuous variable: 'depth'
###Code
# color by a factor variable
ggplot(data = diamonds, aes(x = carat, y = price, colour = depth)) + geom_point()
###Output
_____no_output_____
###Markdown
Color the points by a factor variable: 'clarity'
###Code
ggplot(data = diamonds, aes(x = carat, y = price, colour = clarity)) + geom_point()
###Output
_____no_output_____
###Markdown
Example Application: R and ggplot based on land-data Start to use a more complex, real dataset (data from https://www.lincolninst.edu/subcenters/land-values/land-prices-by-state.asp)
###Code
housing <- read.csv("dataSets/landdata-states.csv")
head(housing)
###Output
_____no_output_____
###Markdown
Comparison of R essentials and ggplot on graphs generation
###Code
# Generate simple histogram by R
hist(housing$Structure.Cost)
#Generate simple histogram by ggplot2
library(ggplot2)
ggplot(housing, aes(x = Structure.Cost)) +
geom_histogram()
#Easily change the binwidth param
library(ggplot2)
ggplot(housing, aes(x = Structure.Cost) ) +
geom_histogram(binwidth=4000)
###Output
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
###Markdown
Compare base R and ggplot on a more complex graph with legends
###Code
# Generate annotated scatter points graph by R
plot(Structure.Cost ~ Date, col="blue",
data=subset(housing, State == "PA"))
points(Structure.Cost ~ Date, col="red",
data=subset(housing, State == "WI"))
legend(x = 'topleft',
c("WI", "PA"), title="State",
col=c("red", "blue"),
pch=c(1, 1))
# Generate annotated scatter points graph by ggplot
ggplot(subset(housing, State %in% c("WI", "PA")),
aes(x=Date,
y=Structure.Cost,
color=State))+
geom_point()
###Output
_____no_output_____
###Markdown
GGplot Structures and Syntax
GGplot Aesthetic attribute
In ggplot, `aesthetic` means the visual attributes, such as:
- position (coordinates)
- color (border)
- fill
- shape (of points)
- linetype
- size

Geometric Objects
`geom` objects are the actual marks we put on a plot, such as:
- points (`geom_point`, for scatter plots, dot plots, etc)
- lines (`geom_line`, for time series, trend lines, etc)
- boxplot (`geom_boxplot`, for, well, boxplots!)

Combining geometric and aesthetic attributes together to make a graph
A plot must have at least one geom; there is no upper limit. You can add a geom to a plot using the `+` operator.

Scatter points with the geom_point() attribute
geom_point requires mappings for x and y; all others are optional.
###Code
h20101 <- subset(housing, Date == 20101)
ggplot(h20101,
aes(x = Structure.Cost, y = Home.Value)) +
geom_point()
###Output
_____no_output_____
###Markdown
Lines
A plot constructed with `ggplot` can have more than one geom. In that case the mappings established in the `ggplot()` call are plot defaults that can be added to or overridden. Our plot could use a regression line.
Simple line connecting all the scatter points
###Code
p1 <- ggplot(h20101, aes(x = Structure.Cost, y = Home.Value))
p1 + geom_point(aes(color = Structure.Cost)) +
geom_line(aes(y = Home.Value))
###Output
_____no_output_____
###Markdown
Predict the line trend (smoother version than simply connecting all the points, using predict method)
###Code
p1 <- ggplot(h20101, aes(x = Structure.Cost, y = Home.Value))
h20101$pred.HV <- predict(lm(Home.Value ~ Structure.Cost, data = h20101))
p1 + geom_point(aes(color = 'red'))+
geom_line(aes(y = pred.HV ))
###Output
_____no_output_____
###Markdown
Another way to do simple linear regression, with better visuals: geom_smooth
###Code
p1 + geom_point(aes(color = Structure.Cost)) +
geom_smooth(method='lm')
###Output
_____no_output_____
###Markdown
Text labels: geom_text Like all the `geom` objects that accept a certain series of mappings, `geom_text()` takes the mapping in as `labels`. All the points will be mapped to that label given the chosen mapping. (Usually mapped to an id: 'State' is the id here in my dataset. Sometimes mapped to a category: 'region' is the category here in my dataset.)
###Code
p1 +
geom_text(aes(label=State), size = 3)
p1 +
geom_text(aes(label=region), size = 3)
###Output
_____no_output_____
###Markdown
Mapping the points to a shape instead of a text label (e.g. circle, triangle, etc.)
###Code
p1 +
geom_point(aes(color=Home.Value, shape = region))
###Output
Warning message:
"Removed 1 rows containing missing values (geom_point)."
###Markdown
Example Application: Geo-spatial analysis based on R libraries
Installing relevant libraries and Loading Data
- Run the following commands.
- Or download the packages from http://cran.us.r-project.org, unzip them and move them to the /library folder of your R directory.
###Code
x <- c("ggmap", "rgdal", "rgeos", "maptools", "dplyr", "tidyr", "tmap")
install.packages("tmap",repos='http://cran.cnr.berkeley.edu/') # warning: this may take a number of minutes
lapply("tmap", library, character.only = TRUE) # load the required packages
###Output
package 'tmap' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\Yao\AppData\Local\Temp\RtmpMfBCDx\downloaded_packages
###Markdown
Loading Datasets in shapefile (.shp) format The files beginning with london_sport in the data/ directory contain the population of London Boroughs in 2001 and the percentage of the population participating in sporting activities. This data originates from the Active People Survey. The boundary data is from the Ordnance Survey. (Cited from cran.r-project.org) readOGR takes dsn and layer as parameters:
- dsn, which stands for “data source name” and specifies the location where the file is stored
- layer, which specifies the file name.
###Code
library(rgdal)
lnd <- readOGR(dsn = "dataSets", layer = "london_sport")
###Output
Loading required package: sp
rgdal: version: 1.1-10, (SVN revision 622)
Geospatial Data Abstraction Library extensions to R successfully loaded
Loaded GDAL runtime: GDAL 2.0.1, released 2015/09/15
Path to GDAL shared files: C:/Users/Yao/Anaconda2/R/library/rgdal/gdal
Loaded PROJ.4 runtime: Rel. 4.9.2, 08 September 2015, [PJ_VERSION: 492]
Path to PROJ.4 shared files: C:/Users/Yao/Anaconda2/R/library/rgdal/proj
Linking to sp version: 1.2-3
###Markdown
The structure of spatial data in R Spatial objects are made up of a number of different slots, the key ones being @data (the attribute table) and @polygons (or @lines for line data) for the geometry. @data is just a table of some relevant geographical attributes, like the following.
###Code
head(lnd@data, n = 2)
mean(lnd$Partic_Per)
###Output
_____no_output_____
###Markdown
To inspect the @polygons slot, select the first polygon of lnd, then select the first Polygon within this spatial unit (there is usually only one) and return its coordinates. The plot shows a region outlined by these selected coordinates.
###Code
head(lnd@polygons[[3]]@Polygons[[1]]@coords, 3)
plot(lnd@polygons[[3]]@Polygons[[1]]@coords)
###Output
_____no_output_____
###Markdown
Plot the geographical map using lnd spatial object
###Code
plot(lnd)
###Output
_____no_output_____
###Markdown
Queries and Analysis based on the spatial data Select rows of lnd@data where sports participation is less than 15
###Code
lnd@data[lnd$Partic_Per < 15, ]
###Output
_____no_output_____
###Markdown
Select zones where sports participation is between 10% and 20% and plot these zones in a map.
###Code
sel <- lnd$Partic_Per > 10 & lnd$Partic_Per < 20
# head output of previous selection, a bunch of booleans
head(sel)
# output plot shown below
plot(lnd[sel, ])
###Output
_____no_output_____
###Markdown
Plotting the selected regions with the whole map
###Code
# plot the london_sport object, with color lightgrey
plot(lnd, col = "grey")
# select zones where sports participation is greater than 20%.
sel <- lnd$Partic_Per > 20
# add selected zones to map, with color red
plot(lnd[ sel, ], col = "red", add = TRUE)
###Output
_____no_output_____
###Markdown
More complex selection: the center of the map 1) Select the center of the city using rgeos library
###Code
library(rgeos)
plot(lnd, col = "grey")
# find the geographic center of London
cent_lnd <- gCentroid(lnd[lnd$name == "City of London",])
points(cent_lnd, cex = 4)
###Output
_____no_output_____
###Markdown
2) Select the regions that have some part within 10 km of the center. Create a gBuffer of coordinates within 10 km of the calculated center, select the regions that fall both in the buffer and in the whole map data set, and plot them.
###Code
plot(lnd, col = "grey")
# set 10 km buffer
lnd_buffer <- gBuffer(spgeom = cent_lnd, width = 10000)
# select the indexes in both lnd_buffer and lnd
lnd_central <- lnd[lnd_buffer,]
# plot the selected regions as red
plot(lnd_central, col = "red",border = "grey", add = T)
# plot the margin of the buffer, the circle that is 10km from the center
plot(lnd_buffer, add = T)
# add text labels to the plot
text(coordinates(cent_lnd), "Central\nLondon")
###Output
_____no_output_____
###Markdown
3) Another way of selection: select only the regions whose centers are within 10km
###Code
plot(lnd, col = "grey")
# selects only points within the buffer
# create spatialpoints (centers) of all the regions
lnd_cents <- SpatialPoints(coordinates(lnd),
proj4string = CRS(proj4string(lnd)))
# select the points inside the buffer (within 10km)
sel <- lnd_cents[lnd_buffer,]
# show where the points are located
points(sel)
plot(lnd, col = "grey")
# select regions intersecting with sel from above
lnd_central <- lnd[sel,]
plot(lnd_central, add = T, col = "red",
border = "grey")
plot(lnd_buffer, add = T, border = "black", lwd = 3)
# add text labels to the plot
text(coordinates(cent_lnd), "Central\nLondon")
###Output
_____no_output_____
###Markdown
Adding attributes to tables and joining tables The non-spatial data we are going to join to the lnd object contains records of crimes in London. This is stored in a comma separated values (.csv) file called “mps-recordedcrime-borough”.
###Code
library(rgdal)
# Create new object called "lnd" from "london_sport" shapefile
lnd <- readOGR(dsn = "dataSets", "london_sport")
crime_data <- read.csv("dataSets/mps-recordedcrime-borough.csv", stringsAsFactors = FALSE)
###Output
OGR data source with driver: ESRI Shapefile
Source: "dataSets", layer: "london_sport"
with 33 features
It has 4 fields
###Markdown
We are going to use a function called aggregate to aggregate the crimes at the borough level, ready to join to our spatial lnd dataset. A new object called crime_data is created to store this data.
###Code
head(crime_data, 5) # display first 5 lines
head(crime_data$CrimeType) # information about crime type
###Output
_____no_output_____
###Markdown
Select the CrimeType "Theft & Handling"
###Code
# Filter out "Theft & Handling" crimes and save to crime_theft
crime_theft <- crime_data[crime_data$CrimeType == "Theft & Handling", ]
head(crime_theft, 5) # take a look at the result (increase 5 to see more rows)
###Output
_____no_output_____
###Markdown
Calculate CrimeCount of each region
###Code
# Calculate the sum of the crime count for each district, save result
crime_ag <- aggregate(CrimeCount ~ Borough, FUN = sum, data = crime_theft)
# Show the first five rows of the aggregated crime data
head(crime_ag, 5)
###Output
_____no_output_____
###Markdown
Join the attribute CrimeCount to the lnd table
###Code
# dataset to add the attribute CrimeCount to
head(lnd$name)
# the attribute to join
head(crime_ag$Borough)
crime_ag <- rename(crime_ag, name = Borough) # rename the 'Borough' heading to 'name' #
head(left_join(lnd@data, crime_ag,by = "name"))
lnd@data <- left_join(lnd@data, crime_ag)
###Output
_____no_output_____
###Markdown
Plot different levels of CrimeCount with different shades of Color
###Code
plot(lnd, col = "grey")
# select zones where the crime count is greater than 15000
sel <- lnd$CrimeCount > 15000
head(na.omit(sel))
# add selected zones to map, with color red
plot(lnd[ na.omit(sel), ], col = "coral2", add = TRUE)
sel <- lnd$CrimeCount > 20000
head(na.omit(sel))
# add selected zones to map, with color red
plot(lnd[ na.omit(sel), ], col = "coral3", add = TRUE)
sel <- lnd$CrimeCount > 25000
head(na.omit(sel))
# add selected zones to map, with color red
plot(lnd[ na.omit(sel), ], col = "coral4", add = TRUE)
###Output
_____no_output_____ |
demo/example_portreat_diagram.ipynb | ###Markdown
Generating a Portrait diagram The GlecklerPlot class provides functionality to generate a Portrait diagram as introduced by [Gleckler et al. (2008)](http://onlinelibrary.wiley.com/doi/10.1029/2007JD008972/abstract). The class is very useful if you want to visualize e.g. different statistical results. Single variable
###Code
%matplotlib inline
from pycmbs.plots import GlecklerPlot
G = GlecklerPlot()
#register first models (these fill become the rows in the plot)
G.add_model('echam5')
G.add_model('mpi-esm-LR')
G.add_model('mpi-esm-MR')
#then register variables (these become the columns in the plot)
G.add_variable('ta')
#after that you can add values to be plotted; pos=1 means that the result is plotted in the upper triangle
# you can specify up to 4 positions
# assign values you want to plot
G.add_data('ta','echam5',0.2,pos=1)
G.add_data('ta','mpi-esm-LR',0.5,pos=1)
G.add_data('ta','mpi-esm-MR',1.3,pos=1)
# do the plot
G.plot()
# save figure to file
#G.fig.savefig('filename.png')
###Output
/usr/local/lib/python2.7/dist-packages/matplotlib-1.3.1-py2.7-linux-x86_64.egg/matplotlib/__init__.py:1155: UserWarning: This call to matplotlib.use() has no effect
because the backend has already been chosen;
matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
or matplotlib.backends is imported for the first time.
warnings.warn(_use_error_msg)
###Markdown
Note that two figures might be plotted here. This is just a problem with the ipython notebook used and not a problem with the actual GlecklerPlot! Multiple variables
###Code
del G
G = GlecklerPlot()
#register first models (these fill become the rows in the plot)
G.add_model('echam5')
G.add_model('mpi-esm-LR')
G.add_model('mpi-esm-MR')
#then register variables (these become the columns in the plot)
G.add_variable('ta')
G.add_variable('P')
G.add_variable('sea_ice')
#after that you can add values to be plotted; pos=1 mean that result is plotted in upper triangle
# you can specify up to 4 positions
G.add_data('ta','echam5',0.5,pos=1)
G.add_data('ta','mpi-esm-LR',0.3,pos=1)
G.add_data('P','echam5',0.25,pos=1)
G.add_data('P','mpi-esm-MR',-0.25,pos=2)
G.add_data('P','mpi-esm-MR',1.3,pos=1)
G.add_data('P','mpi-esm-LR',0.3,pos=1)
G.add_data('P','mpi-esm-LR',0.6,pos=2)
# random numbers are generated here using numpy
import numpy as np
G.add_data('sea_ice','echam5',np.random.random(1),pos=1)
G.add_data('sea_ice','echam5',np.random.random(1),pos=2)
G.add_data('sea_ice','echam5',np.random.random(1),pos=3)
G.add_data('sea_ice','echam5',np.random.random(1),pos=4)
G.add_data('sea_ice','mpi-esm-MR',np.random.random(1),pos=1)
G.add_data('sea_ice','mpi-esm-MR',np.random.random(1),pos=2)
G.add_data('sea_ice','mpi-esm-MR',np.random.random(1),pos=3)
G.add_data('sea_ice','mpi-esm-MR',np.random.random(1),pos=4)
G.add_data('sea_ice','mpi-esm-LR',np.random.random(1),pos=1)
G.add_data('sea_ice','mpi-esm-LR',np.random.random(1),pos=2)
G.add_data('sea_ice','mpi-esm-LR',np.random.random(1),pos=3)
G.add_data('sea_ice','mpi-esm-LR',np.random.random(1),pos=4)
G.plot() #do plot
###Output
/usr/local/lib/python2.7/dist-packages/numpy/core/_methods.py:59: RuntimeWarning: Mean of empty slice.
warnings.warn("Mean of empty slice.", RuntimeWarning)
/usr/local/lib/python2.7/dist-packages/numpy/core/_methods.py:71: RuntimeWarning: invalid value encountered in double_scalars
ret = ret.dtype.type(ret / rcount)
|
010_files.ipynb | ###Markdown
010_files[Source](https://github.com/iArunava/Python-TheNoTheoryGuide/)
###Code
# Opening a file
'''
Make sure you have a file in your directory before running this code.
If you have a file but still getting errors, then make sure the path is correct.
'''
# Be sure to close a file after use
file = open ("./others/test.txt")
print (file.read())
file.close()
# Modes to open a file
file = open ("./others/test.txt", 'r') # Opens in read mode
file = open ("./others/test.txt", 'w') # Opens in write mode
file = open ("./others/test.txt", 'a') # Opens in append mode
file = open ("./others/test.txt", 'r+') # Allows to read and write
file.close()
# Reading the entire file at once
# 'with' keeps the file open as long as its needed then closes it
with open ("./others/test.txt") as file:
content = file.read()
print (content)
# Reading the file line by line
with open ("./others/test.txt") as file:
i = 0
for line in file:
i += 1
print ("Line " + str(i) + " : " + str(line))
# Writing to an empty file
# When opening a file in 'w' mode, if the file contains something it will get erased
# To retain the contents of the file open in 'a' mode, namely append mode
with open("./others/test.txt", 'w') as file:
file.write ("Python is Love!")
file.write ("This is a second line.")
file.write ("Python doesn't add new lines on its own.")
file.write ("\nSo lets provide some new lines.\n")
file.write ("\nBy the way, you just wrote multiple lines to a file :)")
file.write ("\nAnd just remember 'with' closes the opened file.")
file.write (" So you do not have to close it yourself. When opened a file with 'with' :P")
# Appending to a file
with open("./others/test.txt", 'a') as file:
file.write ("\nWell now the text in the opened file is retained.")
file.write ("\nAnd you are appending text to already present text.")
file.write ("\nCool!")
###Output
_____no_output_____ |
quantum_hello_world.ipynb | ###Markdown
We want to generate and measure the following state:$$\vert \psi \rangle = \frac{\vert 00 \rangle + \vert 11 \rangle}{\sqrt{2}}$$We start by importing Qiskit and then create the circuit.
###Code
from qiskit import *
nQ = 2 # <-- NUMBER OF QUBITS
nC = 2 # <-- NUMBER OF CLASSICAL BITS
Qr = QuantumRegister(nQ)
Cr = ClassicalRegister(nC)
circuit = QuantumCircuit(Qr , Cr )
circuit.h(0) # <-- Hadamard gate applied to the first qubit
circuit.cx(0 , 1) # <-- CNOT gate: the order is control, target
circuit.measure([ 0 , 1 ] , [ 0 , 1 ]) # <-- qubits [ 0 , 1 ] are measured and
# the results are stored in classical bits [ 0 , 1 ], in this order
circuit.draw(output='mpl') # <-- draw the circuit
###Output
_____no_output_____
###Markdown
Simulation on the local machine
###Code
simulator = Aer.get_backend('qasm_simulator')
result = execute( circuit , backend = simulator, shots = 8192 ).result( )
from qiskit.visualization import plot_histogram
plot_histogram( result.get_counts(circuit) )
###Output
_____no_output_____
###Markdown
Registering with IBM Quantum Computing
###Code
# Note: this line of code should be executed only once.
# If you need to run it again, RESTART the KERNEL.
from qiskit import IBMQ
#-----------------------------------------------------------------------
# You can get your token at:
# https://quantum-computing.ibm.com/
QX_TOKEN = "Paste your token here"
QX_URL = "https://quantumexperience.ng.bluemix.net/api"
#-----------------------------------------------------------------------
try :
    IBMQ.save_account(QX_TOKEN);
    print('Successfully registered!')
except :
    print('Something went wrong. \nDid you enter the correct token?')
###Output
configrc.store_credentials:WARNING:2021-09-01 13:00:30,253: Credentials already present. Set overwrite=True to overwrite.
###Markdown
Now comes the fun part: running the circuit we built on a real quantum computer. IBM makes several quantum devices available through the cloud. To use these devices, we first have to load an IBMQ account.
###Code
IBMQ.load_account()
###Output
_____no_output_____
###Markdown
Checking the available machines:
###Code
from qiskit.tools.monitor import backend_overview
backend_overview()
###Output
ibmq_manila ibmq_quito ibmq_belem
----------- ---------- ----------
Num. Qubits: 5 Num. Qubits: 5 Num. Qubits: 5
Pending Jobs: 1 Pending Jobs: 6 Pending Jobs: 6
Least busy: True Least busy: False Least busy: False
Operational: True Operational: True Operational: True
Avg. T1: 129.9 Avg. T1: 88.8 Avg. T1: 94.2
Avg. T2: 62.7 Avg. T2: 104.0 Avg. T2: 122.4
ibmq_lima ibmq_bogota ibmq_santiago
--------- ----------- -------------
Num. Qubits: 5 Num. Qubits: 5 Num. Qubits: 5
Pending Jobs: 58 Pending Jobs: 1402 Pending Jobs: 20
Least busy: False Least busy: False Least busy: False
Operational: True Operational: True Operational: True
Avg. T1: 74.2 Avg. T1: 92.1 Avg. T1: 76.1
Avg. T2: 84.0 Avg. T2: 127.6 Avg. T2: 97.0
ibmq_armonk
-----------
Num. Qubits: 1
Pending Jobs: 27
Least busy: False
Operational: True
Avg. T1: 172.4
Avg. T2: 260.9
###Markdown
Next, we have to provide the details of the IBMQ provider and of the quantum computer we chose to run our circuit.
###Code
provider=IBMQ.get_provider('ibm-q')
quantum_computer=provider.get_backend('ibmq_belem')
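# (Alternative, hedged sketch, not in the original notebook.) Instead of hard-coding
# 'ibmq_belem', you could pick the least busy real device automatically:
from qiskit.providers.ibmq import least_busy
small_devices = provider.backends(filters=lambda b: b.configuration().n_qubits >= 2
                                  and not b.configuration().simulator
                                  and b.status().operational)
print(least_busy(small_devices))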
###Output
_____no_output_____
###Markdown
We use the execute() function to run our quantum circuit using ibmq_belem as our backend:
###Code
execute_circuit=execute(circuit,backend=quantum_computer)
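# (Optional addition.) Real devices queue jobs; job_monitor reports the job status while it waits.
from qiskit.tools.monitor import job_monitor
job_monitor(execute_circuit)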
###Output
_____no_output_____
###Markdown
To get the results, simply run the following command:
###Code
result=execute_circuit.result()
###Output
_____no_output_____
###Markdown
To visualize the results, run the following command:
###Code
plot_histogram(result.get_counts(circuit))
import qiskit.tools.jupyter
%qiskit_version_table
###Output
C:\Users\warle\anaconda3\lib\site-packages\qiskit\aqua\__init__.py:86: DeprecationWarning: The package qiskit.aqua is deprecated. It was moved/refactored to qiskit-terra For more information see <https://github.com/Qiskit/qiskit-aqua/blob/main/README.md#migration-guide>
warn_package('aqua', 'qiskit-terra')
|
code/chap23-Mine.ipynb | ###Markdown
Modeling and Simulation in PythonChapter 23Copyright 2017 Allen DowneyLicense: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
###Code
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
###Output
_____no_output_____
###Markdown
Code from the previous chapter
###Code
m = UNITS.meter
s = UNITS.second
kg = UNITS.kilogram
degree = UNITS.degree
params = Params(x = 0 * m,
y = 1 * m,
g = 9.8 * m/s**2,
mass = 145e-3 * kg,
diameter = 73e-3 * m,
rho = 1.2 * kg/m**3,
C_d = 0.3,
angle = 45 * degree,
velocity = 40 * m / s,
t_end = 20 * s)
def make_system(params):
"""Make a system object.
params: Params object with angle, velocity, x, y,
diameter, duration, g, mass, rho, and C_d
returns: System object
"""
unpack(params)
# convert angle to degrees
theta = np.deg2rad(angle)
# compute x and y components of velocity
vx, vy = pol2cart(theta, velocity)
# make the initial state
init = State(x=x, y=y, vx=vx, vy=vy)
# compute area from diameter
area = np.pi * (diameter/2)**2
return System(params, init=init, area=area)
def drag_force(V, system):
"""Computes drag force in the opposite direction of `V`.
V: velocity
system: System object with rho, C_d, area
returns: Vector drag force
"""
unpack(system)
mag = -rho * V.mag**2 * C_d * area / 2
direction = V.hat()
f_drag = mag * direction
return f_drag
def slope_func(state, t, system):
"""Computes derivatives of the state variables.
state: State (x, y, x velocity, y velocity)
t: time
system: System object with g, rho, C_d, area, mass
returns: sequence (vx, vy, ax, ay)
"""
x, y, vx, vy = state
unpack(system)
V = Vector(vx, vy)
a_drag = drag_force(V, system) / mass
a_grav = Vector(0, -g)
a = a_grav + a_drag
return vx, vy, a.x, a.y
def event_func(state, t, system):
"""Stop when the y coordinate is 0.
state: State object
t: time
system: System object
returns: y coordinate
"""
x, y, vx, vy = state
return y
###Output
_____no_output_____
###Markdown
Optimal launch angle To find the launch angle that maximizes distance from home plate, we need a function that takes launch angle and returns range.
###Code
def range_func(angle, params):
"""Computes range for a given launch angle.
angle: launch angle in degrees
params: Params object
returns: distance in meters
"""
params = Params(params, angle=angle)
system = make_system(params)
results, details = run_ode_solver(system, slope_func, events=event_func)
x_dist = get_last_value(results.x) * m
print(angle)
return x_dist
###Output
_____no_output_____
###Markdown
Let's test `range_func`.
###Code
%time range_func(45, params)
###Output
45
Wall time: 147 ms
###Markdown
And sweep through a range of angles.
###Code
angles = linspace(20, 80, 21)
sweep = SweepSeries()
for angle in angles:
x_dist = range_func(angle, params)
print(angle, x_dist)
sweep[angle] = x_dist
###Output
20.0
20.0 79.96823513701818 meter
23.0
23.0 86.2962864918857 meter
26.0
26.0 91.59647908800756 meter
29.0
29.0 95.89089380357947 meter
32.0
32.0 99.20335822576214 meter
35.0
35.0 101.55668007973463 meter
38.0
38.0 102.97173880917646 meter
41.0
41.0 103.46740813177843 meter
44.0
44.0 103.060922479178 meter
47.0
47.0 101.7684506860653 meter
50.0
50.0 99.60572853320414 meter
53.0
53.0 96.58867331645769 meter
56.0
56.0 92.7339915489422 meter
59.0
59.0 88.05990483905572 meter
62.0
62.0 82.58716276454999 meter
65.0
65.0 76.34016117578483 meter
68.0
68.0 69.34714056465755 meter
71.0
71.0 61.63878192638946 meter
74.0
74.0 53.256101549629825 meter
77.0
77.0 44.246680677829886 meter
80.0
80.0 34.6702130194327 meter
###Markdown
Plotting the `Sweep` object, it looks like the peak is between 40 and 45 degrees.
###Code
plot(sweep, color='C2')
decorate(xlabel='Launch angle (degree)',
ylabel='Range (m)',
title='Range as a function of launch angle',
legend=False)
savefig('figs/chap10-fig03.pdf')
###Output
Saving figure to file figs/chap10-fig03.pdf
###Markdown
We can use `max_bounded` to search for the peak efficiently.
###Code
%time res = max_bounded(range_func, [0, 90], params)
###Output
34.37694101250946
55.62305898749054
21.246117974981075
41.405491236206636
41.23748723573612
41.141723142200014
41.139142795614596
41.13947673914003
41.13880885208916
Wall time: 919 ms
###Markdown
`res` is a `ModSimSeries` object with detailed results:
###Code
res
###Output
_____no_output_____
###Markdown
`x` is the optimal angle and `fun` the optimal range.
###Code
optimal_angle = res.x * degree
max_x_dist = res.fun
###Output
_____no_output_____
###Markdown
Under the hood Read the source code for `max_bounded` and `min_bounded`, below. Add a print statement to `range_func` that prints `angle`. Then run `max_bounded` again so you can see how many times it calls `range_func` and what the arguments are.
###Code
%psource max_bounded
%psource min_bounded
###Output
_____no_output_____
###Markdown
The Manny Ramirez problem Finally, let's solve the Manny Ramirez problem: *What is the minimum effort required to hit a home run in Fenway Park?* Fenway Park is a baseball stadium in Boston, Massachusetts. One of its most famous features is the "Green Monster", which is a wall in left field that is unusually close to home plate, only 310 feet along the left field line. To compensate for the short distance, the wall is unusually high, at 37 feet. Although the problem asks for a minimum, it is not an optimization problem. Rather, we want to solve for the initial velocity that just barely gets the ball to the top of the wall, given that it is launched at the optimal angle. And we have to be careful about what we mean by "optimal". For this problem, we don't want the longest range, we want the maximum height at the point where it reaches the wall. If you are ready to solve the problem on your own, go ahead. Otherwise I will walk you through the process with an outline and some starter code. As a first step, write a function called `height_func` that takes a launch angle and a params as parameters, simulates the flight of a baseball, and returns the height of the baseball when it reaches a point 94.5 meters (310 feet) from home plate.
###Code
# Solution goes here
def event_func(state, t, system):
"""Stop when the x coordinate is 94.5.
state: State object
t: time
system: System object
returns: y coordinate
"""
x, y, vx, vy = state
return x - 94.488 * m
###Output
_____no_output_____
###Markdown
Always test the event function with the initial conditions.
###Code
# Solution goes here
system = make_system(params)
event_func(system.init, 0, system)
# Solution goes here
def height_func(angle, params):
"""Computes range for a given launch angle.
angle: launch angle in degrees
params: Params object
returns: distance in meters
"""
params = Params(params, angle=angle)
system = make_system(params)
results, details = run_ode_solver(system, slope_func, events=event_func)
height = get_last_value(results.y) * m
return height
###Output
_____no_output_____
###Markdown
Test your function with a launch angle of 45 degrees:
###Code
# Solution goes here
height_func(45 * degree, params)
###Output
_____no_output_____
###Markdown
Now use `max_bounded` to find the optimal angle. Is it higher or lower than the angle that maximizes range?
###Code
# Solution goes here
res = max_bounded(height_func, [0, 90], params)
# Solution goes here
optimal_angle = res.x * degree
# Solution goes here
optimal_height = res.fun
height_func(optimal_angle, params)
###Output
_____no_output_____
###Markdown
With initial velocity 40 m/s and an optimal launch angle, the ball clears the Green Monster with a little room to spare.Which means we can get over the wall with a lower initial velocity. Finding the minimum velocityEven though we are finding the "minimum" velocity, we are not really solving a minimization problem. Rather, we want to find the velocity that makes the height at the wall exactly 11 m, given given that it's launched at the optimal angle. And that's a job for `fsolve`.Write an error function that takes a velocity and a `Params` object as parameters. It should use `max_bounded` to find the highest possible height of the ball at the wall, for the given velocity. Then it should return the difference between that optimal height and 11 meters.
###Code
# Solution goes here
def error_func(velocity, params):
params = Params(params, velocity=velocity)
res = max_bounded(height_func, [0, 90], params)
return res.fun - 11.2276 * m
###Output
_____no_output_____
###Markdown
Test your error function before you call `fsolve`.
###Code
# Solution goes here
error_func(40 * m/s, params)
###Output
_____no_output_____
###Markdown
Then use `fsolve` to find the answer to the problem, the minimum velocity that gets the ball out of the park.
###Code
# Solution goes here
res = fsolve(error_func, 40 * m/s, params)
# Solution goes here
min_velocity = res[0] * m/s
###Output
_____no_output_____
###Markdown
And just to check, run `error_func` with the value you found.
###Code
# Solution goes here
error_func(min_velocity, params)
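# (Optional visual check, a hedged sketch rather than part of the original exercise.)
# Simulate the flight at the minimum velocity found above, using the corresponding
# optimal angle, and plot the trajectory up to the wall.
params_min = Params(params, velocity=min_velocity)
best = max_bounded(height_func, [0, 90], params_min)
system_min = make_system(Params(params_min, angle=best.x))
results_min, details_min = run_ode_solver(system_min, slope_func, events=event_func)
plot(results_min.x, results_min.y, label='trajectory')
decorate(xlabel='x position (m)', ylabel='y position (m)')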
###Output
_____no_output_____ |
{{cookiecutter.project_slug}}/00.02-use-eeg-data.ipynb | ###Markdown
Using the EEG data you downloaded To use the data you just downloaded, simply import it in the same way you import anything else in Python. Check out the `data` folder to find out how the data is structured - it works in a hierarchical way. For example, to use your EEG data, you can import it in the following way:
###Code
from data.eeg.preprocessed.resting_state import raws
###Output
_____no_output_____
###Markdown
The imported object is a __generator__ that you can loop over. Whenever the next value is requested (i.e. on each loop iteration), the `raws` function loads the next raw dataset and produces an `MNE-python` raw data structure that you can then proceed to use. Just to show what that's like, you can try out the following, which just produces 1 iteration:
###Code
next(raws())
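# (Hedged sketch.) Looping over the generator processes every recording in turn;
# each item is a standard MNE Raw object, so the usual attributes are available.
for raw in raws():
    print(raw.info['sfreq'], len(raw.ch_names), raw.n_times)
    break  # remove this break to process every subject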
###Output
Creating RawArray with float64 data, n_channels=111, n_times=175062
Range : 0 ... 175061 = 0.000 ... 350.122 secs
Ready.
###Markdown
You can equally get the data as `mne.Epochs`, i.e. divided up based on the input. You can pass any keyword arguments you want to the `epochs()` generator, which will pass them on to `mne.Epochs`, so you can still specify baselines or other options if you want. The way this deals with tasks split across multiple recordings is that it will concatenate them all for you. An example where this would be appropriate is the surround-suppression paradigm:
###Code
from data.eeg.preprocessed.surround_suppression import epochs
epoch = next(epochs(tmin=-0.2, tmax=0.5, baseline=(-0.2, 0)))
epoch.average().plot(spatial_colors=True);
###Output
Creating RawArray with float64 data, n_channels=111, n_times=144564
Range : 0 ... 144563 = 0.000 ... 289.126 secs
Ready.
Creating RawArray with float64 data, n_channels=111, n_times=144564
Range : 0 ... 144563 = 0.000 ... 289.126 secs
Ready.
64 matching events found
0 projection items activated
64 matching events found
0 projection items activated
NDARCX221CWA
Loading data for 64 events and 351 original time points ...
0 bad epochs dropped
Loading data for 64 events and 351 original time points ...
0 bad epochs dropped
128 matching events found
0 bad epochs dropped
|
tutorials/creating_a_heating_network.ipynb | ###Markdown
Creation of a pandapipes Heating Network There are several aspects to consider when constructing a heating network. To get a basic introduction to creating a pandapipes network, please see the tutorial ["Creating pandapipes Networks"](https://github.com/e2nIEE/pandapipes/blob/master/tutorials/creating_a_simple_network.ipynb) first.

For the pipes, additional parameters must be specified: the heat transfer coefficient (`alpha_w_per_m2k`), which determines how well or poorly the pipe insulates, and the number of internal pipe sections (`sections`). Likewise, the ambient temperature of the pipe (`text_k`) can be changed; it is 293 K by default. In this case the variable `ambient_temperature` in the [`pipeflow` function](https://pandapipes.readthedocs.io/en/latest/pipeflow.html) should be set to the same value. In addition, for an external grid the variable `type` should be set to "pt" or "t" and a constant temperature value for `t_k` should be defined. Furthermore, start values for the temperatures at the junctions (`tfluid_k`) should be specified.

Please note that only incompressible media can be used for the heating network calculation and the [`mode`](https://pandapipes.readthedocs.io/en/latest/pipeflow/calculation_modes.html#temperature-calculations-pipeflow-option-mode-all-or-mode-heat) in the `pipeflow` function has to be set to "all" or "heat". In case `mode` equals "heat", the user must manually specify a solution vector for the hydraulic calculations. It should also be noted that the temperature calculations are currently still sequential. This means that the calculated temperature values do not influence the hydraulic properties of the medium. Therefore, the calculations are only valid if the properties are not very temperature-dependent or if there are only minor changes in temperature.

In the following, a simple example for the creation and calculation of a network is presented. Here water is used as the fluid and the mode "all" is selected.
###Code
import pandapipes
from pandapipes.component_models import Pipe
# create empty network
net = pandapipes.create_empty_network("net", add_stdtypes=False)
# create fluid
pandapipes.create_fluid_from_lib(net, "water", overwrite=True)
# create junctions
junction1 = pandapipes.create_junction(net, pn_bar=3, tfluid_k=290, name="Junction 1", geodata=(0, 0))
junction2 = pandapipes.create_junction(net, pn_bar=3, tfluid_k=290, name="Junction 2", geodata=(2, 0))
junction3 = pandapipes.create_junction(net, pn_bar=3, tfluid_k=290, name="Junction 3", geodata=(4, 0))
junction4 = pandapipes.create_junction(net, pn_bar=3, tfluid_k=290, name="Junction 4", geodata=(2, 2))
# create external grid
pandapipes.create_ext_grid(net, junction=junction1, p_bar=6, t_k=363.15, name="External Grid", type="pt")
# creat sinks
pandapipes.create_sink(net, junction=junction3, mdot_kg_per_s=1, name="Sink 1")
pandapipes.create_sink(net, junction=junction4, mdot_kg_per_s=2, name="Sink 2")
# create pipes
pandapipes.create_pipe_from_parameters(net, from_junction=junction1, to_junction=junction2, length_km=0.1,
diameter_m=0.075, k_mm=0.025, sections=5, alpha_w_per_m2k=100,
text_k=298.15, name="Pipe 1", geodata=[(0, 0), (2, 0)])
pandapipes.create_pipe_from_parameters(net, from_junction=junction2, to_junction=junction3, length_km=2,
diameter_m=0.05, k_mm=0.025, sections=4, alpha_w_per_m2k=100,
text_k=298.15, name="Pipe 2", geodata=[(2, 0), (4, 0)])
pandapipes.create_pipe_from_parameters(net, from_junction=junction2, to_junction=junction4, length_km=1,
diameter_m=0.1, k_mm=0.025, sections=8, alpha_w_per_m2k=50,
text_k=298.15, name="Pipe 3", geodata=[(2, 0), (2, 2)])
# run pipeflow
pandapipes.pipeflow(net, stop_condition="tol", iter=3, friction_model="colebrook",
mode="all", transient=False, nonlinear_method="automatic", tol_p=1e-4,
tol_v=1e-4, ambient_temperature=298.15)
###Output
_____no_output_____
###Markdown
The general results for the junctions and pipes can still be accessed as follows:
###Code
net.res_junction
net.res_pipe
###Output
_____no_output_____
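###Markdown
 As an additional quick check (a minimal sketch, assuming `res_pipe` exposes the `t_from_k` and `t_to_k` columns as in recent pandapipes versions), the temperature drop along each pipe can be derived directly from the result table:
###Code
# temperature drop per pipe in Kelvin (positive values mean the fluid cools down along the pipe)
net.res_pipe["t_from_k"] - net.res_pipe["t_to_k"]
###Output
_____no_output_____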
###Markdown
To get the internal results regarding the division of the pipes into sections, use the following function:
###Code
pipe_1_results = Pipe.get_internal_results(net, [0])
###Output
_____no_output_____
###Markdown
 Here the results of Pipe 1 (`[0]`) are accessed. In general these include three matrices with the values of `PINIT`, `VINIT` and `TINIT`. The internal results of the pipe can also be accessed separately, as shown here for Pipe 1:
###Code
pipe_1_results["PINIT"]
pipe_1_results["TINIT"]
pipe_1_results["VINIT"]
###Output
_____no_output_____ |
MIDOSS/Diesel.ipynb | ###Markdown
 NetCDF Thickness: disappears after it = 12
###Code
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
it = 12
ds.Thickness_2D[it, 460:500 , 230:270].plot(ax=axs[0], cmap='copper')
axs[1].plot(ds.time[0:24], ds.Thickness_2D[0:24].max(axis=1).max(axis=1));
###Output
_____no_output_____
###Markdown
 NetCDF 2D Concentration: Very similar response, also disappears after it = 12
###Code
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
it = 5
ds.OilConcentration_2D[it, 460:500 , 230:270].plot(ax=axs[0], cmap='copper')
axs[1].plot(ds.time[0:24], ds.OilConcentration_2D[0:24].max(axis=1).max(axis=1));
axs[1].plot(ds.time[0:24], ds.OilConcentration_2D[0:24].sum(axis=1).sum(axis=1), 'o-');
###Output
_____no_output_____
###Markdown
NetCDF 3D Oil Concentration
###Code
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
it = 5
ds.OilConcentration_3D[it, :, 460:500 , 230:270].sum(axis=0).plot(ax=axs[0], cmap='copper')
axs[1].plot(ds.time[0:24], ds.OilConcentration_3D[0:24].sum(axis=1).max(axis=1).max(axis=1));
axs[1].plot(ds.time[0:24], ds.OilConcentration_3D[0:24].sum(axis=1).sum(axis=1).sum(axis=1), 'o-');
fig, ax = plt.subplots(1, 1)
for i in range(465, 475):
ax.plot(ds.OilConcentration_3D[it, :, i, 250], -mesh.gdepw_1d[0, ::-1], 'o-', label=str(i));
ax.legend();
###Output
_____no_output_____
###Markdown
 HDF5 File: Considering the Particles
###Code
hdf['Grid']['VerticalZ']['Vertical_00010']
imin, imax = 470, 485
jmin, jmax = 230, 255
plt.pcolormesh(hdf['Grid']['VerticalZ']['Vertical_00001'][39].transpose(), cmap='bwr')
plt.plot([240, 241], [480, 478], 'rx');
plt.xlim((jmin,jmax))
plt.ylim((imin, imax));
plt.colorbar();
hdf['Time']['Time_00019'][:]
hdf['Results']['Number']['Number_00010']
imin, imax = 470, 485
jmin, jmax = 230, 255
plt.pcolormesh(hdf['Results']['Number']['Number_00012'][38].transpose(), cmap=cm.speed)
plt.plot([240, 241], [480, 478], 'rx');
plt.xlim((jmin,jmax))
plt.ylim((imin, imax));
plt.colorbar();
hdf['Results']['Percentage Contaminated']['Percentage Contaminated_00012']
imin, imax = 470, 485
jmin, jmax = 230, 255
plt.pcolormesh(hdf['Results']['Percentage Contaminated']['Percentage Contaminated_00014'][39].transpose(), cmap=cm.speed)
plt.plot([240, 241], [480, 478], 'rx');
plt.xlim((jmin,jmax))
plt.ylim((imin, imax));
plt.colorbar();
fig, ax = plt.subplots(1, 1, figsize = (10, 10))
imin, imax = 470, 485
jmin, jmax = 230, 255
colour = ax.pcolormesh(hdf['Grid']['Longitude'][:], hdf['Grid']['Latitude'][:], hdf['Grid']['Bathymetry'][:], cmap=cm.deep)
fig.colorbar(colour)
for i in range (2000):
if hdf['Results']['OilSpill']['Beached']['Beached_00190'][i] > 1:
# print (i, hdf['Results']['OilSpill']['Longitude']['Longitude_00190'][i],
# hdf['Results']['OilSpill']['Latitude']['Latitude_00190'][i])
ax.plot(hdf['Results']['OilSpill']['Longitude']['Longitude_00190'][i],
hdf['Results']['OilSpill']['Latitude']['Latitude_00190'][i], 'rx')
ax.set_xlim((-125, -123.5))
ax.set_ylim((49.2, 49.7));
diffy = (hdf['Results']['Group_1']['Data_1D']['Beached']['Beached_00010'][:]
- hdf['Results']['OilSpill']['Beached']['Beached_00010'][:])
diffy.max()
hdf['Results']['OilSpill']['Data_2D'].keys()
hdf['Results']['OilSpill']['Data_3D'].keys()
hdf['Results']['OilSpill']['Data_2D']['Beaching Volume']['Beaching Volume_00190'][:].min()
for i in range(9):
vstring = f'Volume_0000{i+1}'
print (i+1, hdf['Results']['OilSpill']['Volume'][vstring][:].max())
for i in range(10):
vstring = f'Volume_0001{i}'
print (i+10, hdf['Results']['OilSpill']['Volume'][vstring][:].max())
summit =np.zeros((40, 396, 896))
for i in range(9):
vstring = f'OilConcentration_3D_0000{i+1}'
print (i+1, hdf['Results']['OilSpill']['Data_3D']['OilConcentration_3D'][vstring][:].max())
summit = summit + hdf['Results']['OilSpill']['Data_3D']['OilConcentration_3D'][vstring][:]
for i in range(10):
vstring = f'OilConcentration_3D_0001{i}'
print (i+10, hdf['Results']['OilSpill']['Data_3D']['OilConcentration_3D'][vstring][:].max())
summit = summit + hdf['Results']['OilSpill']['Data_3D']['OilConcentration_3D'][vstring][:]
plt.pcolormesh(summit[39].transpose())
plt.colorbar()
imin, imax = 470, 485
jmin, jmax = 230, 255
plt.xlim((jmin, jmax));
plt.ylim((imin-19, imax));
OilSpill = hdf['Results']['OilSpill']
Xpos, Ypos, Zpos = OilSpill['X Pos'], OilSpill['Y Pos'], OilSpill['Z Pos']
fig, ax = plt.subplots(1, 1)
for i in range(9):
xstring = f'X Position_0000{i+1}'
ystring = f'Y Position_0000{i+1}'
ax.plot(Xpos[xstring][0] - Xpos[f'X Position_00001'][0], Ypos[ystring][0] - Ypos[f'Y Position_00001'][0], 'o', color='tab:blue')
ax.plot(Xpos[xstring][10] - Xpos[f'X Position_00001'][0], Ypos[ystring][10] - Ypos[f'Y Position_00001'][0], 'o', color='tab:orange')
ax.plot(Xpos[xstring][20] - Xpos[f'X Position_00001'][0], Ypos[ystring][20] - Ypos[f'Y Position_00001'][0], 'o', color='tab:green')
ax.plot(Xpos[xstring][50] - Xpos[f'X Position_00001'][0], Ypos[ystring][50] - Ypos[f'Y Position_00001'][0], 'o', color='tab:pink')
for i in range(10):
xstring = f'X Position_0001{i}'
ystring = f'Y Position_0001{i}'
ax.plot(Xpos[xstring][0] - Xpos[f'X Position_00001'][0], Ypos[ystring][0] - Ypos[f'Y Position_00001'][0], 'x', color='tab:blue')
ax.plot(Xpos[xstring][10] - Xpos[f'X Position_00001'][0], Ypos[ystring][10] - Ypos[f'Y Position_00001'][0], 'x', color='tab:orange')
ax.plot(Xpos[xstring][20] - Xpos[f'X Position_00001'][0], Ypos[ystring][20] - Ypos[f'Y Position_00001'][0], 'x', color='tab:green')
ax.plot(Xpos[xstring][50] - Xpos[f'X Position_00001'][0], Ypos[ystring][50] - Ypos[f'Y Position_00001'][0], 'x', color='tab:pink')
fig, ax = plt.subplots(1, 1)
for i in range(9):
xstring = f'X Position_0000{i+1}'
zstring = f'Z Position_0000{i+1}'
ax.plot(Xpos[xstring][0] - Xpos[f'X Position_00001'][0], Zpos[zstring][0] - Zpos[f'Z Position_00001'][0], 'o', color='tab:blue')
ax.plot(Xpos[xstring][10] - Xpos[f'X Position_00001'][0], Zpos[zstring][10] - Zpos[f'Z Position_00001'][0], 'o', color='tab:orange')
ax.plot(Xpos[xstring][20] - Xpos[f'X Position_00001'][0], Zpos[zstring][20] - Zpos[f'Z Position_00001'][0], 'o', color='tab:green')
ax.plot(Xpos[xstring][50] - Xpos[f'X Position_00001'][0], Zpos[zstring][50] - Zpos[f'Z Position_00001'][0], 'o', color='tab:pink')
for i in range(10):
xstring = f'X Position_0001{i}'
zstring = f'Z Position_0001{i}'
ax.plot(Xpos[xstring][0] - Xpos[f'X Position_00001'][0], Zpos[zstring][0] - Zpos[f'Z Position_00001'][0], 'x', color='tab:blue')
ax.plot(Xpos[xstring][10] - Xpos[f'X Position_00001'][0], Zpos[zstring][10] - Zpos[f'Z Position_00001'][0], 'x', color='tab:orange')
ax.plot(Xpos[xstring][20] - Xpos[f'X Position_00001'][0], Zpos[zstring][20] - Zpos[f'Z Position_00001'][0], 'x', color='tab:green')
ax.plot(Xpos[xstring][50] - Xpos[f'X Position_00001'][0], Zpos[zstring][50] - Zpos[f'Z Position_00001'][0], 'x', color='tab:pink')
field = hdf['Results']['Number']['Number_00013']
plt.pcolormesh(field[-1].transpose());
plt.plot(251+0.5, 473+0.5, 'rx')
plt.colorbar();
plt.xlim(240, 255);
plt.ylim(465, 480);
print(field[:].max(axis=0).max(axis=0).max(axis=0),
field[:].sum(axis=0).sum(axis=0).sum(axis=0),
field[-1].sum(axis=0).sum(axis=0))
for i in range(9):
nstring = f'Number_0000{i+1}'
zstring = f'Z Position_0000{i+1}'
print(i, Zpos[zstring][:].max(), hdf['Results']['Number'][nstring][:].sum(axis=0).sum(axis=0).sum(axis=0))
for i in range(10):
nstring = f'Number_0001{i}'
zstring = f'Z Position_0001{i}'
print(i+10, Zpos[zstring][:].max(), hdf['Results']['Number'][nstring][:].sum(axis=0).sum(axis=0).sum(axis=0))
fig, ax = plt.subplots(1, 1)
for j in range(243, 251):
plt.plot(ds.Oil_Arrival_Time[:, j], label=str(j));
ax.set_xlim(460, 480)
ax.legend();
fig, ax = plt.subplots(1, 1)
for j in range(240, 248):
plt.plot(ds.Beaching_Time[:, j], label=str(j));
ax.set_xlim(470, 490)
ax.legend();
fig, ax = plt.subplots(1, 1)
ax.pcolormesh(mesh.tmask[0, 0])
ax.set_ylim(460, 485)
ax.set_xlim(230, 270)
ax.plot([240, 241], [480, 478], 'rx');
it = 17
fig, ax = plt.subplots(1, 1)
ax.pcolormesh(mesh.tmask[0, 0])
imin, imax = 470, 485
jmin, jmax = 230, 255
ax.set_ylim(imin, imax)
ax.set_xlim(jmin, jmax)
for i in range(imin, imax):
for j in range(jmin, jmax):
if ds.OilConcentration_2D[it, i, j] != 0:
# print(i, j, ds.time[it].values, ds.OilConcentration_2D[it, i, j].values)
plt.plot(j, i, 'bo')
if ds.Beaching_Time[i, j] > ds.time[0].values and ds.Beaching_Time[i, j] <= ds.time[it].values:
# print(i, j, ds.time[it].values, ds.Beaching_Time[i, j].values)
plt.plot(j, i, 'rx')
ds.Dissolution_3D[17, :, imin:imax, jmin:jmax].sum(axis=0).plot(cmap='copper')
it = 17
fig, ax = plt.subplots(1, 1)
ax.pcolormesh(mesh.tmask[0, 0])
imin, imax = 470, 485
jmin, jmax = 230, 255
ax.set_ylim(imin, imax)
ax.set_xlim(jmin, jmax)
for i in range(imin, imax):
for j in range(jmin, jmax):
if ds.OilConcentration_2D[it, i, j] != 0:
# print(i, j, ds.time[it].values, ds.OilConcentration_2D[it, i, j].values)
plt.plot(j, i, 'bo')
if ds.Beaching_Time[i, j] > ds.time[0].values and ds.Beaching_Time[i, j] <= ds.time[it].values:
# print(i, j, ds.time[it].values, ds.Beaching_Time[i, j].values)
plt.plot(j, i, 'rx')
if ds.Dissolution_3D[it, :, i, j].sum() != 0:
plt.plot(j, i, 'k+')
plt.plot(ds.Dissolution_3D[it, :, 480, 240], mesh.gdepw_1d[0] - 432)
plt.pcolormesh(mesh.mbathy[0, imin:imax, jmin:jmax])
plt.colorbar();
it = 20
fig, ax = plt.subplots(1, 1)
ax.pcolormesh(mesh.mbathy[0])
imin, imax = 475, 490
jmin, jmax = 230, 255
ax.set_ylim(imin, imax)
ax.set_xlim(jmin, jmax)
for i in range(imin, imax):
for j in range(jmin, jmax):
if ds.OilConcentration_2D[it, i, j] != 0:
# print(i, j, ds.time[it].values, ds.OilConcentration_2D[it, i, j].values)
plt.plot(j, i, 'bo')
if ds.Beaching_Time[i, j] > ds.time[0].values and ds.Beaching_Time[i, j] <= ds.time[it].values:
# print(i, j, ds.time[it].values, ds.Beaching_Time[i, j].values)
plt.plot(j, i, 'rx')
if ds.Dissolution_3D[it, :, i, j].sum() != 0:
plt.plot(j, i, 'm+')
plt.plot(ds.Dissolution_3D[20, :, :, :].sum(axis=1).sum(axis=1), -mesh.gdepw_1d[0, ::-1], 'o-');
it = 2
fig, ax = plt.subplots(1, 1)
ax.pcolormesh(mesh.mbathy[0])
imin, imax = 470, 485
jmin, jmax = 230, 255
ax.set_ylim(imin, imax)
ax.set_xlim(jmin, jmax)
for i in range(imin, imax):
for j in range(jmin, jmax):
if ds.OilConcentration_2D[it, i, j] != 0:
# print(i, j, ds.time[it].values, ds.OilConcentration_2D[it, i, j].values)
plt.plot(j, i, 'bo')
if ds.Beaching_Time[i, j] > ds.time[0].values and ds.Beaching_Time[i, j] <= ds.time[it].values:
# print(i, j, ds.time[it].values, ds.Beaching_Time[i, j].values)
plt.plot(j, i, 'rx')
if ds.OilConcentration_3D[it, :, i, j].sum() != 0:
plt.plot(j, i, 'm+')
hdf['Results']['OilSpill']['Data_3D']['Dissolution_3D'].keys()
field = hdf['Results']['OilSpill']['Data_3D']['Dissolution_3D']['Dissolution_3D_00031']
for item in field.attrs.keys():
print (item + ":", field.attrs[item])
print (field.attrs["Units"].decode())
field = hdf['Results']['OilSpill']['Data_2D']['Beaching Volume']['Beaching Volume_00031']
for item in field.attrs.keys():
print (item + ":", field.attrs[item])
print (field.attrs["Units"].decode())
print (hdf5_file)
for group in hdf5_file.root.Results.OilSpill.Data_2D:
print (group)
###Output
/data/sallen/results/MIDOSS/Lagrangian_DieselFuel_refined_15jan18-22jan18_Diesel.hdf5
|
IBM_skillsnetwork/a2_w1_s3_SparkML_SVM.ipynb | ###Markdown
This notebook is designed to run in a IBM Watson Studio Apache Spark runtime. In case you are running it in an IBM Watson Studio standard runtime or outside Watson Studio, we install Apache Spark in local mode for test purposes only. Please don't use it in production.
###Code
!pip install --upgrade pip
if not ('sc' in locals() or 'sc' in globals()):
    print('It seems you are not running in an IBM Watson Studio Apache Spark Notebook. You might be running in an IBM Watson Studio Default Runtime or outside IBM Watson Studio. Therefore installing a local Apache Spark environment for you. Please do not use in production.')
from pip import main
main(['install', 'pyspark==2.4.5'])
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
sc = SparkContext.getOrCreate(SparkConf().setMaster("local[*]"))
spark = SparkSession \
.builder \
.getOrCreate()
###Output
_____no_output_____
###Markdown
 In case you want to learn how ETL is done, please run the following notebook first and update the file name below accordingly: https://github.com/IBM/coursera/blob/master/coursera_ml/a2_w1_s3_ETL.ipynb
###Code
# delete files from previous runs
!rm -f hmp.parquet*
# download the file containing the data in PARQUET format
!wget https://github.com/IBM/coursera/raw/master/hmp.parquet
# create a dataframe out of it
df = spark.read.parquet('hmp.parquet')
# register a corresponding query table
df.createOrReplaceTempView('df')
splits = df.randomSplit([0.8, 0.2])
df_train = splits[0]
df_test = splits[1]
from pyspark.ml.feature import StringIndexer
from pyspark.ml.feature import OneHotEncoder
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.feature import MinMaxScaler
indexer = StringIndexer(inputCol="class", outputCol="label")
encoder = OneHotEncoder(inputCol="label", outputCol="labelVec")
vectorAssembler = VectorAssembler(inputCols=["x","y","z"],
outputCol="features")
normalizer = MinMaxScaler(inputCol="features", outputCol="features_norm")
from pyspark.ml.classification import LinearSVC
lsvc = LinearSVC(maxIter=10, regParam=0.1)
df.createOrReplaceTempView('df')
df_two_class = spark.sql("select * from df where class in ('Use_telephone','Standup_chair')")
splits = df_two_class.randomSplit([0.8, 0.2])
df_train = splits[0]
df_test = splits[1]
from pyspark.ml import Pipeline
pipeline = Pipeline(stages=[indexer, encoder, vectorAssembler, normalizer,lsvc])
model = pipeline.fit(df_train)
prediction = model.transform(df_train)
from pyspark.ml.evaluation import BinaryClassificationEvaluator
# Evaluate model
evaluator = BinaryClassificationEvaluator(rawPredictionCol="rawPrediction")
evaluator.evaluate(prediction)
prediction = model.transform(df_test)
evaluator = BinaryClassificationEvaluator(rawPredictionCol="rawPrediction")
evaluator.evaluate(prediction)
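# Note: BinaryClassificationEvaluator uses areaUnderROC as its default metric;
# it could also be requested explicitly via metricName="areaUnderROC".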
###Output
_____no_output_____ |
DAY 201 ~ 300/DAY279_[Programmers] 이상한 문자 만들기 (Python).ipynb | ###Markdown
 Saturday, December 19, 2020. Programmers - 이상한 문자 만들기 ("Make Strange Characters") (Python). Problem: https://programmers.co.kr/learn/courses/30/lessons/12930 Blog: https://somjang.tistory.com/entry/Programmers-이상한-문자-만들기-Python First attempt
###Code
def solution(s):
s_split = s.split(" ")
for k in range(len(s_split)):
s_list = list(s_split[k])
for i in range(len(s_list)):
if i % 2 == 0:
s_list[i] = s_list[i].upper()
elif i % 2 == 1:
s_list[i] = s_list[i].lower()
s_split[k] = "".join(s_list)
answer = " ".join(s_split)
return answer
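# Example usage (a hypothetical check, based on the problem statement of
# alternating upper/lower case within each word):
# solution("try hello world")  # expected: "TrY HeLlO WoRlD"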
###Output
_____no_output_____ |
Session3/1.TextProcessing.ipynb | ###Markdown
 Text Mining: Text mining is the process of automatically extracting high-quality information from text. High-quality information is typically derived through devising patterns and trends, for example via statistical pattern learning. Typical text mining applications include: - Text classification (or text categorization), - Text clustering, - Sentiment analysis, - Named entity recognition, etc. In this notebook: 1. Preprocessing: textual normalization, simple tokenization 2. Stopword removal: its importance 3. Verify Zipf's Law with the Ohsumed medical abstract collection --- How to use this notebook: This environment is called [*Jupyter Notebook*](http://jupyter.org/). It has two types of *cells*: * Markdown cells (like this one, where you can write notes) * Code cells. Run code cells by pressing **Shift+Enter**. Let's try...
###Code
# Run me: press Shift+Enter
print("Hello, world!!")
###Output
_____no_output_____
###Markdown
 This is a hands-on session, so it is time for you to write some code. Let's try that.
###Code
# Write code to print any string...
# Then run the code.
###Output
_____no_output_____
###Markdown
 Preprocessing: Upper case, punctuation. A computer does not **require** upper case letters and punctuation. Note: Python already provides a list of punctuation characters. We simply need to import it.
###Code
from string import punctuation
s = "Hello, World!!"
# Write code to lower case the string
s = ...
# Write code to remove punctuations
# HINT: for loop and for each punctuation use string replace() method
for ...
s = ...
print(s)
###Output
_____no_output_____
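###Markdown
 One possible solution for the exercise above (shown only for reference; `punctuation` was imported from the `string` module earlier):
###Code
# possible solution (for reference)
s = "Hello, World!!"
s = s.lower()
for p in punctuation:
    s = s.replace(p, "")
print(s)
###Output
_____no_output_____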
###Markdown
 Tokenization: NLTK. The [Natural Language Toolkit (NLTK)](http://www.nltk.org/) is a platform to work with human or natural language data using Python. As usual, we will first convert everything to lowercase and remove punctuation.
###Code
raw1 = "Grenoble is a city in southeastern France, at the foot of the French Alps, on the banks of Isère."
raw2 = "Grenoble is the capital of the department of Isère and is an important scientific centre in France."
# Write code here to convert everything in lower case and to remove punctuation.
print(raw1)
print(raw2)
# Again, SHIFT+ENTER to run the code.
###Output
_____no_output_____
###Markdown
 NLTK already provides us with modules to easily tokenize text. We will tokenize pieces of raw text using the `word_tokenize` function of the NLTK package.
###Code
import nltk
# Tokenization using NLTK
tokens1 = nltk.word_tokenize(raw1)
tokens2 = nltk.word_tokenize(raw2)
# print the tokens
print(tokens1)
print(tokens2)
###Output
_____no_output_____
###Markdown
 We now build an **NLTK Text** object to store tokenized texts. One or more texts can then be merged to form a **TextCollection**. This provides many useful operations for statistically analyzing a collection of texts.
###Code
# Build NLTK Text objects
text1 = nltk.Text(tokens1)
text2 = nltk.Text(tokens2)
# A list of Text objects
text_list = [text1, text2]
# Build NLTK text collection
text_collection = nltk.text.TextCollection(text_list)
###Output
_____no_output_____
###Markdown
 An NLTK TextCollection object can be used to calculate basic statistics: 1. count the number of occurrences (or term frequency) of a word 2. obtain a frequency distribution of all the words in the text. Note: The NLTK Text objects created in the intermediate steps can also be used to calculate similar statistics at the document level.
###Code
# Frequency of a word
freq = text_collection.count("grenoble")
print("Frequency of word \'grenoble\' = ", freq)
# Frequency distribution
freq_dist = nltk.FreqDist(text_collection)
freq_dist
###Output
_____no_output_____
###Markdown
 Let's automate: write a function. Using the above steps, we will now write a function. We will call this function **raw_to_text**. This function will take a list of raw texts and will return an NLTK TextCollection object representing the list of input texts.
###Code
"""
Converts a list of raw text to a NLTK TextCollection object.
Applies lower-casing and punctuation removal.
Returns:
text_collection - a NLTK TextCollection object
"""
def raw_to_text(raw_list):
text_list = []
for raw in raw_list:
# Write code for lower-casing and punctuation removal
# Write code to tokenize and create NLTK Text object
# Name the variable 'text' to store the Text object
# storing the text in the list
text_list.append(text)
# Write code to create TextCollection from the list text_list
text_collection = nltk.text.TextCollection(text_list) # TO DELETE
# return text collection
return text_collection
###Output
_____no_output_____
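###Markdown
 For reference, one possible completed version of `raw_to_text` is sketched below (the exercise cell above is left untouched):
###Code
# possible completed version of raw_to_text (for reference)
def raw_to_text(raw_list):
    text_list = []
    for raw in raw_list:
        # lower-casing and punctuation removal
        raw = raw.lower()
        for p in punctuation:
            raw = raw.replace(p, "")
        # tokenize and create an NLTK Text object
        tokens = nltk.word_tokenize(raw)
        text = nltk.Text(tokens)
        # storing the text in the list
        text_list.append(text)
    # create the TextCollection from the list of Text objects
    text_collection = nltk.text.TextCollection(text_list)
    return text_collection
###Output
_____no_output_____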
###Markdown
Let's test the function with some sample data
###Code
raw_list_sample = ["The dog sat on the mat.",
"The cat sat on the mat!",
"We have a mat in our house."]
# Call the above raw_to_text function for the sample text
text_collection_sample = ...
###Output
_____no_output_____
###Markdown
Like before we can compute the frequency distribution for this collection.
###Code
# Write code to compute the frequency 'mat' in the collection.
freq = ...
print("Frequency of word \'mat\' = ", freq)
# Write code to compute and display the frequency distribution of text_collection_sample
###Output
_____no_output_____
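###Markdown
 For reference, the two exercise cells above could be completed as follows:
###Code
# possible solution (for reference)
text_collection_sample = raw_to_text(raw_list_sample)
freq = text_collection_sample.count("mat")
print("Frequency of word 'mat' = ", freq)
freq_dist_sample = nltk.FreqDist(text_collection_sample)
freq_dist_sample
###Output
_____no_output_____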
###Markdown
 Something bigger. We will use the [**DBPedia** Ontology Classification Dataset](https://drive.google.com/open?id=0Bz8a_Dbh9QhbQ2Vic1kxMmZZQ1k). It includes the first paragraphs of Wikipedia articles. Each paragraph is assigned one of **14 categories**. Here is an example of an abstract under the *Written Work* category: >The Regime: Evil Advances/Before They Were Left Behind is the second prequel novel in the Left Behind series written by Tim LaHaye and Jerry B. Jenkins. It was released on Tuesday November 15 2005. This book covers more events leading up to the first novel Left Behind. It takes place from 9 years to 14 months before the Rapture. In this hands-on we will use 15,000 documents belonging to three categories, namely *Album*, *Film* and *Written Work*. The file **corpus.txt** supplied here contains 15,000 documents. Each line of the file is a document. Now we will: 1. Load the documents as a list 2. Create an NLTK TextCollection 3. Analyze different counts Note: Each line of the file **corpus.txt** is a document
###Code
# Write code to load documents as a list
"""
Hint 1: open the file using open()
Hint 2: use read() to load the content
Hint 3: use splitlines() to get separate documents
"""
raw_docs = ...
print("Loaded " + str(len(raw_docs)) + " documents.")
# Write code to create a NLTK TextCollection
# Hint: use raw_to_text function
text_collection = ...
# Print total number of words in these documents
print("Total number of words = ", len(text_collection))
print("Total number of unique words = ", len(set(text_collection)))
###Output
_____no_output_____
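###Markdown
 One possible way to complete the loading cell above (assuming `corpus.txt` sits next to the notebook, as described in the text):
###Code
# possible solution (for reference)
with open("corpus.txt") as f:
    raw_docs = f.read().splitlines()
print("Loaded " + str(len(raw_docs)) + " documents.")
text_collection = raw_to_text(raw_docs)
print("Total number of words = ", len(text_collection))
print("Total number of unique words = ", len(set(text_collection)))
###Output
_____no_output_____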
###Markdown
Calculate the freq distribution for this text collection of documents. Then let's see the most common words.
###Code
# Write code to compute frequency distribution of text_collection
freq_dist = ...
# Let's see most common 10 words.
freq_dist.most_common(10)
###Output
_____no_output_____
###Markdown
**Something does not seem right!!** Can you point out what?Let's try by visualizing it.
###Code
# importing Python package for plotting
import matplotlib.pyplot as plt
# To plot
plt.subplots(figsize=(12,10))
freq_dist.plot(30, cumulative=True)
###Output
_____no_output_____
###Markdown
**Observations:** 1. Just 30 most frequent tokens make up around 260,000 out of 709,460 ($\approx 36.5\%$) 2. Most of these are very common words such as articles, pronouns etc. Stop word filtering Stop words are words which are filtered out before or after processing of natural language data (text). There is no universal stop-word list. Often, stop word lists include short function words, such as "the", "is", "at", "which", and "on". Removing stop-words has been shown to increase the performance of different tasks like search. A file of **stop_words.txt** is included. We will now: 1. Load the contents of the file 'stop_words.txt' where each line is a stop word, and create a stop-word list. 2. Modify the function **raw_to_text** to perform (a) stop-word removal (b) numeric words removal Note: Each line of the file **stop_words.txt** is a stop word.
###Code
# Write code to load stop-word list from file 'stop_words.txt'
# Hint: use the same strategy you used to load documents
stopwords = set(...)
"""
VERSION 2
Converts a list of raw text to a NLTK TextCollection object.
Applies lower-casing, punctuation removal and stop-word removal.
Returns:
text_collection: a NLTK TextCollection object
"""
# Write function "raw_to_text_2".
"""
Hint 1: consult the above function "raw_to_text",
Hint 2: add a new block in the function for removing stop words
Hint 3: to remove stop words from a of tokens -
- create an ampty list to store clean tokens
- for each token in the token list:
if the token is not in stop word list
store it in the clean token list
"""
###Output
_____no_output_____
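###Markdown
 A possible implementation of the two pieces above (stop-word list loading and `raw_to_text_2`), shown only for reference:
###Code
# possible solution (for reference)
with open("stop_words.txt") as f:
    stopwords = set(f.read().splitlines())

def raw_to_text_2(raw_list, stopwords):
    text_list = []
    for raw in raw_list:
        # lower-casing and punctuation removal
        raw = raw.lower()
        for p in punctuation:
            raw = raw.replace(p, "")
        # tokenization
        tokens = nltk.word_tokenize(raw)
        # keep tokens that are neither stop words nor purely numeric
        clean_tokens = [t for t in tokens if t not in stopwords and not t.isnumeric()]
        text_list.append(nltk.Text(clean_tokens))
    return nltk.text.TextCollection(text_list)
###Output
_____no_output_____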
###Markdown
Retest our small sample with the new version.
###Code
raw_list_sample = ["The dog sat on the mat.",
"The cat sat on the mat!",
"We have a mat in our house."]
# Write code to obtain and see freq_dist_sample with the new raw_to_text_2
# Note: raw_to_text_2 takes two inputs/arguments
text_collection_sample = ...
freq_dist_sample = ...
freq_dist_sample
###Output
_____no_output_____
###Markdown
 Finally, rerun with the bigger document set and replot the cumulative word frequencies. Recall that we already have the documents loaded in the variable **raw_docs**.
###Code
# Write code to create a NLTK TextCollection with raw_to_text_2
text_collection = ...
# Write code to compute frequency distribution of text_collection
freq_dist = ...
# Write code to plot the frequencies again
###Output
_____no_output_____
###Markdown
 Zipf law. Verify whether the dataset follows Zipf's law by plotting the data on a log-log graph, with the axes being log(rank order) and log(frequency). You expect to obtain an almost straight line.
###Code
import numpy as np
import math
counts = np.array(list(freq_dist.values()))
tokens = np.array(list(freq_dist.keys()))
ranks = np.arange(1, len(freq_dist)+1)
# Obtaining indices that would sort the array in descending order
indices = np.argsort(-counts)
frequencies = counts[indices]
# Plotting the ranks vs frequencies
plt.subplots(figsize=(12,10))
plt.yscale('log')
plt.xscale('log')
plt.title("Zipf plot for our data")
plt.xlabel("Frequency rank of token")
plt.ylabel("Absolute frequency of token")
plt.grid()
plt.plot(ranks, frequencies, 'o', markersize=0.9)
for n in list(np.logspace(-0.5, math.log10(len(counts)-1), 17).astype(int)):
dummy = plt.text(ranks[n], frequencies[n], " " + tokens[indices[n]],
verticalalignment="bottom", horizontalalignment="left")
plt.show()
###Output
_____no_output_____ |
notebooks/04-Persistence-model.ipynb | ###Markdown
Persistence modelThe persistence model is used as a reference model for both the LSTM and the DTW measure.__Remark: Make sure that the previous notebooks have at least ran once to ensure the necessary files exists__
###Code
import sys
import numpy as np
import pandas as pd
sys.path.append('../')
from src.data.build_input import controlled_train_test_split
from src.dtw.dtw_measure import dtw_measure
from src.model.metrics import evaluate
###Output
_____no_output_____
###Markdown
Start by reading in the data
###Code
startdate = '14-01-2001'
enddate = '01-01-2016'
data = pd.read_hdf('../data/interim/data.h5', 'data')
data = data[startdate:enddate]
_, test = controlled_train_test_split(data)
output = 'Dst'
time_forward = 6
###Output
_____no_output_____
###Markdown
To make the method as accurate as possible, we first divide the data into continuous blocks.
###Code
def extract_cont_intervals_from_index(index):
r'''Check lookup table for time discontinuities
output:
        Returns list of continuous times inside the lookup table
'''
min_size = 10
timeseries = []
p = True
series = index
while len(series) > 0:
        # We can assume that the series starts from non-missing values, so the first diff gives sizes of continuous intervals
diff = pd.date_range(series[0], series[-1], freq='H').difference(series)
if len(diff) > 0:
if pd.Timedelta(diff[0] - pd.Timedelta('1h') - series[0])/pd.Timedelta('1h') > min_size:
v1 = np.datetime64(series[0])
v2 = np.datetime64(diff[0] - pd.Timedelta('1h'))
timeseries.append([v1, v2])
if pd.Timedelta(series[-1] - diff[-1] - pd.Timedelta('1h'))/pd.Timedelta('1h') > min_size:
v1 = np.datetime64(diff[-1] + pd.Timedelta('1h'))
v2 = np.datetime64(series[-1])
timeseries.append([v1, v2])
diff = pd.date_range(diff[0], diff[-1], freq='H').difference(diff)
else:
# Only when diff is empty
v1 = np.datetime64(series[0])
v2 = np.datetime64(series[-1])
timeseries.append([v1, v2])
series = diff
return np.array(timeseries)
###Output
_____no_output_____
###Markdown
Define the persistence model
###Code
def persistence_predict(data, time):
'''Forecast a given feature for a given forecast time
Input:
data: pandas dataframe containing all the to be forecasted features
time: time to be forecasted
Output:
        res: pandas dataframe
'''
res = data.shift(time)
return res
###Output
_____no_output_____
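###Markdown
 A tiny illustration of what the shift does, using a hypothetical toy series (not part of the original data): the forecast at each row is simply the value observed `time` steps earlier.
###Code
# toy example (hypothetical values, for illustration only)
toy = pd.DataFrame({"Dst": [0, -5, -12, -30, -18]})
persistence_predict(toy, 2)
###Output
_____no_output_____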
###Markdown
Now we can apply the dtw measure to every continuous block extracted from the previous method.
###Code
def persistence_dtw_measure(data, time_forward):
# Allow only one feature at the time
assert(data.shape[1] == 1)
pers = data.copy()
for i in range(time_forward):
pers['T_{}'.format(i+1)] = persistence_predict(data, i+1)
pers = pers.dropna() # remove NaN-values
intervals = extract_cont_intervals_from_index(pers.index)
bincounts = np.zeros((time_forward,7))
length = intervals.shape[0]
for num, (start, stop) in enumerate(intervals):
print('{} out of {} blocks'.format(num+1, length))
month = pers[start:stop]
for i in range(time_forward):
_, path, _ = dtw_measure(month['T_{}'.format(i+1)].to_numpy(), month.iloc[:,0].to_numpy(), 6)
bins, counts = np.unique(abs(path[0, :] - path[1, :]), return_counts=True)
bincounts[i, bins] += counts
bincounts = pd.DataFrame(data=bincounts, index=np.arange(1, time_forward+1), columns=np.arange(7))
return bincounts
###Output
_____no_output_____
###Markdown
 The persistence model can also be evaluated with the metrics defined by Liemohn et al. The method below combines both the DTW measure and this metric evaluation.
###Code
def persistence_eval(features, time_forward, dtw=True):
r'''Evaluation of the persistence model.
This model does the standard metric test, together with a dtw count.
The dtw count keeps into consideration discontinuities, splitting the data
in continuous pieces first.
Evaluates times [1, 2, ..., time_forward]
Input:
data: Pandas dataframe with DateTime index and to be forecasted features
time_forward: Number of hours evaluated
dtw: boolean, run dtw measure when true
Output:
dtw-result is written to a file directly
res: Metric evaluation
'''
if dtw:
bincounts = persistence_dtw_measure(features, time_forward)
else:
bincounts = None
data_all = np.repeat(features.to_numpy()[time_forward+1:-time_forward], time_forward, axis=1)
pers_all = np.zeros(data_all.shape)
for i, t in enumerate(range(1, 1+time_forward)):
persist = persistence_predict(features, t)
pers_all[:, t-1] = persist.to_numpy()[time_forward+1:-time_forward, 0]
i += 1
res = evaluate(pers_all, data_all)
return res, bincounts
pers_res, bincounts = persistence_eval(test[[output]], 6)
###Output
1 out of 45
2 out of 45
3 out of 45
4 out of 45
5 out of 45
6 out of 45
7 out of 45
8 out of 45
9 out of 45
10 out of 45
11 out of 45
12 out of 45
13 out of 45
14 out of 45
15 out of 45
16 out of 45
17 out of 45
18 out of 45
19 out of 45
20 out of 45
21 out of 45
22 out of 45
23 out of 45
24 out of 45
25 out of 45
26 out of 45
27 out of 45
28 out of 45
29 out of 45
30 out of 45
31 out of 45
32 out of 45
33 out of 45
34 out of 45
35 out of 45
36 out of 45
37 out of 45
38 out of 45
39 out of 45
40 out of 45
41 out of 45
42 out of 45
43 out of 45
44 out of 45
45 out of 45
###Markdown
 Here the results are displayed: the first table shows the metric results, the second presents the DTW measure results.
###Code
new_ind = dict(list(enumerate(['t+{}'.format(i+1) for i in range(6)])))
pd.DataFrame.from_dict(pers_res).rename(index=new_ind)
bincounts
def reformat_dtw_res(df, filename=None):
'''Normalize the result from the dtw measure
'''
res = df.div(df.sum(axis=1), axis=0)
shifts = np.array(['t+{}h'.format(i+1) for i in np.arange(res.shape[0])])
res['Prediction'] = shifts.T
res = res.set_index('Prediction')
res.columns = ['{}h'.format(i) for i in res.columns]
res = res.apply(lambda x: round(x, 3))
if filename:
res.to_csv('{}reformated_{}'.format(path, filename))
return res
reformat_dtw_res(bincounts)
###Output
_____no_output_____ |
Day6/5_Lists_Tuples.ipynb | ###Markdown
 Lists--> A list is similar to an array. > It can contain variables of different types.
###Code
# empty list
# type your code here3
l=[]
print(l)
# type your code here1
st = [1,2,3,4]
print(st)
# type your code here2
st = [1,22.33,"Machine"]
print(st)
# type your code here
print(type(st))
st = ['A','B','C','D','E']
# type your code here4
print(st[0])
print(st[3])
print(st[-1])
print('A' in st)
print('X' in st)
###Output
A
D
E
True
False
###Markdown
 Lists are mutable. This means we can change an item in a list by assigning a new value to it through its index.
###Code
st = ['A','B','C','D','E']
print(st)
# type your code here5
st[1]=88
print(st)
# type your code here6
'B' in st
# List of lists
l=[1,2,[100,200,300],22.3,[99,100,[44,32],21],76]
# type your code here1
print(l)
print(l[0])
print(l[2])
print(l[2][0])
print(l[2][2])
print(l[4][2][1])
# type your code here2
l=(1,2,[100,200,300],22.3,[99,100,[44,32],21],76)
print(l[4][2][1])
l=[1,2,3,2,4,5]
print(l)
# type your code here
l.remove(2)
print(l)
l.remove(2)
print(l)
l=[1,2,3,4,5]
l.remove(22)
print(l)
# type your code here
l=[1,2,3,4,5]
print(l)
del l[1]
print(l)
# Concatenation
l1=[1,2,3]
l2=[11,12,13]
# type your code here18
l1+l2
###Output
_____no_output_____
###Markdown
Traversing a list--
###Code
names=["Darshan","Python Trainer", "Deep Learning Trainer", "ML Trainer"]
# type your code here7
for i in names:
print(i)
# pop method
s=[1,2,3,4,5,6,7,55,44]
# type your code here
s.pop()
print(s)
# type your code here21
s.pop(1)
s
# type your code here
l1=[3,2,1,5,4]
print(l1)
print(l1[:])
print(min(l1))
print(max(l1))
l1.insert(2,999)
print(l1)
l1.append(777)
print(l1)
l1.sort()
print(l1)
l1.reverse()
print(l1)
l1.clear()
print(l1)
l = [10,20]
c = ["a","b"]
print(l)
print(c)
l.extend(c)
print(l)
# * means repetition
# type your code here
[0]*5
# type your code here
[1,"Lets Upgrade",5]*5
###Output
_____no_output_____
###Markdown
 Tuples--> A tuple is similar to a list. However, in tuples, we cannot change the elements after assignment, i.e. they are immutable. > Each element or value inside the tuple is called an item.
###Code
# type your code here
s = ('y','v','angle')
print(s)
print(type(s))
# empty tuple
# type your code here
t = ()
print(t)
print(type(t))
# type your code here3
t = (1.0)
print(t)
print(type(t))
# A single-value tuple must use a trailing comma
# type your code here
t=(1,)
print(t)
print(type(t))
l = [1.0]
print(type(l))
# type your code here
t = (1,2,33.4443,5,6)
# type your code here
print(t[1:3])
print(t[1:])
print(t[:4])
print(t[::-1])
print(t[-5:-1])
t1 = (1,2,3)
t2 = (4,5,6)
t1+t2
t1*4
3 in t1
len(t1)
max(t1)
min(t1)
###Output
_____no_output_____
###Markdown
Variable Length Arguments.--
###Code
def var(*args):
print(args)
# type your code here
var(1)
var(1,2,3)
var("darshan","Lets Upgrade",32)
var()
# + concatenation
# * repetition
t1=(1,2,3)
t2=(5,6,4,1)
print(t1+t2)
print(t1*3)
print(len(t1))
print(max(t1))
print(min(t1))
print(tuple([1,2,3,4,5,6]))
###Output
(1, 2, 3, 5, 6, 4, 1)
(1, 2, 3, 1, 2, 3, 1, 2, 3)
3
3
1
(1, 2, 3, 4, 5, 6)
|
files/02_HackyHour_2017-03-14/hackyhour-notebook.ipynb | ###Markdown
 What are jupyter-notebooks? * Documents that contain rich text and executable code * Browser-based app that allows editing and execution of notebooks * Kernels as "computational engines" that execute the code * (Dashboard as file browser for notebooks) Commanding (running) and editing cells: Notebook content is structured in different *cells* of arbitrary size. Enter *command mode* with the __ESC__ key -> navigate between cells, copy & paste content, run cells... Enter *editing mode* with the __RETURN__ key -> normal text editing. Cell content: markdown. Make a cell a *markdown* cell with the __M__ key (in command mode) -> write headings, bullet points, emphasis, LaTeX... this is a new heading *this is italic text* * this is a list of * some * bullet points fancy latex $\Rightarrow\mu\nabla^2\underbrace{\left[\frac{\partial^2w}{\partial X^2} + \frac{\partial^2w}{\partial Y^2} + \frac{\partial^2w}{\partial Z^2}\right.}_{=\nabla^2w} - \frac{\partial}{\partial Z}\underbrace{\left.\left(\frac{\partial u}{\partial X} + \frac{\partial v}{\partial Y} + \frac{\partial w}{\partial Z}\right)\right]}_{\overset{(4)}{=} 0}= g\underbrace{\left(\frac{\partial^2\theta}{\partial Y^2} + \frac{\partial\theta^2}{\partial X^2}\right)}_{=\nabla^2\theta - \frac{\partial^2\theta}{\partial Z^2}}$ Cell content: code. Make a cell a *code* cell with the __Y__ key -> write normal python code; cells share a single namespace over the whole document
###Code
#comments and numbers work normally
1 + 1
#strings too
s = 'hello hacky people'
s
#variables can be assigned and will be known to all cells below this one
a,b = (10,10)
#output without print-statement only works if at the end of a cell
print(a+b)
#functions and classes can be defined like normal
def MyFunction(a,b):
return [a+b,a-b,a*b,a/b]
MyFunction(a,b)
###Output
20
###Markdown
 Libraries: Jupyter-notebooks have access to all Python libraries in your Python distribution!
###Code
import numpy as np
c = np.arange(0,16).reshape((4,4))
d = np.ones((4,4))
#numpy arrays work!
print('marix c:\n',c)
print('matrix d:\n',d)
#a is still known from before... see?
print('scalar times matrix:\n',a*c)
print('matrix times matrix:\n',np.dot(c,d))
#by the way: error messages and tracebacks work as well... :/
import seaborn as sns
#get the package via "conda install seaborn" in your terminal
import matplotlib.pyplot as plt
#allows plots to be shown embedded in the notebook
%matplotlib inline
#create a beautiful poser-plot
x = np.linspace(0, 2 * np.pi, 500)
y1 = np.sin(x)
y2 = np.sin(3 * x)
fig, ax = plt.subplots()
nice_plot = ax.fill(x, y1, 'b', x, y2, 'r', alpha=0.3)
###Output
_____no_output_____
###Markdown
other nice things
###Code
#magic commands are available
%timeit(a*b)
#run other notebooks or .py files
%run polar-chart.ipynb
#direct access to docstrings
?np.reshape()
###Output
_____no_output_____ |
MongoDB-MapReduce.ipynb | ###Markdown
Importing libraries
###Code
import pymongo
from pymongo import MongoClient
import json
import requests
from bson.code import Code
###Output
_____no_output_____
###Markdown
 Establishing connection with MongoDB
###Code
client = MongoClient("mongodb://localhost:27017/")
###Output
_____no_output_____
###Markdown
Reads "reviews_electronics.16.json" and uploads each review as a separate document to the collection "reviews" in the database "amazon". Creating a database called 'amazon'
###Code
amazon_db = client["amazon"]
###Output
_____no_output_____
###Markdown
Creating a collection called "reviews"
###Code
reviews_collection = amazon_db["reviews"]
###Output
_____no_output_____
###Markdown
Loading the data into a list of dictionaries
###Code
reviews_list = []
for line in open('reviews_electronics.16.json', 'r'):
reviews_list.append(json.loads(line))
type(reviews_list)
###Output
_____no_output_____
###Markdown
 Saving the data into the MongoDB "amazon" database, "reviews" collection
###Code
reviews_collection.insert_many(reviews_list)
reviews_list[3]
###Output
_____no_output_____
###Markdown
Uses MongoDB's map reduce function to build a new collection "avg_scores" that averages review scores by product ("asin"). Print the first 100 entries of "avg_scores" to screen. Making the Map function for finding average
###Code
map_1 = Code( "function () { emit(this.asin, this.overall) }")
###Output
_____no_output_____
###Markdown
Making the Reduce function for finding average
###Code
reduce_1 = Code("function(asin, overall) { return Array.avg(overall) }")
###Output
_____no_output_____
###Markdown
Running the MapReduce function
###Code
results_1 = reviews_collection.map_reduce(map_1, reduce_1, out="avg_scores")
###Output
_____no_output_____
###Markdown
Checking the collection names in the database
###Code
amazon_db.collection_names()
resul1_first100 = amazon_db.avg_scores.find({}).limit(100)
###Output
_____no_output_____
###Markdown
Printing the first 100 averages
###Code
for x in resul1_first100:
print('Product = {0} \n Average Score = {1}\n'.format(x['_id'], x['value']))
###Output
Product = 0132793040
Average Score5.0
Product = B00E4KP4W6
Average Score4.545454545454546
Product = B00E4KP8VI
Average Score5.0
Product = B00E4KPMC8
Average Score2.0
Product = B00E4KQ5C4
Average Score5.0
Product = B00E4KQ9GG
Average Score3.2857142857142856
Product = B00E4KQ9K2
Average Score5.0
Product = B00E4KQD4E
Average Score4.0
Product = B00E4KZBX8
Average Score4.0
Product = B00E4KZDJ0
Average Score5.0
Product = B00E4L35DA
Average Score4.0
Product = B00E4L3N9Q
Average Score4.0
Product = B00E4L48EA
Average Score5.0
Product = B00E4L7FLI
Average Score1.0
Product = B00E4L7TS2
Average Score4.0
Product = B00E4LAL82
Average Score3.0
Product = B00E4LBZZK
Average Score5.0
Product = B00E4LF2Z4
Average Score4.333333333333333
Product = B00E4LFP0G
Average Score4.444444444444445
Product = B00E4LFWWW
Average Score4.4
Product = B00E4LGTVU
Average Score4.195658625514055
Product = B00E4LGTXS
Average Score3.923076923076923
Product = B00E4LGVYA
Average Score3.272727272727273
Product = B00E4LGWLW
Average Score3.5
Product = B00E4LGXL6
Average Score5.0
Product = B00E4LGY88
Average Score3.8421052631578947
Product = B00E4LI86O
Average Score1.0
Product = B00E4LJ8VI
Average Score1.6666666666666667
Product = B00E4LQ9B0
Average Score1.0
Product = B00E4M2K08
Average Score5.0
Product = B00E4M3KW0
Average Score4.0
Product = B00E4M9H40
Average Score3.3333333333333335
Product = B00E4MC3LO
Average Score2.0
Product = B00E4MHBOI
Average Score3.0
Product = B00E4ML766
Average Score5.0
Product = B00E4MNXYA
Average Score5.0
Product = B00E4MQO8C
Average Score4.933333333333334
Product = B00E4MQODW
Average Score2.6666666666666665
Product = B00E4MQOE6
Average Score4.611111111111111
Product = B00E4MT07Y
Average Score1.6
Product = B00E4MVHTI
Average Score3.9
Product = B00E4MYDTY
Average Score4.538461538461538
Product = B00E4NC912
Average Score5.0
Product = B00E4O3CD0
Average Score4.0
Product = B00E4O7EO8
Average Score4.2
Product = B00E4OCCJK
Average Score5.0
Product = B00E4OCECU
Average Score3.0
Product = B00E4OHBNM
Average Score4.0
Product = B00E4OHRJ0
Average Score4.0
Product = B00E4OI5H8
Average Score5.0
Product = B00E4OKJKY
Average Score5.0
Product = B00E4OKJUE
Average Score1.0
Product = B00E4OKZA8
Average Score5.0
Product = B00E4ON1YK
Average Score5.0
Product = B00E4OSIO8
Average Score5.0
Product = B00E4PM406
Average Score4.0
Product = B00E4PMDIO
Average Score5.0
Product = B00E4POW42
Average Score5.0
Product = B00E4PP8PY
Average Score4.0
Product = B00E4QD7D8
Average Score4.0
Product = B00E4QM3TC
Average Score1.0
Product = B00E4QX5J4
Average Score2.0
Product = B00E4RD4VC
Average Score4.375
Product = B00E4RIYPI
Average Score4.5
Product = B00E4RKKVY
Average Score4.0
Product = B00E4RS3DG
Average Score4.5
Product = B00E4RUZGO
Average Score2.0
Product = B00E4RZOQ0
Average Score5.0
Product = B00E4RZQM2
Average Score2.6
Product = B00E4RZU00
Average Score4.0
Product = B00E4RZV6S
Average Score3.75
Product = B00E4RZW44
Average Score3.0
Product = B00E4RZYMO
Average Score1.0
Product = B00E4S5BQ2
Average Score5.0
Product = B00E4SDU0Q
Average Score1.0
Product = B00E4SEBAY
Average Score5.0
Product = B00E4SPPHW
Average Score3.6666666666666665
Product = B00E4T58NC
Average Score2.676470588235294
Product = B00E4T699E
Average Score3.0
Product = B00E4T69DK
Average Score5.0
Product = B00E4T6MWI
Average Score2.0
Product = B00E4T7GP0
Average Score2.0
Product = B00E4T7VOG
Average Score5.0
Product = B00E4T8XZC
Average Score4.666666666666667
Product = B00E4TADN2
Average Score3.0
Product = B00E4TASKK
Average Score3.5
Product = B00E4TBST0
Average Score5.0
Product = B00E4TEKC2
Average Score5.0
Product = B00E4TKYOU
Average Score5.0
Product = B00E4TN3MA
Average Score1.0
Product = B00E4TOWR0
Average Score3.0
Product = B00E4TV36I
Average Score5.0
Product = B00E4TWMWC
Average Score1.0
Product = B00E4U83B0
Average Score3.1333333333333333
Product = B00E4UA7SW
Average Score4.571428571428571
Product = B00E4UD9TQ
Average Score4.666666666666667
Product = B00E4UGIVC
Average Score3.0
Product = B00E4UGJV6
Average Score4.0
Product = B00E4UIU1I
Average Score5.0
Product = B00E4UVVYG
Average Score5.0
###Markdown
Uses MongoDB's map reduce function to build a new collection "weighted_avg_scores" that averages review scores by product ("asin"), weighted by the number of helpful votes (The base weight is 1 and for every additional helpful vote add 1 to weight. e.g. a "[3, 5]" value on "helpful" column should use 3 + 1 = 4 as weight, 3 being the additional votes and 1 being the base weight). Print the first 100 entries of "weighted_avg_scores" to screen. Making the Map function for finding weighted average
###Code
map_2 = Code('''function(){
var wtp1=this.helpful[0]+1;
var value= {
oa: wtp1*this.overall,
wt: wtp1
};
emit(this.asin,value);
};''')
###Output
_____no_output_____
###Markdown
Making the Reduce function for finding weighted average
###Code
reduce_2 = Code('''function (key, values) {
reducedVal= { oa: 0, wt: 0};
for (var i=0; i<values.length; i++) {
reducedVal.oa+=values[i].oa;
reducedVal.wt+=values[i].wt;
}
return reducedVal;
};''')
###Output
_____no_output_____
###Markdown
Making the Finalize function for finding weighted average
###Code
finalize_2 = Code('''function (key, reducedVal) {
reducedVal.wtavg= reducedVal.oa/reducedVal.wt;
return reducedVal.wtavg;
};''')
###Output
_____no_output_____
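###Markdown
 Before running the job, the weighting logic can be sanity-checked outside MongoDB (a minimal sketch; it assumes pandas is installed and reuses the `reviews_list` already loaded above):
###Code
# optional cross-check of the weighting logic in pandas (for reference)
import pandas as pd
checks = pd.DataFrame(
    [{"asin": r["asin"], "overall": r["overall"], "weight": r["helpful"][0] + 1}
     for r in reviews_list]
)
checks["weighted"] = checks["overall"] * checks["weight"]
(checks.groupby("asin")["weighted"].sum() / checks.groupby("asin")["weight"].sum()).head()
###Output
_____no_output_____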
###Markdown
Running the MapReduce function to calculate weighted average
###Code
results_2 = reviews_collection.map_reduce(map_2, reduce_2, out="weighted_avg_score", finalize=finalize_2)
###Output
_____no_output_____
###Markdown
Checking the collection names in the database
###Code
amazon_db.collection_names()
result2_first100 = amazon_db.weighted_avg_score.find({}).limit(100)
###Output
_____no_output_____
###Markdown
Printing the first 100 averages
###Code
for y in result2_first100:
print('Product = {0} \n Weighted Average Score = {1}\n'.format(y['_id'], y['value']))
###Output
Product = 0132793040
Weighted Average Score = 5.0
Product = B00E4KP4W6
Weighted Average Score = 4.684210526315789
Product = B00E4KP8VI
Weighted Average Score = 5.0
Product = B00E4KPMC8
Weighted Average Score = 2.0
Product = B00E4KQ5C4
Weighted Average Score = 5.0
Product = B00E4KQ9GG
Weighted Average Score = 3.6875
Product = B00E4KQ9K2
Weighted Average Score = 5.0
Product = B00E4KQD4E
Weighted Average Score = 4.0
Product = B00E4KZBX8
Weighted Average Score = 4.0
Product = B00E4KZDJ0
Weighted Average Score = 5.0
Product = B00E4L35DA
Weighted Average Score = 3.0
Product = B00E4L3N9Q
Weighted Average Score = 4.0
Product = B00E4L48EA
Weighted Average Score = 5.0
Product = B00E4L7FLI
Weighted Average Score = 1.0
Product = B00E4L7TS2
Weighted Average Score = 4.0
Product = B00E4LAL82
Weighted Average Score = 3.0
Product = B00E4LBZZK
Weighted Average Score = 5.0
Product = B00E4LF2Z4
Weighted Average Score = 4.153846153846154
Product = B00E4LFP0G
Weighted Average Score = 4.434782608695652
Product = B00E4LFWWW
Weighted Average Score = 4.4
Product = B00E4LGTVU
Weighted Average Score = 3.9064516129032256
Product = B00E4LGTXS
Weighted Average Score = 4.0
Product = B00E4LGVYA
Weighted Average Score = 3.8
Product = B00E4LGWLW
Weighted Average Score = 3.8181818181818183
Product = B00E4LGXL6
Weighted Average Score = 5.0
Product = B00E4LGY88
Weighted Average Score = 4.248407643312102
Product = B00E4LI86O
Weighted Average Score = 1.0
Product = B00E4LJ8VI
Weighted Average Score = 1.6666666666666667
Product = B00E4LQ9B0
Weighted Average Score = 1.0
Product = B00E4M2K08
Weighted Average Score = 5.0
Product = B00E4M3KW0
Weighted Average Score = 4.0
Product = B00E4M9H40
Weighted Average Score = 3.25
Product = B00E4MC3LO
Weighted Average Score = 2.0
Product = B00E4MHBOI
Weighted Average Score = 3.0
Product = B00E4ML766
Weighted Average Score = 5.0
Product = B00E4MNXYA
Weighted Average Score = 5.0
Product = B00E4MQO8C
Weighted Average Score = 4.978260869565218
Product = B00E4MQODW
Weighted Average Score = 2.6470588235294117
Product = B00E4MQOE6
Weighted Average Score = 4.674418604651163
Product = B00E4MT07Y
Weighted Average Score = 1.3125
Product = B00E4MVHTI
Weighted Average Score = 4.0
Product = B00E4MYDTY
Weighted Average Score = 4.133333333333334
Product = B00E4NC912
Weighted Average Score = 5.0
Product = B00E4O3CD0
Weighted Average Score = 4.0
Product = B00E4O7EO8
Weighted Average Score = 4.230769230769231
Product = B00E4OCCJK
Weighted Average Score = 5.0
Product = B00E4OCECU
Weighted Average Score = 3.5
Product = B00E4OHBNM
Weighted Average Score = 4.0
Product = B00E4OHRJ0
Weighted Average Score = 4.0
Product = B00E4OI5H8
Weighted Average Score = 5.0
Product = B00E4OKJKY
Weighted Average Score = 5.0
Product = B00E4OKJUE
Weighted Average Score = 1.0
Product = B00E4OKZA8
Weighted Average Score = 5.0
Product = B00E4ON1YK
Weighted Average Score = 5.0
Product = B00E4OSIO8
Weighted Average Score = 5.0
Product = B00E4PM406
Weighted Average Score = 4.0
Product = B00E4PMDIO
Weighted Average Score = 5.0
Product = B00E4POW42
Weighted Average Score = 5.0
Product = B00E4PP8PY
Weighted Average Score = 4.0
Product = B00E4QD7D8
Weighted Average Score = 4.0
Product = B00E4QM3TC
Weighted Average Score = 1.0
Product = B00E4QX5J4
Weighted Average Score = 2.0
Product = B00E4RD4VC
Weighted Average Score = 4.545454545454546
Product = B00E4RIYPI
Weighted Average Score = 4.333333333333333
Product = B00E4RKKVY
Weighted Average Score = 4.0
Product = B00E4RS3DG
Weighted Average Score = 4.5
Product = B00E4RUZGO
Weighted Average Score = 2.2941176470588234
Product = B00E4RZOQ0
Weighted Average Score = 5.0
Product = B00E4RZQM2
Weighted Average Score = 2.4285714285714284
Product = B00E4RZU00
Weighted Average Score = 3.7857142857142856
Product = B00E4RZV6S
Weighted Average Score = 3.75
Product = B00E4RZW44
Weighted Average Score = 3.0
Product = B00E4RZYMO
Weighted Average Score = 1.0
Product = B00E4S5BQ2
Weighted Average Score = 5.0
Product = B00E4SDU0Q
Weighted Average Score = 1.0
Product = B00E4SEBAY
Weighted Average Score = 5.0
Product = B00E4SPPHW
Weighted Average Score = 2.689655172413793
Product = B00E4T58NC
Weighted Average Score = 3.206060606060606
Product = B00E4T699E
Weighted Average Score = 3.0
Product = B00E4T69DK
Weighted Average Score = 5.0
Product = B00E4T6MWI
Weighted Average Score = 2.0
Product = B00E4T7GP0
Weighted Average Score = 2.0
Product = B00E4T7VOG
Weighted Average Score = 5.0
Product = B00E4T8XZC
Weighted Average Score = 4.75
Product = B00E4TADN2
Weighted Average Score = 3.0
Product = B00E4TASKK
Weighted Average Score = 3.5
Product = B00E4TBST0
Weighted Average Score = 5.0
Product = B00E4TEKC2
Weighted Average Score = 5.0
Product = B00E4TKYOU
Weighted Average Score = 5.0
Product = B00E4TN3MA
Weighted Average Score = 1.0
Product = B00E4TOWR0
Weighted Average Score = 3.0
Product = B00E4TV36I
Weighted Average Score = 5.0
Product = B00E4TWMWC
Weighted Average Score = 1.0
Product = B00E4U83B0
Weighted Average Score = 2.7
Product = B00E4UA7SW
Weighted Average Score = 4.769230769230769
Product = B00E4UD9TQ
Weighted Average Score = 4.666666666666667
Product = B00E4UGIVC
Weighted Average Score = 3.0
Product = B00E4UGJV6
Weighted Average Score = 4.0
Product = B00E4UIU1I
Weighted Average Score = 5.0
Product = B00E4UVVYG
Weighted Average Score = 5.0
|
CTR Prediction/RS_Kaggle_create_train_and_val.ipynb | ###Markdown
 In this notebook we create the train and validation sets on which we trained our CatBoost and XGBoost models. We produce two kinds of files: one based on the newest rows and one based on the distribution of the feature user_target_recs. Mount drive
###Code
from google.colab import drive
drive.mount("/content/drive")
import os
import pandas as pd
home_path = "/content/drive/MyDrive/RS_Kaggle_Competition"
test_path = home_path + "/" + "test" + "/test_file.csv"
test_df = pd.read_csv(test_path)
test_df.head()
print(f"test shape: {test_df.shape}")
# get the distribution of user_target_recs
print("user_target_recs histogram:")
test_df["user_target_recs"].hist()
test_df["user_target_recs"].value_counts()
# get the distribution of user_recs
print("user_recs_histogram")
test_df["user_recs"].hist()
test_df["user_recs"].value_counts()
###Output
user_recs_histogram
###Markdown
 Data starting at the 17th of April. We take only new data, starting on 17.4.2020.
###Code
def get_train_files_paths(path):
dir_paths = [ os.path.join(path, dir_name) for dir_name in os.listdir(path) if dir_name.startswith("train")]
file_paths = []
for dir_path in dir_paths:
curr_dir_file_paths = [ os.path.join(dir_path, file_name) for file_name in os.listdir(dir_path) ]
file_paths.extend(curr_dir_file_paths)
return file_paths
train_file_paths = get_train_files_paths(home_path)
def get_data_starting_at(train_file_paths, starting_day=17, max_rows=4000000):
df=None
i=0
for train_file_path in train_file_paths:
curr_df = pd.read_csv(train_file_path)
curr_df = curr_df[ pd.to_datetime(curr_df["page_view_start_time"], unit='ms').dt.day >= starting_day]
if df is None:
df = curr_df
else:
df = pd.concat([df,curr_df])
print(f"processed file {i}, now have shape: {df.shape}")
if df.shape[0] > max_rows:
return df
del curr_df
i+=1
return df
train_data = get_data_starting_at(train_file_paths[:-10], starting_day=17)
train_data = train_data.iloc[:4000000]
val_data = get_data_starting_at(train_file_paths[-10:], starting_day=17, max_rows=1000000)
save_train_path = "/content/drive/MyDrive/RS_Kaggle_Competition/train_val_with_distribution/train_new_data.csv"
save_val_path = "/content/drive/MyDrive/RS_Kaggle_Competition/train_val_with_distribution/val_new_data.csv"
train_data.to_csv(save_train_path)
val_data.to_csv(save_val_path)
pd.to_datetime(val_data["page_view_start_time"], unit='ms').dt.day.unique()
###Output
_____no_output_____
###Markdown
Create train 10 times bigger than test with same dist on user_target_recs
###Code
def get_train_files_paths(path):
dir_paths = [ os.path.join(path, dir_name) for dir_name in os.listdir(path) if dir_name.startswith("train")]
file_paths = []
for dir_path in dir_paths:
curr_dir_file_paths = [ os.path.join(dir_path, file_name) for file_name in os.listdir(dir_path) ]
file_paths.extend(curr_dir_file_paths)
return file_paths
train_file_paths = get_train_files_paths(home_path)
def reached_desired_dist(df, dist_dict, dist_col):
curr_dict = df[dist_col].value_counts()
for key,val in dist_dict.items():
if curr_dict[key] < val:
return False
return True
def get_train_df_with_dist(train_file_paths, test_df, dist_col, times=10):
values_dict = test_df.value_counts(dist_col)
# print(values_dict.keys())
# return values_dict
df = None
#multiply values by times factor
for key, val in values_dict.items():
values_dict[key] = val * times
print("needed distribution:")
print(values_dict)
for train_file_path in train_file_paths:
if df is None:
df = pd.read_csv(train_file_path)
df = df[df[dist_col].isin(values_dict.keys())]
else:
curr_df = pd.read_csv(train_file_path)
for key in values_dict.keys():
curr_key_needed_target_recs = values_dict[key]
if df[df[dist_col] == key].shape[0] > curr_key_needed_target_recs:
continue
else:
curr_key_df = curr_df[curr_df[dist_col] == key]
df = pd.concat([df, curr_key_df])
if reached_desired_dist(df, values_dict, dist_col):
return df
print(df[dist_col].value_counts())
return df
train_df = get_train_df_with_dist(train_file_paths, test_df, "user_target_recs", times = 15)
val_df = get_train_df_with_dist(train_file_paths[-10:], test_df, "user_target_recs", times = 3)
print(train_df["user_target_recs"].value_counts(normalize=True))
print(val_df["user_target_recs"].value_counts(normalize=True))
print(test_df["user_target_recs"].value_counts(normalize=True))
save_train_path = "/content/drive/MyDrive/RS_Kaggle_Competition/train_val_with_distribution/train_15_time.csv"
save_val_path = "/content/drive/MyDrive/RS_Kaggle_Competition/train_val_with_distribution/val_3_times.csv"
train_df.to_csv(save_train_path)
val_df.to_csv(save_val_path)
###Output
_____no_output_____ |
examples/Tutorials/Tutorial.ipynb | ###Markdown
 Tutorial Notebook This notebook gives a tutorial on how to use the realpy package to perform reinforcement learning. Here, we use the two algorithms in realpy: 1. Gaussian Process Batch Upper Confidence Bound (GP-BUCB): uses a Gaussian Process model to predict the expected metric and subsequently selects the inputs that are expected to yield the highest metric. The hyperparameter beta can be tuned to accentuate exploration versus exploitation of the input parameter space. 2. Genetic Algorithm (GA): tests different inputs, or actions, following a genetic algorithm, which maximizes fitness while also including random crossover and mutations. Both algorithms utilize batch mode. The experiment we will test in this tutorial is to mix different concentrations of red, blue, and green dye to achieve a desired UV/Vis spectrum.
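As a rough illustration of the role of beta (a hedged sketch, not taken from the realpy source; `mu` and `sigma` stand for an assumed GP posterior mean and standard deviation over candidate inputs), a GP-UCB style acquisition score has the form mu + sqrt(beta) * sigma, so a larger beta puts more weight on uncertain, unexplored inputs:
###Code
import numpy as np

def ucb_score(mu, sigma, beta=1.5):
    """Upper-confidence-bound score for each candidate input (illustrative only)."""
    return mu + np.sqrt(beta) * sigma

mu = np.array([0.80, 0.75, 0.60])     # illustrative posterior means of the metric
sigma = np.array([0.01, 0.10, 0.30])  # illustrative posterior standard deviations
best_candidate = np.argmax(ucb_score(mu, sigma, beta=1.5))
###Output
_____no_output_____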
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os  # used by the Environment_IO example below to check for result files
import realpy.UCB.ucb as ucb
import realpy.genetic.genetic as genetic
from sklearn.metrics.pairwise import cosine_similarity
import visualization
# supressing numpy 1.20 deprecation warnings
# (when taking the cosine similarity)
import warnings
warnings.filterwarnings("ignore")
###Output
_____no_output_____
###Markdown
Initialize and process target data
###Code
# read in excel file
df = pd.read_excel('target_spectra.xlsx')
data = np.asarray(df)
# get wavelength
wavelength = data[:,0]
# subtract baseline
TARGET = data[:,1] - data[:,2]
# peak normalize spectra
TARGET = TARGET / np.max(TARGET)
# visualizaiton
plt.rcParams.update({'font.size': 18})
# plot target spectra
fig, ax = plt.subplots(figsize=(6,4))
plt.plot(wavelength, TARGET, 'k-', linewidth=3)
# formatting
ax.tick_params(direction='out', width=2, length=8)
plt.xlabel("Wavelength (nm)")
plt.ylabel("Intensity (a.u.)")
plt.show()
###Output
_____no_output_____
###Markdown
Read in basis spectra for use in virtual testing
###Code
# read in excel file
df = pd.read_excel('basis_spectra.xlsx')
data = np.asarray(df)
# get wavelength
wavelength = data[:,0]
# get colors
RED = data[:,1]
BLUE = data[:,2]
GREEN = data[:,3]
# peak normalize spectra
RED = RED / np.max(RED)
BLUE = BLUE / np.max(BLUE)
GREEN = GREEN / np.max(GREEN)
# visualizaiton
plt.rcParams.update({'font.size': 18})
# plot target spectra
fig, ax = plt.subplots(figsize=(6,4))
plt.plot(wavelength, RED, 'r-', linewidth=3)
plt.plot(wavelength, GREEN, 'g-', linewidth=3)
plt.plot(wavelength, BLUE, 'b-', linewidth=3)
# formatting
ax.tick_params(direction='out', width=2, length=8)
plt.xlabel("Wavelength (nm)")
plt.ylabel("Intensity (a.u.)")
plt.show()
###Output
_____no_output_____
###Markdown
 1. Using the GP-BUCB agent Initialize Environment Requirements: The environment class must have a sample function that takes in a set of actions and the current time step and produces the corresponding set of results. Two example environment classes are included below. 1. Environment_IO This includes functions that read and write data files for actions and results, respectively. Thus, the files can be used as instructions to, for example, an OT2 pipetting robot. This is an example of an environment class that can be used in a physical experiment. 2. Environment_Virtual Instead of reading and writing data files, this class computes the result of the actions via a heuristic and would be an example of a virtual testing environment. For the purpose of this tutorial, we will utilize this environment class. Example environment with I/O
###Code
class Environment_IO(object):
def __init__(self, target):
"""Initialize environment with target spectrum."""
self.target = target
def get_cos_sim(self, target, y):
"""Similarity metric to maximize."""
return np.average(cosine_similarity(target.reshape(1, -1), Y=y.reshape(1, -1)).squeeze())
def write_actions(self, actions, time_step):
"""Write batch of actions to csv file."""
# we are using three dyes
# therefore the action is a concentration for each of the 3 dyes
df = pd.DataFrame(actions, columns=['1','2','3'])
df.to_csv(f'Batch_{time_step + 1}.csv')
def spectra_from_conc(self):
"""Read and write data."""
# waits for experiment to be performed and takes in the filename of the results
print("\ncsv file written with robot instructions. Waiting for experiment...")
print('What is the csv file name of the experimental results?')
file_name = input()
if os.path.exists(file_name):
# read in excel file
df = pd.read_excel(file_name)
data = np.asarray(df)
# subtract baseline (ignore first colm because it's wavelength)
results = data[:,1:-1].squeeze() - data[:,-1]
# peak normalize spectra
results = results / np.max(results, axis=0)
return results
elif file_name == 'END' or file_name == 'STOP':
warnings.warn("Ending experiment.")
else:
warnings.warn(f'{file_name} does not exist')
def sample(self, xs, time_step=0):
"""The agent calls this function during each learning step."""
self.write_actions(xs, time_step)
results = self.spectra_from_conc()
metrics = []
for result in results:
metric = self.get_cos_sim(self.target, result)
metrics.append(metric)
return np.array(metrics)
###Output
_____no_output_____
###Markdown
Example environment for virtual testing
###Code
class Environment_Virtual(object):
def __init__(self, target, red, green, blue):
"""Initialize environment with target spectrum."""
self.target = target
self.red = red
self.green = green
self.blue = blue
def get_cos_sim(self, target, y):
"""Similarity metric to maximize."""
return np.average(cosine_similarity(target.reshape(1, -1), Y=y.reshape(1, -1)).squeeze())
def spectra_from_conc(self, x):
"""Use beers law to get spectra."""
# normalize concentrations to add to one
x = x / np.max(x)
return x[0]*self.red + x[1]*self.blue + x[2]*self.green
def sample(self, xs, time_step=None):
"""The agent calls this function during each learning step."""
metrics = []
for x in xs:
result = self.spectra_from_conc(x)
metric = self.get_cos_sim(self.target, result)
metrics.append(metric)
return np.array(metrics)
# initializing virtual
env = Environment_Virtual(TARGET, RED, GREEN, BLUE)
###Output
_____no_output_____
###Markdown
Initialize experimental constraints
###Code
#constraints
min_conc = 0.05
max_conc = 1
# parameter space
N = 20 # grid size
# construct param space
coeffs = np.linspace(min_conc, max_conc, N)
param_space = np.meshgrid(coeffs, coeffs, coeffs)
###Output
_____no_output_____
###Markdown
Initialize agent
###Code
batch_size = 15
UCB_agent = ucb.BatchGPUCB(batch_size, param_space, env, beta=1.5)
###Output
_____no_output_____
###Markdown
Learn
###Code
# training loop
epochs = 8
for i in range(epochs):
UCB_agent.learn()
###Output
_____no_output_____
###Markdown
 Visualization of Results. The visualization.py module has a few example plotting functions for visualizing learning.
###Code
X = np.array(UCB_agent.X)
visualization.plot_batch_stack(X, wavelength, TARGET, batch_size, epochs, env)
actions = np.array(UCB_agent.X[-1])
Results = np.array([env.spectra_from_conc(action) for action in actions])
visualization.plot_batch(Results, actions, wavelength, TARGET, epochs)
visualization.plot_best_spectrum(X, UCB_agent.Y, batch_size, wavelength, TARGET, env)
###Output
_____no_output_____
###Markdown
 Using the Genetic Algorithm (GA) The first iteration, i.e., generation, will use Latin hypercube sampling. Thus, the agent will randomly sample in a (hyper)grid of the parameter space. For consistency, we will use the same sampling as the GP-BUCB agent.
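A rough sketch of what a Latin hypercube sample over the three dye concentrations could look like (illustrative only; realpy's own sampling routine is not shown here, and the 15 / 0.05 / 1.0 values simply mirror the batch size and concentration bounds used above):
###Code
import numpy as np

def latin_hypercube(n_samples, n_dims, low=0.05, high=1.0, seed=0):
    """One stratified draw per interval in every dimension, independently shuffled."""
    rng = np.random.default_rng(seed)
    edges = np.linspace(0.0, 1.0, n_samples + 1)
    cols = []
    for _ in range(n_dims):
        points = rng.uniform(edges[:-1], edges[1:])  # one draw per stratum in [0, 1)
        cols.append(rng.permutation(points))         # shuffle the strata within this dimension
    sample = np.column_stack(cols)
    return low + sample * (high - low)

lhs_sketch = latin_hypercube(n_samples=15, n_dims=3)
###Output
_____no_output_____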
###Code
# get LH sampling of input space
first_generation_actions = UCB_agent.X[0]
###Output
_____no_output_____
###Markdown
Initialize agent
###Code
# use the same batch_size and Environment_Virtual class as the GP-BUCB agent
# the first generation is seeded with the actions from the first (zeroth iteration) GP-BUCB batch
ga_agent = genetic.GA(env, batch_size, first_generation_actions)
###Output
_____no_output_____
###Markdown
Learn
###Code
# same number of epochs as the GP-BUCB agent (8)
for i in range(epochs):
ga_agent.learn()
###Output
_____no_output_____
###Markdown
Visualization of Results
###Code
actions = ga_agent.generation
Results = [env.spectra_from_conc(action) for action in actions]
Results = np.array(Results)
visualization.plot_batch(Results, actions, wavelength, TARGET, i+1)
actions = np.array(actions)
actions = actions.reshape(1, batch_size, 3)
visualization.plot_best_spectrum(actions, ga_agent.fitness(), batch_size, wavelength, TARGET, env)
###Output
_____no_output_____ |
PINN/Shrodinger_Equation.ipynb | ###Markdown
Shrodinger Equation 1-d Shrodinger Equation$$\begin{equation}-\frac{\hbar^2}{2\mu}\frac{\partial^2{\Psi(x, t)}}{\partial x^2} + U(x, t)\Psi(x, t)=i\hbar\frac{\partial\Psi(x, t)}{\partial t}\end{equation}$$ 2-d Shrodinger Equation$$\begin{equation}-\frac{\hbar^2}{2\mu}(\frac{\partial^2{\Psi(x, y, t)}}{\partial x^2}+\frac{\partial^2{\Psi(x, y, t)}}{\partial y^2}) + U(x, y, t)\Psi(x, y, t)=i\hbar\frac{\partial\Psi(x, y, t)}{\partial t}\end{equation}$$ 3-d Shrodinger Equation$$\begin{equation}-\frac{\hbar^2}{2\mu}(\frac{\partial^2{\Psi}}{\partial x^2}+\frac{\partial^2{\Psi}}{\partial y^2}+\frac{\partial^2{\Psi}}{\partial z^2}) + U(x, y, z, t)\Psi=i\hbar\frac{\partial\Psi}{\partial t}\end{equation}$$ Infinite Potential Well(1d) potential energy$$\begin{equation} V(x) = \left\{ \begin{array}{cc} 0, & -\frac{L}{2} < x < \frac{L}{2}, \\ \infin, & otherwise, \end{array} \right.\end{equation}$$where $L$ is the length of the box, the location of the center of the box is 0 and $x$ is the position of the particle. Def PDE of Shrodinger Equation in a infinite potential well$$\begin{equation} \left\{ \begin{array}{cc} i\hbar\frac{\partial\Psi(x, t)}{\partial t}+\frac{\hbar^2}{2}\frac{\partial^2\Psi(x, t)}{\partial x^2}=0 \\ \\ \Psi(x, 0) = Asin(k_n(x+\frac{L}{2})) \\ \\ \Psi(-\frac{L}{2}, t)=0 \\ \\ \Psi(\frac{L}{2}, t)=0 \\ \\ \end{array} \right. , -\frac{L}{2} < x < \frac{L}{2}\end{equation}$$where $k_n=\frac{n\pi}{L}$, $n$ is a positive integer, and $|A|=\sqrt{\frac{2}{L}}$ Finite Difference$$\begin{equation} \begin{array}{cc} \begin{split} \frac{\partial\Psi}{\partial t} &= \frac{i}{2}\frac{\partial^2\Psi}{\partial x^2} \\ &= \frac{i}{2}\frac{\Psi_{j+1}^n-2\Psi_j^n+\Psi_{j-1}^n}{\Delta x^2} \end{split} \end{array}\end{equation}$$obtain,$$\begin{equation} \frac{\Psi_j^{n+1}-\Psi_j^n}{\Delta t} = \frac{i}{2}\frac{\Psi_{j+1}^n-2\Psi_j^n+\Psi_{j-1}^n}{\Delta x^2}\end{equation}$$where $\Psi_j^{n}$ is the wave function value at j-th point when time equal to $n\Delta t$then,$$\begin{equation} \Psi_j^{n+1} = \Psi_j^n+\frac{i\Delta t}{2} \frac{\Psi_{j+1}^n-2\Psi_j^n+\Psi_{j-1}^n}{\Delta x^2}\end{equation}$$matrix$$\begin{equation} \bm{\Psi^{n+1}} = \bm{\Psi^n} + \frac{i\Delta t}{2\Delta x^2} \left[ \begin{array}{cc} -2&1&0&0&\cdots&0&0&0 \\ 1&-2&1&0&\cdots&0&0&0 \\ 0&1&-2&1&\cdots&0&0&0 \\ \vdots&\ddots&\ddots&\ddots&\ddots&\ddots&\ddots&\vdots \\ \vdots&\ddots&\ddots&\ddots&\ddots&\ddots&\ddots&\vdots \\ 0&0&0&0&\cdots&1&-2&1 \\ 0&0&0&0&\cdots&0&1&-2 \\ \end{array} \right] \left[ \begin{array}{cc} \Psi^n_1 \\ \Psi^n_2 \\ \Psi^n_3 \\ \vdots \\ \vdots \\ \Psi^n_{J-1} \\ \Psi^n_J \\ \end{array} \right]\end{equation}$$because $\Psi(-\frac{L}{2}, t)=0$ and $\Psi(\frac{L}{2}, t)=0$obtain$$\begin{equation} \bm{\Psi^{n+1}} = \bm{\Psi^n} + \frac{i\Delta t}{2\Delta x^2} \left[ \begin{array}{cc} 0&0&0&0&\cdots&0&0&0 \\ 1&-2&1&0&\cdots&0&0&0 \\ 0&1&-2&1&\cdots&0&0&0 \\ \vdots&\ddots&\ddots&\ddots&\ddots&\ddots&\ddots&\vdots \\ \vdots&\ddots&\ddots&\ddots&\ddots&\ddots&\ddots&\vdots \\ 0&0&0&0&\cdots&1&-2&1 \\ 0&0&0&0&\cdots&0&0&0 \\ \end{array} \right] \left[ \begin{array}{cc} \Psi^n_1 \\ \Psi^n_2 \\ \Psi^n_3 \\ \vdots \\ \vdots \\ \Psi^n_{J-1} \\ \Psi^n_J \\ \end{array} \right]\end{equation}$$
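For reference, the boxed problem above has the standard separable solution $\Psi_n(x, t) = \sin(k_n(x+\frac{L}{2}))\,e^{-i k_n^2 t / 2}$ (with $\hbar=\mu=1$ and the amplitude set to 1 to match the numerical initial condition below), which can be used to sanity-check the numerical schemes that follow; a minimal sketch:
###Code
import numpy as np

def analytic_wave(x, t, n=1, box_l=2.0):
    """Separable solution of the nondimensionalized PDE with hard-wall boundaries."""
    k_n = n * np.pi / box_l
    return np.sin(k_n * (x + box_l / 2)) * np.exp(-0.5j * k_n**2 * t)

x_check = np.linspace(-1.0, 1.0, 5)
psi_check = analytic_wave(x_check, t=0.5, n=1)
###Output
_____no_output_____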
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Finite Difference
# state num
state_num = 1
# box length
box_l = 2
# cal time
time_total = 2
# time step
delta_time = 0.00001
# space step
delta_x = 0.1
# time discrete num
time_n = int(time_total/delta_time)
# space discrete num
space_n = int(box_l/delta_x)
# result matrix space_point * time_point
phi_matrix = np.zeros((int(space_n), int(time_n))).astype(np.complex64)
# def A matrix
parm_matrix = -2*np.eye(int(space_n)) + np.eye(int(space_n), k=1) + np.eye(int(space_n), k=-1) + 0.j
parm_matrix[0, :] = 0
parm_matrix[-1, :] = 0
# init wave
phi_matrix[:, 0] = np.sin((state_num*np.pi/box_l)*(np.linspace(-box_l/2, box_l/2, space_n)+box_l/2))
# iter
constant_ = 1.j*delta_time/(2*np.power(delta_x, 2))
for i in range(time_n-1):
temp_value = phi_matrix[:, i] + constant_ * np.matmul(parm_matrix, phi_matrix[:, i])
phi_matrix[:, i+1] = temp_value
print("done...")
# plot
plt.figure(figsize=(10, 10), dpi=150)
time_list = np.linspace(0, time_total, time_n)
position_list = np.linspace(-box_l/2, box_l/2, space_n)
position_labels = np.around(np.linspace(-box_l/2, box_l/2, 4), 1)
# the index position of the tick labels
position_ticks = list()
for label in position_labels:
idx_pos = len(position_list) - np.argmin(np.abs(label-position_list))
position_ticks.append(idx_pos)
time_labels = np.around(np.linspace(0, time_total, 4), 1)
time_ticks = list()
for label in time_labels:
idx_pos = np.argmin(np.abs(label-time_list))
time_ticks.append(idx_pos)
# real
plt.subplot(2, 1, 1)
ax = sns.heatmap(np.real(phi_matrix), annot=False)
ax.set_xlabel("time")
ax.set_ylabel("position")
ax.set_yticks(position_ticks)
ax.set_xticks(time_ticks)
ax.set_title("real part of wave function —— time")
ax.set_xticklabels(time_labels)
ax.set_yticklabels(position_labels)
# imag
plt.subplot(2, 1, 2)
ax_imag = sns.heatmap(np.imag(phi_matrix), annot=False)
ax_imag.set_xlabel("time")
ax_imag.set_ylabel("position")
ax_imag.set_yticks(position_ticks)
ax_imag.set_xticks(time_ticks)
ax_imag.set_title("imaginary part of wave function —— time")
ax_imag.set_xticklabels(time_labels)
ax_imag.set_yticklabels(position_labels)
plt.show()
###Output
_____no_output_____
###Markdown
Finite Difference Runge-Kutta MethodObviously, the accuracy of finite difference is not enough, and consider to use Runge-Kutta method Fourth Runge-Kutta Method$$\begin{equation} \left\{ \begin{array}{cc} y^{n+1} = y^n + \frac{h}{6}(k_1+2k_2+2k_3+k_4) \\ \\ k_1 = f(y^n, t^n) \\ \\ k_2 = f(y^n+k_1\frac{h}{2}, t^n+\frac{h}{2}) \\ \\ k_3 = f(y^n+k_2\frac{h}{2}, t^n+\frac{h}{2}) \\ \\ k_4 = f(y^n+hk_3, t^n+h) \end{array} \right.\end{equation}$$ Finite difference with Fourth Runge-Kutta Method$$\begin{equation} \begin{split} \frac{\partial\Psi}{\partial t} &= \frac{i}{2}\frac{\Psi_{j+1}^n-2\Psi_j^n+\Psi_{j-1}^n}{\Delta x^2} \\ &= f(\Psi, t) \end{split}\end{equation}$$obtain$$\begin{equation} \left\{ \begin{array}{cc} \begin{split} \Psi^{n+1}_j &= \Psi^n_j + \Delta t k \\ &= \Psi^n_j + \frac{h}{6}(k_1+2k_2+2k_3+k_4) \end{split} \\ \\ h = \Delta t \\ \\ \begin{split} k_{1j} &= f(\Psi^n_j, t^n)\\ &= \frac{i}{2}\frac{\Psi_{j+1}^n-2\Psi_j^n+\Psi_{j-1}^n}{\Delta x^2} \end{split} \\ \\ \begin{split} k_{2j} &= f(\Psi^n_j+k_{1j}\frac{h}{2}, t^n+\frac{h}{2})\\ &= \frac{i}{2}\frac{\Psi_{j+1+k_1\frac{h}{2}}^{n+\frac{h}{2}}-2\Psi_{j+k_1\frac{h}{2}}^{n+\frac{h}{2}}+\Psi_{j-1+k_1\frac{h}{2}}^{n+\frac{h}{2}}}{\Delta x^2}\\ &= \frac{i}{2}\frac{\Psi_{j+1}^{n}+\frac{h}{2}k_{1(j+1)}-2\Psi_{j}^{n}-hk_{1j}+\Psi_{j-1}^{n}+\frac{h}{2}k_{1(j-1)}}{\Delta x^2} \end{split} \\ \\ \begin{split} k_{3j} &= f(\Psi^n_j+k_{2j}\frac{h}{2}, t^n+\frac{h}{2})\\ &= \frac{i}{2}\frac{\Psi_{j+1+k_2\frac{h}{2}}^{n+\frac{h}{2}}-2\Psi_{j+k_2\frac{h}{2}}^{n+\frac{h}{2}}+\Psi_{j-1+k_2\frac{h}{2}}^{n+\frac{h}{2}}}{\Delta x^2}\\ &= \frac{i}{2}\frac{\Psi_{j+1}^{n}+\frac{h}{2}k_{2(j+1)}-2\Psi_{j}^{n}-hk_{2j}+\Psi_{j-1}^{n}+\frac{h}{2}k_{2(j-1)}}{\Delta x^2} \end{split} \\ \\ \begin{split} k_{4j} &= f(\Psi^n_j+hk_{3j}, t^n+h)\\ &= \frac{i}{2}\frac{\Psi_{j+1+hk_3}^{n+h}-2\Psi_{j+hk_3}^{n+h}+\Psi_{j-1+hk_3}^{n+h}}{\Delta x^2}\\ &= \frac{i}{2}\frac{\Psi_{j+1}^{n}+hk_{3(j+1)}-2\Psi_{j}^{n}-2hk_{3j}+\Psi_{j-1}^{n}+hk_{3(j-1)}}{\Delta x^2} \end{split} \end{array} \right.\end{equation}$$matrix$$\begin{equation} \begin{array}{cc} \begin{split} \bm{k_1^n} &= \left[ \begin{array}{cc} f(\Psi^n_1, t^n) \\ f(\Psi^n_2, t^n) \\ \vdots \\ f(\Psi^n_J, t^n) \\ \end{array} \right] \\ &= \frac{i}{2\Delta x^2} \left[ \begin{array}{cc} -2&1&0&0&\cdots&0&0&0 \\ 1&-2&1&0&\cdots&0&0&0 \\ 0&1&-2&1&\cdots&0&0&0 \\ \vdots&\ddots&\ddots&\ddots&\ddots&\ddots&\ddots&\vdots \\ \vdots&\ddots&\ddots&\ddots&\ddots&\ddots&\ddots&\vdots \\ 0&0&0&0&\cdots&1&-2&1 \\ 0&0&0&0&\cdots&0&1&-2 \\ \end{array} \right] \left[ \begin{array}{cc} \Psi^n_1 \\ \Psi^n_2 \\ \Psi^n_3 \\ \vdots \\ \vdots \\ \Psi^n_{J-1} \\ \Psi^n_J \\ \end{array} \right] \end{split} \end{array}\end{equation}$$$$\begin{equation} \begin{split} \bm{k_2^n} &= \left[ \begin{array}{cc} f(\Psi^n_1+k_1\frac{h}{2}, t^n+\frac{h}{2}) \\ f(\Psi^n_2+k_1\frac{h}{2}, t^n+\frac{h}{2}) \\ \vdots \\ f(\Psi^n_J+k_1\frac{h}{2}, t^n+\frac{h}{2}) \\ \end{array} \right] \\ &= \frac{i}{2\Delta x^2} \left[ \begin{array}{cc} -2&1&0&0&\cdots&0&0&0 \\ 1&-2&1&0&\cdots&0&0&0 \\ 0&1&-2&1&\cdots&0&0&0 \\ \vdots&\ddots&\ddots&\ddots&\ddots&\ddots&\ddots&\vdots \\ \vdots&\ddots&\ddots&\ddots&\ddots&\ddots&\ddots&\vdots \\ 0&0&0&0&\cdots&1&-2&1 \\ 0&0&0&0&\cdots&0&1&-2 \\ \end{array} \right] \left( \left[ \begin{array}{cc} \Psi^n_1 \\ \Psi^n_2 \\ \Psi^n_3 \\ \vdots \\ \vdots \\ \Psi^n_{J-1} \\ \Psi^n_J \\ \end{array} \right] + \frac{h}{2}\bm{k_1^n} \right) \end{split}\end{equation}$$$$\begin{equation} \begin{split} \bm{k_3^n} &= \left[ \begin{array}{cc} 
f(\Psi^n_1+k_2\frac{h}{2}, t^n+\frac{h}{2}) \\ f(\Psi^n_2+k_2\frac{h}{2}, t^n+\frac{h}{2}) \\ \vdots \\ f(\Psi^n_J+k_2\frac{h}{2}, t^n+\frac{h}{2}) \\ \end{array} \right] \\ &= \frac{i}{2\Delta x^2} \left[ \begin{array}{cc} -2&1&0&0&\cdots&0&0&0 \\ 1&-2&1&0&\cdots&0&0&0 \\ 0&1&-2&1&\cdots&0&0&0 \\ \vdots&\ddots&\ddots&\ddots&\ddots&\ddots&\ddots&\vdots \\ \vdots&\ddots&\ddots&\ddots&\ddots&\ddots&\ddots&\vdots \\ 0&0&0&0&\cdots&1&-2&1 \\ 0&0&0&0&\cdots&0&1&-2 \\ \end{array} \right] \left( \left[ \begin{array}{cc} \Psi^n_1 \\ \Psi^n_2 \\ \Psi^n_3 \\ \vdots \\ \vdots \\ \Psi^n_{J-1} \\ \Psi^n_J \\ \end{array} \right] + \frac{h}{2}\bm{k_2^n} \right) \end{split}\end{equation}$$$$\begin{equation} \begin{split} \bm{k_4^n} &= \left[ \begin{array}{cc} f(\Psi^n_1+hk_3, t^n+h) \\ f(\Psi^n_2+hk_3, t^n+h) \\ \vdots \\ f(\Psi^n_J+hk_3, t^n+h) \\ \end{array} \right] \\ &= \frac{i}{2\Delta x^2} \left[ \begin{array}{cc} -2&1&0&0&\cdots&0&0&0 \\ 1&-2&1&0&\cdots&0&0&0 \\ 0&1&-2&1&\cdots&0&0&0 \\ \vdots&\ddots&\ddots&\ddots&\ddots&\ddots&\ddots&\vdots \\ \vdots&\ddots&\ddots&\ddots&\ddots&\ddots&\ddots&\vdots \\ 0&0&0&0&\cdots&1&-2&1 \\ 0&0&0&0&\cdots&0&1&-2 \\ \end{array} \right] \left( \left[ \begin{array}{cc} \Psi^n_1 \\ \Psi^n_2 \\ \Psi^n_3 \\ \vdots \\ \vdots \\ \Psi^n_{J-1} \\ \Psi^n_J \\ \end{array} \right] + h\bm{k_3^n} \right) \end{split}\end{equation}$$$$\begin{equation} \bm{\Psi^{n+1}} = \bm{\Psi^n} + \frac{h}{6}(\bm{k_1^n}+2\bm{k_2^n}+2\bm{k_3^n}+\bm{k_4^n})\end{equation}$$
###Code
# Finite Difference with Fourth Lunge-Kutta Method
# state num
state_num = 2
# box length
box_l = 2
# cal time
time_total = 2
# time step
delta_time = 2e-5
# space step
delta_x = 1e-2
# time discrete num
time_n = int(time_total/delta_time)
# space discrete num
space_n = int(box_l/delta_x)
# result matrix space_point * time_point
phi_matrix = np.zeros((int(space_n), int(time_n))).astype(np.complex64)
# def A matrix
parm_matrix = -2*np.eye(int(space_n)) + np.eye(int(space_n), k=1) + np.eye(int(space_n), k=-1) + 0.j
parm_matrix[0, :] = 0
parm_matrix[-1, :] = 0
# def k1, k2, k3, k4
# k1 = k_vector_matrix[:, 0]
# k2 = k_vector_matrix[:, 1]
# k3 = k_vector_matrix[:, 2]
# k4 = k_vector_matrix[:, 3]
k_vector_matrix = np.zeros((int(space_n), 4)).astype(np.complex64)
# init wave
phi_matrix[:, 0] = np.sin((state_num*np.pi/box_l)*(np.linspace(-box_l/2, box_l/2, space_n)+box_l/2))
# iter
constant_ = 1.j/(2*np.power(delta_x, 2))
for i in range(time_n-1):
# k1
k_vector_matrix[:, 0] = constant_ * np.matmul(parm_matrix, phi_matrix[:, i])
# k2
k_vector_matrix[:, 1] = constant_ * (np.matmul(parm_matrix, phi_matrix[:, i]+(delta_time/2)*k_vector_matrix[:, 0]))
# k3
k_vector_matrix[:, 2] = constant_ * (np.matmul(parm_matrix, phi_matrix[:, i]+(delta_time/2)*k_vector_matrix[:, 1]))
# k4
k_vector_matrix[:, 3] = constant_ * (np.matmul(parm_matrix, phi_matrix[:, i]+(delta_time)*k_vector_matrix[:, 2]))
# if i % 1000 == 0:
# print(np.max(k_vector_matrix))
phi_matrix[:, i+1] = phi_matrix[:, i] + (delta_time/6)*(k_vector_matrix[:, 0] + 2*k_vector_matrix[:, 1] + 2*k_vector_matrix[:, 2] + k_vector_matrix[:, 3])
print("done...")
# plot
plt.figure(figsize=(10, 10), dpi=150)
draw_time_list = np.linspace(0, time_n-1, min(400, time_n)).astype(np.int32)
draw_position_list = np.linspace(0, space_n-1, min(400, space_n)).astype(np.int32)
phi_matrix_draw = phi_matrix[draw_position_list, :][:, draw_time_list]
time_list = np.linspace(0, time_total, len(draw_time_list))
position_list = np.linspace(-box_l/2, box_l/2, len(draw_position_list))
position_labels = np.around(np.linspace(-box_l/2, box_l/2, 4), 1)
# the index position of the tick labels
position_ticks = list()
for label in position_labels:
idx_pos = len(position_list) - np.argmin(np.abs(label-position_list))
position_ticks.append(idx_pos)
time_labels = np.around(np.linspace(0, time_total, 4), 1)
time_ticks = list()
for label in time_labels:
idx_pos = np.argmin(np.abs(label-time_list))
time_ticks.append(idx_pos)
# real
plt.subplot(2, 1, 1)
ax = sns.heatmap(np.real(phi_matrix_draw), annot=False)
ax.set_xlabel("time")
ax.set_ylabel("position")
ax.set_yticks(position_ticks)
ax.set_xticks(time_ticks)
ax.set_title("real part of wave function —— time")
ax.set_xticklabels(time_labels)
ax.set_yticklabels(position_labels)
# imag
plt.subplot(2, 1, 2)
ax_imag = sns.heatmap(np.imag(phi_matrix_draw), annot=False)
ax_imag.set_xlabel("time")
ax_imag.set_ylabel("position")
ax_imag.set_yticks(position_ticks)
ax_imag.set_xticks(time_ticks)
ax_imag.set_title("imaginary part of wave function —— time")
ax_imag.set_xticklabels(time_labels)
ax_imag.set_yticklabels(position_labels)
plt.show()
###Output
_____no_output_____
###Markdown
PINNUsing PINN to solve the Shrodinger Equation in a infinite potential well model* Inputsposition $x_i$, and time $t_i$* outputthe wave function value $u$ at $(x_i, t_i)$* Conditions * **PDE** $$ \begin{equation} f(x, t) = i \frac{\partial{u}}{\partial{t}} + 2\frac{\partial^2{u}}{\partial{x}^2} = 0, \\ -\frac{L}{2}\leq x \leq \frac{L}{2} \end{equation} $$ * **Boundary Conditions** $$ \begin{equation} \begin{array}{cc} u(-\frac{L}{2}, t)=0 \\ u(\frac{L}{2}, t)=0 \end{array} \end{equation} $$ * **Initial Conditions** $$ \begin{equation} \begin{array}{cc} u(x, 0) = Asin(k_n(x+\frac{L}{2})) \\ k_n = \frac{n\pi}{L} \\ |A|=\sqrt{\frac{2}{L}} \end{array} \end{equation} $$* Loss Function$$\begin{equation} \begin{array}{cc} \mathcal{L}=MSE_b+MSE_0+MSE_f, \\ \\ MSE_b = \frac{1}{|N_b|}\sum_{i=1}^{|N_b|}(|u(-\frac{L}{2}, t^i_b)|^2+|u(\frac{L}{2}, t^i_b)|^2), \\ \\ MSE_0 = \frac{1}{|N_0|}\sum_{i=1}^{|N_0|} |u(x_0^i, 0)-u_0^i|^2, \\ \\ MSE_f = \frac{1}{|N_f|}\sum_{i=1}^{|N_f|} |f(x_f^i, t_f^i)|^2 \end{array}\end{equation}$$where $N_f = \{ (x_f^1, t_f^1), (x_f^2, t_f^2), \cdots, (x_f^{|N_f|}, t_f^{|N_f|}) \}$ is the dataset to calculate the loss of PDE, $|N_f|$ is the total number of $N_f$, $N_0 = \{ (x_0^1, u_0^1), (x_0^2, u_0^2), \cdots, (x_0^{|N_0|}, u_0^{|N_0|}) \}$ denotes the initial data,$|N_0|$ is the total number of $N_0$, $N_b=\{ t^1_b, t^2_b, \cdots, t^{|N_b|}_b \}$ corresponds to the collocation points on the boundary,and $|N_b|$ is the total number of $N_b$,* Optimization MethodIn order to calculate the value of $f(x_f^i, t_f^i)$, we need to obtain the value of $\frac{\partial{u}}{\partial{t}}$ and $\frac{\partial^2{u}}{\partial{x}^2}$, which can not be obtained directly.Consider to use Automatic Differentiation(AD) to obtain these two value, which is an important part of Gradient Descent.* Other Info * Self-Supervised Learning or Supervised Learning? There is no need to provide train data to train PINN model, and the numerical data is only used to calculate the accuracy of PINN outputs
###Code
def heatmap_draw_func(input_matrix, time_range, position_range):
# plot
time_n_raw = len(input_matrix[0, :])
space_n_raw = len(input_matrix[:, 0])
plt.figure(figsize=(10, 10), dpi=150)
draw_time_list = np.linspace(0, time_n_raw-1, min(400, time_n_raw)).astype(np.int32)
draw_position_list = np.linspace(0, space_n_raw-1, min(400, space_n_raw)).astype(np.int32)
phi_matrix_draw = input_matrix[draw_position_list, :][:, draw_time_list]
time_list = np.linspace(time_range[0], time_range[1], len(draw_time_list))
position_list = np.linspace(position_range[0], position_range[1], len(draw_position_list))
position_labels = np.around(np.linspace(position_range[0], position_range[1], 4), 1)
# the index position of the tick labels
position_ticks = list()
for label in position_labels:
idx_pos = len(position_list) - np.argmin(np.abs(label-position_list))
position_ticks.append(idx_pos)
time_labels = np.around(np.linspace(time_range[0], time_range[1], 4), 1)
time_ticks = list()
for label in time_labels:
idx_pos = np.argmin(np.abs(label-time_list))
time_ticks.append(idx_pos)
# real
plt.subplot(2, 1, 1)
ax = sns.heatmap(np.real(phi_matrix_draw), annot=False)
ax.set_xlabel("time")
ax.set_ylabel("position")
ax.set_yticks(position_ticks)
ax.set_xticks(time_ticks)
ax.set_title("real part of wave function —— time")
ax.set_xticklabels(time_labels)
ax.set_yticklabels(position_labels)
# imag
plt.subplot(2, 1, 2)
ax_imag = sns.heatmap(np.imag(phi_matrix_draw), annot=False)
ax_imag.set_xlabel("time")
ax_imag.set_ylabel("position")
ax_imag.set_yticks(position_ticks)
ax_imag.set_xticks(time_ticks)
ax_imag.set_title("imaginary part of wave function —— time")
ax_imag.set_xticklabels(time_labels)
ax_imag.set_yticklabels(position_labels)
plt.show()
# PINN
# data
# boundary_data, initial_data, f_data
# obtain from numerical method
# boundary data number
boundary_data_num = 50
# initial data number
initial_data_num = 50
# f data number
f_data_num = 2000000
# total num
total_initial_num = len(phi_matrix[:, 0])
total_boundary_num = len(phi_matrix[0, :]) * 2
total_f_num = np.prod(np.shape(phi_matrix))
# x and t data
time_list = np.linspace(0, time_total, len(phi_matrix[0, :]))
position_list = np.linspace(-box_l/2, box_l/2, len(phi_matrix[:, 0]))
# obtain the index of test data
import random
np.random.seed(1024)
random.seed(1024)
initial_data_index_list = np.random.choice(range(0, total_initial_num), initial_data_num, replace=False)
boundary_data_index_list = np.random.choice(range(0, total_boundary_num), boundary_data_num, replace=False)
f_data_index_list = np.random.choice(range(0, total_f_num), f_data_num, replace=False)
# obtain data
# (x, t, u)
initial_position = position_list[initial_data_index_list]
initial_time = np.zeros_like(initial_data_index_list)
initial_data = np.array(list(zip(initial_position, initial_time, phi_matrix[initial_data_index_list, 0])), dtype=np.complex64)
# boundary
boundary_position_loc_list = (boundary_data_index_list//len(time_list))*-1
boundary_time_loc_list = boundary_data_index_list%len(time_list)
boundary_position = position_list[boundary_position_loc_list]
boundary_time = time_list[boundary_time_loc_list]
boundary_data = np.array(list(zip(boundary_position, boundary_time, phi_matrix[boundary_position_loc_list, boundary_time_loc_list])), dtype=np.complex64)
# f
f_data_position_loc_list = f_data_index_list//len(time_list)
f_data_time_loc_list = f_data_index_list%len(time_list)
f_data_position = position_list[f_data_position_loc_list]
f_data_time = time_list[f_data_time_loc_list]
f_data = np.array(list(zip(f_data_position, f_data_time, phi_matrix[f_data_position_loc_list, f_data_time_loc_list])), dtype=np.complex64)
# draw test data
test_data_matrix = np.ones_like(phi_matrix).astype(np.complex64)*-1
test_data_matrix[initial_data_index_list, 0] = initial_data[:, 2]
test_data_matrix[boundary_position_loc_list, boundary_time_loc_list] = boundary_data[:, 2]
test_data_matrix[f_data_position_loc_list, f_data_time_loc_list] = f_data[:, 2]
heatmap_draw_func(test_data_matrix, [0, time_total], [-box_l/2, box_l/2])
f_data = f_data[:20000, :]
print("data create done...")
print("the number of initial data:{}".format(len(initial_data)))
print("the number of boundary data:{}".format(len(boundary_data)))
print("the number of f data:{}".format(len(f_data)))
from torch.utils.data import DataLoader, Dataset
import torch
from torch import nn
from collections import OrderedDict
# dataset and dataloader
class MyDataset(Dataset):
def __init__(self, data_list) -> None:
super(MyDataset, self).__init__()
self.data_list = data_list
    def __getitem__(self, item):
        # each entry is (x, t, u) where u is stored as a complex number
        input_x = self.data_list[item][0].real
        input_t = self.data_list[item][1].real
        true_y_real = self.data_list[item][2].real
        true_y_imag = self.data_list[item][2].imag
        return {"input_x": input_x, "input_t": input_t, "true_y": [true_y_real, true_y_imag]}
    def __len__(self):
        return len(self.data_list)
    @staticmethod
    def collate_fn(batch):
        input_tensor = torch.tensor([[s["input_x"], s["input_t"]] for s in batch], dtype=torch.float32)
        true_y = torch.as_tensor([s["true_y"] for s in batch], dtype=torch.float32)
        return {"input": input_tensor, "output": true_y}
# model
# input (x, t)
# output (pred_real, pred_imag, du/dt, d^2u/dx^2)
class PINN(nn.Module):
def __init__(self):
super(PINN, self).__init__()
self.pinn_network = nn.Sequential(OrderedDict([
("layer 1", nn.Linear(2, 100)),
("tanh 1", nn.Tanh()),
("layer 2", nn.Linear(100, 100)),
("tanh 2", nn.Tanh()),
("layer 3", nn.Linear(100, 100)),
("tanh 3", nn.Tanh()),
("layer 4", nn.Linear(100, 100)),
("tanh 4", nn.Tanh()),
("output layer", nn.Linear(100, 2)),
]))
def forward(self, x):
return self.pinn_network(x)
bx = torch.tensor([[1., 2.], [3., 4.], [5., 6.]], requires_grad=True)
y = torch.cat((torch.unsqueeze(torch.pow(bx[:, 0], 2)+torch.pow(bx[:, 1], 2), dim=-1),
torch.unsqueeze(torch.pow(bx[:, 0], 3)+torch.pow(bx[:, 1], 3), dim=-1)), dim=-1)
print(y)
print(y.shape, bx.shape)
dydx_1 = torch.autograd.grad(y[:, 0], bx, grad_outputs=torch.ones(bx.shape[0]), create_graph=True, retain_graph=True)
dydx_2 = torch.autograd.grad(y[:, 1], bx, grad_outputs=torch.ones(bx.shape[0]), create_graph=True, retain_graph=True)
print(torch.unsqueeze(dydx_1[0][:, 0], dim=-1).shape)
print(dydx_2)
print(torch.cat((torch.unsqueeze(dydx_1[0][:, 0], dim=-1), torch.unsqueeze(dydx_2[0][:, 0], dim=-1)), dim=1).shape)
dy_2_dx_1 = torch.autograd.grad(dydx_2[0], bx, grad_outputs=torch.ones(bx.shape), create_graph=True, retain_graph=True)
print(dy_2_dx_1)
# -------------- global params --------------
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
max_epochs = 100
recorder = dict()
recorder["acc"] = list()
recorder["loss"] = list()
# -------------- data --------------
# data set
boundary_dataset = MyDataset(boundary_data)
initial_dataset = MyDataset(initial_data)
f_dataset = MyDataset(f_data)
# data loader
boundary_dataloader = DataLoader(dataset=boundary_dataset, batch_size=boundary_data_num, shuffle=True,
collate_fn=boundary_dataset.collate_fn)
initial_dataloader = DataLoader(dataset=initial_dataset, batch_size=initial_data_num, shuffle=True,
collate_fn=initial_dataset.collate_fn)
f_data_dataloader = DataLoader(dataset=f_dataset, batch_size=f_data_num, shuffle=True,
collate_fn=f_dataset.collate_fn)
# -------------- model --------------
pinn_model = PINN().to(device)
# -------------- loss and optimizer --------------
params = [p for p in pinn_model.parameters() if p.requires_grad]
# torch.optim.LBFGS requires a closure to be passed to step(); Adam is used here instead
# so that the simple training loop below runs as written
optimizer = torch.optim.Adam(params)
criterion = nn.MSELoss(reduction="mean")
# train
for epoch in range(1, max_epochs+1):
pinn_model.train()
optimizer.zero_grad()
# boundary data
for step, batch in enumerate(boundary_dataloader):
input_ = batch["input"].to(device)
true_output = batch["output"].to(device)
pred_ = pinn_model(input_)
boundary_loss = criterion(pred_, torch.zeros_like(pred_).to(device))
# initial data
for step, batch in enumerate(initial_dataloader):
input_ = batch["input"].to(device)
true_output = batch["output"].to(device)
pred_ = pinn_model(input_)
initial_loss = criterion(pred_, true_output)
# f data
for step, batch in enumerate(f_data_dataloader):
        input_ = batch["input"].to(device)
        input_.requires_grad_(True)  # gradients w.r.t. (x, t) are needed for the PDE residual below
        pred_ = pinn_model(input_)
# cal du/dt and du/dx
# du/d(input) : (batch, input_len)
# du/dt = du/d(input)[:, 1]
# du/dx = du/d(input)[:, 0]
        du_dinput_real = torch.autograd.grad(pred_[:, 0], input_, grad_outputs=torch.ones(input_.shape[0], device=device), create_graph=True, retain_graph=True)
        du_dinput_imag = torch.autograd.grad(pred_[:, 1], input_, grad_outputs=torch.ones(input_.shape[0], device=device), create_graph=True, retain_graph=True)
        # cal d^2u/dx^2: differentiate du/dx (column 0 of the first gradient) w.r.t. (x, t),
        # so column 0 of the result is the pure second derivative d^2u/dx^2
        # (differentiating the whole gradient vector would mix in the cross term d^2u/dxdt)
        du_dinput_real_2 = torch.autograd.grad(du_dinput_real[0][:, 0], input_, grad_outputs=torch.ones(input_.shape[0], device=device), create_graph=True, retain_graph=True)
        du_dinput_imag_2 = torch.autograd.grad(du_dinput_imag[0][:, 0], input_, grad_outputs=torch.ones(input_.shape[0], device=device), create_graph=True, retain_graph=True)
# obtain du/dt
du_dt_real = du_dinput_real[0][:, 1]
du_dt_imag = du_dinput_imag[0][:, 1]
# obtain d^2u/dx^2
du_dx_2_real = du_dinput_real_2[0][:, 0]
du_dx_2_imag = du_dinput_imag_2[0][:, 0]
f_func_value_real = torch.unsqueeze(-du_dt_imag + 2*du_dx_2_real, dim=-1)
f_func_value_imag = torch.unsqueeze(du_dt_real + 2*du_dx_2_imag, dim=-1)
f_func_output = torch.cat((f_func_value_real, f_func_value_imag), dim=-1)
f_loss = criterion(f_func_output, torch.zeros_like(f_func_output).to(device))
total_loss = initial_loss + boundary_loss + f_loss
total_loss.backward()
optimizer.step()
if epoch%10 == 0:
print("[{}/{}]\t loss:{}".format(epoch, max_epochs, total_loss.item()))
###Output
_____no_output_____ |
dados_desbalanceados_com_random_forest.ipynb | ###Markdown
 Loading the dataset
###Code
import pandas as pd
import random
import numpy as np
dataset = pd.read_csv('csv_result-ebay_confianca_completo.csv')
dataset.shape
dataset.head()
dataset['blacklist'] = dataset['blacklist'] == 'S'
import seaborn as sns
sns.countplot(dataset['reputation']);
X = dataset.iloc[:, 0:74].values
X.shape
X
y = dataset.iloc[:, 74].values
y.shape
y
np.unique(y, return_counts=True)
###Output
_____no_output_____
###Markdown
 Training and test sets
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, stratify = y)
X_train.shape, y_train.shape
###Output
_____no_output_____
###Markdown
 Classification with Random Forest
###Code
from sklearn.ensemble import RandomForestClassifier
modelo = RandomForestClassifier()
modelo.fit(X_train, y_train)
previsoes = modelo.predict(X_test)
from sklearn.metrics import accuracy_score
accuracy_score(previsoes, y_test)
###Output
_____no_output_____
###Markdown
 Undersampling (Tomek links)
###Code
from imblearn.under_sampling import TomekLinks
tl = TomekLinks(return_indices=True, ratio='majority')
X_under, y_under, id_under = tl.fit_sample(X,y)
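# note: in newer imbalanced-learn releases this API changed (`ratio` -> `sampling_strategy`,
# `fit_sample` -> `fit_resample`); `return_indices` was removed and the selected indices are
# exposed as `tl.sample_indices_` instead. The same renames apply to the SMOTE cell below.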
X_under.shape, y_under.shape, id_under
np.unique(y, return_counts=True)
np.unique(y_under, return_counts=True)
X_train_u, X_test_u, y_train_u, y_test_u = train_test_split(X_under, y_under, test_size=0.2, stratify=y_under)
X_train_u.shape, X_test_u.shape
modelo_u = RandomForestClassifier()
modelo_u.fit(X_train_u, y_train_u)
previsoes_u = modelo_u.predict(X_test_u)
accuracy_score(previsoes_u, y_test_u)
###Output
_____no_output_____
###Markdown
 Oversampling (SMOTE)
###Code
from imblearn.over_sampling import SMOTE
smote = SMOTE(ratio='minority')
X_over, y_over = smote.fit_sample(X, y)
X_over.shape, y_over.shape
np.unique(y, return_counts=True)
np.unique(y_over, return_counts=True)
sns.countplot(y_over);
X_train_o, X_test_o, y_train_o, y_test_o = train_test_split(X_over, y_over, test_size=0.2, stratify=y_over)
X_train_o.shape, X_test_o.shape
modelo_o = RandomForestClassifier()
modelo_o.fit(X_train_o, y_train_o)
previsoes_o = modelo_o.predict(X_test_o)
accuracy_score(previsoes_o, y_test_o)
###Output
_____no_output_____ |
snippets/Steganography.ipynb | ###Markdown
 Some steganography-related code snippets. Source: http://www.epubit.com.cn/article/1041
###Code
from PIL import Image
def encodeDataInImage(image, data):
    evenImage = makeImageEven(image) # get a copy of the image whose least significant bits are all 0
    binary = ''.join(map(constLenBin,bytearray(data, 'utf-8'))) # convert the string to be hidden into a binary string
    if len(binary) > len(image.getdata()) * 4: # raise an exception if the data cannot all be encoded
        raise Exception("Error: Can't encode more than " + str(len(evenImage.getdata()) * 4) + " bits in this image. ")
    encodedPixels = [(r+int(binary[index*4+0]),g+int(binary[index*4+1]),b+int(binary[index*4+2]),t+int(binary[index*4+3])) if index*4 < len(binary) else (r,g,b,t) for index,(r,g,b,t) in enumerate(list(evenImage.getdata()))] # encode the binary string into the pixels' least significant bits
    encodedImage = Image.new(evenImage.mode, evenImage.size) # create a new image to hold the encoded pixels
    encodedImage.putdata(encodedPixels) # add the encoded data
    return encodedImage
def makeImageEven(image):
    pixels = list(image.getdata()) # get a list like [(r,g,b,t),(r,g,b,t)...]
    evenPixels = [(r>>1<<1,g>>1<<1,b>>1<<1,t>>1<<1) for [r,g,b,t] in pixels] # make every channel value even (drop the least significant bit)
    evenImage = Image.new(image.mode, image.size) # create a copy of the image with the same size
    evenImage.putdata(evenPixels) # put the adjusted pixels into the copy
    return evenImage
def constLenBin(n):
    binary = "0"*(8-(len(bin(n))-2))+bin(n).replace('0b','') # strip the '0b' prefix returned by bin() and left-pad with '0' to a fixed length of 8
    return binary
def decodeImage(image):
    pixels = list(image.getdata()) # get the pixel list
    binary = ''.join([str(int(r>>1<<1!=r))+str(int(g>>1<<1!=g))+str(int(b>>1<<1!=b))+str(int(t>>1<<1!=t)) for (r,g,b,t) in pixels]) # extract the data hidden in all the least significant bits
    # find the index where the hidden data ends (a double NUL terminator)
    locationDoubleNull = binary.find('0000000000000000')
    endIndex = locationDoubleNull+(8-(locationDoubleNull % 8)) if locationDoubleNull%8 != 0 else locationDoubleNull
    data = binaryToString(binary[0:endIndex])
    return data
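
# The snippet above calls binaryToString() without defining it; a minimal assumed
# implementation (not from the source article) that reverses constLenBin():
def binaryToString(binary):
    """Convert a string of 8-bit groups back into UTF-8 text."""
    raw = bytearray(int(binary[i:i + 8], 2) for i in range(0, len(binary), 8))
    return raw.rstrip(b'\x00').decode('utf-8', errors='ignore')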
encodeDataInImage(Image.open("coffee.png"), '你好世界,Hello world!')
###Output
_____no_output_____ |
dft_workflow/__misc__/finding_nonconstrained_mistakes/find_unconstr_slabs.ipynb | ###Markdown
Import Modules
###Code
import os
import sys
import pandas as pd
pd.set_option("display.max_columns", None)
# pd.set_option('display.max_rows', None)
# pd.options.display.max_colwidth = 100
from methods import get_df_jobs_data
###Output
_____no_output_____
###Markdown
Read Data
###Code
df_jobs_data = get_df_jobs_data()
df_jobs_data_i = df_jobs_data[
~df_jobs_data.final_atoms.isna()
]
# job_id_i = "sesepado_97"
# df_jobs_data_i = df_jobs_data_i.loc[[job_id_i]]
bad_job_ids = []
for job_id_i, row_i in df_jobs_data_i.iterrows():
# #####################################################
final_atoms_i = row_i.final_atoms
# #####################################################
# print(job_id_i)
has_constraints = False
if len(final_atoms_i.constraints) > 0:
has_constraints = True
if not has_constraints:
# print(job_id_i)
bad_job_ids.append(job_id_i)
if len(bad_job_ids) > 0:
print(50 * "ALERT | There are slabs with no constraints!!")
# ['vuvukara_45', 'setumaha_18', 'nububewo_52', 'fowonifu_15']
###Output
_____no_output_____
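###Markdown
If unconstrained slabs do turn up, one possible remedy (a hedged sketch, not part of this workflow; the 2 Angstrom bottom-layer cutoff is purely illustrative) is to re-apply a FixAtoms constraint before resubmitting the jobs:
###Code
# assumes ASE is importable here, since the final_atoms objects above are ASE Atoms
from ase.constraints import FixAtoms

for job_id_i in bad_job_ids:
    atoms_i = df_jobs_data_i.loc[job_id_i].final_atoms
    z_cut = atoms_i.positions[:, 2].min() + 2.0  # illustrative: fix atoms within 2 A of the slab bottom
    atoms_i.set_constraint(FixAtoms(indices=[a.index for a in atoms_i if a.position[2] < z_cut]))
###Output
_____no_output_____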
###Markdown
###Code
# if not has_constraints:
# print("IDSJSFIDSif")
# len(final_atoms_i.constraints)
###Output
_____no_output_____ |
hw2/Plots-for-CartePole-v0.ipynb | ###Markdown
Small batch, no reward_to_go, no normalize_advantage
###Code
import glob
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

infiles = sorted(glob.glob('./data/sb_no_rtg_no_na/CartPole-v0/*.csv'),
key=lambda s: int(os.path.basename(s).replace('.csv', '')))
print(len(infiles))
fig, axes = plt.subplots(1, 3, figsize=(14, 4))
axes = axes.ravel()
for k, col in enumerate(['avg_ret', 'min_ret', 'max_ret']):
ax = axes[k]
dfs = []
for f in infiles:
_df = pd.read_csv(f)
dfs.append(_df[col])
cat_df = pd.concat(dfs, axis=1)
# cat_df.columns=np.arange(1, 6)
avg = cat_df.mean(axis=1)
std = cat_df.std(axis=1)
xs = cat_df.index.values + 1
ax.plot(xs, avg)
ax.fill_between(xs, avg - std, avg+std, alpha=0.2)
ax.set_title(col)
ax.grid()
###Output
_____no_output_____
###Markdown
Large batch, no reward_to_go, no normalize_advantage
###Code
infiles = sorted(glob.glob('./data/lb_no_rtg_no_na/CartPole-v0/*.csv'),
key=lambda s: int(os.path.basename(s).replace('.csv', '')))
print(len(infiles))
fig, axes = plt.subplots(1, 3, figsize=(14, 4))
axes = axes.ravel()
for k, col in enumerate(['avg_ret', 'min_ret', 'max_ret']):
ax = axes[k]
dfs = []
for f in infiles:
_df = pd.read_csv(f)
dfs.append(_df[col])
cat_df = pd.concat(dfs, axis=1)
# cat_df.columns=np.arange(1, 6)
avg = cat_df.mean(axis=1)
std = cat_df.std(axis=1)
xs = cat_df.index.values + 1
ax.plot(xs, avg)
ax.fill_between(xs, avg - std, avg+std, alpha=0.2)
ax.set_title(col)
ax.grid()
###Output
_____no_output_____
###Markdown
Small batch, reward_to_go, no normalize_advantage
###Code
infiles = sorted(glob.glob('./data/sb_rtg_no_na/CartPole-v0/*.csv'),
key=lambda s: int(os.path.basename(s).replace('.csv', '')))
print(len(infiles))
fig, axes = plt.subplots(1, 3, figsize=(14, 4))
axes = axes.ravel()
for k, col in enumerate(['avg_ret', 'min_ret', 'max_ret']):
ax = axes[k]
dfs = []
for f in infiles:
_df = pd.read_csv(f)
dfs.append(_df[col])
cat_df = pd.concat(dfs, axis=1)
avg = cat_df.mean(axis=1)
std = cat_df.std(axis=1)
xs = cat_df.index.values + 1
ax.plot(xs, avg)
ax.fill_between(xs, avg - std, avg+std, alpha=0.2)
ax.set_title(col)
ax.grid()
###Output
_____no_output_____
###Markdown
Large batch, reward_to_go, no normalize_advantage
###Code
infiles = sorted(glob.glob('./data/lb_rtg_no_na/CartPole-v0/*.csv'),
key=lambda s: int(os.path.basename(s).replace('.csv', '')))
print(len(infiles))
fig, axes = plt.subplots(1, 3, figsize=(14, 4))
axes = axes.ravel()
for k, col in enumerate(['avg_ret', 'min_ret', 'max_ret']):
ax = axes[k]
dfs = []
for f in infiles:
_df = pd.read_csv(f)
dfs.append(_df[col])
cat_df = pd.concat(dfs, axis=1)
# cat_df.columns=np.arange(1, 6)
avg = cat_df.mean(axis=1)
std = cat_df.std(axis=1)
xs = cat_df.index.values + 1
ax.plot(xs, avg)
ax.fill_between(xs, avg - std, avg+std, alpha=0.2)
ax.set_title(col)
ax.grid()
###Output
_____no_output_____
###Markdown
Small batch, reward_to_go, normalize_advantage
###Code
infiles = sorted(glob.glob('./data/sb_rtg_na/CartPole-v0/*.csv'),
key=lambda s: int(os.path.basename(s).replace('.csv', '')))
print(len(infiles))
fig, axes = plt.subplots(1, 3, figsize=(14, 4))
axes = axes.ravel()
for k, col in enumerate(['avg_ret', 'min_ret', 'max_ret']):
ax = axes[k]
dfs = []
for f in infiles:
_df = pd.read_csv(f)
dfs.append(_df[col])
cat_df = pd.concat(dfs, axis=1)
avg = cat_df.mean(axis=1)
std = cat_df.std(axis=1)
xs = cat_df.index.values + 1
ax.plot(xs, avg)
ax.fill_between(xs, avg - std, avg+std, alpha=0.2)
ax.set_title(col)
ax.grid()
###Output
_____no_output_____
###Markdown
Large batch, reward_to_go, normalize_advantage
###Code
infiles = sorted(glob.glob('./data/lb_rtg_na/CartPole-v0/*.csv'),
key=lambda s: int(os.path.basename(s).replace('.csv', '')))
print(len(infiles))
fig, axes = plt.subplots(1, 3, figsize=(14, 4))
axes = axes.ravel()
for k, col in enumerate(['avg_ret', 'min_ret', 'max_ret']):
ax = axes[k]
dfs = []
for f in infiles:
_df = pd.read_csv(f)
dfs.append(_df[col])
cat_df = pd.concat(dfs, axis=1)
avg = cat_df.mean(axis=1)
std = cat_df.std(axis=1)
xs = cat_df.index.values + 1
ax.plot(xs, avg)
ax.fill_between(xs, avg - std, avg+std, alpha=0.2)
ax.set_title(col)
ax.grid()
###Output
_____no_output_____
###Markdown
 Q & A: reward-to-go and advantage normalization help, but not as apparent as batch size, which makes a huge impact. The empirical results do match theory: with policy gradient, the expected return improves, which is the goal of RL. Small batch, reward_to_go, normalize_advantage, neural network baseline
###Code
infiles = sorted(glob.glob('./data/sb_rtg_na_nb/CartPole-v0/*.csv'),
key=lambda s: int(os.path.basename(s).replace('.csv', '')))
print(len(infiles))
fig, axes = plt.subplots(1, 3, figsize=(14, 4))
axes = axes.ravel()
for k, col in enumerate(['avg_ret', 'min_ret', 'max_ret']):
ax = axes[k]
dfs = []
for f in infiles:
_df = pd.read_csv(f)
dfs.append(_df[col])
cat_df = pd.concat(dfs, axis=1)
avg = cat_df.mean(axis=1)
std = cat_df.std(axis=1)
xs = cat_df.index.values + 1
ax.plot(xs, avg)
ax.fill_between(xs, avg - std, avg+std, alpha=0.2)
ax.set_title(col)
ax.grid()
###Output
_____no_output_____
###Markdown
Large batch, reward_to_go, normalize_advantage, neural network baseline
###Code
infiles = sorted(glob.glob('./data/lb_rtg_na_nb/CartPole-v0/*.csv'),
key=lambda s: int(os.path.basename(s).replace('.csv', '')))
print(len(infiles))
fig, axes = plt.subplots(1, 3, figsize=(14, 4))
axes = axes.ravel()
for k, col in enumerate(['avg_ret', 'min_ret', 'max_ret']):
ax = axes[k]
dfs = []
for f in infiles:
_df = pd.read_csv(f)
dfs.append(_df[col])
cat_df = pd.concat(dfs, axis=1)
avg = cat_df.mean(axis=1)
std = cat_df.std(axis=1)
xs = cat_df.index.values + 1
ax.plot(xs, avg)
ax.fill_between(xs, avg - std, avg+std, alpha=0.2)
ax.set_title(col)
ax.grid()
###Output
_____no_output_____ |
processing/.ipynb_checkpoints/Make_bank_configs_overview_table-checkpoint.ipynb | ###Markdown
Make bank configs for simulations
###Code
<agent identifier="SBSA">
<parameter type="state_variables" name="equity" value="86200249000.0"></parameter>
<parameter type="parameters" name="leverage" value="13.0780116888061"></parameter>
<parameter type="state_variables" name="debt" value="1127327864000.0"></parameter>
<parameter type="parameters" name="m_1" value="0.0245212259042243" label="Cash and gold reserves "></parameter>
<parameter type="parameters" name="m_2" value="0.0107197211672673" label="SA Interbank deposits, loans and advances "></parameter>
<parameter type="parameters" name="m_3" value="0.00405856934605684" label="Rand Deposits with and loans to foreign banks"></parameter>
<parameter type="parameters" name="m_4" value="0.0359414516505725" label="Loans granted under repo agreement"></parameter>
<parameter type="parameters" name="m_5" value="0.129182760844701" label="Foreign currency loans and advances d 134),"></parameter>
<parameter type="parameters" name="m_6" value="0.0170625845237423" label="Redeemable preference shares"></parameter>
<parameter type="parameters" name="m_7" value="0.0296141684852751" label="corporate instalment credit "></parameter>
<parameter type="parameters" name="m_8" value="0.0252157207337808" label="household instalment credit "></parameter>
<parameter type="parameters" name="m_9" value="0.0690936667241429" label="corporate mortgage"></parameter>
<parameter type="parameters" name="m_10" value="0.217515842585181" label="household mortgage"></parameter>
<parameter type="parameters" name="m_11" value="0.114607784121438" label="Unsecured lending corporate"></parameter>
<parameter type="parameters" name="m_12" value="0.0247813962262941" label="Unsecured lending households"></parameter>
<parameter type="parameters" name="m_13" value="0.0589651490010434" label="Other credit (credit card + leasing + Overdarft + factoring debt)"></parameter>
<parameter type="parameters" name="m_14" value="0.0412585423144622" label="Central and provincial government bonds"></parameter>
<parameter type="parameters" name="m_15" value="0.00466172471770335" label="Other public-sector bonds"></parameter>
<parameter type="parameters" name="m_16" value="0.0123382885320927" label="Private sector bonds"></parameter>
<parameter type="parameters" name="m_17" value="0.000201706905161743" label="Equity holdings in subsidiaries and joint ventures"></parameter>
<parameter type="parameters" name="m_18" value="0.0129940574355693" label="Listed and unlisted equities"></parameter>
<parameter type="parameters" name="m_19" value="0.0839139051737815" label="Securitisation/ asset-backed securities"></parameter>
<parameter type="parameters" name="m_20" value="0.00454473608062181" label="Derivative instruments"></parameter>
<parameter type="parameters" name="m_21" value="0.0408429870466462" label="Treasury bills, SA Reserve Bank bills, Land Bank bills "></parameter>
<parameter type="parameters" name="m_22" value="0.00209378503289771" label="Other investments"></parameter>
<parameter type="parameters" name="m_23" value="0.0358702254473441" label="Non financial assets"></parameter>
</agent>
###Output
_____no_output_____
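###Markdown
The m_1 ... m_23 values in a config like the one above are each asset class's share of the bank's total assets. A minimal, hedged sketch of how such shares could be computed from BA900-style item values with pandas (the column names and item numbers here are illustrative, not the notebook's actual mapping):
###Code
import pandas as pd

# illustrative BA900-style rows for a single bank: item number and rand value
items = pd.DataFrame({
    "ItemNumber": ["110", "120", "130"],
    "Value": [2.0e10, 5.0e10, 3.0e10],
})
weights = items.set_index("ItemNumber")["Value"] / items["Value"].sum()
# `weights` sums to 1 and could be written into the m_i parameters of the XML config
###Output
_____no_output_____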
###Markdown
Make overview table
###Code
pwd
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime
file_list=[]
import os
os.chdir('/Users/admin/git_repos/ba900/')
for filename in os.listdir('./data/output/'):
if filename.endswith(".pkl") and '2015' in filename or '2014' in filename:
unpickle = './data/output/'+str(filename)
print(unpickle)
file_list.append(pd.read_pickle(unpickle))
MASTER = pd.concat(file_list)
MASTER['time'] = pd.to_datetime(MASTER['time'])
MASTER['Value'] = pd.to_numeric(MASTER['Value'])
import sys
import os
import sys
sys.path.append("..")
from ba900 import assets_to_weights
transform=assets_to_weights.tranformer()
from_=['ABSA BANK LTD ','THE STANDARD BANK OF S A LTD','FIRSTRAND BANK LIMITED ','NEDBANK LTD ','INVESTEC BANK LTD ',\
'CITIBANK N.A ','CAPITEC BANK ' , 'AFRICAN BANK LIMITED ','JPMORGAN CHASE BANK ',\
'THE HONGKONG AND SHANGHAI BANKING CORPORATION LIMITED - JOHANNESBURG BRANCH ','STANDARD CHARTERED BANK ',\
'CHINA CONSTRUCTION BANK CORPORATION - JHB BRANCH '
]
to_=['ABSA', 'STANDARDBANK', 'FNB','NEDBANK', 'INVESTEC',\
'CITYBANK','CAPITEC','AfricanB','JPM',\
'HSBC','CHARTERED','ChinaConstruction']
renamed=transform.relabel_banknames(from_,to_,MASTER)
t=transform.get_biggest_banks( "2015", "12", renamed, 10)
top10=t.values.tolist()
renamed=renamed[renamed.InstitutionDescription.isin(top10)]
years = ['2014','2015']
months = ['12','11']
df=transform.get_overview_timeseries(top10,months,years,renamed)
# def get_overview_timeseries(banklist, months,years,dfrenamed):
# import string
# letters = list(string.ascii_uppercase)[:26]
# temp1=[]
# temp2=[]
# temp3=[]
# for y in years:
# for m in months:
# # Get bank totals for each of those strings
# for name,l in zip(banklist,letters):
# temp1.append(self.get_bank_totals(l,name,y, m, dfrenamed))
# temp2.append(pd.concat(temp1))
# temp3.append(pd.concat(temp2))
# df=pd.concat(temp3)
# df=df.drop_duplicates()
# return df
d=transform.get_bank_totals(l,'FNB','2015', '11', renamed)
print(round(df4.iloc[0:1,-29:].sum(axis=1).values[0]))
renamed[renamed.InstitutionDescription=='FNB']
# id Name Equity Leverage Debt m1 Cash and gold reserves
# A SBSA 86 200 249 000 13.1 1127327864000 2.5%
# C FNB 75 526 054 370 12.0 904393561840 2.7%
# B ABSA 57 255 883 000 15.2 869270227000 2.8%
# D NEDBANK 57 683 474 000 13.0 751035802000 2.9%
# E INVESTEC 24 265 229 000 14.5 352780580000 1.9%
# F CITYBANK 5 123 291 030 13.9 71388112090 0.8%
# G CAPITEC 13 056 412 000 3.7 48913724000 5.8%
# H AfricanB 7 466 274 000 6.8 50419037000 2.2%
# I JPM 3 246 873 000 16.6 53788923000 0.3%
# J HSBC 3 919 312 000 11.7 45886507000 1.9%
# K CHARTERED 3 688 896 000 9.4 34731129000 1.7%
# L BoCHINA 4 178 651 000 7.5 31464510000 1.1%
# M DeutscheB 1 433 355 000 14.9 21321901000 0.1%
# N BNP 636 568 000 21.8 13862348000 0.3%
# O SOCIETEG 666 836 000 16.6 11058957000 1.3%
df3
debt/equity
absa_assets['InstitutionDescription'] = absa_assets['InstitutionDescription'].apply(lambda x: x.replace('ABSA BANK LTD ', 'ABSA'))
absa_assets
# id Name Equity Leverage Debt m1 Cash and gold reserves
# A SBSA 86 200 249 000 13.1 1127327864000 2.5%
# C FNB 75 526 054 370 12.0 904393561840 2.7%
# B ABSA 57 255 883 000 15.2 869270227000 2.8%
# D NEDBANK 57 683 474 000 13.0 751035802000 2.9%
# E INVESTEC 24 265 229 000 14.5 352780580000 1.9%
# F CITYBANK 5 123 291 030 13.9 71388112090 0.8%
# G CAPITEC 13 056 412 000 3.7 48913724000 5.8%
# H AfricanB 7 466 274 000 6.8 50419037000 2.2%
# I JPM 3 246 873 000 16.6 53788923000 0.3%
# J HSBC 3 919 312 000 11.7 45886507000 1.9%
# K CHARTERED 3 688 896 000 9.4 34731129000 1.7%
# L BoCHINA 4 178 651 000 7.5 31464510000 1.1%
# M DeutscheB 1 433 355 000 14.9 21321901000 0.1%
# N BNP 636 568 000 21.8 13862348000 0.3%
# O SOCIETEG 666 836 000 16.6 11058957000 1.3%
# pasted reference output (bank appearance counts) kept as comments so the cell still parses:
# THE STANDARD BANK OF S A LTD
# 14 FIRSTRAND BANK LIMITED
# 14 ABSA BANK LTD
# 14 NEDBANK LTD
# 14 INVESTEC BANK LTD
# 14 CAPITEC BANK
# 14 THE HONGKONG AND SHANGHAI BANKING CORPORATION ...
# 14 CITIBANK N.A
# 14 STANDARD CHARTERED BANK
# 14 CHINA CONSTRUCTION BANK CORPORATION - ...
absa_equity.Value.values[0]+absa_debt.Value.values[0]
absa_equity=MASTER[(MASTER['TheYear']=='2008')&(MASTER['TheMonth']=='11')&
(MASTER['InstitutionDescription']=='ABSA BANK LTD ')&(MASTER['ItemNumber']=='95')]
absa_equity
###Output
_____no_output_____ |
notebooks/pattern_enumeration.ipynb | ###Markdown
 AMPLPY: Pattern Enumeration
Documentation: http://amplpy.readthedocs.io
GitHub Repository: https://github.com/ampl/amplpy
PyPI Repository: https://pypi.python.org/pypi/amplpy
Imports
###Code
from __future__ import print_function
from amplpy import AMPL
import os
###Output
_____no_output_____
###Markdown
Basic pattern-cutting model
###Code
with open(os.path.join('models', 'cut.mod'), 'r') as f:
print(f.read())
###Output
param nPatterns integer > 0;
set PATTERNS = 1..nPatterns; # patterns
set WIDTHS; # finished widths
param order {WIDTHS} >= 0; # rolls of width j ordered
param overrun; # permitted overrun on any width
param rolls {WIDTHS,PATTERNS} >= 0 default 0; # rolls of width i in pattern j
var Cut {PATTERNS} integer >= 0; # raw rolls to cut in each pattern
minimize TotalRawRolls: sum {p in PATTERNS} Cut[p];
subject to FinishedRollLimits {w in WIDTHS}:
order[w] <= sum {p in PATTERNS} rolls[w,p] * Cut[p] <= order[w] + overrun;
###Markdown
Enumeration routine
###Code
from math import floor
def patternEnum(roll_width, widths, prefix=[]):
max_rep = int(floor(roll_width/widths[0]))
if len(widths) == 1:
patmat = [prefix+[max_rep]]
else:
patmat = []
for n in reversed(range(max_rep+1)):
patmat += patternEnum(roll_width-n*widths[0], widths[1:], prefix+[n])
return patmat
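
# Illustrative check (values chosen here, not from the notebook): cutting widths 5 and 3
# from a roll of width 10 enumerates the maximal patterns
#   patternEnum(10, [5, 3])  ->  [[2, 0], [1, 1], [0, 3]]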
###Output
_____no_output_____
###Markdown
Plotting routine
###Code
def cuttingPlot(roll_width, widths, solution):
import numpy as np
import matplotlib.pyplot as plt
ind = np.arange(len(solution))
acc = [0]*len(solution)
for p, (patt, rep) in enumerate(solution):
for i in range(len(widths)):
for j in range(patt[i]):
vec = [0]*len(solution)
vec[p] = widths[i]
plt.bar(ind, vec, width=0.35, bottom=acc)
acc[p] += widths[i]
plt.title('Solution')
plt.xticks(ind, tuple("x {:}".format(rep) for patt, rep in solution))
plt.yticks(np.arange(0, roll_width, 10))
plt.show()
###Output
_____no_output_____
###Markdown
Set & generate data
###Code
roll_width = 64.5
overrun = 6
orders = {
6.77: 10,
7.56: 40,
17.46: 33,
18.76: 10
}
widths = list(sorted(orders.keys(), reverse=True))
patmat = patternEnum(roll_width, widths)
###Output
_____no_output_____
###Markdown
Set up AMPL model
###Code
# Initialize
ampl = AMPL()
ampl.read(os.path.join('models', 'cut.mod'))
###Output
_____no_output_____
###Markdown
Send data to AMPL (Java/C++ style)
###Code
# Send scalar values
ampl.getParameter('overrun').set(overrun)
ampl.getParameter('nPatterns').set(len(patmat))
# Send order vector
ampl.getSet('WIDTHS').setValues(widths)
ampl.getParameter('order').setValues(orders)
# Send pattern matrix
ampl.getParameter('rolls').setValues({
(widths[i], 1+p): patmat[p][i]
for i in range(len(widths))
for p in range(len(patmat))
})
###Output
_____no_output_____
###Markdown
Send data to AMPL (alternative style)
###Code
# Send scalar values
ampl.param['overrun'] = overrun
ampl.param['nPatterns'] = len(patmat)
# Send order vector
ampl.set['WIDTHS'] = widths
ampl.param['order'] = orders
# Send pattern matrix
ampl.param['rolls'] = {
(widths[i], 1+p): patmat[p][i]
for i in range(len(widths))
for p in range(len(patmat))
}
###Output
_____no_output_____
###Markdown
Solve and report
###Code
# Solve
ampl.option['solver'] = 'gurobi'
ampl.solve()
# Retrieve solution
cutting_plan = ampl.var['Cut'].getValues()
cutvec = list(cutting_plan.getColumn('Cut.val'))
# Display solution
solution = [
(patmat[p], cutvec[p])
for p in range(len(patmat))
if cutvec[p] > 0
]
cuttingPlot(roll_width, widths, solution)
###Output
Gurobi 7.5.1: optimal solution; objective 18
8 simplex iterations
1 branch-and-cut nodes
|
Big-Data-Clusters/CU2/Public/content/sample/sam002-query-hdfs-in-sql-server.ipynb | ###Markdown
 SAM002 - Storage Pool (2 of 2) - Query HDFS
===========================================
Description
-----------
In this 2nd part of the Storage Pool tutorial, you’ll learn how to:
- **Create an external table pointing to HDFS data in a big data cluster**
- **Join this data with high-value data in the master instance**
Common functions
Define helper functions used in this notebook.
###Code
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
first_run = True
rules = None
debug_logging = False
def run(cmd, return_output=False, no_output=False, retry_count=0):
"""Run shell command, stream stdout, print stderr and optionally return output
NOTES:
1. Commands that need this kind of ' quoting on Windows e.g.:
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
Need to actually pass in as '"':
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
`iter(p.stdout.readline, b'')`
The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
"""
MAX_RETRIES = 5
output = ""
retry = False
global first_run
global rules
if first_run:
first_run = False
rules = load_rules()
# When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
#
# ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
#
if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
cmd = cmd.replace("\n", " ")
# shlex.split is required on bash and for Windows paths with spaces
#
cmd_actual = shlex.split(cmd)
# Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
#
user_provided_exe_name = cmd_actual[0].lower()
# When running python, use the python in the ADS sandbox ({sys.executable})
#
if cmd.startswith("python "):
cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
# On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
# with:
#
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
#
# Setting it to a default value of "en_US.UTF-8" enables pip install to complete
#
if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
os.environ["LC_ALL"] = "en_US.UTF-8"
# When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
#
if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")
    # To aid supportability, determine which binary file will actually be executed on the machine
#
which_binary = None
# Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
# get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
# of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
# always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
# look for the 2nd installation of CURL in the path)
if platform.system() == "Windows" and cmd.startswith("curl "):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, "curl.exe")
if os.path.exists(p) and os.access(p, os.X_OK):
if p.lower().find("system32") == -1:
cmd_actual[0] = p
which_binary = p
break
# Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
# seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
#
# NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
#
if which_binary == None:
which_binary = shutil.which(cmd_actual[0])
if which_binary == None:
if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
else:
cmd_actual[0] = which_binary
start_time = datetime.datetime.now().replace(microsecond=0)
print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
print(f" cwd: {os.getcwd()}")
# Command-line tools such as CURL and AZDATA HDFS commands output
# scrolling progress bars, which causes Jupyter to hang forever, to
# workaround this, use no_output=True
#
# Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
#
wait = True
try:
if no_output:
p = Popen(cmd_actual)
else:
p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
with p.stdout:
for line in iter(p.stdout.readline, b''):
line = line.decode()
if return_output:
output = output + line
else:
if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
regex = re.compile(' "(.*)"\: "(.*)"')
match = regex.match(line)
if match:
if match.group(1).find("HTML") != -1:
display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
else:
display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
wait = False
break # otherwise infinite hang, have not worked out why yet.
else:
print(line, end='')
if rules is not None:
apply_expert_rules(line)
if wait:
p.wait()
except FileNotFoundError as e:
if install_hint is not None:
display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
if not no_output:
for line in iter(p.stderr.readline, b''):
try:
line_decoded = line.decode()
except UnicodeDecodeError:
# NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
#
# \xa0
#
# For example see this in the response from `az group create`:
#
# ERROR: Get Token request returned http error: 400 and server
# response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
# The refresh token has expired due to inactivity.\xa0The token was
# issued on 2018-10-25T23:35:11.9832872Z
#
# which generates the exception:
#
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
#
print("WARNING: Unable to decode stderr line, printing raw bytes:")
print(line)
line_decoded = ""
pass
else:
# azdata emits a single empty line to stderr when doing an hdfs cp, don't
# print this empty "ERR:" as it confuses.
#
if line_decoded == "":
continue
print(f"STDERR: {line_decoded}", end='')
if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
exit_code_workaround = 1
# inject HINTs to next TSG/SOP based on output in stderr
#
if user_provided_exe_name in error_hints:
for error_hint in error_hints[user_provided_exe_name]:
if line_decoded.find(error_hint[0]) != -1:
display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
# apply expert rules (to run follow-on notebooks), based on output
#
if rules is not None:
apply_expert_rules(line_decoded)
# Verify if a transient error, if so automatically retry (recursive)
#
if user_provided_exe_name in retry_hints:
for retry_hint in retry_hints[user_provided_exe_name]:
if line_decoded.find(retry_hint) != -1:
if retry_count < MAX_RETRIES:
print(f"RETRY: {retry_count} (due to: {retry_hint})")
retry_count = retry_count + 1
output = run(cmd, return_output=return_output, retry_count=retry_count)
if return_output:
return output
else:
return
elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
# WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
# don't wait here, if success known above
#
if wait:
if p.returncode != 0:
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
else:
if exit_code_workaround !=0 :
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
print(f'\nSUCCESS: {elapsed}s elapsed.\n')
if return_output:
return output
def load_json(filename):
"""Load a json file from disk and return the contents"""
with open(filename, encoding="utf8") as json_file:
return json.load(json_file)
def load_rules():
"""Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable"""
try:
# Load this notebook as json to get access to the expert rules in the notebook metadata.
#
j = load_json("sam002-query-hdfs-in-sql-server.ipynb")
except:
pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?
else:
if "metadata" in j and \
"azdata" in j["metadata"] and \
"expert" in j["metadata"]["azdata"] and \
"rules" in j["metadata"]["azdata"]["expert"]:
rules = j["metadata"]["azdata"]["expert"]["rules"]
rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.
# print (f"EXPERT: There are {len(rules)} rules to evaluate.")
return rules
def apply_expert_rules(line):
"""Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so
inject a 'HINT' to the follow-on SOP/TSG to run"""
global rules
for rule in rules:
# rules that have 9 elements are the injected (output) rules (the ones we want). Rules
# with only 8 elements are the source (input) rules, which are not expanded (i.e. TSG029,
# not ../repair/tsg029-nb-name.ipynb)
if len(rule) == 9:
notebook = rule[1]
cell_type = rule[2]
output_type = rule[3] # i.e. stream or error
output_type_name = rule[4] # i.e. ename or name
output_type_value = rule[5] # i.e. SystemExit or stdout
details_name = rule[6] # i.e. evalue or text
expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!
if debug_logging:
print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")
if re.match(expression, line, re.DOTALL):
if debug_logging:
print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook))
match_found = True
display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')
# Hints for binary (transient fault) retry, (known) error and install guide
#
retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond'], 'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use']}
error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['no such host', 'TSG011 - Restart sparkhistory server', '../repair/tsg011-restart-sparkhistory-server.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']], 'azdata': [['azdata login', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', '../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Error processing command: "ApiError', 'TSG110 - Azdata returns ApiError', '../repair/tsg110-azdata-returns-apierror.ipynb'], ['Error processing command: "ControllerError', 'TSG036 - Controller logs', '../log-analyzers/tsg036-get-controller-logs.ipynb'], ['ERROR: 500', 'TSG046 - Knox gateway logs', '../log-analyzers/tsg046-get-knox-logs.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ["Can't open lib 'ODBC Driver 17 for SQL Server", 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb']]}
install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb'], 'azdata': ['SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb']}
###Output
_____no_output_____
###Markdown
Instantiate Kubernetes client
###Code
# Instantiate the Python Kubernetes client into 'api' variable
import os
try:
from kubernetes import client, config
from kubernetes.stream import stream
if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ:
config.load_incluster_config()
else:
try:
config.load_kube_config()
except:
display(Markdown(f'HINT: Use [TSG112 - App-Deploy Proxy Nginx Logs](../log-analyzers/tsg112-get-approxy-nginx-logs.ipynb) to resolve this issue.'))
raise
api = client.CoreV1Api()
print('Kubernetes client instantiated')
except ImportError:
from IPython.display import Markdown
display(Markdown(f'HINT: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.'))
raise
###Output
_____no_output_____
###Markdown
Get the namespace for the big data clusterGet the namespace of the Big Data Cluster from the Kubernetes API.**NOTE:**If there is more than one Big Data Cluster in the target Kubernetes cluster, then either:- set \[0\] to the correct value for the big data cluster.- set the environment variable AZDATA\_NAMESPACE before starting Azure Data Studio.
###Code
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
namespace = os.environ["AZDATA_NAMESPACE"]
else:
try:
namespace = api.list_namespace(label_selector='MSSQL_CLUSTER').items[0].metadata.name
except IndexError:
from IPython.display import Markdown
display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
raise
print('The kubernetes namespace for your big data cluster is: ' + namespace)
###Output
_____no_output_____
###Markdown
Create an external table to HDFSThe storage pool contains web clickstream data in a .csv file stored in HDFS. Use the following steps to define an external table that can access the data in that file.
###Code
from IPython.display import Markdown
try:
%load_ext sql
except ModuleNotFoundError:
display(Markdown(f'HINT: Use [SOP062 - Install ipython-sql and pyodbc modules](../install/sop062-install-ipython-sql-module.ipynb) to resolve this issue.'))
raise
import json
import base64
controller_username = run(f'kubectl get secret/controller-login-secret -n {namespace} -o jsonpath={{.data.username}}', return_output=True)
controller_username = base64.b64decode(controller_username).decode('utf-8')
controller_password = run(f'kubectl get secret/controller-login-secret -n {namespace} -o jsonpath={{.data.password}}', return_output=True)
controller_password = base64.b64decode(controller_password).decode('utf-8')
master_endpoint_details = run('azdata bdc endpoint list --endpoint="sql-server-master"', return_output=True)
json = json.loads(master_endpoint_details)
sql_master_tcp_and_port = json['endpoint']
%sql mssql+pyodbc://{controller_username}:{controller_password}@{sql_master_tcp_and_port}/master?driver=SQL+Server+Native+Client+11.0&autocommit=True
%%sql
-- Create the new database if it does not exist already
IF NOT EXISTS (
SELECT [name]
FROM sys.databases
WHERE [name] = N'Testing'
)
CREATE DATABASE Testing
###Output
_____no_output_____
###Markdown
Run the following Transact-SQL command to change context to the database you created in the master instance
###Code
%%sql
USE Testing
###Output
_____no_output_____
###Markdown
Define the format of the .csv or Parquet file to read from HDFSFor CSV:
###Code
%%sql
CREATE EXTERNAL FILE FORMAT csv_file
WITH (
FORMAT_TYPE = DELIMITEDTEXT,
FORMAT_OPTIONS(
FIELD_TERMINATOR = ',',
STRING_DELIMITER = '"',
USE_TYPE_DEFAULT = TRUE)
)
###Output
_____no_output_____
###Markdown
For Parquet:
###Code
%%sql
CREATE EXTERNAL FILE FORMAT PARQUET
WITH (
FORMAT_TYPE = PARQUET
)
###Output
_____no_output_____
###Markdown
Create an external data source to the storage pool if it does not already exist
###Code
%%sql
--DROP EXTERNAL DATA SOURCE SqlStoragePool
IF NOT EXISTS(SELECT * FROM sys.external_data_sources WHERE name = 'SqlStoragePool')
BEGIN
CREATE EXTERNAL DATA SOURCE SqlStoragePool
WITH (LOCATION = 'sqlhdfs://controller-svc/default')
END
###Output
_____no_output_____
###Markdown
Create an external table that can read the `/tmp/clickstream_data` from the storage poolThe SQLStoragePool is accessible from the master instance of a big data cluster.For CSV:
###Code
%%sql
CREATE EXTERNAL TABLE [clickstream_data_table_csv]
("NumberID" BIGINT ,
"Name" Varchar(120) ,
"Name2" Varchar(120),
"Price" Decimal ,
"Discount" Decimal ,
"Money" Decimal,
"Money2" Decimal,
"Type" Varchar(120),
"Space" Varchar(120))
WITH
(
DATA_SOURCE = SqlStoragePool,
LOCATION = '/tmp/clickstream_data',
FILE_FORMAT = csv_file
)
###Output
_____no_output_____
###Markdown
For Parquet:
###Code
%%sql
CREATE EXTERNAL TABLE [clickstream_data_table_parquet]
("NumberID" BIGINT ,
"Name" Varchar(120) ,
"Name2" Varchar(120),
"Price" BIGINT ,
"Discount" FLOAT,
"Money" FLOAT,
"Money2" FLOAT,
"Type" Varchar(120),
"Space" Varchar(120))
WITH
(
DATA_SOURCE = SqlStoragePool,
LOCATION = '/tmp/clickstream_data_parquet',
FILE_FORMAT = PARQUET
)
###Output
_____no_output_____
###Markdown
Query the dataRun the following queries to read the HDFS data through the external tables created above, and then join it with the relational data in the local database on the master instance (a hedged join sketch follows the CSV query below).For CSV:
###Code
%%sql
select * from [clickstream_data_table_csv]
###Output
_____no_output_____
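###Markdown
The notebook's stated goal is to join this HDFS data with high-value relational data in the master instance, but the sample stops at plain SELECT statements. The cell below is only a hedged sketch of such a join: the `dbo.products` table and its `NumberID` key in the `Testing` database are assumptions, not objects created by this notebook; substitute whatever relational table you actually loaded.
###Code
%%sql
-- Hypothetical join sketch: dbo.products and its NumberID column are assumed to exist in Testing
SELECT TOP 10 p.*, c.[Price], c.[Discount]
FROM dbo.products AS p
JOIN [clickstream_data_table_csv] AS c
    ON p.[NumberID] = c.[NumberID]
###Output
_____no_output_____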
###Markdown
For Parquet:
###Code
%%sql
select * from [clickstream_data_table_parquet]
###Output
_____no_output_____ |
SphereMailRU/BD-11/ABDP/hw1.ipynb | ###Markdown
**General information****Deadline:** 13 March 2017, 06:00 **Late penalty:** -2 points after 06:00 on 13 March, -4 points after 06:00 on 20 March, -6 points after 06:00 on 27 March. Include your last name in the file name when submitting the homework. Submit the homework as a link to your GitHub repository in Slack to @alkhamush. Create a task in a private Slack chat: /todo LastName FirstName github-link @alkhamush Example: /todo Ksenia Stroykova https://github.com/stroykova/spheremailru/stroykova_hw1.ipynb @alkhamush Use this IPython Notebook when preparing the homework. Task 1 (2 points) Implement KNN in the MyKNeighborsClassifier class (mandatory condition: accuracy no lower than the sklearn implementation). Work out on your own which distance metric KNeighborsClassifier uses by default and implement your algorithm with exactly that metric. Also work out on your own how score is computed in KNeighborsClassifier and implement an equivalent in your class. Task 2 (2 points) Achieve fit, predict and predict_proba speeds comparable to sklearn (4 points, covering iris and mnist). Use numpy for this. Task 3 (2 points) For iris, find the n_neighbors value that gives the best score. Plot score as a function of n_neighbors. Task 4 (2 points) Meet the pep8 requirements. Task 5 (2 points) Describe what the following libraries/classes/functions are for (the list is below)
###Code
import numpy as np
import matplotlib.pyplot as plt
from line_profiler import LineProfiler
from sklearn.metrics.pairwise import pairwise_distances
import seaborn as sns
from sklearn import datasets
from sklearn.base import ClassifierMixin
from sklearn.datasets import fetch_mldata
from sklearn.neighbors.base import NeighborsBase, KNeighborsMixin, SupervisedIntegerMixin
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
%load_ext pycodestyle_magic
def profile_print(func_to_call, *args):
profiler = LineProfiler()
profiler.add_function(func_to_call)
profiler.runcall(func_to_call, *args)
profiler.print_stats()
%%pycodestyle
class MyKNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
def __init__(self, n_neighbors=3):
self.n_neighbors = n_neighbors
def fit(self, X, y):
self.X = np.float64(X)
self.classes, self.y = np.unique(y, return_inverse=True)
def euclidean_metric(self, v):
return np.sqrt(((self.X - v) ** 2).sum(axis=1))
'''
def cnt(self, v):
z = np.zeros(self.classes.size)
for i in v:
z[i] += 1
return z
def predict_proba(self, X): # more understandable
X = np.float64(X)
# euclidean by default, can use multithreading
dist = pairwise_distances(X, self.X)
ind = np.argsort(dist, axis=1)[:, :self.n_neighbors]
return np.apply_along_axis(self.cnt, 1, self.y[ind]) / self.n_neighbors
'''
# '''
def predict_proba(self, X): # more quickly
X = np.float64(X)
# euclidean by default, can use multithreading
dist = pairwise_distances(X, self.X)
ind = np.argsort(dist, axis=1)[:, :self.n_neighbors]
classes = self.y[ind]
crange = np.arange(self.classes.shape[0])
clss = classes.reshape((classes.shape[0], 1, classes.shape[1]))
crng = crange.reshape((1, crange.shape[0], 1))
counts = np.sum(clss == crng, axis=2)
return counts / self.n_neighbors
# '''
def predict(self, X):
proba = self.predict_proba(X)
return self.classes[np.argsort(proba, axis=1)[:, -1]]
def score(self, X, y):
pred = self.predict(X)
return 1 - np.count_nonzero(y - pred) / y.shape[0]
###Output
_____no_output_____
###Markdown
**IRIS**
###Code
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.1, stratify=iris.target)
clf = KNeighborsClassifier(n_neighbors=17)
my_clf = MyKNeighborsClassifier(n_neighbors=17)
%time clf.fit(X_train, y_train)
%time my_clf.fit(X_train, y_train)
%time clf.predict(X_test)
%time my_clf.predict(X_test)
#profile_print(my_clf.predict, X_test)
%time clf.predict_proba(X_test)
#%time my_clf.predict_proba(X_test)
profile_print(my_clf.predict_proba, X_test)
clf.score(X_test, y_test)
my_clf.score(X_test, y_test)
# Task 3
# best n_neighbors: 16 - 17
num_n = 30
num_av = 2000
scm = np.zeros(num_n)
sc = np.zeros(num_av)
for n in range(1, num_n + 1):
print (n)
for i in range(num_av):
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.1, stratify=iris.target)
my_clf = MyKNeighborsClassifier(n_neighbors=n)
my_clf.fit(X_train, y_train)
sc[i] = my_clf.score(X_test, y_test)
scm[n - 1] = sc.mean()
plt.plot(range(1, num_n + 1), scm, 'ro-')
plt.show()
###Output
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
###Markdown
**MNIST**
###Code
mnist = fetch_mldata('MNIST original')
X_train, X_test, y_train, y_test = train_test_split(mnist.data, mnist.target, test_size=0.01, stratify=mnist.target)
y_train.shape
clf = KNeighborsClassifier(n_neighbors=5)
my_clf = MyKNeighborsClassifier(n_neighbors=5)
%time clf.fit(X_train, y_train)
%time my_clf.fit(X_train, y_train)
%time clf.predict(X_test)
%time my_clf.predict(X_test)
%time clf.predict_proba(X_test)
#%time my_clf.predict_proba(X_test)
%time profile_print(my_clf.predict_proba, X_test)
clf.score(X_test, y_test)
my_clf.score(X_test, y_test)
# n_neighbors = 5
num_n = 30
num_av = 20
scm = np.zeros(num_n)
sc = np.zeros(num_av)
for n in range(1, num_n + 1):
print (n)
for i in range(num_av):
print (n, ' ', i)
X_train, X_test, y_train, y_test = train_test_split(mnist.data, mnist.target, test_size=0.001, stratify=mnist.target)
my_clf = MyKNeighborsClassifier(n_neighbors=n)
my_clf.fit(X_train, y_train)
sc[i] = my_clf.score(X_test, y_test)
scm[n - 1] = sc.mean()
plt.plot(range(1, num_n + 1), scm, 'ro-')
plt.show()
print (1)
###Output
1
###Markdown
Task 5
###Code
# seaborn - attractive, easy-to-write statistical plots and visualization
# matplotlib - more verbose to write but more flexible than seaborn
# train_test_split - splits the data into training and test parts
# Pipeline (from sklearn.pipeline import Pipeline) - chains preprocessing steps and an estimator into a single pipeline
# StandardScaler (from sklearn.preprocessing import StandardScaler) - feature standardization (scaling)
# ClassifierMixin - common mixin for classifiers; implements score
# NeighborsBase - base class for KNN estimators
# KNeighborsMixin - mixin providing the nearest-neighbors search method
# SupervisedIntegerMixin - mixin with a fit function that maps the data
# to integer-encoded target labels
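# Minimal illustration (not part of the original answer) of how Pipeline and StandardScaler
# combine with a classifier; the n_neighbors value below is an arbitrary assumption.
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
pipe = Pipeline([('scaler', StandardScaler()),
                 ('knn', KNeighborsClassifier(n_neighbors=17))])
pipe.fit(X_train, y_train)  # the scaler is fit first, then the scaled data feeds the classifier
print(pipe.score(X_test, y_test))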
###Output
_____no_output_____ |
Design_Patterns/Examples/Python/Notebook.ipynb | ###Markdown
Python Objects Everything in Python is an object Almost everything has attributes Let's prove this
###Code
i = 1
type(i)
i.__doc__
i??
dir(i)
###Output
_____no_output_____
###Markdown
Classes Classes are a way to group data and functionality all under one roof Properties vs. Methods Let's start with a simple example: suppose we want to be able to convert Celsius to Fahrenheit, so we write a class _Example from [here](https://www.programiz.com/python-programming/property)_ Naive Public Class
###Code
class Celsius:
"""Celsius is WAY buttah then fahrenheits
"""
def __init__(self, temperature = 0):
self.temperature = temperature
def to_fahrenheit(self):
"""Convert yo
"""
return (self.temperature * (9/5)) + 32
c1 = Celsius(temperature=37)
c1.temperature
c1.to_fahrenheit()
c1.to_fahrenheit??
###Output
_____no_output_____
###Markdown
What if we wanted to implement a limit on the temperature, as in, we can't go lower than `-273` Celsius Class With Getters and Setters
###Code
class Celsius:
def __init__(self, temperature = 0):
self.set_temperature(temperature)
def to_fahrenheit(self):
return (self.get_temperature() * (9/5)) + 32
# new update
def get_temperature(self):
return self._temperature
def set_temperature(self, value):
if value < -273:
raise ValueError("Temperature below -273 is not possible")
self._temperature = value
###Output
_____no_output_____
###Markdown
Let's make sure the original code still works
###Code
c2 = Celsius(37)
c2.get_temperature()
c2.to_fahrenheit()
###Output
_____no_output_____
###Markdown
And let's prove our new limit is in place
###Code
c3 = Celsius(-277)
###Output
_____no_output_____
###Markdown
We see the error happened, which is expected Using Property
###Code
class Celsius:
    def __init__(self, temperature=0):
        self.temperature = temperature  # routed through set_temperature via the property binding below
    @property
    def to_fahrenheit(self):
        return (self.temperature * (9/5)) + 32
    def get_temperature(self):
        print("Getting value")
        return self._temperature
    def set_temperature(self, value):
        if value < -273:
            raise ValueError("Temperature below -273 is not possible")
        print("Setting value")
        self._temperature = value
    # bind the getter/setter to the public attribute name so obj.temperature uses them
    temperature = property(get_temperature, set_temperature)
c6 = Celsius(100)
c6.to_fahrenheit
c4 = Celsius(37)
c4.to_fahrenheit  # to_fahrenheit is a property, so no call parentheses
###Output
_____no_output_____
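###Markdown
The same behaviour is usually written with decorators: `@property` marks the getter and `@temperature.setter` marks the setter, so callers still just use `obj.temperature`. The cell below is an equivalent sketch of the class above, not part of the original notebook.
###Code
class CelsiusDecorated:
    def __init__(self, temperature=0):
        self.temperature = temperature  # goes through the @temperature.setter below
    @property
    def to_fahrenheit(self):
        return (self.temperature * (9/5)) + 32
    @property
    def temperature(self):
        print("Getting value")
        return self._temperature
    @temperature.setter
    def temperature(self, value):
        if value < -273:
            raise ValueError("Temperature below -273 is not possible")
        print("Setting value")
        self._temperature = value
c7 = CelsiusDecorated(37)
c7.to_fahrenheit
###Output
_____no_output_____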
###Markdown
Initialization Another initialization example
###Code
class Car(object):
def __init__(self, model, color, company, speed_limit):
print("Initialized!")
self.color = color
self.company = company
self.speed_limit = speed_limit
self.model = model
def start(self):
print("Started!")
def stop(self):
print("Stopped!")
    def accelerate(self):
        print("Accelerating!")
def change_gear(self, gear_type):
print("Gear changed!")
car = Car("Camry", "Blue", "Toyota", "110")
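# Quick usage check of the methods defined above (illustrative; not part of the original cell)
car.start()
car.change_gear("drive")
car.accelerate()
car.stop()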
###Output
_____no_output_____
###Markdown
Create or Extend a Class "Closed for modification, open for extension": code shouldn't be changed once it is in use; it should be extended. What if we had to write code to send SMSs?_Example from [here](https://hashedin.com/blog/open-closed-principle-in-python-designing-modules-part-4/)_ Let's say we wrote the following code:
###Code
class SmsClient:
def send_sms(self, phone_number, message):
# send the SMS
return
###Output
_____no_output_____
###Markdown
And then later we are requested to resend the SMS if delivery failed. We could just change our code to be:
###Code
class SmsClient:
def send_sms(self, phone_number, message):
# send the SMS
# retry if failed
return
###Output
_____no_output_____
###Markdown
But this would modify what we already proved works; instead, we can extend our original code
###Code
class SmsClientWithRetry(SmsClient):
    def __init__(self, username, password):
        # NOTE: the full SmsClient in the linked post accepts these credentials; the stripped-down
        # version above has no __init__, so this call is kept only to mirror the original example
        super().__init__(username, password)
    def send_sms(self, phone_number, message):
        # this is the original sending code; super() must target SmsClientWithRetry, not SmsClient
        super().send_sms(phone_number, message)
        # this is our extension
        # retry if failed
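# A hedged sketch of what the retry extension could look like in practice; max_retries and the
# broad Exception handling are illustrative assumptions, not taken from the original post.
class SmsClientWithSimpleRetry(SmsClient):
    def send_sms(self, phone_number, message, max_retries=3):
        for attempt in range(1, max_retries + 1):
            try:
                return super().send_sms(phone_number, message)  # reuse the original sending code
            except Exception:
                if attempt == max_retries:
                    raise  # give up after the last attempt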
###Output
_____no_output_____ |
line_fitting_matpotlib.ipynb | ###Markdown
Example of performing linear least squares fitting First we import numpy and matplotlib as usual
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Now, let's generate some random data about a trend line.
###Code
#set a random number seed
np.random.seed(119)
#set number of data points
npoints = 50
#set x
x = np.linspace(0,10.,npoints)
#set slope, intercept, and scatter rms
m = 2.0
b = 1.0
sigma = 2.0
#generate y points
y = m*x + b + np.random.normal(scale=sigma,size=npoints)
###Output
_____no_output_____
###Markdown
Let's just plot the data first
###Code
f = plt.figure(figsize=(7,7))
plt.errorbar(x,y,sigma,fmt='o')
plt.xlabel('x')
plt.ylabel('y')
###Output
_____no_output_____
###Markdown
Method 1, polyfit()
###Code
# np.polyfit returns coefficients highest power first, so a degree-1 fit gives (slope, intercept);
# here every point shares the same uncertainty sigma, so the weights are uniform 1/sigma
m_fit, b_fit = np.polyfit(x, y, 1, w=np.full(npoints, 1./sigma))  # weights from uncertainties
print(m_fit, b_fit)
y_fit = m_fit * x + b_fit
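# Assumed follow-up (the original notebook is truncated here): overplot the fitted line on the data
f = plt.figure(figsize=(7, 7))
plt.errorbar(x, y, sigma, fmt='o', label='data')
plt.plot(x, y_fit, label=f'fit: m={m_fit:.2f}, b={b_fit:.2f}')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc=2, frameon=False)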
###Output
_____no_output_____ |