path (string, lengths 7-265)
concatenated_notebook (string, lengths 46-17M)
.ipynb_checkpoints/Merge_Wikipedia_and_Kaggle-checkpoint.ipynb
###Markdown movies_df[['release_date_wiki','release_date_kaggle']].plot(x='release_date_wiki', y='release_date_kaggle', style='.') ###Code movies_df[(movies_df['release_date_wiki'] > '1996-01-01') & (movies_df['release_date_kaggle'] < '1965-01-01')] movies_df[(movies_df['release_date_wiki'] > '1996-01-01') & (movies_df['release_date_kaggle'] < '1965-01-01')].index movies_df = movies_df.drop(movies_df[(movies_df['release_date_wiki'] > '1996-01-01') & (movies_df['release_date_kaggle'] < '1965-01-01')].index) movies_df[movies_df['release_date_wiki'].isnull()] movies_df['Language'].value_counts() movies_df['Language'].apply(lambda x: tuple(x) if type(x) == list else x).value_counts(dropna=False) movies_df['original_language'].value_counts(dropna=False) movies_df[['Production company(s)','production_companies']] movies_df.drop(columns=['title_wiki','release_date_wiki','Language','Production company(s)'], inplace=True) def fill_missing_kaggle_data(df, kaggle_column, wiki_column): df[kaggle_column] = df.apply( lambda row: row[wiki_column] if row[kaggle_column] == 0 else row[kaggle_column] , axis=1) df.drop(columns=wiki_column, inplace=True) fill_missing_kaggle_data(movies_df, 'runtime', 'running_time') fill_missing_kaggle_data(movies_df, 'budget_kaggle', 'budget_wiki') fill_missing_kaggle_data(movies_df, 'revenue', 'box_office') movies_df for col in movies_df.columns: lists_to_tuples = lambda x: tuple(x) if type(x) == list else x value_counts = movies_df[col].apply(lists_to_tuples).value_counts(dropna=False) num_values = len(value_counts) if num_values == 1: print(col) movies_df['video'].value_counts(dropna=False) movies_df = movies_df.loc[:, ['imdb_id','id','title_kaggle','original_title','tagline','belongs_to_collection','url','imdb_link', 'runtime','budget_kaggle','revenue','release_date_kaggle','popularity','vote_average','vote_count', 'genres','original_language','overview','spoken_languages','Country', 'production_companies','production_countries','Distributor', 'Producer(s)','Director','Starring','Cinematography','Editor(s)','Writer(s)','Composer(s)','Based on' ]] movies_df.rename({'id':'kaggle_id', 'title_kaggle':'title', 'url':'wikipedia_url', 'budget_kaggle':'budget', 'release_date_kaggle':'release_date', 'Country':'country', 'Distributor':'distributor', 'Producer(s)':'producers', 'Director':'director', 'Starring':'starring', 'Cinematography':'cinematography', 'Editor(s)':'editors', 'Writer(s)':'writers', 'Composer(s)':'composers', 'Based on':'based_on' }, axis='columns', inplace=True) rating_counts = ratings.groupby(['movieId','rating'], as_index=False).count() \ .rename({'userId':'count'}, axis=1) \ .pivot(index='movieId',columns='rating', values='count') rating_counts.columns = ['rating_' + str(col) for col in rating_counts.columns] movies_with_ratings_df = pd.merge(movies_df, rating_counts, left_on='kaggle_id', right_index=True, how='left') movies_with_ratings_df[rating_counts.columns] = movies_with_ratings_df[rating_counts.columns].fillna(0) movies_with_ratings_df.head() ###Output _____no_output_____ ###Markdown Connect pandas with sql ###Code from sqlalchemy import create_engine pip install psycopg2-binary from config import db_password from config import password db_string = f'postgres://postgres:db_password@localhost:5432/movie_data' # {protocol}://{user}:{db_password}@{location}:{port}/{db} engine = create_engine(db_string) protocol='postgres' user='postgres' location='localhost' port='5432' db='movie_data' def connect(): 
import psycopg2 db_string=f'{protocol}://{user}:{db_password}@{location}:{port}/{db}' conn=psycopg2.connect(db_string) print('connected!') cursor=conn.cursor() return conn,cursor connect() ###Output connected!
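Note that the db_string defined a few cells earlier embeds the literal text db_password rather than interpolating the imported variable, and newer SQLAlchemy versions expect the postgresql:// scheme. A minimal sketch of the intended pandas-to-SQL step, reusing movies_with_ratings_df from above (the table name "movies" is an assumption, not from the notebook):
###Code
from sqlalchemy import create_engine
from config import db_password  # assumed to exist, as in the cell above

# Build the URL with the password actually interpolated, then write the merged DataFrame.
db_string = f"postgresql://postgres:{db_password}@localhost:5432/movie_data"
engine = create_engine(db_string)
movies_with_ratings_df.to_sql(name="movies", con=engine, if_exists="replace", index=False)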
Exercises/03-ImageEnhancement/02-Demonstration.ipynb
###Markdown Setup playground ###Code import numpy as np import matplotlib.pyplot as plt from scipy import ndimage %matplotlib inline ###Output _____no_output_____ ###Markdown Playing with noise generators ###Code nimg = np.random.normal(0,1,size=[100,100]) uimg = np.random.uniform(-1,1,size=[100,100]) pimg = np.random.poisson(5,size=[100,100]) plt.figure(figsize=[15,8]) plt.subplot(1,3,1) plt.imshow(nimg) plt.subplot(1,3,2) plt.imshow(uimg) plt.subplot(1,3,3) plt.imshow(pimg) ###Output _____no_output_____ ###Markdown Playing with filters ###Code gimg=ndimage.filters.gaussian_filter(nimg,1.0) fimg=ndimage.filters.uniform_filter(nimg,3) mimg=ndimage.filters.median_filter(nimg,3) aimg=ndimage.filters.convolve(nimg,[[1,2,1],[2,4,2],[1,2,1]]) plt.figure(figsize=[15,13]) plt.subplot(2,2,1) plt.imshow(gimg) plt.title('Gaussian') plt.subplot(2,2,2) plt.imshow(fimg) plt.title('Uniform') plt.subplot(2,2,3) plt.imshow(fimg) plt.title('Median') plt.subplot(2,2,4) plt.imshow(fimg) plt.title('Convolve (binomial)') ###Output _____no_output_____ ###Markdown Demonstrating edge blurringWe want to observe what happens with edges when filtering Make test dataThe test data is a wedge structure with increasing gap. ###Code img=np.zeros([100,100]) img[0:50,:]=1 for i in range(img.shape[0]) : img[(51+int(i/10)):img.shape[0],i]=1 plt.imshow(img,cmap='gray') ###Output _____no_output_____ ###Markdown Evaluate under different conditions - Add noise of different strength - Apply different uniform filter kernels - Observe what happens on thresholded data Make noisy image ###Code SNR=4 sigma=1/SNR noise=np.random.normal(0,sigma,img.shape) nimg=img+noise plt.imshow(nimg) threshold = 0.5 # Threshold for segmentation fig=plt.figure(figsize=[15,13]) for i in range(3) : if (i==0) : plt.subplot(3,3,i+1) plt.imshow(0.5<nimg) plt.title('Thresholded noisy image') plt.subplot(3,3,3+i+1) N=3+i*2 fimg=ndimage.filters.uniform_filter(nimg,N) plt.imshow(fimg) plt.subplot(3,3,6+i+1) plt.imshow(threshold<fimg) ###Output _____no_output_____
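In the filter comparison above, the 'Median' and 'Convolve (binomial)' panels appear to display fimg again rather than mimg and aimg. A short sketch of the presumably intended display, with each filtered result shown once:
###Code
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage

# Sketch: apply the same four filters to a noise image and show each result once.
nimg = np.random.normal(0, 1, size=[100, 100])
results = {'Gaussian': ndimage.gaussian_filter(nimg, 1.0),
           'Uniform': ndimage.uniform_filter(nimg, 3),
           'Median': ndimage.median_filter(nimg, 3),
           'Convolve (binomial)': ndimage.convolve(nimg, [[1, 2, 1], [2, 4, 2], [1, 2, 1]])}
plt.figure(figsize=[15, 13])
for i, (title, img) in enumerate(results.items()):
    plt.subplot(2, 2, i + 1)
    plt.imshow(img)
    plt.title(title)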
iris_decision_tree.ipynb
###Markdown Visualize a Decision Tree ###Code import graphviz dot_data = tree.export_graphviz(cls, out_file=None, feature_names=iris.feature_names, class_names=iris.target_names, filled=True, rounded=True, special_characters=True) graph = graphviz.Source(dot_data) graph ###Output _____no_output_____
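The visualization cell above relies on tree, iris and a fitted classifier named cls that are not defined in this excerpt. A minimal sketch of the presumed setup, assuming a plain DecisionTreeClassifier trained on the iris data:
###Code
from sklearn import datasets, tree

# Presumed earlier cells: load the data and fit the tree that export_graphviz visualizes.
iris = datasets.load_iris()
cls = tree.DecisionTreeClassifier()
cls.fit(iris.data, iris.target)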
Chapter24-KerasPart2/Keras-Notebook-14-RNN-Sequence-Shapes.ipynb
###Markdown Copyright (c) 2017 Andrew GlassnerPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Deep Learning From Basics to Practice by Andrew Glassner, https://dlbasics.com, http://glassner.com------ Chapter 23: Keras Notebook 14: RNN sequence shapes ###Code import numpy as np from keras.models import Sequential from keras.layers import LSTM, Dense from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error import matplotlib.pyplot as plt import math def show_output_shapes(data_shape): def make_model(X_train, return_seqs): # create and fit the LSTM network model = Sequential() model.add(LSTM(4, input_shape=X_train[0].shape, return_sequences=return_seqs)) model.compile(loss='mean_squared_error', optimizer='adam') return model data = np.zeros(data_shape) model = make_model(data, False) pred = model.predict(data, batch_size=1, verbose=2) print("Input shape: ",data.shape) print(" Without sequences: ",pred.shape) model = make_model(data, True) pred = model.predict(data, batch_size=1, verbose=2) print(" With sequences: ",pred.shape) show_output_shapes([2,5,3]) show_output_shapes([1,3,1]) show_output_shapes([1,5,1]) show_output_shapes([1,3,2]) show_output_shapes([1,5,2]) show_output_shapes([2,3,2]) show_output_shapes([2,5,2]) ###Output Input shape: (2, 5, 3) Without sequences: (2, 4) With sequences: (2, 5, 4) Input shape: (1, 3, 1) Without sequences: (1, 4) With sequences: (1, 3, 4) Input shape: (1, 5, 1) Without sequences: (1, 4) With sequences: (1, 5, 4) Input shape: (1, 3, 2) Without sequences: (1, 4) With sequences: (1, 3, 4) Input shape: (1, 5, 2) Without sequences: (1, 4) With sequences: (1, 5, 4) Input shape: (2, 3, 2) Without sequences: (2, 4) With sequences: (2, 3, 4) Input shape: (2, 5, 2) Without sequences: (2, 4) With sequences: (2, 5, 4)
basics/first/Ch10 Solution.ipynb
###Markdown Final Solution ###Code import pandas as pd import matplotlib.pyplot as plt %matplotlib inline oo = pd.read_csv('data/olympics.csv',skiprows=4) oo.head() ###Output _____no_output_____ ###Markdown In every Olympics, which US athlete has won the most total number of medals? Include the athlete's discipline. ###Code gy = oo[oo.NOC == 'USA'] gy = gy.groupby(['Edition','Athlete','Medal']).size().unstack('Medal',fill_value=0) gy['Total'] = gy['Gold'] + gy['Silver'] + gy['Bronze'] gy.reset_index(inplace=True) tu = [group.sort_values('Total',ascending=False)[:1] for year,group in gy.groupby('Edition')] tu top = pd.DataFrame() for i in tu: top = top.append(i) top ###Output _____no_output_____
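DataFrame.append, used in the loop above to build top, is deprecated and removed in pandas 2.0. An equivalent sketch using pd.concat on the list tu built above:
###Code
# Same table as the append loop above; tu is the list of one-row DataFrames per edition.
top = pd.concat(tu)
top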
labs/Deep_Reinforcement_Learning/GridWorld Using Q-Learning.ipynb
###Markdown GridWorld using Q-LearningJay UrbainThis lab consists of two parts: - Introduction of an `epsilon-greedy` agent that solves the shortest path problem for GridWorld. In this part, the code is provided.- Extenstion of the `epsilon-greedy` agent to include additional obstacles in GridWorld. For example, an unavailable square due to a rock, a pit that the agent can fall down and immediately loose this round of a game, etc. In this part, you ar required to add this functionality to the agent.We will use Q-learning to train an `epsilon-greedy` agent to find the shortest path between position $(0, 0)$ to opposing corner $(Ny-1, Nx-1)$ of a 2D rectangular grid in the 2D GridWorld environment of size $(Ny, Nx)$.The agent is restricted to displacing itself up/down/left/right by $1$ grid square per action. The agent receives a $-0.1$ penalty for every action not reaching the terminal state (to incentivize shortest path search), and a $100$ reward upon reaching the terminal state (not necessary but helps improve the value signal). The agent exploration parameter epsilon also decays by a multiplicative constant after every training episode. Tabular forms of the action-value $Q(s,a)$, reward $R(s,a)$, and policy $P(s)$ functions are used.*Note: the optimal policy exists but is a highly degenerate solution because of the multitude of ways one can traverse down the grid in the minimum number of steps. Therefore a greedy policy that always moves the agent closer towards the goal can be considered an optimal policy (can get to the goal in Ny + Nx - 2 actions). In our example, this corresponds to actions of moving right or down to the bottom-right corner.*ReferencesDemystifying Deep Reinforcement LearningDeep Q Learning Example optimal policy: [[1 1 1 1 1 2] [1 1 1 1 1 2] [1 1 1 1 1 2] [1 1 1 1 1 2] [1 1 1 1 1 2] [1 1 1 1 1 0]] action['up'] = 0 action['right'] = 1 action['down'] = 2 action['left'] = 3 Imports ###Code import os, sys, random, operator import numpy as np ###Output _____no_output_____ ###Markdown Define the agent environment ###Code class Environment: def __init__(self, Ny=8, Nx=8): # Define state space self.Ny = Ny # y grid size self.Nx = Nx # x grid size self.state_dim = (Ny, Nx) # Define action space self.action_dim = (4,) # up, right, down, left self.action_dict = {"up": 0, "right": 1, "down": 2, "left": 3} self.action_coords = [(-1, 0), (0, 1), (1, 0), (0, -1)] # translations # Define rewards table self.R = self._build_rewards() # R(s,a) agent rewards # Check action space consistency if len(self.action_dict.keys()) != len(self.action_coords): exit("err: inconsistent actions given") def reset(self): # Reset agent state to top-left grid corner self.state = (0, 0) return self.state def step(self, action): # Evolve agent state state_next = (self.state[0] + self.action_coords[action][0], self.state[1] + self.action_coords[action][1]) # Collect reward reward = self.R[self.state + (action,)] # Terminate if we reach bottom-right grid corner done = (state_next[0] == self.Ny - 1) and (state_next[1] == self.Nx - 1) # Update state self.state = state_next return state_next, reward, done def allowed_actions(self): # Generate list of actions allowed depending on agent grid location actions_allowed = [] y, x = self.state[0], self.state[1] if (y > 0): # no passing top-boundary actions_allowed.append(self.action_dict["up"]) if (y < self.Ny - 1): # no passing bottom-boundary actions_allowed.append(self.action_dict["down"]) if (x > 0): # no passing left-boundary 
actions_allowed.append(self.action_dict["left"]) if (x < self.Nx - 1): # no passing right-boundary actions_allowed.append(self.action_dict["right"]) actions_allowed = np.array(actions_allowed, dtype=int) return actions_allowed def _build_rewards(self): # Define agent rewards R[s,a] r_goal = 100 # reward for arriving at terminal state (bottom-right corner) r_nongoal = -0.1 # penalty for not reaching terminal state R = r_nongoal * np.ones(self.state_dim + self.action_dim, dtype=float) # R[s,a] R[self.Ny - 2, self.Nx - 1, self.action_dict["down"]] = r_goal # arrive from above R[self.Ny - 1, self.Nx - 2, self.action_dict["right"]] = r_goal # arrive from the left return R ###Output _____no_output_____ ###Markdown Define the agent class ###Code class Agent: def __init__(self, env): # Store state and action dimension self.state_dim = env.state_dim self.action_dim = env.action_dim # Agent learning parameters self.epsilon = 1.0 # initial exploration probability self.epsilon_decay = 0.99 # epsilon decay after each episode self.beta = 0.99 # learning rate self.gamma = 0.99 # reward discount factor # Initialize Q[s,a] table self.Q = np.zeros(self.state_dim + self.action_dim, dtype=float) def get_action(self, env): # Epsilon-greedy agent policy if random.uniform(0, 1) < self.epsilon: # explore return np.random.choice(env.allowed_actions()) else: # exploit on allowed actions state = env.state; actions_allowed = env.allowed_actions() Q_s = self.Q[state[0], state[1], actions_allowed] actions_greedy = actions_allowed[np.flatnonzero(Q_s == np.max(Q_s))] return np.random.choice(actions_greedy) def train(self, memory): # ----------------------------- # Update: # # Q[s,a] <- Q[s,a] + beta * (R[s,a] + gamma * max(Q[s,:]) - Q[s,a]) # # R[s,a] = reward for taking action a from state s # beta = learning rate # gamma = discount factor # ----------------------------- (state, action, state_next, reward, done) = memory sa = state + (action,) self.Q[sa] += self.beta * (reward + self.gamma*np.max(self.Q[state_next]) - self.Q[sa]) def display_greedy_policy(self): # greedy policy = argmax[a'] Q[s,a'] greedy_policy = np.zeros((self.state_dim[0], self.state_dim[1]), dtype=int) for x in range(self.state_dim[0]): for y in range(self.state_dim[1]): greedy_policy[y, x] = np.argmax(self.Q[y, x, :]) print("\nGreedy policy(y, x):") print(greedy_policy) print() ###Output _____no_output_____ ###Markdown Main line execution loop ###Code # Settings env = Environment(Ny=8, Nx=8) agent = Agent(env) # Train agent print("\nTraining agent...\n") N_episodes = 500 for episode in range(N_episodes): # Generate an episode iter_episode, reward_episode = 0, 0 state = env.reset() # starting state while True: action = agent.get_action(env) # get action state_next, reward, done = env.step(action) # evolve state by action agent.train((state, action, state_next, reward, done)) # train agent iter_episode += 1 reward_episode += reward if done: break state = state_next # transition to next state # Decay agent exploration parameter agent.epsilon = max(agent.epsilon * agent.epsilon_decay, 0.01) # Print if (episode == 0) or (episode + 1) % 10 == 0: print("[episode {}/{}] eps = {:.3F} -> iter = {}, rew = {:.1F}".format( episode + 1, N_episodes, agent.epsilon, iter_episode, reward_episode)) # Print greedy policy if (episode == N_episodes - 1): agent.display_greedy_policy() for (key, val) in sorted(env.action_dict.items(), key=operator.itemgetter(1)): print(" action['{}'] = {}".format(key, val)) print() ###Output Training agent... 
[episode 1/500] eps = 0.990 -> iter = 200, rew = 80.1 [episode 10/500] eps = 0.904 -> iter = 144, rew = 85.7 [episode 20/500] eps = 0.818 -> iter = 108, rew = 89.3 [episode 30/500] eps = 0.740 -> iter = 46, rew = 95.5 [episode 40/500] eps = 0.669 -> iter = 28, rew = 97.3 [episode 50/500] eps = 0.605 -> iter = 48, rew = 95.3 [episode 60/500] eps = 0.547 -> iter = 26, rew = 97.5 [episode 70/500] eps = 0.495 -> iter = 38, rew = 96.3 [episode 80/500] eps = 0.448 -> iter = 46, rew = 95.5 [episode 90/500] eps = 0.405 -> iter = 22, rew = 97.9 [episode 100/500] eps = 0.366 -> iter = 24, rew = 97.7 [episode 110/500] eps = 0.331 -> iter = 14, rew = 98.7 [episode 120/500] eps = 0.299 -> iter = 14, rew = 98.7 [episode 130/500] eps = 0.271 -> iter = 20, rew = 98.1 [episode 140/500] eps = 0.245 -> iter = 16, rew = 98.5 [episode 150/500] eps = 0.221 -> iter = 18, rew = 98.3 [episode 160/500] eps = 0.200 -> iter = 20, rew = 98.1 [episode 170/500] eps = 0.181 -> iter = 16, rew = 98.5 [episode 180/500] eps = 0.164 -> iter = 18, rew = 98.3 [episode 190/500] eps = 0.148 -> iter = 16, rew = 98.5 [episode 200/500] eps = 0.134 -> iter = 20, rew = 98.1 [episode 210/500] eps = 0.121 -> iter = 14, rew = 98.7 [episode 220/500] eps = 0.110 -> iter = 14, rew = 98.7 [episode 230/500] eps = 0.099 -> iter = 16, rew = 98.5 [episode 240/500] eps = 0.090 -> iter = 14, rew = 98.7 [episode 250/500] eps = 0.081 -> iter = 18, rew = 98.3 [episode 260/500] eps = 0.073 -> iter = 14, rew = 98.7 [episode 270/500] eps = 0.066 -> iter = 14, rew = 98.7 [episode 280/500] eps = 0.060 -> iter = 14, rew = 98.7 [episode 290/500] eps = 0.054 -> iter = 14, rew = 98.7 [episode 300/500] eps = 0.049 -> iter = 14, rew = 98.7 [episode 310/500] eps = 0.044 -> iter = 16, rew = 98.5 [episode 320/500] eps = 0.040 -> iter = 14, rew = 98.7 [episode 330/500] eps = 0.036 -> iter = 14, rew = 98.7 [episode 340/500] eps = 0.033 -> iter = 18, rew = 98.3 [episode 350/500] eps = 0.030 -> iter = 14, rew = 98.7 [episode 360/500] eps = 0.027 -> iter = 14, rew = 98.7 [episode 370/500] eps = 0.024 -> iter = 14, rew = 98.7 [episode 380/500] eps = 0.022 -> iter = 14, rew = 98.7 [episode 390/500] eps = 0.020 -> iter = 14, rew = 98.7 [episode 400/500] eps = 0.018 -> iter = 14, rew = 98.7 [episode 410/500] eps = 0.016 -> iter = 14, rew = 98.7 [episode 420/500] eps = 0.015 -> iter = 14, rew = 98.7 [episode 430/500] eps = 0.013 -> iter = 14, rew = 98.7 [episode 440/500] eps = 0.012 -> iter = 14, rew = 98.7 [episode 450/500] eps = 0.011 -> iter = 14, rew = 98.7 [episode 460/500] eps = 0.010 -> iter = 14, rew = 98.7 [episode 470/500] eps = 0.010 -> iter = 16, rew = 98.5 [episode 480/500] eps = 0.010 -> iter = 14, rew = 98.7 [episode 490/500] eps = 0.010 -> iter = 14, rew = 98.7 [episode 500/500] eps = 0.010 -> iter = 14, rew = 98.7 Greedy policy(y, x): [[1 1 1 1 1 1 2 2] [1 1 1 1 1 1 1 2] [1 1 1 1 1 1 1 2] [1 1 1 1 1 1 1 2] [1 1 1 1 1 1 1 2] [1 1 1 1 1 1 1 2] [1 1 1 1 1 1 1 2] [1 1 1 1 1 1 1 0]] action['up'] = 0 action['right'] = 1 action['down'] = 2 action['left'] = 3
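For the second part of the lab (adding obstacles such as a pit), one possible starting point is to subclass the Environment defined above. The pit position and penalty below are arbitrary illustrative choices, not part of the provided code:
###Code
PIT = (3, 3)       # assumed pit location
R_PIT = -100.0     # assumed penalty for falling into the pit

class PitEnvironment(Environment):
    def step(self, action):
        # Peek at the next state; falling into the pit ends the round immediately.
        state_next = (self.state[0] + self.action_coords[action][0],
                      self.state[1] + self.action_coords[action][1])
        if state_next == PIT:
            self.state = state_next
            return state_next, R_PIT, True
        return super().step(action)
A blocked square (a rock) can be handled in a similar way inside allowed_actions, by removing any move that would enter it.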
examples/data-type-list.ipynb
###Markdown Data Type Lists Built-in data container-like type ###Code odds = [1, 3, 5, 7] print('odds are: ', odds) print('first element', odds[0]) print('last element', odds[-1]) names = ['Curie', 'Darwing', 'Turing'] print('names is originally: ', names) names[1] = 'Darwin' print('final names is: ', names) name = names[1] print(name) print('the first letter in name is: ', name[0]) name[0] = 'd' name = 'darwin'# overwrite name = 'Darwin' print(name.lower())# casefolding print(name) name = 'Darwin' #name = 'darwin' name_casefold = name.lower() salsa = ['peppers', 'onions', 'cilantro', 'tomatoes'] my_salsa = salsa salsa[0] = 'hot peppers' print('What is in my salsa', my_salsa) ###Output What is in my salsa ['hot peppers', 'onions', 'cilantro', 'tomatoes'] ###Markdown Nested lists ###Code fridge = [['pepper', 'squash', 'onion'], ['milk', 'eggs', 'cheese'], ['ham', 'sausage', 'bacon']] print(fridge) print(fridge[0]) print(fridge[-1]) print(fridge[0][0][0]) print(fridge[0] + fridge[-1]) print('get a "p": ', fridge[0][0][0]) ###Output [['pepper', 'squash', 'onion'], ['milk', 'eggs', 'cheese'], ['ham', 'sausage', 'bacon']] ['pepper', 'squash', 'onion'] ['ham', 'sausage', 'bacon'] p ['pepper', 'squash', 'onion', 'ham', 'sausage', 'bacon'] get a "p": p
11d_viseme_image_model.ipynb
###Markdown Image model> Conv net, trained with fastai running in onnx. ###Code #export from expoco.viseme_image.data import * from pathlib import Path import numpy as np import cv2, onnxruntime ###Output _____no_output_____ ###Markdown Prepare data for inferenceWe need to replicate what fastai data loaders do ... ###Code #export def prepare_for_inference(image, image_size=256): "Convert a cv2 style image to something that can be used as input to a CNN" if image.shape[0] > image_size: image = ImageHelper().face_crop(image) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image = np.transpose(image, (2, 0, 1)) image = (image/255.) image = image.astype(np.float32) return image _image = np.zeros((4, 5, 3), np.uint8) B, G, R = 255, 125, 0 _image[:,] = B, G, R _image = prepare_for_inference(_image) assert _image.shape == (3, 4, 5) assert np.allclose(_image[0], R/255) assert np.allclose(_image[1], G/255) assert np.allclose(_image[2], B/255) #export class VisemeClassifier: def __init__(self, model_path, image_size=256): self.model = onnxruntime.InferenceSession(str(model_path)) self.image_size = image_size self.image_helper = ImageHelper() self.vocab = ['AH', 'EE', 'NO_EXPRESSION', 'OO'] self.item_queue = [] def _to_image(self, item): return self.image_helper.read_image(item) if isinstance(item, (str, Path)) else item def predict(self, items=None): if items is None: items = self.item_queue self.item_queue = [] else: items = [self._to_image(i) for i in items] items = [prepare_for_inference(i, self.image_size) for i in items] items = np.array(items) model_output = self.model.run(None, {'input': items}) output = model_output[0] class_ids = np.argmax(output, axis=1) class_names = [self.vocab[class_id] for class_id in class_ids] return class_names def queue_item(self, item): self.item_queue.append(prepare_for_inference(self._to_image(item), self.image_size)) ###Output _____no_output_____ ###Markdown `queue_item` prepares for inference as this prep is quite often slower that actually running inference ###Code model_path = Path('models/model_20211202_143854/resnet_3_256_256.onnx') img_path = Path('test/data/raw_images') imgs = [img_path/'oo_1.png', img_path/'oo_2.png', img_path/'ee_1.png', img_path/'ee_2.png'] img_path = Path('test/data/processed_images') imgs += [img_path/'ah_1.png', img_path/'ah_2.png'] viseme_classifier = VisemeClassifier(model_path) class_names = viseme_classifier.predict(imgs) for img in imgs: viseme_classifier.queue_item(img) class_names2 = viseme_classifier.predict() assert class_names == class_names2 == ['OO', 'OO', 'EE', 'EE', 'AH', 'AH'] #hide from nbdev.export import notebook2script notebook2script() ###Output Converted 00_core.ipynb. Converted 01a_camera_capture.ipynb. Converted 10a_viseme_tabular_identify_landmarks.ipynb. Converted 10b_viseme_tabular_data.ipynb. Converted 10d_viseme_tabular_model.ipynb. Converted 10e_viseme_tabular_train_model.ipynb. Converted 10f_viseme_tabular_test_model.ipynb. Converted 11b_viseme_image_data.ipynb. Converted 11d_viseme_image_model.ipynb. Converted 11e_viseme_image_train_model.ipynb. Converted 11f_viseme_image_test_model.ipynb. Converted 20a_gui_capture_command.ipynb. Converted 20a_gui_main.ipynb. Converted 70_cli.ipynb. Converted index.ipynb. Converted project_lifecycle.ipynb.
nbs/10c-activation-function-relu.ipynb
###Markdown สร้าง x เป็นเลข ระหว่าง -10 บวกทีละ 0.01 ไปจนถึง 10 เราจะเอามาเป็นแกน x ###Code x = torch.arange(-10., 10., 0.01) x ###Output _____no_output_____ ###Markdown นำ x ผ่าน activation ฟังก์ชัน ได้ค่า y ###Code activation = torch.nn.Sigmoid() y1 = activation(x) y1 activation = torch.nn.ReLU() y3 = activation(x) y3 ###Output _____no_output_____ ###Markdown แสดงกราฟ ###Code plt.ylim(top=2.0, bottom=-0.5) plt.xlim(right=2.0, left=-2.0) plt.grid() plt.plot(x.numpy(), y1.numpy()) plt.plot(x.numpy(), y3.numpy()) plt.legend(['Sigmoid', 'ReLU'], loc='upper left') plt.show() ###Output _____no_output_____ ###Markdown Leaky ReLU ด้วยค่า negative_slope = 0.05 ###Code activation = torch.nn.LeakyReLU(0.05) y4 = activation(x) ###Output _____no_output_____ ###Markdown แสดงกราฟ ###Code fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12,6)) fig.suptitle('Comparing ReLU and LeakyReLU') ax1.set_xlim([-3, 3]) ax1.set_ylim([-1, 3]) ax1.grid() ax1.set_title('ReLU') ax2.set_xlim([-3, 3]) ax2.set_ylim([-1, 3]) ax2.grid() ax2.set_title('LeakyReLU') ax1.plot(x.numpy(), y3.numpy()) ax2.plot(x.numpy(), y4.numpy()) ###Output _____no_output_____
L-CNN.ipynb
###Markdown Code related to the following paper: [1] Radu Dogaru and Ioana Dogaru, "BCONV-ELM: Binary Weights Convolutional Neural Network Simulator based on Keras/Tensorflow, for Low Complexity Implementations", Proceedings of the ISEEE 2019 conference, submitted.Please cite the above paper if you found this code useful.Copyright Radu and Ioana Dogaru correspondence: [email protected] First two cells allow loading and preparing various datasest L-CNN is implemented in the third cell (it is trainable version of the lightweight architecture in [1]) ###Code # Datasets from SKLEARN - here the ORL Face recognition # with possibility to select the fraction for training import keras from sklearn import datasets as dat import numpy as np e=dat.fetch_olivetti_faces(data_home=None, shuffle=True, random_state=0, download_if_missing=True) k1=300 toate=400 x_train=e.images[0:k1,:,:] x_test=e.images[k1:toate,:,:] y_train=e.target[0:k1] y_test=e.target[k1:toate] if (np.ndim(x_train)==3): # E.g. MNIST or F-MNIST x_train=np.reshape(x_train, [np.shape(x_train)[0],np.shape(x_train)[1],np.shape(x_train)[2], 1]) x_test=np.reshape(x_test, [np.shape(x_test)[0],np.shape(x_test)[1],np.shape(x_test)[2], 1] ) # place a 1 in the end to keep it compatible with kernel in conv2d # scaling in ([0,1]) x_train = x_train.astype('float32') x_test = x_test.astype('float32') y_train=np.reshape(y_train, [np.shape(y_train)[0], 1]) y_test=np.reshape(y_test, [np.shape(y_test)[0], 1] ) num_classes=np.max(y_train)+1 y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) inp_chan=np.shape(x_train)[3] num_inputs = np.shape(x_test)[1] input_shape=np.shape(x_train)[1:4] # Datasets from Keras import numpy as np import keras dataset='mnist' # mnist or f-mnist or cifar10 reduced=0 dformat='channels_last' from keras.datasets import mnist, cifar10, cifar100, fashion_mnist if dataset=='mnist': (x_train, y_train), (x_test, y_test) = mnist.load_data() # incarca date nescalate elif dataset=='cifar10': (x_train, y_train), (x_test, y_test) = cifar10.load_data() # incarca date nescalate elif dataset=='cifar100': (x_train, y_train), (x_test, y_test) = cifar100.load_data() # incarca date nescalate elif dataset=='f-mnist': (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() if (np.ndim(x_train)==3): # E.g. 
MNIST or F-MNIST x_train=np.reshape(x_train, [np.shape(x_train)[0],np.shape(x_train)[1],np.shape(x_train)[2], 1]) x_test=np.reshape(x_test, [np.shape(x_test)[0],np.shape(x_test)[1],np.shape(x_test)[2], 1] ) # place a 1 in the end to keep it compatible with kernel in conv2d # scaling in ([0,1]) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /=255 inp_chan=np.shape(x_train)[3] print('Number of input channels in image:', inp_chan) num_classes=np.max(y_train)+1 num_inputs = np.shape(x_test)[1] input_shape=np.shape(x_train)[1:4] # one can choose a lower numbers of training samples (when GPU MEM is overloaded) if reduced>0: Ntr1=reduced x_train=x_train[0:Ntr1,:,:,:] y_train=y_train[0:Ntr1] y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) # L-CNN implementation # Up to 3 convolution layers + MLP0 (a linear output layer - additional # fully connected hidden layers may be added but the lightweight character # will be lost) #----------------- for reproductibility ---------------------- from numpy.random import seed seed(1) from tensorflow import set_random_seed set_random_seed(2) #---------------------------------------------------- import keras import numpy as np # linear algebra import keras.backend as K import time as ti import scipy.io as sio from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Activation from keras.layers import Conv2D, DepthwiseConv2D, MaxPooling2D, AveragePooling2D # straturi convolutionale si max-pooling from keras.optimizers import RMSprop, SGD, Adadelta, Adam import matplotlib.pyplot as plt #========================= Parameters (model and training) ============================================ # batch_size = 50 # useful for MNIST, CIFAR, for ORL take 1 epoci = 20 # maximal number of training epochs (the best result may be obtained earlier) train_style = 2 # 1-standard Keras (not recommended) ; 2 epoch by epoch and keeping best test accuracy until "epoci"; #-------------- Output layer MLP (nhid1=nhid2=0 recommended for low complexity) nhid1 = 0 # hidden-1 neurons (put 0 if nhid2=0, or a desired value) nhid2 = 0 # hidden-2 neurons (take 0 for 0 or 1 hidden layer) # # ----------------- CONV expander layers (up to 3) ------------------------------------------------------------ nr_conv=2 # 0, 1, 2 sau 3 (number of convolution layers) filtre1=64 ; filtre2=64 ; filtre3=100 # filters (kernels) per each layer csize1=3; csize2=3 ; csize3=3 # convolution kernel size (square kernel) psize1=4; psize2=4 ; psize3=2 # pooling size (square) str1=2; str2=2; str3=2 # stride pooling (downsampling rate) pad='same'; # padding style ('valid' is also an alternative) type_conv=2 # 1='depth_wise' or 2='normal' # ------------------- Optimizer ----------------------------------------------------------------- #myopt = SGD(lr=0.01, decay=1e-6, momentum=.9, nesterov=True) #myopt =Adadelta(lr=.1) # implicit are lr=1 # cum influenteaza valoarea procesul de antrenare ?? 
myopt = RMSprop(lr=0.0005) #myopt = Adam(lr=0.001) #myopt = Adam() # -------------------------- LOSS function ------------------------------------ #my_loss='mean_squared_error' #my_loss='mean_absolute_error' my_loss='categorical_crossentropy' #-------------------------- MODEL DESCRIPTION ------------------------------ model = Sequential() if nr_conv>=1: if type_conv==2: model.add(Conv2D(filtre1, padding=pad, kernel_size=(csize1, csize1), input_shape=input_shape)) elif type_conv==1: model.add(DepthwiseConv2D(kernel_size=csize2, padding=pad, input_shape=input_shape, depth_multiplier=filtre1, use_bias=False)) #model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(psize1, psize1),strides=(str1,str1),padding=pad)) #model.add(Activation('relu')) if nr_conv>=2: if type_conv==2: model.add(Conv2D(filtre2, padding=pad, kernel_size=(csize2, csize2)) ) elif type_conv==1: model.add(DepthwiseConv2D(kernel_size=csize2, padding=pad, depth_multiplier=filtre2, use_bias=False)) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(psize2, psize2),strides=(str2,str2),padding=pad)) #model.add(Activation('relu')) if nr_conv==3: if type_conv==2: model.add(Conv2D(filtre3, padding=pad, kernel_size=(csize3, csize3)) ) elif type_conv==1: model.add(DepthwiseConv2D(kernel_size=csize3, padding=pad, depth_multiplier=filtre3, use_bias=False)) #model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(psize3, psize3),strides=(str3,str3),padding=pad)) #model.add(Activation('relu')) model.add(Activation('relu')) model.add(Flatten()) #model.add(Activation('relu')) #model.add(Dropout(0.25)) elif nr_conv==0: model.add(Flatten(input_shape=input_shape)) # ---- first fc hidden layer if nhid1>0: model.add(Dense(nhid1, activation='relu')) #model.add(Dropout(0.5)) # ---- second fc hidden layer if nhid2>0: model.add(Dense(nhid2, activation='relu')) # model.add(Dropout(0.2)) # output layer if (nhid1+nhid2)==0: model.add(Dense(num_classes, activation='softmax',input_shape=(num_inputs,))) else: model.add(Dense(num_classes, activation='softmax')) model.summary() # --- MODEL COMPILE -------------------------------------------------------- model.compile(loss=my_loss, optimizer=myopt, # se poate alege oricare dintre obiectele optimizer definite mai sus metrics=['accuracy']) # - MODEL TRAINING -------------------------------------------------- if train_style==1: t1=ti.time() history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epoci, verbose=2, # aici 0 (nu afiseaza nimic) 1 (detaliat) 2(numai epocile) validation_data=(x_test, y_test)) t2=ti.time() print(t2-t1) score = model.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) elif train_style==2: err_test=np.zeros(epoci) # For plotting test error evolution best_acc=0.0 best_ep=0 t1=ti.time() for k in range(epoci): model.fit(x_train, y_train, batch_size=batch_size, epochs=1, verbose=0, # aici 0 (nu afiseaza nimic) 1 (detaliat) 2(numai epocile) validation_data=(x_test, y_test)) score = model.evaluate(x_test, y_test, verbose=0) err_test[k]=score[1] if score[1]>best_acc : print('Improved in epoch:', k, ' New accuracy: ', 100*score[1],'%') best_acc=score[1] best_ep=k bp=model.get_weights() t2=ti.time() print('Best accuracy:', best_acc*100, '% reached in epoch: ',best_ep, ' running ',epoci,' epochs lasts ',t2-t1,' seconds') plt.plot(err_test) model.set_weights((bp)) # evaluete prediction time on all test samples t1=ti.time() score = 
model.evaluate(x_test, y_test, verbose=0) t2=ti.time() print('Test accuracy:', score[1]) print ('Time for test set : ',t2-t1) print('Latency (per input sample):', 1000*(t2-t1)/np.shape(x_test)[0], 'ms') # QMLP0 (quantified MLP0 module) # Quantization of the above resulted model (only for MLP0) # Copyright Radu and Ioana DOGARU; #============================================================= nb_out=8 outW=np.copy(bp) Qout=-1+pow(2,nb_out-1) if (nb_out >0) & (nhid1==0) : O=np.max(np.abs(outW[0])) outW[0]=np.round(outW[0]*(1/O)*Qout) outW[1]=np.round(outW[1]*(1/O)*Qout) model.set_weights(outW) score = model.evaluate(x_test, y_test, verbose=0) best_acc=score[1] print('Output layer quantized with:', nb_out, 'bits') print('Quantified accuracy is:', best_acc*100,'%') outW=model.get_weights() # the resulting model with fixed point weights ###Output _____no_output_____
notebooks/tutorial/Register.ipynb
###Markdown This example demonstrates the use of d-flip-flops and registers. ###Code import magma as m m.set_mantle_target("ice40") ###Output _____no_output_____ ###Markdown DFFTo use a DFF we import that `mantle` circuit `DFF`.Calling `DFF()` creates an instance of a DFF.A sequential logic element like a DFF is very similarto a combinational logic element like a full adder.It has inputs and outputs.The inputs and outputs are wired up in the same way asa combinational circuit. ###Code from loam.boards.icestick import IceStick from mantle import DFF icestick = IceStick() icestick.Clock.on() # Need to turn on the clock for sequential logic icestick.J1[0].input().on() icestick.J3[0].output().on() main = icestick.DefineMain() dff = DFF() m.wire( dff(main.J1), main.J3 ) m.EndDefine() ###Output import lattice ice40 import lattice mantle40 ###Markdown Since a flip-flop is a sequential logic element,it has a clock.The clock generator is a peripheral on the FPGA.We need to turn it on if we want to use the clock.This creates a global clock signal on the FPGA.Note that we did not need to wire the clock to the DFF;`magma` automatically wires the clock to the flip-flop's clock input. Let's compile and build. ###Code m.compile("build/dff", main) %%bash cd build yosys -q -p 'synth_ice40 -top main -blif dff.blif' dff.v arachne-pnr -q -d 1k -o dff.txt -p dff.pcf dff.blif icepack dff.txt dff.bin iceprog dff.bin ###Output /Users/hanrahan/git/magmathon/notebooks/tutorial/build ###Markdown If we inspect the compiled verilog, we see that our mantle `DFF` uses the `SB_DFF` ice40 primitive. ###Code %cat build/dff.v ###Output module main (input J1, output J3, input CLKIN); wire inst0_Q; SB_DFF inst0 (.C(CLKIN), .D(J1), .Q(inst0_Q)); assign J3 = inst0_Q; endmodule ###Markdown RegisterA register is simply an array of flip-flops.To create an instance of a register, call `Register`with the number of bits `n` in the register. ###Code from loam.boards.icestick import IceStick from mantle import Register icestick = IceStick() icestick.Clock.on() # Need to turn on the clock for sequential logic for i in range(4): icestick.J1[i].input().on() icestick.J3[i].output().on() main = icestick.DefineMain() register4 = Register(4) m.wire( register4(main.J1), main.J3 ) m.EndDefine() ###Output _____no_output_____ ###Markdown Registers and DFFs are very similar to each other.The only difference is that the input and output to a DFFare `Bit` values,whereas the inputs and the outputs to registers are `Bits(n)`. ###Code m.compile("build/register4", main) %%bash cd build yosys -q -p 'synth_ice40 -top main -blif register4.blif' register4.v arachne-pnr -q -d 1k -o register4.txt -p register4.pcf register4.blif icepack register4.txt register4.bin iceprog register4.bin ###Output /Users/hanrahan/git/magmathon/notebooks/tutorial/build ###Markdown If we inspect the compiled verilog, we see that our register is a module that instances a set of `SB_DFF`s. 
###Code %cat build/register4.v ###Output module Register4 (input [3:0] I, output [3:0] O, input CLK); wire inst0_Q; wire inst1_Q; wire inst2_Q; wire inst3_Q; SB_DFF inst0 (.C(CLK), .D(I[0]), .Q(inst0_Q)); SB_DFF inst1 (.C(CLK), .D(I[1]), .Q(inst1_Q)); SB_DFF inst2 (.C(CLK), .D(I[2]), .Q(inst2_Q)); SB_DFF inst3 (.C(CLK), .D(I[3]), .Q(inst3_Q)); assign O = {inst3_Q,inst2_Q,inst1_Q,inst0_Q}; endmodule module main (input [3:0] J1, output [3:0] J3, input CLKIN); wire [3:0] inst0_O; Register4 inst0 (.I(J1), .O(inst0_O), .CLK(CLKIN)); assign J3 = inst0_O; endmodule ###Markdown Enables and ResetsThere are other flip-flops and registers with clock enablesand reset inputs.The flip-flop will only be enabled if its clock enable input is true.And it will be reset to its initial value if reset is true.To create registers with these additional inputs,set the optional arguments `has_ce` and/or `has_reset`when instancing the register. ###Code icestick = IceStick() icestick.Clock.on() for i in range(4): icestick.J1[i].input().on() icestick.J3[i].output().on() icestick.J1[4].input().on() # ce signal icestick.J1[5].input().on() # reset signal main = icestick.DefineMain() register4 = Register(4, init=5, has_ce=True, has_reset=True ) m.wire( register4(main.J1[0:4], ce=main.J1[4], reset=main.J1[5]), main.J3) m.EndDefine() ###Output _____no_output_____ ###Markdown To wire the optional clock inputs, clock enable and reset,use named arguments when you call the register with its inputs.As a general rule, clock inputs are handled differently thanother inputs. Compile, build, and upload. ###Code m.compile("build/register4ce", main) %%bash cd build yosys -q -p 'synth_ice40 -top main -blif register4ce.blif' register4ce.v arachne-pnr -q -d 1k -o register4ce.txt -p register4ce.pcf register4ce.blif icepack register4ce.txt register4ce.bin iceprog register4ce.bin ###Output /Users/hanrahan/git/magmathon/notebooks/tutorial/build
isda_sample_notebook.ipynb
###Markdown Set Seed for Reproducibility ###Code def set_seed(seed): torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) os.environ['PYTHONHASHSEED']=str(seed) set_seed(72) ###Output _____no_output_____ ###Markdown Preprocess and Load Data ###Code preprocess = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (.2023, .1994, .2010)), #CIFAR10 # transforms.Normalize((0.507, 0.487, 0.441), (0.267, 0.256, 0.276)) #CIFAR100 # Cutout(n_holes=1, length=16), # CutOut ]) train_transform = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4, fill=128), # CIFAR10Policy(), # AutoAugment preprocess ]) # train_transform.transforms.insert(0, RandAugment(1, 5)) #RandAugment test_transform = preprocess # test_transform = transforms.Compose([ # transforms.ToTensor(), # transforms.Normalize((0.4914, 0.4822, 0.4465), (.2023, .1994, .2010)), #CIFAR10 # transforms.Normalize((0.507, 0.487, 0.441), (0.267, 0.256, 0.276)) #CIFAR100 # ]) batch_size = 4 train_data = datasets.CIFAR10(root="./data", train=True, download=True, transform=train_transform) test_data= datasets.CIFAR10(root="./data", train=False, download=True, transform=test_transform) # load training data in batches train_loader = torch.utils.data.DataLoader( # AugMixDataset(train_data, preprocess, no_jsd=True), # Augmix train_data, batch_size=batch_size, num_workers=8, shuffle=True, pin_memory=True ) # load test data in batches test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=8, shuffle=False, pin_memory=True ) classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') print(f'Length of train loader is {len(train_loader)}') print(f'Length of test loader is {len(test_loader)}') ###Output _____no_output_____ ###Markdown Build Model ###Code model = CustomModel() num_feature = 192*4*4 num_classes = len(classes) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = model.to(device) model = torch.nn.DataParallel(model) fc = Full_layer(num_feature=num_feature, num_classes=len(classes)).to(device) fc = torch.nn.DataParallel(fc) best_top1 = 0 # train from start start_epoch = 0 num_epochs = 50 combine_ratio = 0.5 isda_criterion = ISDALoss(num_feature, num_classes) ce_criterion = CrossEntropyLoss(smooth_eps=0.1).to(device) optimizer = Lookahead(DeepMemory([{'params': model.parameters()}, {'params': fc.parameters()}], len_memory=len(train_data.data)//batch_size)) # rectified adam wtih lookahead lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=len(train_data.data)//batch_size) print('Number of model parameters: {}'.format( sum([p.data.nelement() for p in model.parameters()]) + sum([p.data.nelement() for p in fc.parameters()]) )) ###Output _____no_output_____ ###Markdown Load Checkpoints to resume training ###Code # checkpoint = torch.load('./checkpoint/CustomModel_standard_dmla_isda_ckpt.pth') # model.module.load_state_dict(checkpoint['model_state_dict'], strict=False) # optimizer.load_state_dict(checkpoint['optimizer_state_dict']) # fc.module.load_state_dict(checkpoint['fc']) # start_epoch = checkpoint['epoch'] # best_loss = checkpoint['loss'] # best_top1 = checkpoint['top1'] # resume training # best_top5 = checkpoint['top5'] # print(f'Top-1 Acc: {best_top1}\t Top-5 Acc: {best_top5}\t Best loss: {best_loss:.4f}\t Best epoch: {start_epoch}') # print(f'Loaded checkpoint with \n {best_top1}% Top-1 Accuracy, {best_top5}% Top-5 Accuracy, after training for 
{start_epoch} epochs.') ###Output _____no_output_____ ###Markdown Train Model ###Code %%time def train(train_loader, model, fc, criterion, optimizer, epoch): print('Training model...\n') batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() ratio = combine_ratio * (epoch / (num_epochs)) # switch to train mode model.train() fc.train() end = time.time() for i, (input, target) in enumerate(train_loader): # measure data loading time data_time.update(time.time() - end) target = target.to(device) input_var = Variable(input) target_var = Variable(target) optimizer.zero_grad() # compute output loss, output = criterion(model, fc, input_var, target_var, ratio) # measure accuracy and record loss prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) losses.update(loss.data.item(), input.size(0)) top1.update(prec1.item(), input.size(0)) top5.update(prec5.item(), input.size(0)) loss.backward() optimizer.step() lr_scheduler.step() # measure elapsed time batch_time.update(time.time() - end) end = time.time() if i % 1500 == 0: print('Epoch: [{0}][{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t' 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1, top5=top5)) print(' * Acc@1 {top1.avg:.3f} Acc@1 Error {top1_err:.3f}\n' ' * Acc@5 {top5.avg:.3f} Acc@5 Error {top5_err:.3f}' .format(top1=top1, top1_err=100-top1.avg, top5=top5, top5_err=100-top5.avg)) ###Output _____no_output_____ ###Markdown Accuracy on test data ###Code %%time def validate(test_loader, model, fc, criterion, epoch): print('Evaluating model on test data...\n') batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() # switch to evaluate mode model.eval() fc.eval() end = time.time() for i, (input, target) in enumerate(test_loader): target = target.to(device) input_var = Variable(input) target_var = Variable(target) # compute output with torch.no_grad(): features = model(input_var) output = fc(features) loss = criterion(output, target_var) # measure accuracy and record loss prec1, prec5 = accuracy(output.data, target.data, topk=(1, 5)) losses.update(loss.data.item(), input.size(0)) top1.update(prec1.item(), input.size(0)) top5.update(prec5.item(), input.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if i % 250 == 0: print('Test: [{0}/{1}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t' 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format( i, len(test_loader), batch_time=batch_time, loss=losses, top1=top1, top5=top5)) print(' * Acc@1 {top1.avg:.3f} Acc@1 Error {top1_err:.3f}\n' ' * Acc@5 {top5.avg:.3f} Acc@5 Error {top5_err:.3f}' .format(top1=top1, top1_err=100-top1.avg, top5=top5, top5_err=100-top5.avg)) return top1, top5, losses ###Output _____no_output_____ ###Markdown Checkpoint Model ###Code %%time for epoch in range(start_epoch, num_epochs): train(train_loader, model, fc, isda_criterion, optimizer, epoch) top1, top5, losses = validate(test_loader, model, fc, ce_criterion, epoch) if top1.avg > best_top1: print('Saving checkpoint') state = { 'model_state_dict': model.module.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'fc': fc.module.state_dict(), 'epoch': 
epoch, 'loss': losses.avg, 'top1': top1.avg, 'top5': top5.avg} if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') torch.save(state, './checkpoint/CustomModel_standard_dmla_isda_ckpt.pth') best_top1 = top1.avg ###Output _____no_output_____ ###Markdown Accuracy for each class ###Code # class_correct = list(0. for i in range(100)) # class_total = list(0. for i in range(100)) # with torch.no_grad(): # for data in test_loader: # images, labels = data # outputs = model(images) # _, predicted = torch.max(outputs, 1) # c = (predicted == labels).squeeze() # for i in range(4): # label = labels[i] # class_correct[label] += c[i].item() # class_total[label] += 1 # for i in range(10): # print('Accuracy of %5s : %2d %%' % ( # classes[i], 100 * class_correct[i] / class_total[i])) ###Output _____no_output_____
02.Mathematical Concepts/Mathematical+Concepts.ipynb
###Markdown Visualizing Complex Numbers ###Code %matplotlib inline import numpy as np import matplotlib.pyplot as plt from matplotlib.transforms import Affine2D theta = 2*np.pi/5 r=np.sin(theta) def getRectCoords(r,theta): x= r*np.cos(theta) y = r*np.sin(theta) yield x yield y def plotComplexNumbers(x,y): for i in getRectCoords(r,theta): x=i y=i+1 return plt.quiver(x,y) def visualizeComplexNumbers(): plotComplexNumbers(r,theta) return plt.show() plt.axhline(0, linestyle="--", color ="black") plt.axvline(0, linestyle="--",color ="black") visualizeComplexNumbers() ###Output _____no_output_____ ###Markdown Polar Plot ###Code def getRandTheta(): theta= np.arange(-2*np.pi,2*np.pi,0.001) r= np.sin(theta) return(theta,r) def plotPolar(theta,r): theta,r=getRandTheta() plt.polar(theta,r) plt.plot(theta,r) plt.show() plotPolar(theta,r) ###Output _____no_output_____ ###Markdown Matrix Operations 3X3 Matrices Addition ###Code def matrixInput(): firstrow=[0,0,0] secondrow=[0,0,0] thirdrow =[0,0,0] for i in range(3): text = input().split(' ') if i==0: firstrow[0]=int(text[0]) firstrow[1]=int(text[1]) firstrow[2]=int(text[2]) elif i==1: secondrow[0]=int(text[0]) secondrow[1]=int(text[1]) secondrow[2]=int(text[2]) elif i==2: thirdrow[0]=int(text[0]) thirdrow[1]=int(text[1]) thirdrow[2]=int(text[2]) return [firstrow,secondrow,thirdrow] FirstMatrix =matrixInput() SecondMatrix=matrixInput() result =[[0,0,0], [0,0,0], [0,0,0]] for i in range(3): for a in range(3): result[i][a]=FirstMatrix[i][a]+SecondMatrix[i][a] print(result[0]) print(result[1]) print(result[2]) ###Output _____no_output_____ ###Markdown Multiply a matrix with a scalar ###Code scalar=int(input("Write Scalar here:")) Matrix = matrixInput() result =[[0,0,0], [0,0,0], [0,0,0]] for i in range(3): for a in range(3): result[i][a]= Matrix[i][a]*scalar print(result[0]) print(result[1]) print(result[2]) ###Output _____no_output_____ ###Markdown Multiply two 3X3 Matrices ###Code firstMatrix=matrixInput() secondMatrix=matrixInput() result =np.matmul(firstMatrix,secondMatrix) print(result[0]) print(result[1]) print(result[2]) ###Output _____no_output_____ ###Markdown Multiplying two vectors ###Code def vectorInput(): vector=input().split(' ') vectorNum=[] for i in vector: vectorNum.append(int(i)) return vectorNum vectorOne= vectorInput() vectorTwo = vectorInput() result= np.dot(vectorOne,vectorTwo) print(result) ###Output _____no_output_____ ###Markdown Solving linear equations ###Code a = np.array([[3,1], [1,2]]) b = np.array([9,8]) result= np.linalg.solve(a,b) print(result) ###Output _____no_output_____ ###Markdown Functions ###Code a=4 b=5 c=6 x=np.arange(-2*np.pi,2*np.pi,0.001) y=a*x+b plt.plot(x,y) plt.show() x=np.arange(-2*np.pi,2*np.pi,0.001) y=a*x**2 +b*x +c plt.plot(x,y) plt.show() x=np.arange(-2*np.pi,2*np.pi,0.001) y=np.sin(a*x+b) plt.plot(x,y) plt.show() x=np.arange(-2*np.pi,2*np.pi,0.001) y=a**(b*x) plt.plot(x,y) plt.show() ###Output _____no_output_____ ###Markdown Derivative ###Code x=1 y=x**2 def derrivative(x,y): change=0.0000001 dy=(x+change)**2-y dx=change answer = dy/dx return answer print(derrivative(x,y)) ###Output 2.0000001010878066
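The plotComplexNumbers helper in the first section above overwrites its arguments inside the loop and calls quiver without specifying an origin. A minimal sketch of drawing a complex number as a vector, using the same angle 2*pi/5:
###Code
import numpy as np
import matplotlib.pyplot as plt

# Unit-length complex number at angle 2*pi/5, drawn as an arrow from the origin.
z = np.exp(1j * 2 * np.pi / 5)
plt.axhline(0, linestyle="--", color="black")
plt.axvline(0, linestyle="--", color="black")
plt.quiver(0, 0, z.real, z.imag, angles='xy', scale_units='xy', scale=1)
plt.xlim(-1.5, 1.5)
plt.ylim(-1.5, 1.5)
plt.show()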
labs/3.Segmentation/mask_rcnn.ipynb
###Markdown Mask R-CNN DemoA quick introduction to using the pre-trained Mask R-CNN model to detect and segment objects.The model was trained on the Microsoft Common Objects in Context (MS-COCO) dataset http://cocodataset.org/home ###Code #download pretrained model !wget https://gmfilestore.blob.core.windows.net/pretrained/mask_rcnn_coco.h5 import os import sys import random import math import numpy as np np.random.seed(42) import skimage.io import matplotlib import matplotlib.pyplot as plt # Root directory of the project ROOT_DIR = os.path.abspath("./") # Import Mask RCNN sys.path.append(ROOT_DIR) # To find local version of the library from mrcnn import utils import mrcnn.model as modellib from mrcnn import visualize # Import COCO config sys.path.append(os.path.join(ROOT_DIR, "samples/coco/")) # To find local version import coco %matplotlib inline # Directory to save logs and trained model MODEL_DIR = os.path.join(ROOT_DIR, "logs") # Local path to trained weights file COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5") # Download COCO trained weights from Releases if needed if not os.path.exists(COCO_MODEL_PATH): utils.download_trained_weights(COCO_MODEL_PATH) # Directory of images to run detection on IMAGE_DIR = os.path.join(ROOT_DIR, "images") ###Output _____no_output_____ ###Markdown ConfigurationsWe'll be using a model trained on the MS-COCO dataset. The configurations of this model are in the ```CocoConfig``` class in ```coco.py```.For inferencing, modify the configurations a bit to fit the task. To do so, sub-class the ```CocoConfig``` class and override the attributes you need to change. ###Code class InferenceConfig(coco.CocoConfig): # Set batch size to 1 since we'll be running inference on # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU GPU_COUNT = 1 IMAGES_PER_GPU = 1 config = InferenceConfig() config.display() ###Output Configurations: BACKBONE resnet101 BACKBONE_STRIDES [4, 8, 16, 32, 64] BATCH_SIZE 1 BBOX_STD_DEV [0.1 0.1 0.2 0.2] COMPUTE_BACKBONE_SHAPE None DETECTION_MAX_INSTANCES 100 DETECTION_MIN_CONFIDENCE 0.7 DETECTION_NMS_THRESHOLD 0.3 FPN_CLASSIF_FC_LAYERS_SIZE 1024 GPU_COUNT 1 GRADIENT_CLIP_NORM 5.0 IMAGES_PER_GPU 1 IMAGE_CHANNEL_COUNT 3 IMAGE_MAX_DIM 1024 IMAGE_META_SIZE 93 IMAGE_MIN_DIM 800 IMAGE_MIN_SCALE 0 IMAGE_RESIZE_MODE square IMAGE_SHAPE [1024 1024 3] LEARNING_MOMENTUM 0.9 LEARNING_RATE 0.001 LOSS_WEIGHTS {'rpn_class_loss': 1.0, 'rpn_bbox_loss': 1.0, 'mrcnn_class_loss': 1.0, 'mrcnn_bbox_loss': 1.0, 'mrcnn_mask_loss': 1.0} MASK_POOL_SIZE 14 MASK_SHAPE [28, 28] MAX_GT_INSTANCES 100 MEAN_PIXEL [123.7 116.8 103.9] MINI_MASK_SHAPE (56, 56) NAME coco NUM_CLASSES 81 POOL_SIZE 7 POST_NMS_ROIS_INFERENCE 1000 POST_NMS_ROIS_TRAINING 2000 PRE_NMS_LIMIT 6000 ROI_POSITIVE_RATIO 0.33 RPN_ANCHOR_RATIOS [0.5, 1, 2] RPN_ANCHOR_SCALES (32, 64, 128, 256, 512) RPN_ANCHOR_STRIDE 1 RPN_BBOX_STD_DEV [0.1 0.1 0.2 0.2] RPN_NMS_THRESHOLD 0.7 RPN_TRAIN_ANCHORS_PER_IMAGE 256 STEPS_PER_EPOCH 1000 TOP_DOWN_PYRAMID_SIZE 256 TRAIN_BN False TRAIN_ROIS_PER_IMAGE 200 USE_MINI_MASK True USE_RPN_ROIS True VALIDATION_STEPS 50 WEIGHT_DECAY 0.0001 ###Markdown Create Model and Load Trained Weights ###Code # Create model object in inference mode. model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config) # Load weights trained on MS-COCO model.load_weights(COCO_MODEL_PATH, by_name=True) ###Output WARNING: Logging before flag parsing goes to stderr. 
W1008 16:53:04.605910 140167099500288 deprecation_wrapper.py:119] From /anaconda/envs/azureml_py36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:517: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead. W1008 16:53:04.624322 140167099500288 deprecation_wrapper.py:119] From /anaconda/envs/azureml_py36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:74: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead. W1008 16:53:04.627759 140167099500288 deprecation_wrapper.py:119] From /anaconda/envs/azureml_py36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:4138: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead. W1008 16:53:04.651838 140167099500288 deprecation_wrapper.py:119] From /anaconda/envs/azureml_py36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:1919: The name tf.nn.fused_batch_norm is deprecated. Please use tf.compat.v1.nn.fused_batch_norm instead. W1008 16:53:04.654616 140167099500288 deprecation_wrapper.py:119] From /anaconda/envs/azureml_py36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:3976: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead. W1008 16:53:07.473295 140167099500288 deprecation_wrapper.py:119] From /anaconda/envs/azureml_py36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:2018: The name tf.image.resize_nearest_neighbor is deprecated. Please use tf.compat.v1.image.resize_nearest_neighbor instead. W1008 16:53:07.944221 140167099500288 deprecation_wrapper.py:119] From /mnt/azmnt/code/Users/gmarchet/dev/dnnworkshop5/labs/3.Segmentation/mrcnn/model.py:341: The name tf.log is deprecated. Please use tf.math.log instead. W1008 16:53:07.953891 140167099500288 deprecation.py:323] From /mnt/azmnt/code/Users/gmarchet/dev/dnnworkshop5/labs/3.Segmentation/mrcnn/model.py:399: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.where in 2.0, which has the same broadcast rule as np.where W1008 16:53:07.960452 140167099500288 deprecation.py:506] From /mnt/azmnt/code/Users/gmarchet/dev/dnnworkshop5/labs/3.Segmentation/mrcnn/model.py:423: calling crop_and_resize_v1 (from tensorflow.python.ops.image_ops_impl) with box_ind is deprecated and will be removed in a future version. Instructions for updating: box_ind is deprecated, use box_indices instead W1008 16:53:08.380446 140167099500288 deprecation_wrapper.py:119] From /mnt/azmnt/code/Users/gmarchet/dev/dnnworkshop5/labs/3.Segmentation/mrcnn/model.py:720: The name tf.sets.set_intersection is deprecated. Please use tf.sets.intersection instead. W1008 16:53:08.385413 140167099500288 deprecation_wrapper.py:119] From /mnt/azmnt/code/Users/gmarchet/dev/dnnworkshop5/labs/3.Segmentation/mrcnn/model.py:722: The name tf.sparse_tensor_to_dense is deprecated. Please use tf.sparse.to_dense instead. W1008 16:53:08.487473 140167099500288 deprecation.py:323] From /mnt/azmnt/code/Users/gmarchet/dev/dnnworkshop5/labs/3.Segmentation/mrcnn/model.py:772: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version. Instructions for updating: Use `tf.cast` instead. ###Markdown ClassificationThe model classifies objects and returns class IDs, which are integer values. For example, in the MS-COCO dataset, the 'person' class is 1 and 'teddy bear' is 88. The IDs are not always sequential. 
The COCO dataset, for example, has class IDs 70 and 72, but not 71.To improve consistency, the ```Dataset``` class assigns its own sequential IDs. For example, if you load the COCO dataset using our ```Dataset``` class, the 'person' class has ID = 1 (just like COCO) and the 'teddy bear' class has 78 (different from COCO). Keep that in mind when mapping class IDs to class names.To get the list of class names, load the dataset and then use the ```class_names``` property.``` Load COCO datasetdataset = coco.CocoDataset()dataset.load_coco(COCO_DIR, "train")dataset.prepare() Print class namesprint(dataset.class_names)```We don't want to download the entire COCO dataset just to run this notebook, so we're including the list of class names below. The index of the class name in the list represent its ID starting with 0. ###Code # COCO Class names # Index of the class in the list is its ID. For example, to get ID of # the teddy bear class, use: class_names.index('teddy bear') class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'] ###Output _____no_output_____ ###Markdown Run Object Detection ###Code # Load a random image from the images folder file_names = next(os.walk(IMAGE_DIR))[2] image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names))) # Run detection results = model.detect([image], verbose=1) # Visualize results r = results[0] visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']) ###Output Processing 1 images image shape: (448, 640, 3) min: 0.00000 max: 255.00000 uint8 molded_images shape: (1, 1024, 1024, 3) min: -123.70000 max: 151.10000 float64 image_metas shape: (1, 93) min: 0.00000 max: 1024.00000 float64 anchors shape: (1, 261888, 4) min: -0.35390 max: 1.29134 float32
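As a quick, illustrative follow-up (not part of the original notebook): the results dictionary returned by `model.detect` can be turned into a readable summary by mapping each detected class ID back to its name. This sketch assumes the `r` and `class_names` variables from the cells above.
```
# Print one line per detection: "label: confidence score".
for class_id, score in zip(r['class_ids'], r['scores']):
    print(f"{class_names[class_id]}: {score:.2f}")

# The boolean masks are stacked along the last axis, one mask per detection.
print("Number of detections:", r['masks'].shape[-1])
```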
day06/PROJECT - Speeches - Word Embeddings and Topic Modelling.ipynb
###Markdown Project: state-of-the-union speeches In this project, we will load and process the `us_president_speeches.csv` file, preprocess the speeches, embed their words and do topic modeling on them.**The expected answers may vary, depending on how you decide to preprocess the speeches**. Preamble ###Code !pip install --upgrade gensim !pip install pyldavis from typing import List import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import nltk nltk.download("wordnet") nltk.download("stopwords") from nltk import WordNetLemmatizer from wordcloud import WordCloud from gensim.utils import simple_preprocess from gensim.models import Word2Vec, KeyedVectors, LdaMulticore from gensim.corpora import Dictionary import pyLDAvis import pyLDAvis.gensim_models as gensimvis ###Output [nltk_data] Downloading package wordnet to /root/nltk_data... [nltk_data] Package wordnet is already up-to-date! [nltk_data] Downloading package stopwords to /root/nltk_data... [nltk_data] Package stopwords is already up-to-date! ###Markdown Loading up the data Exercise 1 Load up the `us_president_speeches.csv` into an array named `speeches` and print the first 5 rows. ###Code ###Output _____no_output_____ ###Markdown Exercise 2 Fill in the `???`s.1. How many speeches are in the DataFrame?2. How many unique presidents are in the DataFrame?3. What's the longest speech? How long is it (in characters)? Who gave it and when? ###Code print("Nr. Speeches", ???) print("Nr. Presidents", ???) print("Longest speech is at index:", ???) print(f"It is {???} characters long.") president = ??? year = ??? print(f"President {president} gave it in {year}") ###Output Nr. Speeches 227 Nr. Presidents 38 Longest speech is at index: 189 It is 217947 characters long. President Carter gave it in 1981 ###Markdown Expected solution:```Nr. Speeches 227Nr. Presidents 38Longest speech is at index: 189It is 217947 characters long.President Carter gave it in 1981``` Preprocessing the data Exercise 3 In this exercise we will write a function that cleans the column `speech` of the dataframe, and returns a corpus of type `List[List[str]]`. Include `"wa", "ha"` in your list of stopwords. ###Code ###Output _____no_output_____ ###Markdown Define a variable `corpus` with the result of applying this function to our `speeches` df. ###Code ###Output _____no_output_____ ###Markdown Exercise 4 Print the first 50 characters speech alongside the first 10 words of its corresponding `List[str]` inside corpus. Check that both match in a meaningful way. ###Code ###Output Fellow-Citizens of the Senate and House of Represe ['fellow', 'citizen', 'senate', 'house', 'representative', 'vain', 'may', 'expect', 'peace', 'indian'] ###Markdown Expected answer:```Fellow-Citizens of the Senate and House of Represe['fellow', 'citizen', 'senate', 'house', 'representative', 'vain', 'may', 'expect', 'peace', 'indian']``` Exercise 5 Define a `Dictionary` on this corpus and call it `dct`. How many unique tokens are there? ###Code ###Output _____no_output_____ ###Markdown Embedding words Exercise 6 Train a `word2vec` model using `gensim`'s `Word2Vec`. Call it `model_w2v`. Leave all the hyperparameters as the default ones. ###Code ###Output _____no_output_____ ###Markdown Exercise 7 What's the most similar word to `woman` according to the model? What about `senate`? ###Code ###Output _____no_output_____ ###Markdown Getting topics Exercise 8 Define a `bows` variable with the result of applying `dct.doc2bow` to all documents in `corpus`. 
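As a hint, one minimal way to build `bows` (a sketch that assumes the `corpus` and `dct` objects from the earlier exercises):
```
# One bag-of-words (list of (token_id, count) pairs) per document
bows = [dct.doc2bow(doc) for doc in corpus]
```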
###Code ###Output _____no_output_____ ###Markdown Exercise 9 Fit a Latent Dirichlet Allocation model to our bag-of-words corpus `bows` using `gensim`'s `LdaMulticore` for 5, 10, 15 and 20 topics. To get comparable results, set the `random_state` to my favorite number, 42. ###Code ###Output _____no_output_____ ###Markdown Exercise 10 Plot a line with the number of topics (5, 10, 15 and 20) on the x axis and the log-perplexity of the corresponding model on the y axis. Interpret the plot. What does the log-perplexity usually indicate? ###Code ###Output _____no_output_____ ###Markdown Exercise 11 Use `gensimvis` to visualize the model with 20 topics. (The result might be lackluster; why?) ###Code ###Output _____no_output_____
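For reference, a hedged sketch of how Exercises 9 to 11 could be approached. The `topic_counts` and `lda_models` names are illustrative, not part of the notebook, and the sketch assumes `bows` and `dct` from the earlier exercises.
```
# Exercise 9: one LDA model per topic count, with a fixed random_state.
topic_counts = [5, 10, 15, 20]
lda_models = {
    k: LdaMulticore(corpus=bows, id2word=dct, num_topics=k, random_state=42)
    for k in topic_counts
}

# Exercise 10: line plot of log-perplexity against the number of topics.
log_perplexities = [lda_models[k].log_perplexity(bows) for k in topic_counts]
plt.plot(topic_counts, log_perplexities, marker="o")
plt.xlabel("Number of topics")
plt.ylabel("Log-perplexity")
plt.show()

# Exercise 11: interactive visualization of the 20-topic model.
pyLDAvis.enable_notebook()
gensimvis.prepare(lda_models[20], bows, dct)
```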
slides/data_visualization.ipynb
###Markdown ###Code !pip install davos import davos ###Output Collecting davos Downloading davos-0.1.0-py3-none-any.whl (76 kB) [?25l  |████▎ | 10 kB 22.0 MB/s eta 0:00:01  |████████▋ | 20 kB 9.4 MB/s eta 0:00:01  |████████████▉ | 30 kB 6.0 MB/s eta 0:00:01  |█████████████████▏ | 40 kB 5.9 MB/s eta 0:00:01  |█████████████████████▍ | 51 kB 5.1 MB/s eta 0:00:01  |█████████████████████████▊ | 61 kB 5.2 MB/s eta 0:00:01  |██████████████████████████████ | 71 kB 5.3 MB/s eta 0:00:01  |████████████████████████████████| 76 kB 2.3 MB/s [?25hRequirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from davos) (21.3) Requirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from davos) (57.4.0) Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->davos) (3.0.7) Installing collected packages: davos Successfully installed davos-0.1.0 ###Markdown OverviewThis notebook provides an overview of some different types of plots you can make in Python. We'll be using several sample datasets that come with the scikit-learn Python toolbox: - `california`: California house prices dataset - `digits`: images of hand-drawn numerals - `wine`: wine characteristicsWe'll explore these datasets (and some others, introduced later) in a few different ways to illustrate different ways of manipulating and visualizing the data. Import libraries ###Code #visualization tools smuggle matplotlib.pyplot as plt #matplotlib is a basic plotting library smuggle seaborn as sns #seaborn is a library that uses matplotlib to make styled plots from plotly smuggle express as px #plotly express is a library for drawing interactive figures smuggle hypertools as hyp #hypertools is a library for visualizing "high-dimensional" data #machine learning library from sklearn smuggle svm, metrics #sklearn is a python machine learning library # - svm is a pattern classification tool # - metrics is a tool for evaluating the performance # of machine learning algorithms from sklearn.datasets smuggle fetch_openml # function for downloading open data from sklearn smuggle datasets # another data source #data wrangling tools smuggle pandas as pd smuggle numpy as np ###Output Collecting hypertools Downloading hypertools-0.7.0-py3-none-any.whl (59 kB)  |████████████████████████████████| 59 kB 3.2 MB/s [?25hRequirement already satisfied: future in /usr/local/lib/python3.7/dist-packages (from hypertools) (0.16.0) Collecting PPCA>=0.0.2 Downloading ppca-0.0.4-py3-none-any.whl (6.7 kB) Requirement already satisfied: seaborn>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from hypertools) (0.11.2) Requirement already satisfied: matplotlib>=1.5.1 in /usr/local/lib/python3.7/dist-packages (from hypertools) (3.2.2) Collecting deepdish Downloading deepdish-0.3.7-py2.py3-none-any.whl (37 kB) Requirement already satisfied: pandas>=0.18.0 in /usr/local/lib/python3.7/dist-packages (from hypertools) (1.3.5) Requirement already satisfied: numpy>=1.10.4 in /usr/local/lib/python3.7/dist-packages (from hypertools) (1.19.5) Requirement already satisfied: scipy>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from hypertools) (1.4.1) Collecting umap-learn>=0.4.6 Downloading umap-learn-0.5.2.tar.gz (86 kB)  |████████████████████████████████| 86 kB 4.4 MB/s [?25hRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from hypertools) (2.23.0) Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from hypertools) 
(1.15.0) Collecting scikit-learn!=0.22,<0.24,>=0.19.1 Downloading scikit_learn-0.23.2-cp37-cp37m-manylinux1_x86_64.whl (6.8 MB)  |████████████████████████████████| 6.8 MB 47.0 MB/s [?25hRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=1.5.1->hypertools) (3.0.7) Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=1.5.1->hypertools) (1.3.2) Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=1.5.1->hypertools) (2.8.2) Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=1.5.1->hypertools) (0.11.0) Requirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.18.0->hypertools) (2018.9) Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn!=0.22,<0.24,>=0.19.1->hypertools) (1.1.0) Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn!=0.22,<0.24,>=0.19.1->hypertools) (3.1.0) Requirement already satisfied: numba>=0.49 in /usr/local/lib/python3.7/dist-packages (from umap-learn>=0.4.6->hypertools) (0.51.2) Collecting pynndescent>=0.5 Downloading pynndescent-0.5.6.tar.gz (1.1 MB)  |████████████████████████████████| 1.1 MB 44.1 MB/s [?25hRequirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from umap-learn>=0.4.6->hypertools) (4.62.3) Requirement already satisfied: llvmlite<0.35,>=0.34.0.dev0 in /usr/local/lib/python3.7/dist-packages (from numba>=0.49->umap-learn>=0.4.6->hypertools) (0.34.0) Requirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from numba>=0.49->umap-learn>=0.4.6->hypertools) (57.4.0) Requirement already satisfied: tables in /usr/local/lib/python3.7/dist-packages (from deepdish->hypertools) (3.4.4) Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->hypertools) (1.24.3) Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->hypertools) (2021.10.8) Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->hypertools) (2.10) Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->hypertools) (3.0.4) Requirement already satisfied: numexpr>=2.5.2 in /usr/local/lib/python3.7/dist-packages (from tables->deepdish->hypertools) (2.8.1) Requirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from numexpr>=2.5.2->tables->deepdish->hypertools) (21.3) Building wheels for collected packages: umap-learn, pynndescent Building wheel for umap-learn (setup.py) ... [?25l[?25hdone Created wheel for umap-learn: filename=umap_learn-0.5.2-py3-none-any.whl size=82708 sha256=e20dddd00dfea2586aa681bf1d2299264d55d59f0c94fc349b4b91aeced43fb4 Stored in directory: /root/.cache/pip/wheels/84/1b/c6/aaf68a748122632967cef4dffef68224eb16798b6793257d82 Building wheel for pynndescent (setup.py) ... 
[?25l[?25hdone Created wheel for pynndescent: filename=pynndescent-0.5.6-py3-none-any.whl size=53943 sha256=30976985df7adf209153e39c9849226ceb4e58ce72a73dfefaa7aae4a5d0ba8c Stored in directory: /root/.cache/pip/wheels/03/f1/56/f80d72741e400345b5a5b50ec3d929aca581bf45e0225d5c50 Successfully built umap-learn pynndescent Installing collected packages: scikit-learn, pynndescent, umap-learn, PPCA, deepdish, hypertools Attempting uninstall: scikit-learn Found existing installation: scikit-learn 1.0.2 Uninstalling scikit-learn-1.0.2: Successfully uninstalled scikit-learn-1.0.2 ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. imbalanced-learn 0.8.1 requires scikit-learn>=0.24, but you have scikit-learn 0.23.2 which is incompatible. Successfully installed PPCA-0.0.4 deepdish-0.3.7 hypertools-0.7.0 pynndescent-0.5.6 scikit-learn-0.23.2 umap-learn-0.5.2 ###Markdown Load in datasets ###Code #load the datasets california = fetch_openml(name='house_prices', as_frame=True) digits = datasets.load_digits() wine = datasets.load_wine() #convert the datasets into Pandas DataFrames california['data']['SalePrice'] = california['target'] california = california['data'].set_index('Id') digits = pd.DataFrame(digits['data'], index=digits['target']) wine = pd.DataFrame(wine['data'], columns=wine['feature_names'], index=wine['target']) california.head() digits.head() wine.head() ###Output _____no_output_____ ###Markdown HypertoolsHyperTools is a nice tool for starting to explore a dataset. Plotting a dataset with HyperTools entails projecting the data onto a 3-dimensional space, where similarly valued datapoints are shown close together. Documentation for Hypertools may be found [here](https://hypertools.readthedocs.io/en/latest/). ###Code #Digits dataset hyp.plot(digits, 'o', hue=np.array(digits.index)); #each digit is a drawn digit, colored by the number it refers to ###Output /usr/local/lib/python3.7/dist-packages/hypertools/plot/plot.py:509: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray kwargs[kwarg]=np.array(kwargs[kwarg]) ###Markdown Visualizing some raw dataThe `digits` dataset contains 8x8 drawings of different digits. The hypertools plot above suggests that drawings of different digits are "clustered" in different ways. 
Let's visualize a few example digits using seaborn heatmaps ###Code #plot the first 10 example digits from the dataset for i in range(10): print(f'Digit identity: {digits.index[i]}') sns.heatmap(np.reshape(digits.iloc[i].values, [8, 8]), cmap='gray') plt.show() ###Output Digit identity: 0 ###Markdown Scatterplots with plotly express ###Code px.scatter(california, x='YearBuilt', y='SalePrice') px.scatter_3d(wine, x='alcohol', y='magnesium', z='ash', color='hue', size='color_intensity') ###Output _____no_output_____ ###Markdown Simple line plot with matplotlib ###Code plt.plot(np.sort(california['YearBuilt']), 'k-'); plt.xlabel('House age rank'); plt.ylabel('Year built'); ###Output _____no_output_____ ###Markdown Grouped bar plots with seabornWe'll use a new example dataset that works well for this example: - `titanic`: list of Titanic passengers and their attributes ###Code titanic = sns.load_dataset("titanic") titanic.head() #source: https://seaborn.pydata.org/examples/grouped_barplot.html g = sns.catplot(x="class", y="survived", hue="sex", data=titanic, height=6, kind="bar", palette="muted") g.despine(left=True) g.set_ylabels("survival probability") ###Output _____no_output_____ ###Markdown Many other example plots using plotly expressWe'll use a new dataset for these examples: - `gapminder`: a dataset of life expectancy information by year and location ###Code #source: https://nbviewer.jupyter.org/github/plotly/plotly_express/blob/master/walkthrough.ipynb gapminder = px.data.gapminder() gapminder2007 = gapminder.query("year == 2007") gapminder2007.head() px.scatter(gapminder2007, x="gdpPercap", y="lifeExp", color="continent", size="pop", size_max=60, hover_name="country") px.scatter(gapminder2007, x="gdpPercap", y="lifeExp", color="continent", size="pop", size_max=60, hover_name="country", facet_col="continent", log_x=True) #neat animation px.scatter(gapminder, x="gdpPercap", y="lifeExp",size="pop", size_max=60, color="continent", hover_name="country", animation_frame="year", animation_group="country", log_x=True, range_x=[100,100000], range_y=[25,90], labels=dict(pop="Population", gdpPercap="GDP per Capita", lifeExp="Life Expectancy")) px.choropleth(gapminder, locations="iso_alpha", color="lifeExp", hover_name="country", animation_frame="year", color_continuous_scale=px.colors.sequential.Plasma, projection="natural earth") ###Output _____no_output_____
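One possible follow-up, not in the original slides: plotly express returns regular `Figure` objects, so an interactive plot such as the choropleth above can be saved to a standalone HTML file (the file name below is just an example).
```
fig = px.choropleth(gapminder, locations="iso_alpha", color="lifeExp",
                    hover_name="country", animation_frame="year",
                    color_continuous_scale=px.colors.sequential.Plasma,
                    projection="natural earth")
fig.write_html("gapminder_choropleth.html")  # open the file in any browser
```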
Python/AbsoluteAndOtherAlgorithms/3Activity/InfFS_50.ipynb
###Markdown 1. Import libraries ###Code #----------------------------Reproducible---------------------------------------------------------------------------------------- import numpy as np import random as rn import os seed=0 os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) rn.seed(seed) #----------------------------Reproducible---------------------------------------------------------------------------------------- os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import pandas as pd import scipy.sparse as sparse import scipy.io from sklearn.linear_model import LinearRegression import time from sklearn.model_selection import cross_val_score from sklearn.model_selection import ShuffleSplit from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.preprocessing import MinMaxScaler #-------------------------------------------------------------------------------------------------------------------------------- #Import ourslef defined methods import sys sys.path.append(r"../Defined") import Functions as F ###Output Using TensorFlow backend. ###Markdown 2. Loading data ###Code train_data_arr=np.array(pd.read_csv('../Dataset/final_X_train.txt',header=None)) test_data_arr=np.array(pd.read_csv('../Dataset/final_X_test.txt',header=None)) train_label_arr=(np.array(pd.read_csv('../Dataset/final_y_train.txt',header=None))-1) test_label_arr=(np.array(pd.read_csv('../Dataset/final_y_test.txt',header=None))-1) data_arr=np.r_[train_data_arr,test_data_arr] label_arr=np.r_[train_label_arr,test_label_arr] label_arr_onehot=label_arr#to_categorical(label_arr) print(data_arr.shape) print(label_arr_onehot.shape) data_arr=MinMaxScaler(feature_range=(0,1)).fit_transform(data_arr) key_feture_number=50 ###Output _____no_output_____ ###Markdown 3. 
Calculation ###Code #-------------------------------------------------------------------------------------------------------------------------------- def IsnanAndIsinf(p_data): p_data=np.array(p_data) for i in np.arange(p_data.shape[0]): for j in np.arange(p_data.shape[1]): if np.isnan(p_data[i,j]) or np.isinf(p_data[i,j]): p_data[i,j]=0 return p_data #-------------------------------------------------------------------------------------------------------------------------------- def write_to_csv(p_data,p_path): dataframe = pd.DataFrame(p_data) dataframe.to_csv(p_path, mode='a',header=False,index=False,sep=',') del dataframe #-------------------------------------------------------------------------------------------------------------------------------- def mse_check(train, test): LR = LinearRegression(n_jobs = -1) LR.fit(train[0], train[1]) MSELR = ((LR.predict(test[0]) - test[1]) ** 2).mean() return MSELR #-------------------------------------------------------------------------------------------------------------------------------- def InfFS(p_data_arr,p_alpha,use_specify_number=False,specify_number=50): df = pd.DataFrame(p_data_arr) corr_ij_spearman__=df.corr(method ='spearman') corr_ij_spearman_=IsnanAndIsinf(corr_ij_spearman__) corr_ij_spearman=1-np.abs(corr_ij_spearman_) STD=np.std(p_data_arr,axis=0) STDMatrix_=np.zeros((STD.shape[0],STD.shape[0])) for i in np.arange(STD.shape[0]): for j in np.arange(STD.shape[0]): STDMatrix_[i,j]=max(STD[i],STD[j]) STDMatrix_min=STDMatrix_-np.min(STDMatrix_) STDMatrix_max=np.max(STDMatrix_min) STDMatrix__=STDMatrix_min/STDMatrix_max STDMatrix=IsnanAndIsinf(STDMatrix__) N=p_data_arr.shape[1] eps = (5e-06) * N; factor = 1 - eps A = ( p_alpha*STDMatrix + (1-p_alpha)*corr_ij_spearman ) rho = np.max(np.sum(A,axis=1)) A = A / (rho+eps) I = np.eye(A.shape[0]) r = factor/rho y = I - ( r * A ) S=np.linalg.inv(y) WEIGHT = np.sum( S , axis=1 ) RANKED=np.argsort(-WEIGHT) RANKED = RANKED WEIGHT = WEIGHT e = np.ones(N) t = np.dot(S, e) nbins = 0.5*N cnts, bins = np.histogram(t, bins=int(nbins)) thr =np.mean(cnts) size_sub = np.sum(cnts>thr) if use_specify_number: size_sub=specify_number SUBSET = RANKED[0:size_sub] return SUBSET #-------------------------------------------------------------------------------------------------------------------------------- def cal(p_data_arr,\ p_label_arr_onehot,\ p_key_feture_number,\ p_seed): C_train_x,C_test_x,C_train_y,C_test_y= train_test_split(p_data_arr,p_label_arr_onehot,test_size=0.2,random_state=p_seed) os.environ['PYTHONHASHSEED'] = str(p_seed) np.random.seed(p_seed) rn.seed(p_seed) #-------------------------------------------------------------------------------------------------------------------------------- train_feature=C_train_x test_feature=C_test_x t_start = time.time() train_idx=InfFS(train_feature,p_alpha,use_specify_number=True,specify_number=p_key_feture_number) t_used=time.time() - t_start C_train_selected_x = train_feature[:, train_idx] test_idx=InfFS(test_feature,p_alpha,use_specify_number=True,specify_number=p_key_feture_number) C_test_selected_x = test_feature[:, test_idx] # Classification on original features train_feature=C_train_x train_label=C_train_y test_feature=C_test_x test_label=C_test_y orig_train_acc,orig_test_acc=F.ETree(train_feature,train_label,test_feature,test_label,0) # Classification on selected features train_feature=C_train_selected_x train_label=C_train_y test_feature=C_test_selected_x test_label=C_test_y 
selec_train_acc,selec_test_acc=F.ETree(train_feature,train_label,test_feature,test_label,0) # Linear reconstruction train_feature_tuple=(C_train_selected_x,C_train_x) test_feature_tuple=(C_test_selected_x,C_test_x) reconstruction_loss=mse_check(train_feature_tuple, test_feature_tuple) results=np.array([orig_train_acc,orig_test_acc,selec_train_acc,selec_test_acc,reconstruction_loss]) print(results) return orig_train_acc,orig_test_acc,selec_train_acc,selec_test_acc,reconstruction_loss p_data_arr=data_arr p_alpha=0.5 p_label_arr_onehot=label_arr_onehot p_key_feture_number=key_feture_number p_seed=0 orig_train_acc,orig_test_acc,selec_train_acc,selec_test_acc,reconstruction_loss=cal(p_data_arr,\ p_label_arr_onehot,\ p_key_feture_number,\ p_seed) ###Output ../Defined/Functions.py:196: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel(). clf.fit(p_train_feature, p_train_label)
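To see what the `InfFS` function defined above does in isolation, here is a small synthetic-data sketch. It is purely illustrative: the toy matrix and the choice of 5 features are assumptions, not part of the original experiment.
```
# Rank the columns of a random 100 x 20 matrix and keep the top 5 features.
# Assumes the InfFS function defined above is in scope.
rng = np.random.RandomState(0)
toy_data = rng.rand(100, 20)
top5_idx = InfFS(toy_data, 0.5, use_specify_number=True, specify_number=5)
print("Selected feature indices:", top5_idx)
```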
TimeSeries/StackedLSTMForecastModel.ipynb
###Markdown Stacked LSTM Forecasting model
https://machinelearningmastery.com/how-to-develop-lstm-models-for-time-series-forecasting/ ###Code
from numpy import array
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

# Split a univariate sequence into (input, output) samples
def split_sequence(sequence, n_steps):
    X, y = list(), list()
    for i in range(len(sequence)):
        # Find the end of this pattern
        end_ix = i + n_steps
        # Stop if the pattern would run past the end of the sequence
        if end_ix > len(sequence) - 1:
            break
        # Gather input and output parts of the pattern
        seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
        X.append(seq_x)
        y.append(seq_y)
    return array(X), array(y)

# Define input sequence
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90]
# Choose a number of time steps
n_steps = 3
# Split into samples
X, y = split_sequence(raw_seq, n_steps)
# Reshape from [samples, timesteps] into [samples, timesteps, features]
n_features = 1
X = X.reshape((X.shape[0], X.shape[1], n_features))
# Define the stacked model: return_sequences=True makes the first LSTM layer
# emit its hidden state at every timestep so the second LSTM layer receives a sequence
model = Sequential()
model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(n_steps, n_features)))
model.add(LSTM(50, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# Fit model
model.fit(X, y, epochs=200)
# Demonstrate prediction on the last observed window
x_input = array([70, 80, 90])
x_input = x_input.reshape((1, n_steps, n_features))
yhat = model.predict(x_input)
yhat ###Output _____no_output_____
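A possible extension, not in the original notebook: the fitted model can forecast several steps ahead recursively by feeding each prediction back in as the newest observation. This sketch assumes the `model`, `raw_seq`, `n_steps` and `n_features` variables defined above.
```
# Recursive multi-step forecast: predict one step, append the prediction to
# the history, and repeat using the most recent n_steps values as input.
history = list(raw_seq)
n_future = 3
for _ in range(n_future):
    x = array(history[-n_steps:]).reshape((1, n_steps, n_features))
    next_val = float(model.predict(x)[0, 0])
    history.append(next_val)

print(history[-n_future:])  # the three forecasted values
```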
Big-Data-Clusters/CU9/public/content/repair/tsg126-accept-license-terms.ipynb
###Markdown TSG126 - azdata fails with ‘accept the license terms to use this product’ DescriptionWhen running an `azdata` command, it fails with: ERROR: Please accept the license terms to use this product through the prompt in an interactive env, or through the environment variable ACCEPT_EULA set to 'yes' in a non-interactive env. Common functionsDefine helper functions used in this notebook. ###Code # Define `run` function for transient fault handling, hyperlinked suggestions, and scrolling updates on Windows import sys import os import re import platform import shlex import shutil import datetime from subprocess import Popen, PIPE from IPython.display import Markdown retry_hints = {} # Output in stderr known to be transient, therefore automatically retry error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help install_hint = {} # The SOP to help install the executable if it cannot be found def run(cmd, return_output=False, no_output=False, retry_count=0, base64_decode=False, return_as_json=False): """Run shell command, stream stdout, print stderr and optionally return output NOTES: 1. Commands that need this kind of ' quoting on Windows e.g.: kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name} Need to actually pass in as '"': kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name} The ' quote approach, although correct when pasting into Windows cmd, will hang at the line: `iter(p.stdout.readline, b'')` The shlex.split call does the right thing for each platform, just use the '"' pattern for a ' """ MAX_RETRIES = 5 output = "" retry = False # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see: # # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)') # if platform.system() == "Windows" and cmd.startswith("azdata sql query"): cmd = cmd.replace("\n", " ") # shlex.split is required on bash and for Windows paths with spaces # cmd_actual = shlex.split(cmd) # Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries # user_provided_exe_name = cmd_actual[0].lower() # When running python, use the python in the ADS sandbox ({sys.executable}) # if cmd.startswith("python "): cmd_actual[0] = cmd_actual[0].replace("python", sys.executable) # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail # with: # # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128) # # Setting it to a default value of "en_US.UTF-8" enables pip install to complete # if platform.system() == "Darwin" and "LC_ALL" not in os.environ: os.environ["LC_ALL"] = "en_US.UTF-8" # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc` # if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ: cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc") # To aid supportability, determine which binary file will actually be executed on the machine # which_binary = None # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance # of CURL exists on the machine use that one. 
(Unfortunately the curl.exe in System32 is almost # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we # look for the 2nd installation of CURL in the path) if platform.system() == "Windows" and cmd.startswith("curl "): path = os.getenv('PATH') for p in path.split(os.path.pathsep): p = os.path.join(p, "curl.exe") if os.path.exists(p) and os.access(p, os.X_OK): if p.lower().find("system32") == -1: cmd_actual[0] = p which_binary = p break # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this # seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound) # # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split. # if which_binary == None: which_binary = shutil.which(cmd_actual[0]) # Display an install HINT, so the user can click on a SOP to install the missing binary # if which_binary == None: print(f"The path used to search for '{cmd_actual[0]}' was:") print(sys.path) if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None: display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") else: cmd_actual[0] = which_binary start_time = datetime.datetime.now().replace(microsecond=0) print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)") print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})") print(f" cwd: {os.getcwd()}") # Command-line tools such as CURL and AZDATA HDFS commands output # scrolling progress bars, which causes Jupyter to hang forever, to # workaround this, use no_output=True # # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait # wait = True try: if no_output: p = Popen(cmd_actual) else: p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1) with p.stdout: for line in iter(p.stdout.readline, b''): line = line.decode() if return_output: output = output + line else: if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file regex = re.compile(' "(.*)"\: "(.*)"') match = regex.match(line) if match: if match.group(1).find("HTML") != -1: display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"')) else: display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"')) wait = False break # otherwise infinite hang, have not worked out why yet. else: print(line, end='') if wait: p.wait() except FileNotFoundError as e: if install_hint is not None: display(Markdown(f'HINT: Use {install_hint} to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait() if not no_output: for line in iter(p.stderr.readline, b''): try: line_decoded = line.decode() except UnicodeDecodeError: # NOTE: Sometimes we get characters back that cannot be decoded(), e.g. 
# # \xa0 # # For example see this in the response from `az group create`: # # ERROR: Get Token request returned http error: 400 and server # response: {"error":"invalid_grant",# "error_description":"AADSTS700082: # The refresh token has expired due to inactivity.\xa0The token was # issued on 2018-10-25T23:35:11.9832872Z # # which generates the exception: # # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte # print("WARNING: Unable to decode stderr line, printing raw bytes:") print(line) line_decoded = "" pass else: # azdata emits a single empty line to stderr when doing an hdfs cp, don't # print this empty "ERR:" as it confuses. # if line_decoded == "": continue print(f"STDERR: {line_decoded}", end='') if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"): exit_code_workaround = 1 # inject HINTs to next TSG/SOP based on output in stderr # if user_provided_exe_name in error_hints: for error_hint in error_hints[user_provided_exe_name]: if line_decoded.find(error_hint[0]) != -1: display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.')) # Verify if a transient error, if so automatically retry (recursive) # if user_provided_exe_name in retry_hints: for retry_hint in retry_hints[user_provided_exe_name]: if line_decoded.find(retry_hint) != -1: if retry_count < MAX_RETRIES: print(f"RETRY: {retry_count} (due to: {retry_hint})") retry_count = retry_count + 1 output = run(cmd, return_output=return_output, retry_count=retry_count) if return_output: if base64_decode: import base64 return base64.b64decode(output).decode('utf-8') else: return output elapsed = datetime.datetime.now().replace(microsecond=0) - start_time # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so # don't wait here, if success known above # if wait: if p.returncode != 0: raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n') else: if exit_code_workaround !=0 : raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n') print(f'\nSUCCESS: {elapsed}s elapsed.\n') if return_output: if base64_decode: import base64 return base64.b64decode(output).decode('utf-8') else: return output # Hints for tool retry (on transient fault), known errors and install guide # retry_hints = {'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use', 'Login timeout expired (0) (SQLDriverConnect)', 'SSPI Provider: No Kerberos credentials available', ], 'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond', ], 'python': [ ], } error_hints = {'azdata': [['Please run \'azdata login\' to first authenticate', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', 
'../common/sop028-azdata-login.ipynb'],
['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', '../diagnose/tsg027-observe-bdc-create.ipynb'],
['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'],
['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'],
['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'],
['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'],
['Can\'t open lib \'ODBC Driver 17 for SQL Server', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'],
['Control plane upgrade failed. Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb'],
['NameError: name \'azdata_login_secret_name\' is not defined', 'SOP013 - Create secret for azdata login (inside cluster)', '../common/sop013-create-secret-for-azdata-login.ipynb'],
['ERROR: No credentials were supplied, or the credentials were unavailable or inaccessible.', 'TSG124 - \'No credentials were supplied\' error from azdata login', '../repair/tsg124-no-credentials-were-supplied.ipynb'],
['Please accept the license terms to use this product through', 'TSG126 - azdata fails with \'accept the license terms to use this product\'', '../repair/tsg126-accept-license-terms.ipynb'],
],
'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'],
['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb'],
],
'python': [['Library not loaded: /usr/local/opt/unixodbc', 'SOP012 - Install unixodbc for Mac', '../install/sop012-brew-install-odbc-for-sql-server.ipynb'],
['WARNING: You are using pip version', 'SOP040 - Upgrade pip in ADS Python sandbox', '../install/sop040-upgrade-pip.ipynb'],
],
}
install_hint = {'azdata': [ 'SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb' ],
'kubectl': [ 'SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb' ],
}
print('Common functions defined successfully.')
###Output _____no_output_____
###Markdown StepsSet the `ACCEPT_EULA` environment variable to "yes" for this session and then run an `azdata` command once, e.g.:
###Code
# Accept the EULA non-interactively, then run any azdata command once
os.environ["ACCEPT_EULA"] = "yes"
run('azdata bdc config list')
print("Notebook execution is complete.")
###Output _____no_output_____
statrethink_stan_w08.ipynb
###Markdown Rethinking Statistics course in Stan - Week 8 Lecture 15: Multilevel Models- [Video](https://www.youtube.com/watch?v=AALYPv5xSos)- [Slides](https://speakerdeck.com/rmcelreath/l15-statistical-rethinking-winter-2019)Lecture 16: Multilevel Models 2- [Video](https://www.youtube.com/watch?v=ZG3Oe35R5sY)- [Slides](https://speakerdeck.com/rmcelreath/l16-statistical-rethinking-winter-2019)[Proposed problems](https://github.com/gbosquechacon/statrethinking_winter2019/blob/master/homework/week08.pdf) and [solutions in R](https://github.com/gbosquechacon/statrethinking_winter2019/blob/master/homework/week08_solutions.pdf) for the exercises of the week. ###Code import pandas as pd import numpy as np from cmdstanpy import CmdStanModel from plotnine import * %load_ext watermark %watermark -n -u -p pandas,numpy,cmdstanpy,plotnine def waic(stan_fit): log_lik = stan_fit.stan_variable('log_lik') lik_mean = np.ma.masked_invalid(np.exp(log_lik).mean(axis=0)) lppd = np.log(lik_mean).sum() p_waic = np.var(log_lik, axis=0).sum() waic = -2*lppd + 2*p_waic return round(waic, 3) ###Output _____no_output_____ ###Markdown Exercise 1 > Revisit the Reed frog survival data, `reedfrogs`, and add the predation and size treatment variables to the varying intercepts model. Consider models with either predictor alone, both predictors, as well as a model including their interaction. What do you infer about the causal influence of these predictor variables? Also focus on the inferred variation across tanks (the σ across tanks). Explain why it changes as it does across models with different predictors included. Let's get the data. ###Code d = pd.read_csv('./dat/reedfrogs.csv', header=0, sep=';') d['tank'] = d.index d['pred'] = pd.factorize(d['pred'])[0] d['volume'] = d['size'].replace({'small':0, 'big':1}) d.tail(3) ###Output _____no_output_____ ###Markdown Now to define a series of models. The first is just the varying intercepts model from the text. 
Model 1: ###Code model = ''' data { int n; int tank[n]; int density[n]; int surv[n]; } parameters { real a_bar; real sigma; vector[n] a; } model { // prior a_bar ~ normal(0, 1.5); sigma ~ exponential(1); a ~ normal(a_bar, sigma); // likelihood surv ~ binomial_logit(density, a); } generated quantities { vector[n] log_lik; for (i in 1:n) { log_lik[i] = binomial_logit_lpmf(surv[i] | density[i], a[i]); } } ''' stan_file = './stn/week08_01a.stan' with open(stan_file, 'w') as f: print(model, file=f) model_1a = CmdStanModel(stan_file=stan_file) model_1a.compile() data = d[['tank', 'density', 'surv']].copy() data = data.to_dict(orient='list') data['n'] = len(data['surv']) fit_1a = model_1a.sample(data=data, chains=4) ###Output INFO:cmdstanpy:compiling stan file /home/jovyan/work/statret/stn/week08_01a.stan to exe file /home/jovyan/work/statret/stn/week08_01a INFO:cmdstanpy:compiled model executable: /home/jovyan/work/statret/stn/week08_01a INFO:cmdstanpy:found newer exe file, not recompiling INFO:cmdstanpy:CmdStan start procesing ###Markdown Model 2 (pred): ###Code model = ''' data { int n; int tank[n]; int density[n]; int pred[n]; int surv[n]; } parameters { real a_bar; real sigma; real bp; vector[n] a; } transformed parameters { vector[n] alpha; for (i in 1:n) {alpha[i] = a[i] + bp*pred[i];} } model { // prior a_bar ~ normal(0, 1.5); sigma ~ exponential(1); a ~ normal(a_bar, sigma); bp ~ normal(-0.5, 1); // likelihood for (i in 1:n) {surv[i] ~ binomial_logit(density[i], alpha[i]);} } generated quantities { vector[n] log_lik; for (i in 1:n) { log_lik[i] = binomial_logit_lpmf(surv[i] | density[i], alpha[i]); } } ''' stan_file = './stn/week08_01b.stan' with open(stan_file, 'w') as f: print(model, file=f) model_1b = CmdStanModel(stan_file=stan_file) model_1b.compile() data = d[['tank', 'density', 'pred', 'surv']].copy() data = data.to_dict(orient='list') data['n'] = len(data['surv']) fit_1b = model_1b.sample(data=data, chains=4) ###Output INFO:cmdstanpy:compiling stan file /home/jovyan/work/statret/stn/week08_01b.stan to exe file /home/jovyan/work/statret/stn/week08_01b INFO:cmdstanpy:compiled model executable: /home/jovyan/work/statret/stn/week08_01b INFO:cmdstanpy:found newer exe file, not recompiling INFO:cmdstanpy:CmdStan start procesing ###Markdown Model 3 (size): ###Code model = ''' data { int k; int n; int tank[n]; int density[n]; int volume[n]; int surv[n]; } parameters { real a_bar; real sigma; vector[k] s; vector[n] a; } transformed parameters { vector[n] alpha; for (i in 1:n) {alpha[i] = a[i] + s[volume[i]];} } model { // prior a_bar ~ normal(0, 1.5); sigma ~ exponential(1); a ~ normal(a_bar, sigma); s ~ normal(0, 0.5); // likelihood for (i in 1:n) {surv[i] ~ binomial_logit(density[i], alpha[i]);} } generated quantities { vector[n] log_lik; for (i in 1:n) { log_lik[i] = binomial_logit_lpmf(surv[i] | density[i], alpha[i]); } } ''' stan_file = './stn/week08_01c.stan' with open(stan_file, 'w') as f: print(model, file=f) model_1c = CmdStanModel(stan_file=stan_file) model_1c.compile() data = d[['tank', 'density', 'volume', 'surv']].copy() data.volume = data.volume + 1 data = data.to_dict(orient='list') data['n'] = d.surv.size data['k'] = d.volume.nunique() fit_1c = model_1c.sample(data=data, chains=4) ###Output INFO:cmdstanpy:compiling stan file /home/jovyan/work/statret/stn/week08_01c.stan to exe file /home/jovyan/work/statret/stn/week08_01c INFO:cmdstanpy:compiled model executable: /home/jovyan/work/statret/stn/week08_01c INFO:cmdstanpy:found newer exe file, not recompiling 
INFO:cmdstanpy:CmdStan start procesing ###Markdown Model 4 (pred + size): ###Code model = ''' data { int n; int k; int tank[n]; int density[n]; int volume[n]; int pred[n]; int surv[n]; } parameters { real a_bar; real sigma; real bp; vector[k] s; vector[n] a; } transformed parameters { vector[n] alpha; for (i in 1:n) {alpha[i] = a[i] + bp*pred[i] + s[volume[i]];} } model { // prior a_bar ~ normal(0, 1.5); sigma ~ exponential(1); a ~ normal(a_bar, sigma); s ~ normal(0, 0.5); // likelihood for (i in 1:n) {surv[i] ~ binomial_logit(density[i], alpha[i]);} } generated quantities { vector[n] log_lik; for (i in 1:n) { log_lik[i] = binomial_logit_lpmf(surv[i] | density[i], alpha[i]); } } ''' stan_file = './stn/week08_01d.stan' with open(stan_file, 'w') as f: print(model, file=f) model_1d = CmdStanModel(stan_file=stan_file) model_1d.compile() data = d[['tank', 'density', 'volume', 'pred', 'surv']].copy() data.volume = data.volume + 1 data = data.to_dict(orient='list') data['n'] = d.surv.size data['k'] = d.volume.nunique() fit_1d = model_1d.sample(data=data, chains=4) ###Output INFO:cmdstanpy:compiling stan file /home/jovyan/work/statret/stn/week08_01d.stan to exe file /home/jovyan/work/statret/stn/week08_01d INFO:cmdstanpy:compiled model executable: /home/jovyan/work/statret/stn/week08_01d INFO:cmdstanpy:found newer exe file, not recompiling INFO:cmdstanpy:CmdStan start procesing ###Markdown Model 5 (pred + size + interaction): ###Code model = ''' data { int n; int k; int tank[n]; int density[n]; int volume[n]; int pred[n]; int surv[n]; } parameters { real a_bar; real sigma; vector[k] bp; vector[k] s; vector[n] a; } transformed parameters { vector[n] alpha; for (i in 1:n) {alpha[i] = a[i] + bp[volume[i]]*pred[i] + s[volume[i]];} } model { // prior a_bar ~ normal(0, 1.5); sigma ~ exponential(1); a ~ normal(a_bar, sigma); s ~ normal(0, 0.5); // likelihood for (i in 1:n) {surv[i] ~ binomial_logit(density[i], alpha[i]);} } generated quantities { vector[n] log_lik; for (i in 1:n) { log_lik[i] = binomial_logit_lpmf(surv[i] | density[i], alpha[i]); } } ''' stan_file = './stn/week08_01e.stan' with open(stan_file, 'w') as f: print(model, file=f) model_1e = CmdStanModel(stan_file=stan_file) model_1e.compile() data = d[['tank', 'density', 'volume', 'pred', 'surv']].copy() data.volume = data.volume + 1 data = data.to_dict(orient='list') data['n'] = d.surv.size data['k'] = d.volume.nunique() fit_1e = model_1e.sample(data=data, chains=4) ###Output INFO:cmdstanpy:compiling stan file /home/jovyan/work/statret/stn/week08_01e.stan to exe file /home/jovyan/work/statret/stn/week08_01e INFO:cmdstanpy:compiled model executable: /home/jovyan/work/statret/stn/week08_01e INFO:cmdstanpy:found newer exe file, not recompiling INFO:cmdstanpy:CmdStan start procesing ###Markdown I coded the interaction model (`samples_5`) using a non-centered parameterization. The interaction itself is done by creating a `bp` parameter for each size value. In this way, the effect of `pred` depends upon `volume`. First let's consider the WAIC scores: ###Code sc = { '1' : waic(fit_1a), '2' : waic(fit_1b), '3' : waic(fit_1c), '4' : waic(fit_1d), '5' : waic(fit_1e), } scores = pd.DataFrame(sc.items(), columns=['model','waic']) scores.sort_values(by='waic') ###Output _____no_output_____ ###Markdown These models are really very similar in expected out-of-sample accuracy. The tank variation is huge. But take a look at the posterior distributions for predation and size. You'll see that predation does seem to matter, as you'd expect. 
Size matters a lot less. So while predation doesn't explain much of the total variation, there is plenty of evidence that it is a real effect. Remember: We don't select a model using WAIC(or LOO). A predictor can make little difference in total accuracy but still be a real causal effect.Let's look at all the sigma posterior distributions: ###Code atts = ['sigma'] fits = {'1':fit_1a, '2':fit_1b, '3':fit_1c, '4':fit_1d, '5':fit_1e} akk = [] for k in fits.keys(): df = fits[k].draws_pd(vars=atts) df['model'] = k akk.append(df) im = pd.concat(akk) ( ggplot(im) + aes(x='model', y='sigma') + coord_flip() + geom_boxplot(outlier_alpha=0.1) + theme_light() + theme(figure_size=(6, 3)) ) ###Output _____no_output_____ ###Markdown The two models that omit predation, `model_11` and `model_13`, have larger values of `sigma`. This is because predation explains some of the variation among tanks. So when you add it to the model, the variation in the tank intercepts gets smaller. Exercise 2 > In 1980, a typical Bengali woman could have 5 or more children in her lifetime. By the year 2000, a typical Bengali woman had only 2 or 3. You're going to look at a historical set of data, when contraception was widely available but many families chose not to use it. These data reside in data `bangladesh` and come from the 1988 Bangladesh Fertility Survey. Each row is one of 1934 women. There are six variables, but you can focus on two of them for this practice problem:> 1. `district`: ID number of administrative district each woman resided in2. `use.contraception`: An indicator (0/1) of whether the woman was using contraception ###Code d = pd.read_csv('./dat/bangladesh.csv', header=0, sep=';') d['did'] = pd.factorize(d['district'])[0] d['conc'] = d['use.contraception'] d.head() model = ''' data { int n; int k; int did[n]; int conc[n]; } parameters { vector[k] alpha; } model { // prior alpha ~ normal(0, 1.5); // likelihood for (i in 1:n) { conc[i] ~ bernoulli_logit(alpha[did[i]]); } } ''' stan_file = './stn/week08_02a.stan' with open(stan_file, 'w') as f: print(model, file=f) model_2a = CmdStanModel(stan_file=stan_file) model_2a.compile() data = d[['did', 'conc']].copy() data.did = data.did + 1 data = data.to_dict(orient='list') data['n'] = d.conc.size data['k'] = d.did.nunique() fit_2a = model_2a.sample(data=data, chains=4) ###Output INFO:cmdstanpy:compiling stan file /home/jovyan/work/statret/stn/week08_02a.stan to exe file /home/jovyan/work/statret/stn/week08_02a INFO:cmdstanpy:compiled model executable: /home/jovyan/work/statret/stn/week08_02a INFO:cmdstanpy:found newer exe file, not recompiling INFO:cmdstanpy:CmdStan start procesing ###Markdown Now for the ordinary fixed effect model: ###Code model = ''' data { int n; int k; int did[n]; int conc[n]; } parameters { real alpha_bar; real sigma; vector[k] alpha; } model { // prior alpha_bar ~ normal(0, 1.5); sigma ~ exponential(1); alpha ~ normal(alpha_bar, sigma); // likelihood for (i in 1:n) { conc[i] ~ bernoulli_logit(alpha[did[i]]); } } ''' stan_file = './stn/week08_02b.stan' with open(stan_file, 'w') as f: print(model, file=f) model_2b = CmdStanModel(stan_file=stan_file) model_2b.compile() data = d[['did', 'conc']].copy() data.did = data.did + 1 data = data.to_dict(orient='list') data['n'] = d.conc.size data['k'] = d.did.nunique() fit_2b = model_2b.sample(data=data, chains=4) ###Output INFO:cmdstanpy:compiling stan file /home/jovyan/work/statret/stn/week08_02b.stan to exe file /home/jovyan/work/statret/stn/week08_02b INFO:cmdstanpy:compiled model executable: 
/home/jovyan/work/statret/stn/week08_02b INFO:cmdstanpy:found newer exe file, not recompiling INFO:cmdstanpy:CmdStan start procesing ###Markdown Now let's extract the samples, compute posterior mean probabilities in each district, and plot it all: ###Code
def logit2prob(logits):
    odds = np.exp(logits)
    probs = odds/(1+odds)
    return probs

prob2a = logit2prob(fit_2a.stan_variable(var='alpha').mean(axis=0))
prob2b = logit2prob(fit_2b.stan_variable(var='alpha').mean(axis=0))
prob_line = logit2prob(fit_2b.stan_variable(var='alpha_bar').mean(axis=0))
###Output _____no_output_____
###Markdown The red points are the fixed (no-pooling) estimates. The blue points are the varying effects. As you'd expect, they are shrunk towards the mean (the dotted line). Some are shrunk more than others. The third district from the left shrunk a lot. Let's look at the sample size in each district: district 3 has only 2 women sampled. So it shrinks a lot. There are a couple of other districts, like 49 and 54, that also have very few women sampled. But their fixed estimates aren't as extreme, so they don't shrink as much as district 3 does. All of this is explained by partial pooling, of course. ###Code
im = pd.DataFrame({'model_2a':prob2a, 'model_2b':prob2b}).reset_index()
im['district'] = im.index+1
im = pd.melt(im, id_vars=['district'], value_vars=['model_2a','model_2b'], var_name='model', value_name='prob')
(
ggplot(im)
+ aes(x='district', y='prob', color='model')
+ geom_point()
+ geom_hline(yintercept=prob_line, linetype='dotted')
+ theme_light()
+ theme(figure_size=(9,2))
)
###Output _____no_output_____
###Markdown Exercise 3 Return to the Trolley data (`Trolley`) from Chapter 12. Define and fit a varying intercepts model for these data. By this I mean to add an intercept parameter for the individual to the linear model. Cluster the varying intercepts on individual participants, as indicated by the unique values in the id variable. Include action, intention, and contact as before. Compare the varying intercepts model and a model that ignores individuals, using both WAIC/LOO and posterior predictions. What is the impact of individual variation in these data?
###Code d = pd.read_csv('./dat/Trolley.csv', header=0, sep=';') elvl = d['edu'].unique() cat = {elvl[i]:i for i in [7 , 0 , 6 , 4 , 2 , 1, 3, 5]} d['edu_cat'] = d.edu.replace(cat) d['age_std'] = (d['age'] - d['age'].mean())/d['age'].std() d['id'] = pd.factorize(d['id'])[0] d.tail(3) ###Output _____no_output_____ ###Markdown First, let's load the data and re-run the old model from Chapter 12: ###Code model = ''' data { int n; int k; int action[n]; int contact[n]; int intention[n]; int response[n]; } parameters { ordered[k] cutpoints; real bA; real bC; real bI; real bIA; real bIC; } transformed parameters { vector[n] phi; vector[n] BI; for (i in 1:n) { BI[i] = bI + bIA*action[i] + bIC*contact[i]; phi[i] = bA*action[i] + bC*contact[i] + BI[i]*intention[i]; } } model { // prior cutpoints ~ normal(0,15); bA ~ normal(0,0.5); bC ~ normal(0,0.5); bI ~ normal(0,0.5); bIA ~ normal(0,0.5); bIC ~ normal(0,0.5); // likelihood for (i in 1:n) { response[i] ~ ordered_logistic(phi[i], cutpoints); } } generated quantities { vector[n] log_lik; for (i in 1:n) { log_lik[i] = ordered_logistic_lpmf(response[i] | phi[i], cutpoints); } } ''' stan_file = './stn/week08_03a.stan' with open(stan_file, 'w') as f: print(model, file=f) model_3a = CmdStanModel(stan_file=stan_file) model_3a.compile() data = d[['action', 'contact', 'intention', 'response']].copy() data = data.to_dict(orient='list') data['n'] = len(data['response']) data['k'] = d.response.nunique()-1 fit_3a = model_3a.sample(data=data, chains=4) ###Output INFO:cmdstanpy:CmdStan start procesing ###Markdown Now to run the varying intercept model, we need to build a valid individual ID variable. The IDs in the data are long tags, so we can coerce them to integers in many ways. What is important is that the index values go from 1 (_zero in python_) to the number of individuals. ###Code model = ''' data { int n; int k; int p; int id[n]; int action[n]; int contact[n]; int intention[n]; int response[n]; } parameters { ordered[k] cutpoints; vector[p] alpha; real sigma; real bA; real bC; real bI; real bIA; real bIC; } transformed parameters { vector[n] phi; vector[n] BI; for (i in 1:n) { BI[i] = bI + bIA*action[i] + bIC*contact[i]; phi[i] = alpha[id[i]] + bA*action[i] + bC*contact[i] + BI[i]*intention[i]; } } model { // prior cutpoints ~ normal(0,15); bA ~ normal(0,0.5); bC ~ normal(0,0.5); bI ~ normal(0,0.5); bIA ~ normal(0,0.5); bIC ~ normal(0,0.5); sigma ~ exponential(1); alpha ~ normal(0, sigma); // likelihood for (i in 1:n) { response[i] ~ ordered_logistic(phi[i], cutpoints); } } generated quantities { vector[n] log_lik; for (i in 1:n) { log_lik[i] = ordered_logistic_lpmf(response[i] | phi[i], cutpoints); } } ''' stan_file = './stn/week08_03b.stan' with open(stan_file, 'w') as f: print(model, file=f) model_3b = CmdStanModel(stan_file=stan_file) model_3b.compile() data = d[['action', 'contact', 'intention', 'response', 'id']].copy() data['id'] = data.id+1 data = data.to_dict(orient='list') data['n'] = len(data['response']) data['k'] = d.response.nunique()-1 data['p'] = d.id.nunique() fit_3b = model_3b.sample(data=data, chains=4) ###Output INFO:cmdstanpy:CmdStan start procesing ###Markdown We can begin by comparing the posterior distributions. 
The original coefficients are:
###Code
fit_3a.summary().loc[['bA','bC','bI','bIA', 'bIC']]
###Output
_____no_output_____
###Markdown
And the new ones, having added the individual IDs, are:
###Code
fit_3b.summary().loc[['bA','bC','bI','bIA', 'bIC']]
###Output
_____no_output_____
###Markdown
Everything has gotten more negative. This is because there is a lot of individual variation in average rating; look at the distribution for sigma. That is on the $logit$ scale, so it amounts to a lot of variation on the probability scale. That variation in average rating was hiding some of the effect of the treatments. We get more precision by conditioning on the individual.

The WAIC comparison can also help show how much variation comes from individual differences in average rating:
###Code
sc = {
    'a' : waic(fit_3a),
    'b' : waic(fit_3b),
}
scores = pd.DataFrame(sc.items(), columns=['model','waic'])
scores.sort_values(by='waic')
###Output
_____no_output_____
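###Markdown
The `waic` helper used above is defined earlier in this notebook and is not shown here. For reference, a minimal sketch of how such a helper could be computed from the `log_lik` values that both models generate might look like the following; this is an assumption about one standard implementation, not necessarily the exact code used above.
###Code
import numpy as np
from scipy.special import logsumexp

def waic_sketch(fit):
    # log_lik has shape (num_draws, num_observations)
    log_lik = fit.stan_variable(var='log_lik')
    n_draws = log_lik.shape[0]
    # log pointwise predictive density, computed stably with logsumexp
    lppd = (logsumexp(log_lik, axis=0) - np.log(n_draws)).sum()
    # effective number of parameters: variance of the log-likelihood across draws
    p_waic = log_lik.var(axis=0, ddof=1).sum()
    return -2 * (lppd - p_waic)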
Data Analysis with Pandas/Pandas.ipynb
###Markdown Pandas * Data analysis library* Advanced version of Excel with more features than excel Topics * Series* DataFrame* Operations* Missing Data* Merging and Joining* File Reading and Writing ###Code import pandas as pd ###Output _____no_output_____ ###Markdown Series in Pandas ###Code import numpy as np import pandas as pd pd.Series([2, 6, 10]) #Equivalent to 1D numpy array pd.Series([2, 6, 10], index = ['a', 'b', 'c']) #Custom indexing s1 = pd.Series([2, 6, 10], index = ['a', 'b', 'c']) #Custom indexing s1['a'] s1[['a','c']] pd.Series({'a1' : 5, 'b1' : 7, 'c1' : 4}) #Create Series using dict pd.Series({'a1' : 'Hello', 'b1' : 7, 'c1' : True}) #Multiple datatypes-- objects s1 = pd.Series([1, 3, 5], index = ['a', 'b', 'c']) s2 = pd.Series([2, 7, 9], index = ['b', 'c', 'd']) s1 s2 s1 * s2 #Multiplies values with same index s1 - s2 #Performs operations on values with same index ###Output _____no_output_____ ###Markdown DataFrame Can be considered as 2D Array ###Code df = pd.DataFrame(data = [[2,4,6,-1],[3,-4,2,-1],[-10,3,5,-7]], columns=['C1','C2','C3','C4'], index=['R1','R2','R3']) df df.head(2) #First few rows df['newC'] = 7 #Create new column df df['newC'] = df['C1'] + 5 df df.drop('newC', axis=1) #Delete column #Axis=0 --> row, #axis=1 -->columna df.drop('R1', axis=0) #Delete row df df.drop('newC', axis=1, inplace=True) #inplace --> modify existing DataFrame df df.drop('R1', axis=0, inplace=True) df ###Output _____no_output_____ ###Markdown Select ###Code df df['C2'] df[['C2', 'C4']] ###Output _____no_output_____ ###Markdown .loc(row, column) .iloc(row, column) -- refer by integer value ###Code df df.loc['R2', :] df.loc['R2', ['C2', 'C3']] df.iloc[1,:] #Refers to R3-- iloc references by integer df.iloc[1, 1:3] ###Output _____no_output_____ ###Markdown Index, Multiindex Single index ###Code df = pd.DataFrame(data = [[2,4,6,-1], [3,-4,2,-1],[-10,2,5,-7]], columns = ['C1', 'C2', 'C3', 'C4'], index = ['R1', 'R2', 'R3']) df df.index #Displays indexes df.reset_index() df.set_index('C3', inplace=True) #Set 'C3' as index for the DataFrame df df['C2'] df df.loc[2, :] df.loc[5,:] df.index ###Output _____no_output_____ ###Markdown Multiindex Creates multiple primary indexes for the DataFrame ###Code df = pd.DataFrame(data=np.array([['Math', 'C1', 's1', 93], ['Science', 'C1', 's2', 78], ['English', 'C1', 's3', 86], ['Math', 'C2', 's4', 58], ['English', 'C2', 's5', 71], ['Science', 'C2', 's6', 69]]), columns=['Subject', 'Class', 'Student','MaxScore']) df df.index df.set_index(['Class', 'Subject'], inplace=True) #Create multi-index df df.index df.loc['C1'] #Returns a sub-DataFrame since it has multi-index df.loc['C2'].loc['English'] #Multiple loc functions to grab the data ###Output _____no_output_____
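###Markdown
Besides chaining .loc calls, pandas offers the cross-section method .xs for selecting on a single level of a MultiIndex. A small sketch using the same df built above (standard pandas, shown here only as an extra illustration):
###Code
# All 'English' rows, regardless of 'Class'
df.xs('English', level='Subject')

# Cross-section on the first level is equivalent to df.loc['C1']
df.xs('C1', level='Class')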
notebooks/experiments/03-lwt-experimental-setup-ti-mf-ps.ipynb
###Markdown Load Data ###Code # set path to where repo is located REPO_PATH = os.path.join("..", "..") # setting path DATA_PATH = os.path.join(REPO_PATH, "data", "evaluation") LDA_PATH = os.path.join(REPO_PATH, "models", "lda") # category to train CATEGORY = "Pet_Supplies" # LDA training parameters EPOCHS = 10 # ti-mf training parameters, training epochs, learning, and regularisation rate PARAMS = {"num_epochs": 5, "learning_rate": 0.005, "beta": 0.1} # reproducibility checks SEED = 42 np.random.seed(42) # load train/test dataset and lda trained model train = pd.read_csv(f"{DATA_PATH}/{CATEGORY}_train.csv") test = pd.read_csv(f"{DATA_PATH}/{CATEGORY}_test.csv") lda = pickle.load(open(f"{LDA_PATH}/{CATEGORY}_lda.model", "rb")) # checking train dataframe train.head().append(train.tail()) test.head().append(test.tail()) # generating test history test_user_history = (pd.DataFrame(test.groupby(['reviewerID'])['asin'] .apply(list).reset_index())) print(test_user_history) ###Output reviewerID asin 0 A04173782GDZSQ91AJ7OD [B0090Z9AYS, B00CPDWT2M] 1 A042274212BJJVOBS4Q85 [B005AZ4M3Q, B00771WQIY] 2 A0436342QLT4257JODYJ [B0018CDR68, B003SJTM8Q, B00474A3DY] 3 A04795073FIBKY8GSLZYI [B001PKT30M, B005DGI2RY] 4 A06658082A27F4VB5UG8E [B000TZ1TTM, B0019VUHH0] ... ... ... 18993 AZYJE40XW6MFG [B00HVAKJZS, B00IDZT294] 18994 AZZ56WF4X19G2 [B004A7X218] 18995 AZZNK89PXD006 [B0002DHV16, B005BP8MQ8, B009RTX4SU] 18996 AZZV9PDNMCOZW [B007EQL390, B00ISBWVT6] 18997 AZZYW4YOE1B6E [B0002AQPA2, B0002AQPA2, B0002ARQV4] [18998 rows x 2 columns] ###Markdown Preparing Topic Vectors [Train/Load] ###Code # # generating tokenized reviews # processed_reviews = train["processedReviewText"].apply(lambda x: x.split()) # # instantiate lda model # lda_model = lda.LDA(reviews=processed_reviews, n_epochs=EPOCHS) # %%time # # training the LDA model # lda_model.train() # # save model # pickle.dump(lda_model, open(LDA_PATH, "wb")) ###Output _____no_output_____ ###Markdown Generating User/Item Topic Vectors ###Code user_idx_map, user_vecs, item_idx_map, item_vecs = utilities.generate_user_item_vectors(train, lda) # converting factors into numpy obj user_factors = user_vecs.to_numpy() item_factors = item_vecs.to_numpy() # check user factors user_factors[0,:] # check item factors item_factors[0,:] ###Output _____no_output_____ ###Markdown Generate N-Recommendations = {5, 10, 15, 20} Instantiate Pre-Initialised Matrix Factorization (Topic Modelling) ###Code # instantiating ti_mf ti_mf = algorithms.PreInitialisedMF(user_map=user_idx_map, item_map=item_idx_map, user_factor=user_factors, item_factor=item_factors, num_factors=50, **PARAMS) ###Output _____no_output_____ ###Markdown Training Topic Initialized-Matrix Factorization (TI-MF) ###Code %%time # fitting to training data ti_mf.fit(train, verbose=True) %%time # generate candidate items for user to predict rating testset = ti_mf.trainset.build_anti_testset() %%time # predict ratings for all pairs (u, i) that are NOT in the training set candidate_items = ti_mf.test(testset, verbose=False) ###Output CPU times: user 12min 8s, sys: 4min 35s, total: 16min 44s Wall time: 19min 9s ###Markdown Loop through N = {5, 10, 15, 20}For each top-N setting, we will generate candidates items up to *N*-items and run metrics evaluation of `Recall@N` and `Novelty@N` on all users. 
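The `evaluate_model` helpers used below come from this repository, and their exact implementations are not shown in this notebook. As a rough sketch of what the two metrics capture (the function names and the novelty definition below are illustrative assumptions, not the repository's API): Recall@N is the fraction of a user's held-out purchases that appear in their top-N list, while Novelty@N rewards recommending items that few users have interacted with.
###Code
import numpy as np

def recall_at_n_sketch(recommended, actual):
    # recommended: list of top-N item ids; actual: held-out purchases for one user
    if len(actual) == 0:
        return None
    hits = len(set(recommended) & set(actual))
    return hits / len(actual)

def novelty_at_n_sketch(recommended, item_popularity, num_users):
    # item_popularity: dict mapping item id -> number of users who interacted with it
    # novelty of an item is taken here as 1 minus its popularity share (one common choice)
    return np.mean([1 - item_popularity.get(i, 0) / num_users for i in recommended])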
###Code # generate item popularity item_popularity = evaluate_model.generate_item_popularity(train) %%time n_recommendations = {} for n in [5, 10, 15, 20]: # retrieve the top-n items based on similarities top_ns = ti_mf.get_top_n(candidate_items, n) # evaluate how well the recommended items predicted the future purchases n_recommended_items = (evaluate_model. evaluate_recommendations(model_name="TI-MF", top_ns=top_ns, user_rating_history=test_user_history, item_popularity=item_popularity, n=n, mf_based=True)) # saving the n-value and recommended items n_recommendations[n] = (top_ns, n_recommended_items) ###Output The TI-MF has an average recall@5: 0.00351, average novelty@5: 0.91345 The TI-MF has an average recall@10: 0.00498, average novelty@10: 0.92980 The TI-MF has an average recall@15: 0.00679, average novelty@15: 0.93399 The TI-MF has an average recall@20: 0.00888, average novelty@20: 0.93343 CPU times: user 6min 23s, sys: 3min 28s, total: 9min 52s Wall time: 13min 6s ###Markdown Evaluate N-RecommendationsFor each top-N recommendation list, we pick a randomly sampled user to look at their *N*-number of recommendations based on their purchase history. N=5 ###Code top_ns_05 = n_recommendations[5][0] utilities.retrieve_recommendations(train, top_ns_05, mf_based=True) ###Output For user: A3T87QAUUPTMZK: Purchase History: asin title 43366 B0010OSIHW Zoo Med Eco Earth Compressed Coconut Fiber Sub... 45100 B00167VVP4 Zoo Med Eco Earth Loose Coconut Fiber Substra... 46998 B0019IJXD2 Zoo Med Reptile Fogger Terrarium Humidifier Recommending: asin title 0 B000MLHDS4 Wellness Pure Rewards Natural Grain Free Dog T... 1 B000255P9E Seachem Neutral Regulator 2 B001HN5Z4K Bit-O-Luv Bistro Beef Recipe Dog Treats, 4.0-O... 3 B000ILEIUE Blue Dog Bakery | Dog Treats | All-Natural | P... 4 B0012KB4D4 Purina Friskies Gravy Sensations Wet Cat Food ... ###Markdown N=10 ###Code top_ns_10 = n_recommendations[10][0] utilities.retrieve_recommendations(train, top_ns_10, mf_based=True) ###Output For user: A1XEZIHQIUAOR1: Purchase History: asin title 1487 B000084E6V Nylabone Dental Dinosaur Chew 1727 B000084E6V Nylabone Dental Dinosaur Chew 28789 B0006VMN4O Pioneer Pet SmartCat Peek-A-Prize Toy Box with... 37658 B000JZ1WSU SmartCat 3836 Tick Tock Teaser 42603 B000XZDV44 Hill'S Science Diet Kitten Savory Salmon Entre... 43867 B0012KCUOG Whisker Lickin'S Soft &amp; Delicious Chicken ... Recommending: asin title 0 B0002AQL5G API REPLACEMENT TEST TUBES WITH CAPS For Any A... 1 B000HHSLEI Pet Stages Mini Jingle Cage 2 B000255MZG API STRESS COAT Aquarium Water Conditioner 3 B0002DI1W4 CO2 Natural Plant System Bubble Counter with S... 4 B000YIYSH4 Acurel Premium Activated Filter Carbon Granule... 5 B0002563MM Clear &amp; Flexible Air Line Tubing 6 B0002565SY Marineland Rite-Size Penguin Power Filter Cart... 7 B003JFRQQ4 Scaredy Cut Tiny Trim by Small Pet Grooming Sa... 8 B001CZXZEU Fluval External Power Filter Pre-Filter Media 9 B000F4AVPA Chuckit! Ultra Ball ###Markdown N=15 ###Code top_ns_15 = n_recommendations[15][0] utilities.retrieve_recommendations(train, top_ns_15, mf_based=True) ###Output For user: A89LQAXW1IY6S: Purchase History: asin title 12819 B0002ARP2O Marshall Ferret Deluxe Leisure Lounge, Pattern... 12825 B0002ARP2O Marshall Ferret Deluxe Leisure Lounge, Pattern... 38837 B000MD3NLS MidWest Homes for Pets Snap'y Fit Stainless St... 42683 B000Y8UNAU Pro Select Fleece Cat Perch Covers - Comfortab... 
46465 B0018CJZ32 SmartCat Corner Litter Box Recommending: asin title 0 B00290K0C2 LitterLocker Refill Cartridge 10 pk 1 B001OQXEHK Fresh Step Crystals, Premium Cat Litter, Scent... 2 B001U8FOES Curvations Litter Scoop Size 3 B004U8Z2YW Arm &amp; Hammer Double Duty Clumping Litter, ... 4 B008W8IC4I Cat's Pride Fresh and Light Multi-Cat Premium ... 5 B0014CHDYO Breeze Tidy Cat Litter Pads 16.9&quot;x11.4&qu... 6 B001HSMYSU LitterMaid Ultimate Accessories Kit for Elite... 7 B000NSGKYY Litter Locker Refill Cartridge 5 pk 8 B0015ZN54C Pureness Giant Litter Scoop 9 B005KSHGW2 Purina Pro Plan Dog Toothbrush Stick Rubber Ma... 10 B000EUC9J6 Northeastern Products Cedarific Natural Cedar ... 11 B007Z8LBZI Litter Genie Ultimate Cat Litter Disposal Syst... 12 B000PKSW5A Precious Cat Dr. Elsey's Kitten Attract Scoopa... 13 B0002DHY4K Tetra Whisper Power Filter for Aquariums, 3 Fi... 14 B007Z8LBJE LitterLocker 6-Pack Genie Refill Cartridge ###Markdown N=20 ###Code top_ns_20 = n_recommendations[20][0] utilities.retrieve_recommendations(train, top_ns_20, mf_based=True) ###Output For user: A3C2ECIXEQ0YFQ: Purchase History: asin title 30751 B0009YS4P0 Nutri-Vet Hip &amp; Joint Extra Strength Chewa... 37067 B000IBRI2Y Dog Dazer II Ultrasonic Dog Deterrent 40175 B000OV4VAU Nutri-Vet Alaska Salmon Oil 53623 B0029NQTI8 Pedigree Choice Cuts Variety Pack Lamb/Vegetab... Recommending: asin title 0 B000F4AVPA Chuckit! Ultra Ball 1 B001LNUKE6 Purebites Cheddar Cheese Dog Treats 2 B000255MZG API STRESS COAT Aquarium Water Conditioner 3 B003JFRQQ4 Scaredy Cut Tiny Trim by Small Pet Grooming Sa... 4 B0002DJVQY JW Pet Company Activitoys Triple Mirror Bird Toy 5 B0002AROVQ Marshall Ferret Litter Pan 6 B00006IX59 Chuckit! Dog Ball Launcher 7 B0006JKCN0 KONG Frog Dog Toy, Extra Small, Green 8 B0002563S6 Magic Coat Cat Tearless Shampoo, 12-Ounce 9 B0017J8NDY Mammoth Flossy Chews Cottonblend Color 5-Knot ... 10 B00025YUR2 Marineland Magnum Dual Purpose Canister Filter 11 B001B4TV2W Chuckit! Max Glow Ball, 12 B001GS71KW Precision Pet Petmate Soft Side Play Yard Heav... 13 B0002XUJFG Nature's Miracle Quick Results Training Pads 14 B000062WUT Multipet Plush Dog Toy 15 B00025646W Kordon Methylene Blue-General Disease Preventi... 16 B0002DHZ9Y Python No Spill Clean and Fill Aquarium Gravel... 17 B0017JFNNC Redbarn Naturals Bully Springs 18 B000QSON4K Greenies Pill Pockets Soft Dog Treats, Beef, C... 19 B000K9JRH8 GoCat DaBird Feather Refill, Assorted Colors, ... ###Markdown Cross-Analysis for Cold-Start Users (<= 2 Purchased Items)For each top-N setting, we will generate candidates items up to *N*-items and run metrics evaluation of `Recall@N` and `Novelty@N` on cold-start users (e.g., users who purchased two or less items based on items per user in the training set). ###Code cold_start_users = utilities.generate_cold_start_users(train) for n in tuple(zip([5, 10, 15, 20], [top_ns_05, top_ns_10, top_ns_15, top_ns_20])): cold_start_top_ns = dict(filter(lambda x: x[0] in cold_start_users, n[1].items())) # evaluate how well the recommended items predicted the future purchases # on cold start users n_recommended_items = (evaluate_model. 
evaluate_recommendations(model_name="TI-MF", top_ns=cold_start_top_ns, user_rating_history=test_user_history, item_popularity=item_popularity, n=n[0], mf_based=True)) ###Output The TI-MF has an average recall@5: 0.00331, average novelty@5: 0.91336 The TI-MF has an average recall@10: 0.00452, average novelty@10: 0.92967 The TI-MF has an average recall@15: 0.00674, average novelty@15: 0.93392 The TI-MF has an average recall@20: 0.00939, average novelty@20: 0.93340 ###Markdown Generating Recommended Items DataFrame ###Code max_recommendations = (utilities .generate_recommendations_df(train=train, n_recommendations=n_recommendations, algo_name="TI-MF", mf_based=True, max_recommended=20)) max_recommendations ###Output _____no_output_____ ###Markdown Store in `SQLite` DB ###Code # engine = create_engine("sqlite:///recommender.db", echo=True) # max_recommendations.to_sql(f"{CATEGORY}", con=engine, if_exists="append") ###Output _____no_output_____
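###Markdown
If the commented-out cell above is run, the stored recommendations can later be loaded back from the database for inspection. A minimal sketch, assuming the same engine and table name (left commented out, like the cell above, since writing to the database is optional here):
###Code
# from sqlalchemy import create_engine
# engine = create_engine("sqlite:///recommender.db")
# stored = pd.read_sql(f"SELECT * FROM {CATEGORY}", con=engine)
# stored.head()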
_notebooks/2020-06-28-quantization.ipynb
###Markdown 5 Myths about quantized neural networks> A post about misconceptions and myths around quantization of neural networks- toc: false - badges: false- comments: false- categories: [jupyter, quantization]- image: images/die.jpg- author: Mathias Lechner Background: What are quantized neural networks? When we run numerical algorithms on our computer, we need to make sacrifices in terms of precision for the sake of runtime. For instance, the square root of 2 is an irrational number and has an infinite amount of decimal digits. Thus we need to decide how many digits we really need for our application.Each extra digit of precision increases the memory and time requirements to store and compute a variable.For example, the IEEE-754 standard specifies four types of floating-point formats: ###Code #hide_input import numpy as np print("np.sqrt(2):") print("float128:",str(np.sqrt(np.float128(2)))) print("float64: ",str(np.sqrt(np.float64(2)))) print("float32: ",str(np.sqrt(np.float32(2)))) print("float16: ",str(np.sqrt(np.float16(2)))) ###Output np.sqrt(2): float128: 1.4142135623730950488 float64: 1.4142135623730951 float32: 1.4142135 float16: 1.414 ###Markdown For machine learning applications, the ```float32``` format has been the default choice, as it provides a decent performance while avoiding extreme numerical errors. However, in the past decade researcher have made the following two observations:- During the training phase, certain types of layers can be run and trained with lower precision (e.g., ```float16```)- After the training phase (=inference phase), neural networks can run with much lower precision levels without sacrificing much accuracyConsequently, Nvidia's latest [A100 GPU](https://www.nvidia.com/content/dam/en-zz/Solutions/Data-Center/nvidia-ampere-architecture-whitepaper.pdf) supports the following six numerical format: ###Code #hide_input import pandas as pd import altair as alt df = pd.DataFrame({'Data type': ["float64"], 'TOPS': [9.7], 'Significand precision': ['52-bits'], 'Exponent': ['11-bits'], 'Comment': ['Double precision IEE-754 floating-point'], }) df = df.append({'TOPS': 19.5, 'Data type': "float32", 'Comment': 'Single precision IEE-754 floating-point', 'Significand precision': '23-bits', 'Exponent': '8-bits', },ignore_index=True) df = df.append({'TOPS': 156, 'Data type': "TensorFloat32", 'Comment': '32-bit floating-point format with reduced significand precision', 'Significand precision': '10-bits', 'Exponent': '8-bits', },ignore_index=True) df = df.append({'TOPS': 312, 'Data type': "float16", 'Comment': 'Half precision IEE-754 floating-point', 'Significand precision': '10-bits', 'Exponent': '5-bits', },ignore_index=True) df = df.append({'TOPS': 312, 'Data type': "bfloat16", 'Comment': '16-bit brain-float format with larger range but reduced significand precision', 'Significand precision': '7-bits', 'Exponent': '8-bits', },ignore_index=True) df = df.append({'TOPS': 624, 'Data type': "int8", 'Comment': '8-bit integer format for fixed-point arithmetic', 'Significand precision': '7-bits', 'Exponent': '0-bits', },ignore_index=True) df[['Data type','Significand precision','Exponent']] ###Output _____no_output_____ ###Markdown One item that stands out in this list is the last row: While all other formats are based on a floating-point representation, ```int8``` is an integer type.This raises the question: > **How can we run a neural network with integer operations?**The answer is quantization. 
> Quantization translates a network that operates over floating-point variables into a network that uses fixed-point arithmetic

[Fixed-point arithmetic](https://en.wikipedia.org/wiki/Fixed-point_arithmetic) is a numerical format that can be implemented relatively efficiently using integer operations.
For instance, we can use the first four bits of an ```int8``` value to represent the digits before the comma, and the last four bits to represent the fractional digits that come after the comma:
```
Decimal: 0.5       + 1.25      = 1.75
Binary:  0000.1000 + 0001.0100 = 0001.1100
```
A fixed-point addition can be implemented by a simple integer addition, and a fixed-point multiplication by an integer multiplication followed by a bit-wise shift operation (a short sketch of this appears below).
Obviously, the precision achieved with an 8-bit fixed-point format is not enough for training a neural network. However, most types of layers can be quantized for inference without suffering a significant loss in accuracy.
The quantization step itself rounds the ```float32``` weight values to their nearest corresponding fixed-point values.
The clear advantages of running a network using ```int8``` are that:
1. It requires less memory, which improves cache and memory bandwidth efficiency.
2. It can run using more efficient integer operations.

In particular, a [2017 Google paper](https://arxiv.org/pdf/1704.04760.pdf) writes:

> Eight-bit integer multiplies can be 6X less energy and 6X less area than IEEE 754 16-bit floating-point multiplies and the
> advantage for integer addition is 13X in energy and 38X in area [Dal16].
>
> ***- ''In-Datacenter Performance Analysis of a Tensor Processing Unit'' - Jouppi et al.***

Despite this relatively simple concept, there are several misconceptions and myths regarding quantized neural networks:

Myth 1: Quantization is only necessary for ultra-low-power embedded systems

Far from it. Datacenter applications currently benefit the most from quantization. For instance, the first generation of [Google's Tensor Processing Units (TPUs)](https://arxiv.org/pdf/1704.04760.pdf) only supported quantized networks. Computation units for floating-point arithmetic were only added in the [second generation](https://www.tomshardware.com/news/tpu-v2-google-machine-learning,35370.html).
Likewise, Nvidia's [V100](https://www.microway.com/knowledge-center-articles/in-depth-comparison-of-nvidia-tesla-volta-gpu-accelerators/) and latest [A100](https://www.anandtech.com/show/15801/nvidia-announces-ampere-architecture-and-a100-products) can perform four times as many ```int8``` tensor operations as ```float32``` operations per second (or twice as many ```int8``` as ```float16``` tensor operations per second).
This means that you can **quadruple the throughput of your datacenter application** with quantization in a best-case scenario.
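Before moving on to the next myth, here is the promised sketch of the shift-based fixed-point arithmetic from the background section above. It is only an illustration of the idea (a Q4.4 layout with four fractional bits), not code from any particular quantization library:
###Code
FRAC_BITS = 4
SCALE = 1 << FRAC_BITS  # 16: four fractional bits

def to_fixed(x):
    # round a float to the nearest Q4.4 fixed-point integer
    return int(round(x * SCALE))

def fixed_add(a, b):
    # fixed-point addition is just integer addition
    return a + b

def fixed_mul(a, b):
    # integer multiply, then shift right to drop the extra fractional bits
    return (a * b) >> FRAC_BITS

a, b = to_fixed(0.5), to_fixed(1.25)
print(fixed_add(a, b) / SCALE)  # 1.75
print(fixed_mul(a, b) / SCALE)  # 0.625
###Markdown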
Myth 2: Quantization makes networks smaller but not faster

As already hinted in the myth above, modern AI accelerators such as GPUs and TPUs can run integer operations faster than floating-point operations.
Let's look at the compute performance of Nvidia's latest [A100 GPU](https://www.nvidia.com/content/dam/en-zz/Solutions/Data-Center/nvidia-ampere-architecture-whitepaper.pdf):
###Code
#hide_input
points = alt.Chart(df).mark_bar().encode(
    alt.X('Data type',sort=list(df['Data type']),axis=alt.Axis(labels=False,title='Data format')),
    y=alt.Y('TOPS',axis=alt.Axis(title="Tera op/s")),
    color=alt.Color('Data type',legend=None,scale=alt.Scale(scheme='dark2')),
    tooltip=['Data type','TOPS','Comment', 'Significand precision','Exponent']
).properties(
    width=600,
    height=300,
    title="Nvidia A100 compute performance"
)
#points
text = points.mark_text(
    align='center',
    fontSize=16,
    baseline='middle',
    dy=-10
).encode(
    text='Data type'
)
points + text
###Output
_____no_output_____
###Markdown
Essentially, quantization does not only make networks smaller, it also makes them **run faster**!

Myth 3: Any layer in a neural network can be quantized

Some types of layers do not tolerate quantization very well. For example, in [a discussion of an ICLR paper by Max Welling's group](https://openreview.net/forum?id=HkxjYoCqKX&noteId=rygmk1EDT7) we see that quantizing the first or the last layer of a network results in a considerable drop in accuracy. This gap does not entirely close even if we train the network using [quantization-aware training](https://arxiv.org/pdf/1712.05877.pdf) techniques.
One trick often used to avoid this drop in accuracy is not to quantize the first and the last layer. As these two layers only take up a small fraction of the computations inside a network, running the first and the last layer with ```float32``` does not hurt throughput much, but significantly benefits the accuracy of the network. However, on some end-devices, this approach is not an option. For instance, [Google's Edge TPU](https://cloud.google.com/edge-tpu) only supports ```int8```. Therefore, in such cases, every layer of the network must be quantized to 8-bit integers.

Myth 4: It's easy to compare different quantization approaches

Comparing two different quantization methods is not a trivial job. Connecting to the discussion above, let's imagine we have a network and quantize it with two different methods to obtain network A and network B.
While network A achieves a 90% accuracy by quantizing all layers, network B achieves a 92% accuracy but leaves the first layer running with floating-point precision.
Which method is better?
The answer to this question depends on the context; which target device will the network run on?
If it's a device without a floating-point unit, such as the Edge TPU or a microcontroller, then method A is clearly better. Conversely, if we plan to run the network on a V100 or A100 GPU, then method B might be the better approach.
Another technique that causes a lot of misconceptions, found in [the discussion of the ICLR paper by Max Welling's group](https://openreview.net/forum?id=HkxjYoCqKX&noteId=rygmk1EDT7), is **non-uniform quantization schemes**:
Fixed-point formats partition the representable value range using a uniform grid, e.g., there are the same number of intermediate values between 1.5 and 2.5 as between 2.5 and 3.5.
Looking at the typical weight distribution of neural networks, we notice that they follow a Gaussian-like bell curve distribution with smaller values occurring more frequently than large weight values. ###Code #hide import numpy as np from tensorflow import keras from tensorflow.keras import layers (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() num_classes = 10 input_shape = (28, 28, 1) # Scale images to the [0, 1] range x_train = x_train.astype("float32") / 255 x_train = np.expand_dims(x_train, -1) y_train = keras.utils.to_categorical(y_train, num_classes) model = keras.Sequential( [ keras.Input(shape=input_shape), layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), layers.MaxPooling2D(pool_size=(2, 2)), layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), layers.MaxPooling2D(pool_size=(2, 2)), layers.Flatten(), layers.Dropout(0.5), layers.Dense(num_classes, activation="softmax"), ] ) model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) model.fit(x_train, y_train, batch_size=32, epochs=1, validation_split=0.1) #hide_input weights = model.get_weights() weights = [weights[0].flatten(),weights[2].flatten(),weights[4].flatten()] weights = np.concatenate(weights,axis=0) import seaborn as sns import matplotlib.pyplot as plt plt.figure(figsize=(8,4)) sns.distplot(weights,hist=True,norm_hist=True) plt.title("Weight distribution of a 3-layer CNN trained on MNIST") plt.xlabel("Weight value") plt.ylabel("Density") plt.show() ###Output _____no_output_____
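###Markdown
To connect this back to the uniform grid mentioned above: a plain 8-bit quantizer spaces its representable values evenly between the minimum and maximum weight, regardless of how the bell-shaped distribution concentrates around zero. Below is a minimal sketch of such an affine int8 quantizer applied to the weights plotted above; it illustrates the general idea, not the exact scheme used by any specific framework.
###Code
def quantize_uniform_int8(w):
    # map the float range [w.min(), w.max()] onto the 256 evenly spaced int8 levels
    scale = (w.max() - w.min()) / 255.0
    zero_point = np.round(-w.min() / scale) - 128
    q = np.clip(np.round(w / scale + zero_point), -128, 127).astype(np.int8)
    return q, scale, zero_point

def dequantize(q, scale, zero_point):
    # recover an approximation of the original float32 weights
    return (q.astype(np.float32) - zero_point) * scale

q, scale, zp = quantize_uniform_int8(weights)
max_error = np.abs(dequantize(q, scale, zp) - weights).max()
print(max_error)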
assunto-7_geopandas/assunto-7_geopandas.ipynb
###Markdown
https://geopandas.org/

Geopandas is an open-source project that makes it easier to work with spatial data in Python.
Its functionality builds on other libraries:
- PANDAS: combines and extends the functionality of the pandas library for geometric objects
- SHAPELY: provides the geometric operations
- FIONA: provides access to the files
- DESCARTES AND MATPLOTLIB: are used for plotting the maps.

Installation

Installing with the following commands takes care of the library and its dependencies.

conda install geopandas for environments created and managed with conda
pip install geopandas for environments created and managed with pip

For visualizing the plots, the matplotlib and descartes libraries must also be installed:
conda install matplotlib descartes
pip install matplotlib descartes

Basic functionality

In the same folder as this notebook there is another folder called Data. We will open that file to demonstrate the basic functionality.
###Code
import geopandas as gpd  ## Geopandas will be used with the gpd prefix

caminho = "Data/DAMSELFISH_distributions.shp"  ## String with the path to the file
data = gpd.read_file(caminho)  ## gpd function that reads the file
type(data)
###Output
_____no_output_____
###Markdown
A GeoDataFrame is a tabular data structure with the addition of a column called geometry.
We can inspect this table by calling the variable data.
###Code
data  ## Variable holding the GeoDataFrame
###Output
_____no_output_____
###Markdown
To look at only the top of the table, we can call the GeoDataFrame's head method. By default, the argument is 5, but it can be changed.
###Code
data.head()
data.head(2)
###Output
_____no_output_____
###Markdown
The geometry can be visualized by calling the GeoDataFrame's plot method.
###Code
data.plot()
###Output
_____no_output_____
###Markdown
The GeoSeries data structure

Series can be seen as the columns of this table.
GeoSeries are a specific kind of series: a collection of geometries, providing attributes and methods such as areas, minimum and maximum coordinates, and geometry validation.
The full list can be found [here](https://geopandas.org/reference.html).

Geopandas shares the pandas syntax for selecting columns in this GeoDataFrame. The syntax is a bit odd!
Its abstraction is the following: dataframe['column']
###Code
geometria = data['geometry']
type(geometria)
geometria.head()  ## The head method also works here

# We can chain the method,
# so there is no need to store the result in a variable
data['BINOMIAL'].head()
###Output
_____no_output_____
###Markdown
A slice of rows can be taken in the following way: dataframe[first_row:last_row]
###Code
data[0:5]
###Output
_____no_output_____
###Markdown
The .iterrows() method lets us step through this dataframe row by row in a for loop.
###Code
selection = data[0:5]  # reuse the row slice from the previous cell
for index, row in selection.iterrows():
    poly_area = row['geometry'].area
    print("Polygon area at index {0} is: {1:.3f}".format(index, poly_area))

data['area'] = data.area
data.head(2)
###Output
_____no_output_____
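###Markdown
Combining column selection with a boolean mask lets us work with a single group of geometries at a time. As a small illustration (the variable names here are only for this example, and we simply take whatever value appears first in the BINOMIAL column):
###Code
especie = data['BINOMIAL'].iloc[0]           # first value found in the column
selecao = data[data['BINOMIAL'] == especie]  # boolean mask over the rows
selecao.plot()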
notebooks/4.dpp_diversity_phrases.ipynb
###Markdown Dimensionality reduction ###Code ## Plot embeddings in low dim space %matplotlib inline from sklearn.manifold import TSNE import matplotlib.pyplot as plt all_cvs_numpy.shape[0] n_comps = all_cvs_numpy.shape[0] ## Convert vectors to TSNE tsne=TSNE(n_components=n_comps, perplexity=3, method ='exact', verbose=1) sentences_tsne = tsne.fit_transform(all_cvs_numpy).astype('float64') plt.figure(num=None, figsize=(16, 8), dpi=120, facecolor='w', edgecolor='k') plt.grid() plt.scatter(sentences_tsne[:,0], sentences_tsne[:,1]) for label, x, y in zip(CV,sentences_tsne[:,0], sentences_tsne[:,1]): font = {'size' : 12, 'weight' : 'normal'} plt.rc('font', **font) plt.annotate(label.split('.')[0], xy=(x, y), xytext=(0, 0), textcoords='offset points') import numpy as np from scipy.linalg import qr from dppy.finite_dpps import FiniteDPP from numpy import linalg as LA seed = 0 rng = np.random.RandomState(seed) eig_vecs, _ = qr(all_cvs_numpy) eigenvals_sentences = LA.eigvals(eig_vecs).astype('float64') DPP = FiniteDPP(kernel_type='correlation', **{'K': (eig_vecs * eigenvals_sentences).dot(eig_vecs.T)}) DPP.plot_kernel() rng = np.random.RandomState(seed) DPP.flush_samples() n_sets = 5 for _ in range(n_sets): DPP.sample_exact(mode='GS', random_state=rng,) for x, samp in enumerate(DPP.list_of_samples): print("SET {} ---".format(x)) print("--- ELEMENTS {}".format(samp)) for i,j in enumerate(samp): print(" - {}".format(CV[j])) ###Output SET 0 --- --- ELEMENTS [11 5 6] - Dog Walker. Pets, 5 years. pet shop, free time, dog, walk, breed, pet, Sizes. - Staff Accountant. Finance, 15 years. IRS, Tax, SEC Reporting, Revenue Recognition, Consolidation, market and money, financial audit, fiscal and sales. - Senior Accountant. Finance, 18 years. Tax, IRS, SEC Reporting, Consolidation, market, Investments, sales Credit. SET 1 --- --- ELEMENTS [ 6 7 10] - Senior Accountant. Finance, 18 years. Tax, IRS, SEC Reporting, Consolidation, market, Investments, sales Credit. - Controller Accountant. Finance, 22 years. Management, SEC Reporting, Balance, sales, market, Consolidation, Revenue, Investments, audit. - Doctor Cardiologist. Health, 22 years. Specialist cardiovascular system, patient health, treatment, disease, medical, clinical SET 2 --- --- ELEMENTS [10 4 5] - Doctor Cardiologist. Health, 22 years. Specialist cardiovascular system, patient health, treatment, disease, medical, clinical - Software Associate. Technology, 4 years. programming,Javascript, React, Angular, AWS, Python. - Staff Accountant. Finance, 15 years. IRS, Tax, SEC Reporting, Revenue Recognition, Consolidation, market and money, financial audit, fiscal and sales. SET 3 --- --- ELEMENTS [3 9 2] - Quality Assurance. Technology, 15 years. Test, Automation, Java, TDD, Python, Regression, Requirements. - Certified Nurse. Health, 16 years. nurse, Lead, Assess patient health, treatment care plans, hospital, disease, medical records, clinical support - Software Architect. Technology, 22 years. Oracle, programming, Java, NoSQL, Javascript,programming, Python, Go. SET 4 --- --- ELEMENTS [3 6 2] - Quality Assurance. Technology, 15 years. Test, Automation, Java, TDD, Python, Regression, Requirements. - Senior Accountant. Finance, 18 years. Tax, IRS, SEC Reporting, Consolidation, market, Investments, sales Credit. - Software Architect. Technology, 22 years. Oracle, programming, Java, NoSQL, Javascript,programming, Python, Go.
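###Markdown
An alternative, and common, way to build the DPP kernel is directly from a similarity matrix of the embeddings, so that very similar CVs are unlikely to be sampled together. A sketch of that variant, reusing the same all_cvs_numpy matrix (this is an alternative construction for comparison, not the one used above):
###Code
# Likelihood kernel L from cosine similarities of the CV embeddings
X = all_cvs_numpy / np.linalg.norm(all_cvs_numpy, axis=1, keepdims=True)
L = X @ X.T  # Gram matrix: positive semi-definite by construction

DPP_L = FiniteDPP(kernel_type='likelihood', **{'L': L})
DPP_L.sample_exact(random_state=rng)
print([CV[j] for j in DPP_L.list_of_samples[-1]])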
Improving Deep Neural Networks Hyperparameter Tuning, Regularization and Optimization/Tensorflow_introduction.ipynb
###Markdown Introduction to TensorFlowWelcome to this week's programming assignment! Up until now, you've always used Numpy to build neural networks, but this week you'll explore a deep learning framework that allows you to build neural networks more easily. Machine learning frameworks like TensorFlow, PaddlePaddle, Torch, Caffe, Keras, and many others can speed up your machine learning development significantly. TensorFlow 2.3 has made significant improvements over its predecessor, some of which you'll encounter and implement here!By the end of this assignment, you'll be able to do the following in TensorFlow 2.3:* Use `tf.Variable` to modify the state of a variable* Explain the difference between a variable and a constant* Train a Neural Network on a TensorFlow datasetProgramming frameworks like TensorFlow not only cut down on time spent coding, but can also perform optimizations that speed up the code itself. Table of Contents- [1- Packages](1) - [1.1 - Checking TensorFlow Version](1-1)- [2 - Basic Optimization with GradientTape](2) - [2.1 - Linear Function](2-1) - [Exercise 1 - linear_function](ex-1) - [2.2 - Computing the Sigmoid](2-2) - [Exercise 2 - sigmoid](ex-2) - [2.3 - Using One Hot Encodings](2-3) - [Exercise 3 - one_hot_matrix](ex-3) - [2.4 - Initialize the Parameters](2-4) - [Exercise 4 - initialize_parameters](ex-4)- [3 - Building Your First Neural Network in TensorFlow](3) - [3.1 - Implement Forward Propagation](3-1) - [Exercise 5 - forward_propagation](ex-5) - [3.2 Compute the Cost](3-2) - [Exercise 6 - compute_cost](ex-6) - [3.3 - Train the Model](3-3)- [4 - Bibliography](4) 1 - Packages ###Code import h5py import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.python.framework.ops import EagerTensor from tensorflow.python.ops.resource_variable_ops import ResourceVariable import time ###Output _____no_output_____ ###Markdown 1.1 - Checking TensorFlow Version You will be using v2.3 for this assignment, for maximum speed and efficiency. ###Code tf.__version__ ###Output _____no_output_____ ###Markdown 2 - Basic Optimization with GradientTapeThe beauty of TensorFlow 2 is in its simplicity. Basically, all you need to do is implement forward propagation through a computational graph. TensorFlow will compute the derivatives for you, by moving backwards through the graph recorded with `GradientTape`. All that's left for you to do then is specify the cost function and optimizer you want to use! When writing a TensorFlow program, the main object to get used and transformed is the `tf.Tensor`. These tensors are the TensorFlow equivalent of Numpy arrays, i.e. multidimensional arrays of a given data type that also contain information about the computational graph.Below, you'll use `tf.Variable` to store the state of your variables. Variables can only be created once as its initial value defines the variable shape and type. Additionally, the `dtype` arg in `tf.Variable` can be set to allow data to be converted to that type. But if none is specified, either the datatype will be kept if the initial value is a Tensor, or `convert_to_tensor` will decide. It's generally best for you to specify directly, so nothing breaks! Here you'll call the TensorFlow dataset created on a HDF5 file, which you can use in place of a Numpy array to store your datasets. You can think of this as a TensorFlow data generator! You will use the Hand sign data set, that is composed of images with shape 64x64x3. 
###Code train_dataset = h5py.File('datasets/train_signs.h5', "r") test_dataset = h5py.File('datasets/test_signs.h5', "r") x_train = tf.data.Dataset.from_tensor_slices(train_dataset['train_set_x']) y_train = tf.data.Dataset.from_tensor_slices(train_dataset['train_set_y']) x_test = tf.data.Dataset.from_tensor_slices(test_dataset['test_set_x']) y_test = tf.data.Dataset.from_tensor_slices(test_dataset['test_set_y']) type(x_train) ###Output _____no_output_____ ###Markdown Since TensorFlow Datasets are generators, you can't access directly the contents unless you iterate over them in a for loop, or by explicitly creating a Python iterator using `iter` and consuming itselements using `next`. Also, you can inspect the `shape` and `dtype` of each element using the `element_spec` attribute. ###Code print(x_train.element_spec) print(next(iter(x_train))) ###Output tf.Tensor( [[[227 220 214] [227 221 215] [227 222 215] ... [232 230 224] [231 229 222] [230 229 221]] [[227 221 214] [227 221 215] [228 221 215] ... [232 230 224] [231 229 222] [231 229 221]] [[227 221 214] [227 221 214] [227 221 215] ... [232 230 224] [231 229 223] [230 229 221]] ... [[119 81 51] [124 85 55] [127 87 58] ... [210 211 211] [211 212 210] [210 211 210]] [[119 79 51] [124 84 55] [126 85 56] ... [210 211 210] [210 211 210] [209 210 209]] [[119 81 51] [123 83 55] [122 82 54] ... [209 210 210] [209 210 209] [208 209 209]]], shape=(64, 64, 3), dtype=uint8) ###Markdown The dataset that you'll be using during this assignment is a subset of the sign language digits. It contains six different classes representing the digits from 0 to 5. ###Code unique_labels = set() for element in y_train: unique_labels.add(element.numpy()) print(unique_labels) ###Output {0, 1, 2, 3, 4, 5} ###Markdown You can see some of the images in the dataset by running the following cell. ###Code images_iter = iter(x_train) labels_iter = iter(y_train) plt.figure(figsize=(10, 10)) for i in range(25): ax = plt.subplot(5, 5, i + 1) plt.imshow(next(images_iter).numpy().astype("uint8")) plt.title(next(labels_iter).numpy().astype("uint8")) plt.axis("off") ###Output _____no_output_____ ###Markdown There's one more additional difference between TensorFlow datasets and Numpy arrays: If you need to transform one, you would invoke the `map` method to apply the function passed as an argument to each of the elements. ###Code def normalize(image): """ Transform an image into a tensor of shape (64 * 64 * 3, ) and normalize its components. Arguments image - Tensor. Returns: result -- Transformed tensor """ image = tf.cast(image, tf.float32) / 255.0 image = tf.reshape(image, [-1,]) return image new_train = x_train.map(normalize) new_test = x_test.map(normalize) new_train.element_spec print(next(iter(new_train))) ###Output tf.Tensor([0.8901961 0.8627451 0.8392157 ... 0.8156863 0.81960785 0.81960785], shape=(12288,), dtype=float32) ###Markdown 2.1 - Linear FunctionLet's begin this programming exercise by computing the following equation: $Y = WX + b$, where $W$ and $X$ are random matrices and b is a random vector. Exercise 1 - linear_functionCompute $WX + b$ where $W, X$, and $b$ are drawn from a random normal distribution. W is of shape (4, 3), X is (3,1) and b is (4,1). 
As an example, this is how to define a constant X with the shape (3,1):```pythonX = tf.constant(np.random.randn(3,1), name = "X")```Note that the difference between `tf.constant` and `tf.Variable` is that you can modify the state of a `tf.Variable` but cannot change the state of a `tf.constant`.You might find the following functions helpful: - tf.matmul(..., ...) to do a matrix multiplication- tf.add(..., ...) to do an addition- np.random.randn(...) to initialize randomly ###Code # GRADED FUNCTION: linear_function def linear_function(): """ Implements a linear function: Initializes X to be a random tensor of shape (3,1) Initializes W to be a random tensor of shape (4,3) Initializes b to be a random tensor of shape (4,1) Returns: result -- Y = WX + b """ np.random.seed(1) """ Note, to ensure that the "random" numbers generated match the expected results, please create the variables in the order given in the starting code below. (Do not re-arrange the order). """ # (approx. 4 lines) # X = ... # W = ... # b = ... # Y = ... # YOUR CODE STARTS HERE X = np.random.randn(3, 1) W = np.random.randn(4, 3) b = np.random.randn(4, 1) Y = tf.add(tf.matmul(W, X), b) # YOUR CODE ENDS HERE return Y result = linear_function() print(result) assert type(result) == EagerTensor, "Use the TensorFlow API" assert np.allclose(result, [[-2.15657382], [ 2.95891446], [-1.08926781], [-0.84538042]]), "Error" print("\033[92mAll test passed") ###Output tf.Tensor( [[-2.15657382] [ 2.95891446] [-1.08926781] [-0.84538042]], shape=(4, 1), dtype=float64) All test passed ###Markdown **Expected Output**: ```result = [[-2.15657382] [ 2.95891446] [-1.08926781] [-0.84538042]]``` 2.2 - Computing the Sigmoid Amazing! You just implemented a linear function. TensorFlow offers a variety of commonly used neural network functions like `tf.sigmoid` and `tf.softmax`.For this exercise, compute the sigmoid of z. In this exercise, you will: Cast your tensor to type `float32` using `tf.cast`, then compute the sigmoid using `tf.keras.activations.sigmoid`. Exercise 2 - sigmoidImplement the sigmoid function below. You should use the following: - `tf.cast("...", tf.float32)`- `tf.keras.activations.sigmoid("...")` ###Code # GRADED FUNCTION: sigmoid def sigmoid(z): """ Computes the sigmoid of z Arguments: z -- input value, scalar or vector Returns: a -- (tf.float32) the sigmoid of z """ # tf.keras.activations.sigmoid requires float16, float32, float64, complex64, or complex128. # (approx. 2 lines) # z = ... # a = ... 
# YOUR CODE STARTS HERE z = tf.cast(z, tf.float32) a = tf.keras.activations.sigmoid(z) # YOUR CODE ENDS HERE return a result = sigmoid(-1) print ("type: " + str(type(result))) print ("dtype: " + str(result.dtype)) print ("sigmoid(-1) = " + str(result)) print ("sigmoid(0) = " + str(sigmoid(0.0))) print ("sigmoid(12) = " + str(sigmoid(12))) def sigmoid_test(target): result = target(0) assert(type(result) == EagerTensor) assert (result.dtype == tf.float32) assert sigmoid(0) == 0.5, "Error" assert sigmoid(-1) == 0.26894143, "Error" assert sigmoid(12) == 0.9999939, "Error" print("\033[92mAll test passed") sigmoid_test(sigmoid) ###Output type: <class 'tensorflow.python.framework.ops.EagerTensor'> dtype: <dtype: 'float32'> sigmoid(-1) = tf.Tensor(0.26894143, shape=(), dtype=float32) sigmoid(0) = tf.Tensor(0.5, shape=(), dtype=float32) sigmoid(12) = tf.Tensor(0.9999939, shape=(), dtype=float32) All test passed ###Markdown **Expected Output**: typeclass 'tensorflow.python.framework.ops.EagerTensor' dtype"dtype: 'float32' Sigmoid(-1)0.2689414 Sigmoid(0)0.5 Sigmoid(12)0.999994 2.3 - Using One Hot EncodingsMany times in deep learning you will have a $Y$ vector with numbers ranging from $0$ to $C-1$, where $C$ is the number of classes. If $C$ is for example 4, then you might have the following y vector which you will need to convert like this:This is called "one hot" encoding, because in the converted representation, exactly one element of each column is "hot" (meaning set to 1). To do this conversion in numpy, you might have to write a few lines of code. In TensorFlow, you can use one line of code: - [tf.one_hot(labels, depth, axis=0)](https://www.tensorflow.org/api_docs/python/tf/one_hot)`axis=0` indicates the new axis is created at dimension 0 Exercise 3 - one_hot_matrixImplement the function below to take one label and the total number of classes $C$, and return the one hot encoding in a column wise matrix. Use `tf.one_hot()` to do this, and `tf.reshape()` to reshape your one hot tensor! - `tf.reshape(tensor, shape)` ###Code # GRADED FUNCTION: one_hot_matrix def one_hot_matrix(label, depth=6): """ Computes the one hot encoding for a single label Arguments: label -- (int) Categorical labels depth -- (int) Number of different classes that label can take Returns: one_hot -- tf.Tensor A single-column matrix with the one hot encoding. """ # (approx. 1 line) # one_hot = ... # YOUR CODE STARTS HERE one_hot = tf.reshape(tf.one_hot(label,depth,axis = 0), [depth, ]) # YOUR CODE ENDS HERE return one_hot def one_hot_matrix_test(target): label = tf.constant(1) depth = 4 result = target(label, depth) print("Test 1:",result) assert result.shape[0] == depth, "Use the parameter depth" assert np.allclose(result, [0., 1. ,0., 0.] ), "Wrong output. Use tf.one_hot" label_2 = [2] result = target(label_2, depth) print("Test 2:", result) assert result.shape[0] == depth, "Use the parameter depth" assert np.allclose(result, [0., 0. ,1., 0.] ), "Wrong output. Use tf.reshape as instructed" print("\033[92mAll test passed") one_hot_matrix_test(one_hot_matrix) ###Output Test 1: tf.Tensor([0. 1. 0. 0.], shape=(4,), dtype=float32) Test 2: tf.Tensor([0. 0. 1. 0.], shape=(4,), dtype=float32) All test passed ###Markdown **Expected output**```Test 1: tf.Tensor([0. 1. 0. 0.], shape=(4,), dtype=float32)Test 2: tf.Tensor([0. 0. 1. 0.], shape=(4,), dtype=float32)``` ###Code new_y_test = y_test.map(one_hot_matrix) new_y_train = y_train.map(one_hot_matrix) print(next(iter(new_y_test))) ###Output tf.Tensor([1. 0. 0. 0. 0. 
0.], shape=(6,), dtype=float32) ###Markdown 2.4 - Initialize the Parameters Now you'll initialize a vector of numbers with the Glorot initializer. The function you'll be calling is `tf.keras.initializers.GlorotNormal`, which draws samples from a truncated normal distribution centered on 0, with `stddev = sqrt(2 / (fan_in + fan_out))`, where `fan_in` is the number of input units and `fan_out` is the number of output units, both in the weight tensor. To initialize with zeros or ones you could use `tf.zeros()` or `tf.ones()` instead. Exercise 4 - initialize_parametersImplement the function below to take in a shape and to return an array of numbers using the GlorotNormal initializer. - `tf.keras.initializers.GlorotNormal(seed=1)` - `tf.Variable(initializer(shape=())` ###Code # GRADED FUNCTION: initialize_parameters def initialize_parameters(): """ Initializes parameters to build a neural network with TensorFlow. The shapes are: W1 : [25, 12288] b1 : [25, 1] W2 : [12, 25] b2 : [12, 1] W3 : [6, 12] b3 : [6, 1] Returns: parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3 """ initializer = tf.keras.initializers.GlorotNormal(seed=1) #(approx. 6 lines of code) # W1 = ... # b1 = ... # W2 = ... # b2 = ... # W3 = ... # b3 = ... # YOUR CODE STARTS HERE init = tf.keras.initializers.GlorotNormal(seed = 1) W1 = tf.Variable(init(shape = (25, 12288))) b1 = tf.Variable(init(shape = (25, 1))) W2 = tf.Variable(init(shape = (12, 25))) b2 = tf.Variable(init(shape = (12, 1))) W3 = tf.Variable(init(shape = (6, 12))) b3 = tf.Variable(init(shape = (6, 1))) # YOUR CODE ENDS HERE parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2, "W3": W3, "b3": b3} return parameters def initialize_parameters_test(target): parameters = target() values = {"W1": (25, 12288), "b1": (25, 1), "W2": (12, 25), "b2": (12, 1), "W3": (6, 12), "b3": (6, 1)} for key in parameters: print(f"{key} shape: {tuple(parameters[key].shape)}") assert type(parameters[key]) == ResourceVariable, "All parameter must be created using tf.Variable" assert tuple(parameters[key].shape) == values[key], f"{key}: wrong shape" assert np.abs(np.mean(parameters[key].numpy())) < 0.5, f"{key}: Use the GlorotNormal initializer" assert np.std(parameters[key].numpy()) > 0 and np.std(parameters[key].numpy()) < 1, f"{key}: Use the GlorotNormal initializer" print("\033[92mAll test passed") initialize_parameters_test(initialize_parameters) ###Output W1 shape: (25, 12288) b1 shape: (25, 1) W2 shape: (12, 25) b2 shape: (12, 1) W3 shape: (6, 12) b3 shape: (6, 1) All test passed ###Markdown **Expected output**```W1 shape: (25, 12288)b1 shape: (25, 1)W2 shape: (12, 25)b2 shape: (12, 1)W3 shape: (6, 12)b3 shape: (6, 1)``` ###Code parameters = initialize_parameters() ###Output _____no_output_____ ###Markdown 3 - Building Your First Neural Network in TensorFlowIn this part of the assignment you will build a neural network using TensorFlow. Remember that there are two parts to implementing a TensorFlow model:- Implement forward propagation- Retrieve the gradients and train the modelLet's get into it! 3.1 - Implement Forward Propagation One of TensorFlow's great strengths lies in the fact that you only need to implement the forward propagation function and it will keep track of the operations you did to calculate the back propagation automatically. Exercise 5 - forward_propagationImplement the `forward_propagation` function.**Note** Use only the TF API. 
- tf.math.add- tf.linalg.matmul- tf.keras.activations.relu ###Code # GRADED FUNCTION: forward_propagation @tf.function def forward_propagation(X, parameters): """ Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR Arguments: X -- input dataset placeholder, of shape (input size, number of examples) parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3" the shapes are given in initialize_parameters Returns: Z3 -- the output of the last LINEAR unit """ # Retrieve the parameters from the dictionary "parameters" W1 = parameters['W1'] b1 = parameters['b1'] W2 = parameters['W2'] b2 = parameters['b2'] W3 = parameters['W3'] b3 = parameters['b3'] #(approx. 5 lines) # Numpy Equivalents: # Z1 = ... # Z1 = np.dot(W1, X) + b1 # A1 = ... # A1 = relu(Z1) # Z2 = ... # Z2 = np.dot(W2, A1) + b2 # A2 = ... # A2 = relu(Z2) # Z3 = ... # Z3 = np.dot(W3, A2) + b3 # YOUR CODE STARTS HERE Z1 = tf.matmul(W1, X) + b1 A1 = tf.keras.activations.relu(Z1) Z2 = tf.matmul(W2, A1) + b2 A2 = tf.keras.activations.relu(Z2) Z3 = tf.matmul(W3, A2) + b3 # YOUR CODE ENDS HERE return Z3 def forward_propagation_test(target, examples): minibatches = examples.batch(2) for minibatch in minibatches: forward_pass = target(tf.transpose(minibatch), parameters) print(forward_pass) assert type(forward_pass) == EagerTensor, "Your output is not a tensor" assert forward_pass.shape == (6, 2), "Last layer must use W3 and b3" assert np.allclose(forward_pass, [[-0.13430887, 0.14086473], [ 0.21588647, -0.02582335], [ 0.7059658, 0.6484556 ], [-1.1260961, -0.9329492 ], [-0.20181894, -0.3382722 ], [ 0.9558965, 0.94167566]]), "Output does not match" break print("\033[92mAll test passed") forward_propagation_test(forward_propagation, new_train) ###Output tf.Tensor( [[-0.13430887 0.14086473] [ 0.21588647 -0.02582335] [ 0.7059658 0.6484556 ] [-1.1260961 -0.9329492 ] [-0.20181894 -0.3382722 ] [ 0.9558965 0.94167566]], shape=(6, 2), dtype=float32) All test passed ###Markdown **Expected output**```tf.Tensor([[-0.13430887 0.14086473] [ 0.21588647 -0.02582335] [ 0.7059658 0.6484556 ] [-1.1260961 -0.9329492 ] [-0.20181894 -0.3382722 ] [ 0.9558965 0.94167566]], shape=(6, 2), dtype=float32)``` 3.2 Compute the CostAll you have to do now is define the loss function that you're going to use. For this case, since we have a classification problem with 6 labels, a categorical cross entropy will work! Exercise 6 - compute_costImplement the cost function below. - It's important to note that the "`y_pred`" and "`y_true`" inputs of [tf.keras.losses.categorical_crossentropy](https://www.tensorflow.org/api_docs/python/tf/keras/losses/categorical_crossentropy) are expected to be of shape (number of examples, num_classes). - `tf.reduce_mean` basically does the summation over the examples. ###Code # GRADED FUNCTION: compute_cost def compute_cost(logits, labels): """ Computes the cost Arguments: logits -- output of forward propagation (output of the last LINEAR unit), of shape (6, num_examples) labels -- "true" labels vector, same shape as Z3 Returns: cost - Tensor of the cost function """ #(1 line of code) # cost = ... 
# YOUR CODE STARTS HERE logits = tf.transpose(logits) labels = tf.reshape(tf.transpose(labels),[logits.shape[0],6]) cost = tf.reduce_mean(tf.keras.losses.categorical_crossentropy(labels, logits,from_logits=True)) # YOUR CODE ENDS HERE return cost def compute_cost_test(target, Y): pred = tf.constant([[ 2.4048107, 5.0334096 ], [-0.7921977, -4.1523376 ], [ 0.9447198, -0.46802214], [ 1.158121, 3.9810789 ], [ 4.768706, 2.3220146 ], [ 6.1481323, 3.909829 ]]) minibatches = Y.batch(2) for minibatch in minibatches: result = target(pred, tf.transpose(minibatch)) break print(result) assert(type(result) == EagerTensor), "Use the TensorFlow API" assert (np.abs(result - (0.25361037 + 0.5566767) / 2.0) < 1e-7), "Test does not match. Did you get the mean of your cost functions?" print("\033[92mAll test passed") compute_cost_test(compute_cost, new_y_train ) ###Output tf.Tensor(0.4051435, shape=(), dtype=float32) All test passed ###Markdown **Expected output**```tf.Tensor(0.4051435, shape=(), dtype=float32)``` 3.3 - Train the ModelLet's talk optimizers. You'll specify the type of optimizer in one line, in this case `tf.keras.optimizers.Adam` (though you can use others such as SGD), and then call it within the training loop. Notice the `tape.gradient` function: this allows you to retrieve the operations recorded for automatic differentiation inside the `GradientTape` block. Then, calling the optimizer method `apply_gradients`, will apply the optimizer's update rules to each trainable parameter. At the end of this assignment, you'll find some documentation that explains this more in detail, but for now, a simple explanation will do. ;) Here you should take note of an important extra step that's been added to the batch training process: - `tf.Data.dataset = dataset.prefetch(8)` What this does is prevent a memory bottleneck that can occur when reading from disk. `prefetch()` sets aside some data and keeps it ready for when it's needed. It does this by creating a source dataset from your input data, applying a transformation to preprocess the data, then iterating over the dataset the specified number of elements at a time. This works because the iteration is streaming, so the data doesn't need to fit into the memory. ###Code def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001, num_epochs = 1500, minibatch_size = 32, print_cost = True): """ Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. Arguments: X_train -- training set, of shape (input size = 12288, number of training examples = 1080) Y_train -- test set, of shape (output size = 6, number of training examples = 1080) X_test -- training set, of shape (input size = 12288, number of training examples = 120) Y_test -- test set, of shape (output size = 6, number of test examples = 120) learning_rate -- learning rate of the optimization num_epochs -- number of epochs of the optimization loop minibatch_size -- size of a minibatch print_cost -- True to print the cost every 10 epochs Returns: parameters -- parameters learnt by the model. They can then be used to predict. 
""" costs = [] # To keep track of the cost train_acc = [] test_acc = [] # Initialize your parameters #(1 line) parameters = initialize_parameters() W1 = parameters['W1'] b1 = parameters['b1'] W2 = parameters['W2'] b2 = parameters['b2'] W3 = parameters['W3'] b3 = parameters['b3'] optimizer = tf.keras.optimizers.Adam(learning_rate) # The CategoricalAccuracy will track the accuracy for this multiclass problem test_accuracy = tf.keras.metrics.CategoricalAccuracy() train_accuracy = tf.keras.metrics.CategoricalAccuracy() dataset = tf.data.Dataset.zip((X_train, Y_train)) test_dataset = tf.data.Dataset.zip((X_test, Y_test)) # We can get the number of elements of a dataset using the cardinality method m = dataset.cardinality().numpy() minibatches = dataset.batch(minibatch_size).prefetch(8) test_minibatches = test_dataset.batch(minibatch_size).prefetch(8) #X_train = X_train.batch(minibatch_size, drop_remainder=True).prefetch(8)# <<< extra step #Y_train = Y_train.batch(minibatch_size, drop_remainder=True).prefetch(8) # loads memory faster # Do the training loop for epoch in range(num_epochs): epoch_cost = 0. #We need to reset object to start measuring from 0 the accuracy each epoch train_accuracy.reset_states() for (minibatch_X, minibatch_Y) in minibatches: with tf.GradientTape() as tape: # 1. predict Z3 = forward_propagation(tf.transpose(minibatch_X), parameters) # 2. loss minibatch_cost = compute_cost(Z3, tf.transpose(minibatch_Y)) # We acumulate the accuracy of all the batches train_accuracy.update_state(tf.transpose(Z3), minibatch_Y) trainable_variables = [W1, b1, W2, b2, W3, b3] grads = tape.gradient(minibatch_cost, trainable_variables) optimizer.apply_gradients(zip(grads, trainable_variables)) epoch_cost += minibatch_cost # We divide the epoch cost over the number of samples epoch_cost /= m # Print the cost every 10 epochs if print_cost == True and epoch % 10 == 0: print ("Cost after epoch %i: %f" % (epoch, epoch_cost)) print("Train accuracy:", train_accuracy.result()) # We evaluate the test set every 10 epochs to avoid computational overhead for (minibatch_X, minibatch_Y) in test_minibatches: Z3 = forward_propagation(tf.transpose(minibatch_X), parameters) test_accuracy.update_state(tf.transpose(Z3), minibatch_Y) print("Test_accuracy:", test_accuracy.result()) costs.append(epoch_cost) train_acc.append(train_accuracy.result()) test_acc.append(test_accuracy.result()) test_accuracy.reset_states() return parameters, costs, train_acc, test_acc parameters, costs, train_acc, test_acc = model(new_train, new_y_train, new_test, new_y_test, num_epochs=100) ###Output Cost after epoch 0: 0.057612 Train accuracy: tf.Tensor(0.17314816, shape=(), dtype=float32) Test_accuracy: tf.Tensor(0.24166666, shape=(), dtype=float32) Cost after epoch 10: 0.049332 Train accuracy: tf.Tensor(0.35833332, shape=(), dtype=float32) Test_accuracy: tf.Tensor(0.3, shape=(), dtype=float32) Cost after epoch 20: 0.043173 Train accuracy: tf.Tensor(0.49907407, shape=(), dtype=float32) Test_accuracy: tf.Tensor(0.43333334, shape=(), dtype=float32) Cost after epoch 30: 0.037322 Train accuracy: tf.Tensor(0.60462964, shape=(), dtype=float32) Test_accuracy: tf.Tensor(0.525, shape=(), dtype=float32) Cost after epoch 40: 0.033147 Train accuracy: tf.Tensor(0.6490741, shape=(), dtype=float32) Test_accuracy: tf.Tensor(0.5416667, shape=(), dtype=float32) Cost after epoch 50: 0.030203 Train accuracy: tf.Tensor(0.68333334, shape=(), dtype=float32) Test_accuracy: tf.Tensor(0.625, shape=(), dtype=float32) Cost after epoch 60: 0.028050 Train accuracy: 
tf.Tensor(0.6935185, shape=(), dtype=float32) Test_accuracy: tf.Tensor(0.625, shape=(), dtype=float32) Cost after epoch 70: 0.026298 Train accuracy: tf.Tensor(0.72407407, shape=(), dtype=float32) Test_accuracy: tf.Tensor(0.64166665, shape=(), dtype=float32) Cost after epoch 80: 0.024799 Train accuracy: tf.Tensor(0.7425926, shape=(), dtype=float32) Test_accuracy: tf.Tensor(0.68333334, shape=(), dtype=float32) Cost after epoch 90: 0.023551 Train accuracy: tf.Tensor(0.75277776, shape=(), dtype=float32) Test_accuracy: tf.Tensor(0.68333334, shape=(), dtype=float32) ###Markdown **Expected output**```Cost after epoch 0: 0.057612Train accuracy: tf.Tensor(0.17314816, shape=(), dtype=float32)Test_accuracy: tf.Tensor(0.24166666, shape=(), dtype=float32)Cost after epoch 10: 0.049332Train accuracy: tf.Tensor(0.35833332, shape=(), dtype=float32)Test_accuracy: tf.Tensor(0.3, shape=(), dtype=float32)...```Numbers you get can be different, just check that your loss is going down and your accuracy going up! ###Code # Plot the cost plt.plot(np.squeeze(costs)) plt.ylabel('cost') plt.xlabel('iterations (per fives)') plt.title("Learning rate =" + str(0.0001)) plt.show() # Plot the train accuracy plt.plot(np.squeeze(train_acc)) plt.ylabel('Train Accuracy') plt.xlabel('iterations (per fives)') plt.title("Learning rate =" + str(0.0001)) # Plot the test accuracy plt.plot(np.squeeze(test_acc)) plt.ylabel('Test Accuracy') plt.xlabel('iterations (per fives)') plt.title("Learning rate =" + str(0.0001)) plt.show() ###Output _____no_output_____
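The training loop above is exactly the pattern the markdown describes: record the forward pass under `tf.GradientTape`, pull derivatives out with `tape.gradient`, apply them with `optimizer.apply_gradients`, and keep the input pipeline fed with `Dataset.prefetch`. As a minimal, self-contained sketch of that same pattern on a toy one-parameter problem (every name below is illustrative and not taken from the assignment):

```python
import tensorflow as tf

w = tf.Variable(5.0)                                     # a single trainable parameter
optimizer = tf.keras.optimizers.Adam(learning_rate=0.1)

# toy dataset, batched and prefetched so the next batch is prepared
# while the current one is being consumed
dataset = tf.data.Dataset.range(8).map(lambda x: tf.cast(x, tf.float32))
dataset = dataset.batch(2).prefetch(8)

for batch in dataset:
    with tf.GradientTape() as tape:
        loss = tf.reduce_mean((w * batch - 3.0) ** 2)    # forward pass + cost
    grads = tape.gradient(loss, [w])                     # d(loss)/d(w)
    optimizer.apply_gradients(zip(grads, [w]))           # one update step
```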
Deep_learning.ipynb
###Markdown ###Code import numpy as np from scipy import stats import matplotlib.pyplot as plt import matplotlib as mpl import seaborn as sns sns.set() pi = np.pi def probA_1(x, y): # only accepts points betwen -5 and +5 in both axes assert all(np.abs(x)<5) and all(np.abs(y)<5) assert x.shape==y.shape pA = stats.norm(0, 1).pdf(x)*stats.norm(0, 1).pdf(y) pB = np.ones_like(pA)/200. return pA/(pA+pB) def probA_2(x, y): # only accepts points betwen -5 and +5 in both axes assert all(np.abs(x)<5) and all(np.abs(y)<5) assert x.shape==y.shape pA = (stats.norm(4, 0.5).pdf(x)*stats.norm(0, 1).pdf(y) + stats.norm(3, 0.5).pdf(x)*stats.norm(-3, .5).pdf(y) + stats.norm(-5, 0.5).pdf(x)*stats.norm(-3, 1.2).pdf(y) + stats.norm(1, 0.5).pdf(x)*stats.norm(1, 0.5).pdf(y)) pB = np.ones_like(pA)/200. return pA/(pA+pB) def probA_3(x, y): # only accepts points betwen -5 and +5 in both axes assert all(np.abs(x)<5) and all(np.abs(y)<5) assert x.shape==y.shape pA = (stats.norm(4, 0.5).pdf(x)*stats.arcsine(scale=5).pdf(y) + stats.hypsecant(1, 0.4).pdf(x)*stats.norm(-3, 0.2).pdf(y) + stats.norm(-5, 0.5).pdf(x)*stats.norm(-3, 0.2).pdf(y) + stats.norm(1, 0.5).pdf(x)*stats.hypsecant(1, 0.5).pdf(y)) pB = np.ones_like(pA)/200. return pA/(pA+pB) def plot_points(X, labels, title=None, ax=None, polar=False): if not ax: if polar: fig = plt.figure(figsize=(6,6)) ax = fig.add_subplot(111, projection='polar') else: fig, ax = plt.subplots(figsize=(6,6)) mask = labels=='A' A, = ax.plot(X[mask, 0], X[mask, 1], 'rx', markersize=7, label='A') B, = ax.plot(X[~mask, 0], X[~mask, 1], 'bo', markersize=3, label='B') fontdict={ 'fontsize': 15 } if polar: try: fig.set_size_inches(6*np.sqrt(2), 6*np.sqrt(2)) except: pass else: ax.set_xlabel('$X_0$', fontdict=fontdict) ax.set_ylabel('$X_1$', fontdict=fontdict) ax.set_xticks(np.arange(-5, 6)) ax.set_yticks(np.arange(-5, 6)) ax.set_xlim((-5, 5)) ax.set_ylim((-5, 5)) ax.legend(handles=(A, B), labels=('A', 'B'), loc='upper right', fancybox=True, framealpha=1, **fontdict) if title: ax.set_title(title, fontdict=fontdict) return ax ###Output _____no_output_____ ###Markdown ---**Representations** are often necessary in order for a linear classification model to be able to separate a set of classes.These representations are made by applying [nonlinear transformations](https://www.youtube.com/watch?v=kYB8IZa5AuE) to the data.Take, for instance, the following dataset:--- ###Code # Create a simple binary classification problem requiring nonlinear separation N = 400 X = (np.random.random((N, 2))-0.5)*10 Y = probA_1(X[:, 0], X[:, 1]) labels = np.asarray(['A' if m else 'B' for m in np.random.random(Y.shape)<Y]) plot_points(X, labels, title='True labels'); ###Output _____no_output_____ ###Markdown ---Evidently, classes A and B are not [linearly separable](https://en.wikipedia.org/wiki/Linear_separability), and as such a linear classifier such as a linear [support vector machine](https://www.svm-tutorial.com/2017/02/svms-overview-support-vector-machines/) would be unable to succeed in a classification.--- ###Code from sklearn import svm # Here, we uset he sklearn implementation of the SVM classifier with a linear kernel # build and train the model SVM_classifier = svm.SVC(kernel='linear') SVM_classifier.fit(X, labels) predictions = SVM_classifier.predict(X) plot_points(X, predictions, title='Predictions') # add R squared to evaluate prediction quality R2 = 1 - np.std(labels!=predictions)/np.std(labels=='A') plt.gcf().text(0.95, 0.5, '$R^2$: '+str(R2), fontdict={'fontsize':20}); ###Output _____no_output_____ 
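An aside on why the straight-line model cannot do better here: a fitted linear `SVC` scores each point with a single affine function and predicts from its sign, so its decision boundary is one hyperplane. The sketch below (toy, linearly separable data with made-up names) simply confirms that `decision_function` is literally $w \cdot x + b$:

```python
import numpy as np
from sklearn import svm

rng = np.random.default_rng(0)
X_demo = rng.normal(size=(50, 2))
y_demo = (X_demo[:, 0] + X_demo[:, 1] > 0).astype(int)   # toy, linearly separable labels

clf = svm.SVC(kernel='linear').fit(X_demo, y_demo)
w, b = clf.coef_[0], clf.intercept_[0]

# the score is the affine function w.x + b; the boundary is the line where it equals 0
assert np.allclose(X_demo @ w + b, clf.decision_function(X_demo))
```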
###Markdown ---To classify these points using a linear model, we must create a an **intermediate representation** of the data such that a [hyperplane](https://towardsdatascience.com/support-vector-machine-introduction-to-machine-learning-algorithms-934a444fca47) can separate classes A and B.In this case, a [polar projection](https://en.wikipedia.org/wiki/Polar_coordinate_system) appears to be an appropriate representation. Let's try it out:$R = \sqrt{X^2+Y^2}$$\theta = arctan(\frac{Y}{X})$--- ###Code # Transform our coordinates from cartesian to polar R = np.sqrt(X[:,0]**2 + X[:,1]**2) ratio = X[:,1]/X[:,0] THETA = np.arctan(ratio)+np.pi*(np.sign(X[:,0])<0) X_radial = np.stack((THETA, R), axis=1) plot_points(X_radial, labels, title='Polar projection of true labels', polar=True); ###Output _____no_output_____ ###Markdown ---This projection allows us to clearly see that A and B are at least somewhat separable in the **radial** direction.--- ###Code # Let's try the linear SVM classifier once more, on the polar projectionl # build and train the model SVM_classifier = svm.SVC(kernel='linear') SVM_classifier.fit(X_radial, labels) predictions = SVM_classifier.predict(X_radial) ax = plot_points(np.stack((THETA, R), axis=1), predictions, title='Polar projection predictions', polar=True) # add R squared to evaluate prediction quality R2 = 1 - np.std(labels!=predictions)/np.std(labels=='A') plt.gcf().text(0.95, 0.5, '$R^2$: '+str(round(R2,3)), fontdict={'fontsize':20}); ###Output _____no_output_____ ###Markdown ---Clearly, our predictions have vastly improved! Often it is difficult or even impossible to achieve perfect classification due to inherent [stochasticity](https://www.merriam-webster.com/dictionary/stochastic) of the data. This is one of those cases!The **polar representation** has allowed our model to separate the two classes along the radial axis:--- ###Code ax = plot_points(np.stack((THETA, R), axis=1), labels, title='Polar projection deicision boundary, true labels', polar=True) weights = SVM_classifier.coef_[0] intercept = SVM_classifier.intercept_[0] # plot decision boundary x0_mesh, x1_mesh = np.meshgrid(np.linspace(-pi/2, 3*pi/2, 200), np.linspace(0, 5*np.sqrt(2), 200)) pred_mesh = SVM_classifier.predict(np.stack((x0_mesh.flatten(), x1_mesh.flatten()), axis=1)).reshape(x0_mesh.shape) pred_mesh_bool = pred_mesh=='A' c1 = plt.contour(x0_mesh, x1_mesh, pred_mesh_bool, levels=[0.5], alpha=0.5, linestyles='--' ) # manually add decision boundary to legend plt.plot([], [], 'k--', alpha=0.5, label='Decision \nboundary') ax.legend(framealpha=1); ###Output _____no_output_____ ###Markdown ---We can see that the decision boundary is a line nearly perpendicular to the $R$ axis. 
This is easier to see by plotting our polar coordinates in a cartesian graph:--- ###Code fig = plt.figure(figsize=(14,7)) ax = fig.add_subplot(121) plot_points(np.stack((THETA, R), axis=1), labels, title='Polar coordinates, cartesian projection', ax=ax) ax.set_xlim(-pi/2, 3*pi/2) ax.set_ylim(0, 5*2**0.5) # plot decision boundary c1 = ax.contour(x0_mesh, x1_mesh, pred_mesh_bool, levels=[0.5], alpha=0.5, linestyles='--' ) ax.set_xlabel('Theta') ax.set_ylabel('R') # manually add decision boundary to legend ax.plot([], [], 'k--', alpha=0.5, label='Decision \nboundary') ax.legend(framealpha=1); ax2 = fig.add_subplot(122) plot_points(X, labels, title='Original data', ax=ax2); ###Output _____no_output_____ ###Markdown ---For this problem, a hand-made polar representation conveniently made our classes linearly separable, but suppose you have a more complex distribution...How easy would it be to design an alternative representation for the following dataset?--- ###Code N = 400 X_2 = (np.random.random((N, 2))-0.5)*10 Y_2 = probA_2(X_2[:, 0], X_2[:, 1]) labels_2 = np.asarray(['A' if m else 'B' for m in np.random.random(Y_2.shape)<Y_2]) plot_points(X_2, labels_2, title='True labels'); ###Output _____no_output_____ ###Markdown ---Not easy at all!For this we turn once again to machine learning. What if our model could also **learn the intermediate representations** necessary for classification? This idea lies in the heart of deep learning.While hand-designed representations are limited by the creativity and knowledge of the human who programs them, a machine is able to learn arbitrarily complex representations, as long as we give it the **capacity** to learn them.We call these machine-learned representations **hidden representations**. This is because, since only the machine needs to process them, these representations do not need to be human-interpretable, and typically look like nonsense even to an expert.We can take this idea one step further--if the machine will learn them anyways, why limit ourselves to only a single hidden representation?This is what gives "depth" to deep learning algorithms. We allow our model to learn many hidden representations, one after another, gradually transforming the data. 
If this is properly done, by the final transformation our output should be **linearly separable**.Let's try this out!--- ###Code from sklearn import neural_network # build neural net classifier layers = (10, 10, 10,) NN_classifier = neural_network.MLPClassifier(hidden_layer_sizes = layers, alpha = 0.1, learning_rate = 'invscaling', learning_rate_init = 0.01, max_iter = 2000) NN_classifier.fit(X_2, labels_2) predictions = NN_classifier.predict(X_2) plot_points(X_2, labels_2, title='True labels'); # plot decision boundary x0_mesh, x1_mesh = np.meshgrid(np.linspace(-5, 5, 200), np.linspace(-5, 5, 200)) pred_mesh = NN_classifier.predict(np.stack((x0_mesh.flatten(), x1_mesh.flatten()), axis=1)).reshape(x0_mesh.shape) pred_mesh_bool = pred_mesh=='A' c1 = plt.contour(x0_mesh, x1_mesh, pred_mesh_bool, levels=[0.5], alpha=0.5, linestyles='--' ) # add R squared to evaluate prediction quality R2 = 1 - np.std(labels_2!=predictions)/np.std(labels_2=='A') plt.gcf().text(0.95, 0.5, '$R^2$: '+str(round(R2,3)), fontdict={'fontsize':20}); # manually add decision boundary to legend plt.plot([], [], 'k--', alpha=0.5, label='Decision \nboundary') ax.legend(framealpha=1); ###Output _____no_output_____ ###Markdown ---Pretty easy right?Deep learning relies exclusively on a class of models called **[neural networks](https://www.youtube.com/watch?v=aircAruvnKk)** (I **strongly** recommend watching this series of videos).These biologically-inspired models are made up of a network of **[artifical neurons](https://en.wikipedia.org/wiki/Artificial_neuron)**.Each neuron applies a linear transformations to its inputs, followed by a simple nonlinear function (the **activation function**), such as the [softmax function](https://medium.com/data-science-bootcamp/understand-the-softmax-function-in-minutes-f3a59641e86d).While a single neuron is very simple and easy to model by hand, by stacking many layers of these neurons and chaining them together, very complex behavior can [emerge](https://theconversation.com/emergence-the-remarkable-simplicity-of-complexity-30973).In the image below, each circle represents a single neuron, and each column of neurons is a layer in the neural network. The lines are connections between neurons, from the output of one to the input of the otherImage source: https://www.kdnuggets.com/2019/11/designing-neural-networks.html--- ---The more layers a model has, the more complex the function its transformations can represent, and as such the more complex the problems it is able to solve.Three layers may have been enough for the above problem, but deep machine vision networks may have hundreds of much larger layers, with millions and millions of parameters.Deep neural networks have proven able to learn very complex problems, as long as they consist of mapping an input vector to an output vector, and can quickly be done by a human.Other tasks that cannot be described as a vector mapping, or would take a human time to think and reflect upon, often prove beyond the scope of deep learning as of now.Let's test this for ourselves:--- ###Code N = 400 X_3 = (np.random.random((N, 2))-0.5)*10 Y_3 = probA_3(X_3[:, 0], X_3[:, 1]) labels_3 = np.asarray(['A' if m else 'B' for m in np.random.random(Y_3.shape)<Y_3]) plot_points(X_3, labels_3, title='True labels'); neural_network.MLPClassifier? 
# build neural net classifier layers = (40, 40, 40) NN_classifier = neural_network.MLPClassifier(hidden_layer_sizes = layers, alpha = 0.01, activation = 'relu', learning_rate = 'invscaling', learning_rate_init = 0.01, max_iter = 2000) NN_classifier.fit(X_3, labels_3) predictions = NN_classifier.predict(X_3) plot_points(X_3, labels_3, title='True labels'); # plot decision boundary x0_mesh, x1_mesh = np.meshgrid(np.linspace(-5, 5, 200), np.linspace(-5, 5, 200)) pred_mesh = NN_classifier.predict(np.stack((x0_mesh.flatten(), x1_mesh.flatten()), axis=1)).reshape(x0_mesh.shape) pred_mesh_bool = pred_mesh=='A' c1 = plt.contour(x0_mesh, x1_mesh, pred_mesh_bool, levels=[0.5], alpha=0.5, linestyles='--' ) # add R squared to evaluate prediction quality R2 = 1 - np.std(labels_3!=predictions)/np.std(labels_3=='A') plt.gcf().text(0.95, 0.5, '$R^2$: '+str(round(R2,3)), fontdict={'fontsize':20}); # manually add decision boundary to legend plt.plot([], [], 'k--', alpha=0.5, label='Decision \nboundary') ax.legend(framealpha=1); ###Output _____no_output_____ ###Markdown ---Although neural networks were invented in the 1940s undet he name of cybernetics, its current popularity is relatively recent, traced back to Geoffrey Hinton's landmark work from 2006.This can be attributed to two factors:- Increasing dataset sizes - Driven by the digitization of society, larger datasets give us the resources necessary to train larger and more complex models.- Increasing computational power - [More computational power](http://www.mooreslaw.org/) has allowed us to make larger and larger models. Many studies have shown that model size is the biggest contributor to its capacity, which allows us to use them for more and more complex tasks.--- ---Nearly all machine learning involves an optimization process of some sort. This is the process of **learning** the data, where the model parameters change to better model the problem we would like to solve.This is an optimization problem of the sort:- Minimize **MODEL LOSS**- Subject to **DATA**Where the variables we optimize are the parameters of the model.Some models are more complex than others. Let's compare them.--- ###Code # The parameters of the SVM classifiers are its support vectors and coefficients n_SVM = 0 # n_SVM += np.multiply.reduce(SVM_classifier.support_vectors_.shape) n_SVM += np.multiply.reduce(SVM_classifier.coef_.shape) n_SVM += np.multiply.reduce(SVM_classifier.intercept_.shape) print('The SVM classifier has {} parameters.'.format(n_SVM)) n_NN = 0 for layer in NN_classifier.coefs_: n_NN += np.multiply.reduce(layer.shape) for layer in NN_classifier.intercepts_: n_NN += np.multiply.reduce(layer.shape) print('The NN classifier has {} parameters.'.format(n_NN)) ###Output The SVM classifier has 3 parameters. The NN classifier has 2081 parameters.
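The cell above counts parameters by summing the shapes of `coefs_` and `intercepts_`. As a general closed form (a standard formula, not tied to the particular numbers printed above), a fully connected network with layer widths $n_0, n_1, \dots, n_L$ from input to output has

$$N_{\text{params}} \;=\; \sum_{l=1}^{L} \left( n_{l-1}\, n_l + n_l \right),$$

where the first term in each summand counts one weight matrix and the second its bias vector.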
notebooks/idea_DBNSL-smaller.ipynb
###Markdown 1st variant Dynamic Bayesian Network Structure Learning with hybrid network = (classic) encoder + (quantum) circuit.This is a toy example with dummy generated training data of 7 variables, that means 14 vertices (7 for t and 7 for t+1). input=(1,14)n_qubits=2 * ceil(log2(2 * nnodes)) -> for nnodes = 7, n_qubits = 8 where first four digits correspond to vertice where the edge begins and four last digits correspond to vertice where edge ends i.e. 00100101 corresponds to: vertice_2 ---> vertice_5 It was tested with two sets of test data at the end of the notebook. The last test is with real data ###Code import pandas as pd from torch.utils.data import Dataset import torch import torchvision from torch import nn import numpy as np import pennylane as qml import random import networkx as nx from matplotlib import pyplot as plt path = "smaller_elu.csv" ds = pd.read_csv(path) ds ds['t0'] = ds.apply(lambda x: x.t1 if pd.isna(x.t0) else x.t0, axis = 1) ds['t1'] = ds.apply(lambda x: x.t0 if pd.isna(x.t1) else x.t1, axis = 1) ds[ds['t0'].isnull()] ds[ds['t1'].isnull()] nnodes = 7 ds = ds.sort_values(by=['T', 'NAME']) t01_list = [ds[['t0','t1']].iloc[f*nnodes:(f+1)*nnodes].values.T for f in range(len(ds)//nnodes)] dst = pd.DataFrame({'T':range(len(ds)//nnodes), 't01':t01_list}) dst nodes_names = {f:ds[['NAME']].iloc[0:nnodes].values[f][0] for f in range(nnodes)} nodes_genes = {f:ds[['GENE']].iloc[0:nnodes].values[f][0] for f in range(nnodes)} scale = np.frompyfunc(lambda x, min, max: (x-min)/(max - min), 3, 1) def get_edges(n=4): num_edges = random.randint(n, n+3) e1 = [(random.randint(0, n-1),random.randint(0, (n*2)-1)) for f in range(num_edges//2)] e2 = [(random.randint(0, (n*2)-1),random.randint(n, (n*2)-1)) for f in range(num_edges//2)] return e1 + e2 def get_t0(edges, weights, n=4): t0 = np.zeros(n) + 0.01 edges0 = [edge for i in range(n) for edge in edges if edge[0] == i and edge[1] < n] if len(edges0) > 0: t0[edges0[0][0]] = random.random() for edge in edges0: t0[edge[1]] += weights[edge[0]] + weights[edge[1]] * t0[edge[0]] return t0 def get_t1(edges, weights, t0, n=4): t1 = np.zeros(n) + 0.01 edges1 = [edge for edge in edges if edge[1] >= n] for edge in edges1: if edge[0] < n: t1[edge[1]-n] += weights[edge[0]] + weights[edge[1]-n] * t0[edge[0]] else: t1[edge[1]-n] += weights[edge[0]-n] + weights[edge[1]-n] * t1[edge[0]-n] return t1 # generate training dataset exper = 1000 n_qubits = 8 arr_list = [] edges_list = [] for f in range(exper): weights = [random.randint(1, 10)/10 for f in range(nnodes)] edges = get_edges(n = nnodes) t0 = get_t0(edges, weights, n = nnodes) t1 = get_t1(edges, weights, t0, n = nnodes) arr_list.append(scale(np.stack([t0,t1]),np.min(np.stack([t0,t1])), np.max(np.stack([t0,t1]))).astype(float)) edges_list.append(edges) arr = np.concatenate(arr_list, axis=1) dsa = pd.DataFrame({'t01':arr_list}) dsa #int("110100010",2) = 418 edges_bin_list = [[np.binary_repr(ed[0], width=n_qubits//2) + np.binary_repr(ed[1], width=n_qubits//2) for ed in edges] for edges in edges_list] ya_list = [[int(edge,2) for edge in edges] for edges in edges_bin_list] dsa['y'] = ya_list dsa dev = qml.device("default.qubit", wires=n_qubits) @qml.qnode(dev) def qnode(inputs, weights): qml.AngleEmbedding(inputs, wires=range(n_qubits)) qml.BasicEntanglerLayers(weights[0], wires=range(n_qubits), rotation=qml.RX) qml.BasicEntanglerLayers(weights[1], wires=range(n_qubits), rotation=qml.RY) qml.BasicEntanglerLayers(weights[2], wires=range(n_qubits), rotation=qml.RZ) return 
qml.probs(wires=range(n_qubits)) n_layers = 2 weight_shapes = {"weights": (3, n_layers, n_qubits)} qlayer = qml.qnn.TorchLayer(qnode, weight_shapes) input_size = nnodes * 2 hidden_size = input_size - 2 code_size = n_qubits encoder_hidden_layer = nn.Linear( in_features=input_size, out_features=hidden_size ) encoder_output_layer = nn.Linear( in_features=hidden_size, out_features=code_size ) layers = [encoder_hidden_layer, encoder_output_layer, qlayer] model = torch.nn.Sequential(*layers) #optimizer = torch.optim.SGD(model.parameters(), lr=0.2) #criterion = torch.nn.L1Loss() optimizer = torch.optim.Adam(model.parameters(), lr=1e-4) criterion = nn.MSELoss() def error(predictions, y): error = np.sum(abs(y.detach().numpy() - predictions.detach().numpy()))/len(y[0].detach().numpy()) return error def get_ranks(outputs, y, weighted = False): rp = np.flip(np.argsort(outputs.detach().numpy())) if weighted: a = [np.argwhere(rp == x)[0][1]*outputs.detach().numpy()[0][x]*len(np.nonzero(y.detach().numpy())[1]) for x in np.nonzero(y.detach().numpy())[1]] else: a = [np.argwhere(rp == x)[0][1] for x in np.nonzero(y.detach().numpy())[1]] return a def score(outputs, y, weighted = False): ly = len(np.nonzero(y.detach().numpy())[1]) lo = len(y[0].detach().numpy()) ranks = get_ranks(outputs, y, weighted) sr = sum(ranks) sy = sum(range(ly)) sw = sum(range(lo-ly,lo)) return 1 - (sr - sy)/(sw - sy) class CustomDataset(Dataset): def __init__(self, ds, n, q, transform=None): self.ds_full = ds self.n = n self.q = q self.x_csv = self.ds_full[["t01"]] self.y_csv = self.ds_full[["y"]] self.transform = transform def __len__(self): return len(self.x_csv) def __getitem__(self, idx): x = np.array(self.x_csv.iloc[idx].tolist()[0]) y = np.zeros(2**self.q) for i in self.y_csv.iloc[idx].tolist()[0]: #011000 24 y[i] = 1/len(self.y_csv.iloc[idx].tolist()[0]) if self.transform: x = self.transform(x) y = self.transform(y) return x, y batch_size = 1 transform = torchvision.transforms.Lambda(lambda y: torch.from_numpy(y).float()) train_dataset = CustomDataset(dsa, nnodes, n_qubits, transform) train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=batch_size, shuffle=True, pin_memory=True ) test_loader = torch.utils.data.DataLoader( train_dataset, batch_size=batch_size, shuffle=False ) %%time epochs = 1 for epoch in range(epochs): loss = 0 err = 0 metr = 0 wmetr = 0 for batch_features, y_batch in train_loader: batch_features = batch_features.view(-1, input_size) optimizer.zero_grad() outputs = model(batch_features) train_loss = criterion(outputs, y_batch) train_loss.backward() optimizer.step() loss += train_loss.item() err += error(outputs, y_batch) metr += score(outputs, y_batch, False) wmetr += score(outputs, y_batch, True) loss = loss / len(train_loader) err = err / len(train_loader) metr = metr / len(train_loader) wmetr = wmetr / len(train_loader) print("epoch : {}/{}, loss = {:.6f}, error = {:.6f}, score = {:.6f}, weighted_score = {:.6f}".format(epoch + 1, epochs, loss, err, metr, wmetr)) ###Output /home/common_user/.local/lib/python3.8/site-packages/torch/autograd/__init__.py:154: UserWarning: Casting complex values to real discards the imaginary part (Triggered internally at ../aten/src/ATen/native/Copy.cpp:244.) 
Variable._execution_engine.run_backward( ###Markdown testing with generated data ###Code # generate Testing dataset exper = 12 num_res = 12 arr_list = [] edges_list = [] edges = get_edges(n = nnodes) for f in range(exper): weights = [random.randint(1, 10)/10 for f in range(nnodes)] t0 = get_t0(edges, weights, n = nnodes) t1 = get_t1(edges, weights, t0, n = nnodes) arr_list.append(scale(np.stack([t0,t1]),np.min(np.stack([t0,t1])), np.max(np.stack([t0,t1]))).astype(float)) edges_list.append(edges) arr = np.concatenate(arr_list, axis=1) dstest = pd.DataFrame({'t01':arr_list}) dstest #int("110100010",2) = 418 edges_bin_list = [[np.binary_repr(ed[0], width=n_qubits//2) + np.binary_repr(ed[1], width=n_qubits//2) for ed in edges] for edges in edges_list] ya_list = [[int(edge,2) for edge in edges] for edges in edges_bin_list] dstest['y'] = ya_list dstest batch_size = 1 transform = torchvision.transforms.Lambda(lambda y: torch.from_numpy(y).float()) test_dataset = CustomDataset(dstest, nnodes, n_qubits, transform) test_loader = torch.utils.data.DataLoader( test_dataset, batch_size=batch_size, shuffle=False, pin_memory=True ) experiments = [] outputs_list = [] for batch_features, _ in test_loader: batch_features = batch_features.view(-1, input_size) batch_features outputs = model(batch_features) outputs_list.append(outputs) experiments.append(np.flip(np.argsort(outputs.detach().numpy()))) experiments ol = [o.detach().numpy() for o in outputs_list] results_list = np.mean(np.array(ol), axis=0) norm_results_list = scale(results_list, np.min(results_list), np.max(results_list)).astype(float) results = np.flip(np.argsort(results_list)) np.max(sum(outputs_list).detach().numpy()),np.min(sum(outputs_list).detach().numpy()) results_bin = [np.binary_repr(f, width=n_qubits) for f in results.tolist()[0]] results_weights = [norm_results_list[0][results[0][i]] for i in range(len(results[0]))] results.tolist()[0][:num_res], ya_list[0] results_bin[:num_res] #number of parameters model_parameters = filter(lambda p: p.requires_grad, model.parameters()) sum([np.prod(p.size()) for p in model_parameters]) def get_edges_array(n_qubits,y): arr = [np.binary_repr(f, width=n_qubits) for f in y] return [(int(f[:n_qubits//2],2), int(f[n_qubits//2:],2)) for f in arr] y_edges = get_edges_array(n_qubits,ya_list[0]) p_edges = get_edges_array(n_qubits,results.tolist()[0][:num_res]) p_weights = results_weights[:num_res] graph_y = None graph_p = None graph_y = nx.DiGraph() graph_p = nx.DiGraph() graph_y.add_nodes_from(range(nnodes*2)) graph_p.add_nodes_from(range(nnodes*2)) graph_y.add_edges_from(y_edges) graph_p.add_edges_from(p_edges) mapping = {0: "a0", 1: "b0", 2: "c0", 3: "d0", 4: "e0", 5: "f0", 6: "g0", 7: "a1", 8: "b1", 9: "c1", 10: "d1", 11: "e1", 12: "f1", 13: "g1"} graph_y = nx.relabel_nodes(graph_y, mapping, copy=False) graph_p = nx.relabel_nodes(graph_p, mapping, copy=False) #pos = nx.shell_layout(graph_y, nlist=[range(nnodes),range(nnodes,nnodes*2)], rotate=0.1, center=(1,5)) pos = nx.bipartite_layout(graph_y, nodes=['a0','b0','c0','d0', 'e0', 'f0', 'g0']) subax1 = plt.subplot(121) nx.draw(graph_y, pos, node_color='c', edge_color='k', width=5.0, edge_cmap=plt.cm.Blues, with_labels=True) subax2 = plt.subplot(122) nx.draw(graph_p, pos, node_color='c', edge_color=p_weights, width=5.0, edge_cmap=plt.cm.Blues, with_labels=True) plt.show() ###Output _____no_output_____ ###Markdown testing with real data ###Code batch_size = 1 transform = torchvision.transforms.Lambda(lambda y: torch.from_numpy(y).float()) test_dataset = 
CustomDataset(dst, nnodes, n_qubits, transform) test_loader = torch.utils.data.DataLoader( test_dataset, batch_size=batch_size, shuffle=False, pin_memory=True ) experiments = [] outputs_list = [] for batch_features, _ in test_loader: batch_features = batch_features.view(-1, input_size) batch_features outputs = model(batch_features) outputs_list.append(outputs) experiments.append(np.flip(np.argsort(outputs.detach().numpy()))) experiments ol = [o.detach().numpy() for o in outputs_list] results_list = np.mean(np.array(ol), axis=0) norm_results_list = scale(results_list, np.min(results_list), np.max(results_list)).astype(float) results = np.flip(np.argsort(results_list)) np.max(sum(outputs_list).detach().numpy()),np.min(sum(outputs_list).detach().numpy()) results_bin = [np.binary_repr(f, width=n_qubits) for f in results.tolist()[0]] results_weights = [norm_results_list[0][results[0][i]] for i in range(len(results[0]))] results.tolist()[0][:num_res] results_bin[:num_res] p_edges = get_edges_array(n_qubits,results.tolist()[0][:num_res]) p_weights = results_weights[:num_res] graph_p = None graph_p2 = None graph_p = nx.DiGraph() graph_p2 = nx.DiGraph() graph_p.add_nodes_from(range(nnodes*2)) graph_p2.add_nodes_from(range(nnodes*2)) graph_p.add_edges_from(p_edges) graph_p2.add_edges_from(p_edges) rnodes = [v + '_t0' for _, v in nodes_names.items()] rgenes = [v + '_t0' for _, v in genes_names.items()] nodes_names.update({k:v + '_t0' for k, v in nodes_names.items()}) nodes_names.update({k + len(nodes_names):v[:-1] + '1' for k, v in nodes_names.items()}) genes_names.update({k:v + '_t0' for k, v in genes_names.items()}) genes_names.update({k + len(genes_names):v[:-1] + '1' for k, v in genes_names.items()}) graph_p = nx.relabel_nodes(graph_p, nodes_names, copy=False) graph_p2 = nx.relabel_nodes(graph_p2, genes_names, copy=False) #pos = nx.shell_layout(graph_y, nlist=[range(nnodes),range(nnodes,nnodes*2)], rotate=0.1, center=(1,5)) pos = nx.bipartite_layout(graph_p, nodes=rnodes) pos2 = nx.bipartite_layout(graph_p2, nodes=rgenes) subax1 = plt.subplot(121) nx.draw(graph_p, pos, node_color='c', edge_color=p_weights, width=5.0, edge_cmap=plt.cm.Blues, with_labels=True) subax2 = plt.subplot(122) nx.draw(graph_p2, pos2, node_color='c', edge_color=p_weights, width=5.0, edge_cmap=plt.cm.Blues, with_labels=True) plt.show() ###Output _____no_output_____
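A quick round-trip check of the edge encoding this notebook relies on (the first `n_qubits//2` bits index the vertex where an edge starts, the last `n_qubits//2` bits the vertex where it ends, so `00100101` is vertex 2 → vertex 5). This is only an illustration of the convention and mirrors `get_edges_array` above:

```python
import numpy as np

n_qubits = 8
src, dst = 2, 5

code = np.binary_repr(src, width=n_qubits // 2) + np.binary_repr(dst, width=n_qubits // 2)
idx = int(code, 2)                # position of this edge in the 2**n_qubits probability vector
print(code, idx)                  # '00100101' 37

# decoding, the same split used in get_edges_array
back = (int(code[:n_qubits // 2], 2), int(code[n_qubits // 2:], 2))
assert back == (src, dst)
```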
Tutorial/Transforming Dictionaries to Pandas DataFrames Tutorial.ipynb
###Markdown Transforming Dictionaries to Pandas DataFrames Dictionaries of two forms can be transformed into pandas dataframes. A dictionary can have one layer of keys, each of which points to a list: dct = {"key1": list1, "key2": list2, "key3": list3}. This results in a dataframe where the keys are interpreted as column names and the list indices are interpreted as row numbers. ###Code dct = {"list1": [1,2,3,4,5], "list2":[1 ** 2, 2 ** 2, 3 ** 2, 4 ** 2, 5 ** 2], "list3":[1,4,6,3,5]} dct import pandas as pd df = pd.DataFrame(dct) df ###Output _____no_output_____ ###Markdown The other way to create a dataframe from a dictionary is to build a dictionary of dictionaries, where the outer keys become column names and the inner keys become row labels: dct = {"key1": {ind0: elem0, ind1: elem1, ind2: elem2, ind3: elem3, ind4: elem4}, "key2": {. . .}} ###Code #dct = {"key1": {ind0: elem0, # ind1: elem1, # ind2: elem2, # ind3: elem3, # ind4: elem4}, # "key2": {. . .}} # using a dict comprehension for list1 dct = {"list1":{i:i for i in range(5)}, "list2": {0:1**2, 1:2**2, 2:3**2, 3:4**2, 4:5**2}, "list3": {0:4, 1:5, 2:7, 3:5, 4:8} } dct df = pd.DataFrame(dct) df ###Output _____no_output_____
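A related option not used above is `pd.DataFrame.from_dict`, which lets you choose explicitly whether the dictionary keys become columns or row labels. The values below are made up for illustration:

```python
import pandas as pd

dct = {"row_a": [1, 2, 3], "row_b": [4, 5, 6]}

as_columns = pd.DataFrame.from_dict(dct)                                         # keys -> column names
as_rows = pd.DataFrame.from_dict(dct, orient="index", columns=["x", "y", "z"])   # keys -> row labels
print(as_rows)
```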
Notebooks/connor_notebooks/places_of_interest/walmart_cleaning.ipynb
###Markdown Notebook Objectives The objective of this notebook is to transform Walmart location data for the United States and Canada downloaded from [poi factory](http://www.poi-factory.com/poifiles) for later use in the RV Nav API. This CSV was partly cleaned in MS Excel due to its smaller size. MS Excel is still a great tool for data cleaning! ###Code import pandas as pd import numpy as np df = pd.read_csv('../Data/Walmart_United States & Canada.csv', header=None, names= ['Longitude', 'Latitude', 'Store Type', 'Store ID', 'Gas', 'Address', 'City', 'State', 'Zip Code', 'Parking', 'Phone Number']) df.head() df['Gas'] = df['Gas'].replace('Gas.', 'Gas') df['Gas'].value_counts() # Replace NaN values with 'No Gas' df['Gas'] = df['Gas'].replace(np.nan, 'No Gas') # Verify results df['Gas'].value_counts() df.head() # Clean correct parking values df['Parking'] = df['Parking'].replace('(NOP)', 'No Parking') df['Parking'] = df['Parking'].replace(np.nan, 'Parking Available') # Check Store Type Values df['Store Type'].value_counts() # Clean/Correct Store Types df['Store Type'] = df['Store Type'].replace('Walmart SC', 'Walmart Supercenter') df['Store Type'] = df['Store Type'].replace('Walmart SC', 'Walmart Supercenter') df['Store Type'] = df['Store Type'].replace('Walmart Superenter', 'Walmart Supercenter') df['Store Type'] = df['Store Type'].replace('Wm Nbrhd Mkt', 'Walmart Neighborhood Market') df['Store Type'] = df['Store Type'].replace('Wm Pharm/Clinic', 'Walmart Pharmacy/Clinic') df['Store Type'] = df['Store Type'].replace('WM Nbrhd Mkt', 'Walmart Neighborhood Market') df['Store Type'] = df['Store Type'].replace('wm Nbrhd Mkt', 'Walmart Neighborhood Market') # Verify corrections df['Store Type'].value_counts() # Check to ensure no null values or Longitude/Latitude columns df.info() # One last verification df.head() ###Output _____no_output_____ ###Markdown Our dataset is now ready to be used in our API! ###Code df.to_csv('Walmart_API.csv') ###Output _____no_output_____
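A more compact way to express the chain of `.replace` calls above is to pass a single mapping to `Series.replace`. The sketch below is illustrative only: it uses a small made-up frame standing in for the Walmart data, with the same misspellings listed in the cleaning cell:

```python
import pandas as pd

df_demo = pd.DataFrame({'Store Type': ['Walmart SC', 'Wm Nbrhd Mkt', 'Walmart Supercenter']})

store_type_fixes = {
    'Walmart SC': 'Walmart Supercenter',
    'Walmart Superenter': 'Walmart Supercenter',
    'Wm Nbrhd Mkt': 'Walmart Neighborhood Market',
    'WM Nbrhd Mkt': 'Walmart Neighborhood Market',
    'wm Nbrhd Mkt': 'Walmart Neighborhood Market',
    'Wm Pharm/Clinic': 'Walmart Pharmacy/Clinic',
}
df_demo['Store Type'] = df_demo['Store Type'].replace(store_type_fixes)
print(df_demo['Store Type'].value_counts())
```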
inflearn_machine_learning/code/ch7/5_multiple_linear_regression_with_sklearn.ipynb
###Markdown The Boston Housing Datasethttps://archive.ics.uci.edu/ml/datasets/Housing ###Code from sklearn.datasets import load_boston import matplotlib.pyplot as plt import numpy as np boston = load_boston() boston["data"] x_data = boston.data y_data = boston.target.reshape(boston.target.size,1) y_data.shape from sklearn import preprocessing minmax_scale = preprocessing.MinMaxScaler(feature_range=(0,5)).fit(x_data) # standard_scale = preprocessing.StandardScaler().fit(x_data) x_scaled_data = minmax_scale.transform(x_data) x_scaled_data[:3] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(x_scaled_data, y_data, test_size=0.33) X_train.shape, X_test.shape, y_train.shape, y_test.shape from sklearn import linear_model regr = linear_model.LinearRegression(fit_intercept=True, normalize=False, copy_X=True, n_jobs=8) regr.fit(X_train, y_train) regr regr.coef_ , regr.intercept_ # # The coefficients print('Coefficients: ', regr.coef_) print('intercept: ', regr.intercept_) regr.predict(x_data[:5]) x_data[:5].dot(regr.coef_.T) + regr.intercept_ from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error from sklearn.metrics import mean_squared_error y_true = y_test y_hat = regr.predict(X_test) r2_score(y_true, y_hat), mean_absolute_error(y_true, y_hat), mean_squared_error(y_true, y_hat) y_true = y_train y_hat = regr.predict(X_train) r2_score(y_true, y_hat), mean_absolute_error(y_true, y_hat), mean_squared_error(y_true, y_hat) ###Output _____no_output_____
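For reference, the three scores computed above have the standard definitions (with $y_i$ the target, $\hat{y}_i$ the prediction and $\bar{y}$ the mean target):

$$\text{MAE} = \frac{1}{n}\sum_{i=1}^{n} \lvert y_i - \hat{y}_i \rvert, \qquad \text{MSE} = \frac{1}{n}\sum_{i=1}^{n} (y_i - \hat{y}_i)^2, \qquad R^2 = 1 - \frac{\sum_i (y_i - \hat{y}_i)^2}{\sum_i (y_i - \bar{y})^2}.$$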
Problem 3.17.ipynb
###Markdown Solution {-} The derivative of a first-order Markov process does not exist, but the derivative of a second-order Markov process does, because its power spectral density $$S_x(j\omega) = \frac{2\sqrt{2}\omega_0^3\sigma^2}{\omega^4 + \omega_0^4}$$ falls off fast enough ($\propto \omega^{-4}$) for the differentiated process to have finite variance. This can be demonstrated by a Monte Carlo simulation as follows: ###Code from numpy import array, sqrt, random, zeros, arange from scipy.linalg import sqrtm, inv, cholesky import matplotlib.pyplot as plt from lib.vanloan import numeval dt = 1 sigma = 1 omega0 = 0.1 num = (sigma**2)*(omega0**3)*2*sqrt(2) # Dynamic matrix F = array([[0, 1], [-(omega0**2), -2/sqrt(2)*omega0]]) # White noise coefficients G = array([[0], [sqrt(num)]]) # Van Loan [phi, Q] = numeval(F, G, dt) # Vector of random numbers randn = random.randn(2, 100) # Cholesky upper triangular C = cholesky(Q) wk = zeros([2, 100]) x = zeros([2, 100 + 1]) t = arange(0, 100 + 1) # Generate white noise sequence for i in range(0, 100): wk[:, i] = C@randn[:, i] for j in range(0, 100): x[:, j + 1] = phi@x[:, j] + wk[:, j] plt.plot(t, x[0], 'r', label='Position') plt.plot(t, x[1], 'b', label='Velocity') plt.title('2nd order Markov process') plt.xlabel('Time') plt.ylabel('Position / Velocity') plt.legend(loc='lower right') plt.grid() plt.show() ###Output _____no_output_____
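One way to connect the code to the stated spectrum (a hedged derivation consistent with the $F$ and $G$ above, taking the process to be the first state of the model): the pair corresponds to the shaping filter

$$H(s) = \frac{\sigma\sqrt{2\sqrt{2}\,\omega_0^{3}}}{s^{2} + \sqrt{2}\,\omega_0 s + \omega_0^{2}},$$

and driving it with unit-density white noise gives

$$S_x(j\omega) = \lvert H(j\omega) \rvert^{2} = \frac{2\sqrt{2}\,\omega_0^{3}\sigma^{2}}{(\omega_0^{2}-\omega^{2})^{2} + 2\,\omega_0^{2}\omega^{2}} = \frac{2\sqrt{2}\,\omega_0^{3}\sigma^{2}}{\omega^{4} + \omega_0^{4}},$$

which is the second-order Markov spectrum quoted above.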
notebooks/error-analysis/Preprocessing-related/ddi-Test-Error-Analysis-Stop-Words.ipynb
###Markdown Error analysis for the type of examples that Stop words get wrong compared to the baseline ###Code %load_ext autoreload %autoreload import os from sys import path import re import pandas as pd path.append('../../..') import numpy as np from scipy.stats import ttest_rel from relation_extraction.data.converters.converter_ddi import relation_dict output_path = '/scratch/geeticka/relation-extraction-result/ddi-analyze/' def res(path): return os.path.join(output_path, path) original_sentences_path = os.path.join('/scratch/geeticka/relation-extraction-result/ddi-analyze/test_original.txt') stop_words_sentences_path = os.path.join('/crimea/geeticka/data/relation_extraction/ddi/pre-processed/punct_stop_digit/test_punct_stop_digit.txt') relation_dict def read_answers_line(line): linenum, relation = line.strip().split() return linenum, relation def asstring(list_of_strings): return " ".join(list_of_strings) def read_sentence_and_entities(line): line = line.strip().split() sentence = line[5:] relation = relation_dict[int(line[0])] entity1_idx = (int(line[1]), int(line[2])) entity2_idx = (int(line[3]), int(line[4])) entity1 = sentence[entity1_idx[0] : entity1_idx[1] + 1] entity2 = sentence[entity2_idx[0] : entity2_idx[1] + 1] return relation, asstring(entity1), asstring(entity2), asstring(sentence) needed_linenum_and_relation = {} with open(res("answers_for_dev-stop-word.txt")) as textfile1, open(res('answers_for_dev-baseline.txt')) as textfile2, \ open(res('answers_for_dev-baseline_gold.txt')) as textfile3: for x, y, z in zip(textfile1, textfile2, textfile3): linenum, stop_words_relation = read_answers_line(x) _, baseline_relation = read_answers_line(y) _, gold_relation = read_answers_line(z) if baseline_relation == gold_relation and stop_words_relation != gold_relation: # punct digit making mistakes needed_linenum_and_relation[int(linenum) - 1] = (baseline_relation, stop_words_relation) len(list(needed_linenum_and_relation.keys())) len(list(needed_linenum_and_relation.keys()))/len(open(res('answers_for_dev-baseline_gold.txt')).readlines()) * 100 ###Output _____no_output_____ ###Markdown Remember that baseline is correct here so the first relation listed is the correct one, unlike in the i2b2 notebook where the order is reversed because baseline is the incorrect one there ###Code interaction = 0; mechanism = 0; advice = 0; effect = 0; none = 0 print('We print the baseline first and then the stop words version. Gold relation corresponds to baseline\n\n') curr_linenum = 0 with open(original_sentences_path) as original_sentences, open(stop_words_sentences_path) as stop_words_sentences: for x, y in zip(original_sentences, stop_words_sentences): needed_linenums = list(needed_linenum_and_relation.keys()) if curr_linenum in needed_linenums: _, e1_b, e2_b, s_b = read_sentence_and_entities(x.strip()) _, e1_c, e2_c, s_c = read_sentence_and_entities(y.strip()) r_b, r_c = needed_linenum_and_relation[curr_linenum] if r_b == 'int': interaction += 1 elif r_b == 'mechanism': mechanism += 1 elif r_b == 'advise': advice += 1 elif r_b == 'effect': effect += 1 elif r_b == 'none': none += 1 print('Predicted Relation: \t {0}, {1} \nEntities: \t {2}, {3} \t {4}, {5} \nSentences: \n\t{6} \n\t {7}'.format( r_b, r_c, e1_b, e1_c, e2_b, e2_c, s_b, s_c)) print('\n') curr_linenum += 1 print(interaction, mechanism, advice, effect, none) interaction + mechanism + advice + effect + none ###Output _____no_output_____
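The per-relation counters above could equally be kept in a `collections.Counter`; a small illustrative sketch (the relation strings mirror `relation_dict`, the list itself is made up):

```python
from collections import Counter

# hypothetical gold relations of the examples the stop-words run newly misses
missed_relations = ['effect', 'mechanism', 'effect', 'none', 'advise']

tally = Counter(missed_relations)
print(tally)                  # per-relation counts
print(sum(tally.values()))    # total number of newly missed examples
```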
3.CNN_Basic/CNN_only_Tensorflow.ipynb
###Markdown TensorFlow Tutorial - 6. CNN본 문서는 TensorFlow 를 사용하여 Deep Learning을 구현하기 위한 기초적인 실습 자료이다.The code and comments are written by Dong-Hyun Kwak Upgraed to Tensorflow v1.10 by NamJungGu This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. Convolutional Neural Networks이번에는 이미지 인식 분야에서 가장 성공적으로 쓰이고 있는 Convolutional Neural Networks를 실습해본다.Convolutional Neural Networks, 이하 CNN은 아래와 같은 Convolutional Layer를 여러층 가진 딥러닝 모델을 뜻한다.![](http://ufldl.stanford.edu/tutorial/images/Cnn_layer.png)(출처: http://ufldl.stanford.edu/tutorial/images/Cnn_layer.png)이번에는 간단한 구조를 가진 CNN을 구현하고 방금전에 사용했던 MNIST 데이터를 학습시켜 보고, MLP와의 성능 차이를 비교해본다.아래의 코드를 보면 MLP와 전체 구조는 매우 유사한데, 중간에 Convolutional을 비롯해 처음 보는 여러 연산들이 추가 된 것을 알 수 있다. 또한 CNN을 효과적으로 학습하기 위해서는 Weight의 초기화를 0으로 하는 것이 아니라, 랜덤으로 해주어야하는데, 여기서는 간단히 가우시안을 이용하여 초기화 하였다. 그밖에 dropout과 relu 등이 사용되었다.각 함수와 연산들의 자세한 설명은 아래 코드를 보면서 하나하나 분석해보자. ###Code %matplotlib inline %tensorflow_version 1.15.2 import tensorflow as tf tf.reset_default_graph() from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('./MNIST_data', one_hot=True) # placeholder is used for feeding data. x = tf.placeholder("float", shape=[None, 784], name = 'x') # none represents variable length of dimension. 784 is the dimension of MNIST data. y_target = tf.placeholder("float", shape=[None, 10], name = 'y_target') # shape argument is optional, but this is useful to debug. # reshape input data x_image = tf.reshape(x, [-1,28,28,1], name="x_image") # Build a convolutional layer and maxpooling with random initialization W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1), name="W_conv1") # W is [row, col, channel, feature] b_conv1 = tf.Variable(tf.zeros([32]), name="b_conv1") h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1, name="h_conv1") h_pool1 = tf.nn.max_pool( h_conv1 , ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name = "h_pool1") # Repeat again with 64 number of filters W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1), name="W_conv2") # W is [row, col, channel, feature] b_conv2 = tf.Variable(tf.zeros([64]), name="b_conv2") h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2, name="h_conv2") h_pool2 = tf.nn.max_pool( h_conv2 , ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name = "h_pool2") # Build a fully connected layer h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64], name="h_pool2_flat") W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1), name = 'W_fc1') b_fc1 = tf.Variable(tf.zeros([1024]), name = 'b_fc1') h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1, name="h_fc1") # Dropout Layer keep_prob = tf.placeholder("float", name="keep_prob") h_fc1 = tf.nn.dropout(h_fc1, keep_prob, name="h_fc1_drop") # Build a fully connected layer with softmax W_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1), name = 'W_fc2') b_fc2 = tf.Variable(tf.zeros([10]), name = 'b_fc2') y=tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2, name="y") # define the Loss function cross_entropy = -tf.reduce_sum(y_target*tf.log(y), name = 'cross_entropy') # define optimization algorithm #train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_target, 1)) # correct_prediction is list of boolean which is 
the result of comparing(model prediction , data) accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) # tf.cast() : changes true -> 1 / false -> 0 # tf.reduce_mean() : calculate the mean # create summary of parameters tf.summary.histogram('weights_1', W_conv1) tf.summary.histogram('weights_2', W_conv2) tf.summary.histogram('y', y) tf.summary.scalar('cross_entropy', cross_entropy) merged = tf.summary.merge_all() summary_writer = tf.summary.FileWriter("summary") # Create Session with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth =True))) as sess: # open a session which is a envrionment of computation graph. sess.run(tf.global_variables_initializer())# initialize the variables # training the MLP for i in range(5001): # minibatch iteraction batch = mnist.train.next_batch(100) # minibatch size sess.run(train_step, feed_dict={x: batch[0], y_target: batch[1], keep_prob: 0.5}) # placeholder's none length is replaced by i:i+100 indexes if i%500 == 0: train_accuracy = sess.run(accuracy, feed_dict={x:batch[0], y_target: batch[1], keep_prob: 1}) print ("step %d, training accuracy: %.3f"%(i, train_accuracy)) # calculate the summary and write. summary = sess.run(merged, feed_dict={x:batch[0], y_target: batch[1], keep_prob: 1}) summary_writer.add_summary(summary , i) # for given x, y_target data set print ("test accuracy: %g"% sess.run(accuracy, feed_dict={x: mnist.test.images[0:250], y_target: mnist.test.labels[0:250], keep_prob: 1})) ###Output step 0, training accuracy: 0.250 step 500, training accuracy: 0.980 step 1000, training accuracy: 0.990 step 1500, training accuracy: 0.980 step 2000, training accuracy: 0.990 step 2500, training accuracy: 1.000 step 3000, training accuracy: 0.990 step 3500, training accuracy: 0.970 step 4000, training accuracy: 1.000 step 4500, training accuracy: 1.000 step 5000, training accuracy: 1.000 test accuracy: 1
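The `7*7*64` used when flattening follows from the two pooling layers: the convolutions use stride 1 with `'SAME'` padding (so they keep the 28×28 size), while each 2×2 max-pool with stride 2 and `'SAME'` padding produces `ceil(n / 2)` outputs per dimension. A quick check of that arithmetic:

```python
import math

size = 28
for _ in range(2):                 # two max-pool layers, stride 2, 'SAME' padding
    size = math.ceil(size / 2)     # 28 -> 14 -> 7
print(size, size * size * 64)      # 7 and 3136, the flattened length per example
```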
RFClassifier.ipynb
###Markdown Random ForestRandom Forests is a **slight variation of bagged trees** that has even better performance:- Exactly like bagging, we create an ensemble of decision trees using bootstrapped samples of the training set.- However, when building each tree, each time a split is considered, a **random sample of m features** is chosen as split candidates from the **full set of p features**. The split is only allowed to use **one of those m features**. - A new random sample of features is chosen for **every single tree at every single split**. - For **classification**, m is typically chosen to be the square root of p. - For **regression**, m is typically chosen to be somewhere between p/3 and p.What's the point?- Suppose there is **one very strong feature** in the data set. When using bagged trees, most of the trees will use that feature as the top split, resulting in an ensemble of similar trees that are **highly correlated**.- Averaging highly correlated quantities does not significantly reduce variance (which is the entire goal of bagging).- By randomly leaving out candidate features from each split, **Random Forests "decorrelates" the trees**, such that the averaging process can reduce the variance of the resulting model. --- Tuning n_estimatorsOne important tuning parameter is **n_estimators**, which is the number of trees that should be grown. It should be a large enough value that the error seems to have "stabilized". --- Tuning max_featuresThe other important tuning parameter is **max_features**, which is the number of features that should be considered at each split. --- Comparing Random Forests with decision trees**Advantages of Random Forests:**- Performance is competitive with the best supervised learning methods- Provides a more reliable estimate of feature importance- Allows you to estimate out-of-sample error without using train/test split or cross-validation**Disadvantages of Random Forests:**- Less interpretable- Slower to train- Slower to predict ###Code import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import statsmodels.formula.api as sm import scipy.stats as stats %matplotlib inline plt.rcParams['figure.figsize'] = 10, 7.5 plt.rcParams['axes.grid'] = True plt.gray() from matplotlib.backends.backend_pdf import PdfPages from sklearn.model_selection import train_test_split from sklearn import metrics from sklearn.linear_model import LogisticRegression from statsmodels.stats.outliers_influence import variance_inflation_factor from patsy import dmatrices import sklearn.tree as dt import sklearn.ensemble as en from sklearn import metrics from sklearn.tree import DecisionTreeClassifier, export_graphviz, export from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier import pydotplus as pdot from IPython.display import Image url = 'https://raw.githubusercontent.com/mkmritunjay/machineLearning/master/HR_comma_sep.csv' hr_df = pd.read_csv(url) # now we need to create dummy variables for categorical variables(dtype=object) numerical_features = ['satisfaction_level', 'last_evaluation', 'number_project', 'average_montly_hours', 'time_spend_company'] categorical_features = ['Work_accident','promotion_last_5years', 'department', 'salary'] categorical_features numerical_features # A utility function to create dummy variable def create_dummies( df, colname ): col_dummies = pd.get_dummies(df[colname], prefix=colname) col_dummies.drop(col_dummies.columns[0], axis=1, inplace=True) df = pd.concat([df, 
col_dummies], axis=1) df.drop( colname, axis = 1, inplace = True ) return df for c_feature in categorical_features: hr_df = create_dummies( hr_df, c_feature ) hr_df.head() #Splitting the data feature_columns = hr_df.columns.difference( ['left'] ) feature_columns ###Output _____no_output_____ ###Markdown Train Test split ###Code train_X, test_X, train_y, test_y = train_test_split( hr_df[feature_columns], hr_df['left'], test_size = 0.3, random_state = 42 ) ###Output _____no_output_____ ###Markdown Building the model ###Code radm_clf = RandomForestClassifier(oob_score=True,n_estimators=100 ) radm_clf.fit( train_X, train_y ) radm_test_pred = pd.DataFrame( { 'actual': test_y, 'predicted': radm_clf.predict( test_X ) } ) metrics.accuracy_score( radm_test_pred.actual, radm_test_pred.predicted ) ###Output _____no_output_____ ###Markdown Creating confusion matrix ###Code tree_cm = metrics.confusion_matrix( radm_test_pred.predicted, radm_test_pred.actual, [1,0] ) sns.heatmap(tree_cm, annot=True, fmt='.2f', xticklabels = ["Left", "No Left"] , yticklabels = ["Left", "No Left"] ) plt.ylabel('True label') plt.xlabel('Predicted label') ###Output _____no_output_____ ###Markdown Find for the important features ###Code radm_clf.feature_importances_ indices = np.argsort(radm_clf.feature_importances_)[::-1] feature_rank = pd.DataFrame( columns = ['rank', 'feature', 'importance'] ) for f in range(train_X.shape[1]): feature_rank.loc[f] = [f+1, train_X.columns[indices[f]], radm_clf.feature_importances_[indices[f]]] sns.barplot( y = 'feature', x = 'importance', data = feature_rank ) ###Output _____no_output_____ ###Markdown Note: As per the model, the most important features which influence whether to leave the company,in descending order, are- satisfaction_level- time_spend_company- number_project- average_montly_hours- last_evaluation- work_accident ###Code ###Output _____no_output_____
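The classifier above is constructed with `oob_score=True`, but the out-of-bag estimate (the "estimate out-of-sample error without using train/test split" mentioned in the introduction) is never read back. It is exposed as `oob_score_` on the fitted model; a short self-contained sketch on toy data (names and data are illustrative):

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X_demo, y_demo = make_classification(n_samples=500, n_features=10, random_state=42)

rf = RandomForestClassifier(n_estimators=100, oob_score=True, random_state=42)
rf.fit(X_demo, y_demo)

print(rf.oob_score_)   # accuracy estimated from the out-of-bag samples of each tree
```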
examples/toyB.ipynb
###Markdown Toy Process B Merge two files, ```alpha_norm.csv``` and ```beta.csv```, by concatenating their columns. ###Code import pandas as pd alpha = pd.read_csv('alpha_norm.csv') beta = pd.read_csv('beta.csv') beta.head() common = pd.concat([alpha, beta], axis=1) common.head() common.to_csv('common.csv', index=False) ###Output _____no_output_____
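One general pandas behaviour worth keeping in mind here (not exercised by this toy example): `pd.concat(..., axis=1)` aligns the frames on their row index, so if the two files did not share the same index the result would contain NaNs. A minimal illustration with made-up frames:

```python
import pandas as pd

a = pd.DataFrame({'alpha': [1.0, 2.0, 3.0]})          # default index 0, 1, 2
b = pd.DataFrame({'beta': [10, 20]}, index=[1, 2])    # index 1, 2 only

print(pd.concat([a, b], axis=1))   # row 0 gets NaN in the 'beta' column
```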
EuroVoc/Explore EuroVoc.ipynb
###Markdown Analyse EuroVocGet the number of concepts and the depth of the ontologyIt is important to have rdflib, SPARQLWrapper, pandas, and numpy* pip install rdflib* pip install SPARQLWrapper* pip install pandas* pip install numpyDownload the lastest version of EuroVoc from https://op.europa.eu/en/web/eu-vocabularies/dataset/-/resource?uri=http://publications.europa.eu/resource/dataset/eurovoc ###Code from rdflib import Graph from rdflib.namespace import RDFS from rdflib import URIRef import rdflib import json from collections import deque import numpy as np import pandas as pd input_file = "eurovoc-skos-ap-eu.rdf" g = Graph() g.parse(input_file) qres = g.query( """PREFIX skos:<http://www.w3.org/2004/02/skos/core#> SELECT DISTINCT ?a WHERE { ?a a skos:Concept . }""") topics = dict() for row in qres: topics[row[0]] = True print("Number of concepts: {}".format(len(topics))) qres = g.query( """PREFIX skos:<http://www.w3.org/2004/02/skos/core#> SELECT DISTINCT ?a ?b WHERE { ?a skos:broader ?b . }""") broaders = dict() narrowers = dict() for row in qres: if row[0] not in broaders: broaders[row[0]] = list() broaders[row[0]].append(row[1]) if row[1] not in narrowers: narrowers[row[1]] = list() narrowers[row[1]].append(row[0]) unhier = broaders concepts = topics for concept, value in concepts.items(): queue = deque() max_depth = value queue.append({"t":concept,"d":value}) while len(queue) > 0: dequeued = queue.popleft() if dequeued["t"] in unhier: broads = unhier[dequeued["t"]] new_depth = dequeued["d"]+1 if new_depth > max_depth: max_depth = new_depth for broader in broads: queue.append({"t":broader,"d":dequeued["d"]+1}) concepts[concept] = max_depth list_of_depths = pd.DataFrame.from_dict(concepts, orient='index', columns=['depth']) list_of_depths.sort_values('depth', inplace=True, ascending=False) print("Concepts are ranked by maximum depth") list_of_depths.head(20) for k, v in broaders.items(): if len(v) > 1: print("{} has {} parents".format(k, len(v))) ###Output http://eurovoc.europa.eu/1236 has 4 parents http://eurovoc.europa.eu/2084 has 6 parents http://eurovoc.europa.eu/4092 has 4 parents http://eurovoc.europa.eu/5693 has 2 parents http://eurovoc.europa.eu/5563 has 4 parents http://eurovoc.europa.eu/690 has 6 parents http://eurovoc.europa.eu/4816 has 6 parents http://eurovoc.europa.eu/4246 has 2 parents http://eurovoc.europa.eu/2037 has 4 parents http://eurovoc.europa.eu/5652 has 3 parents http://eurovoc.europa.eu/2543 has 8 parents http://eurovoc.europa.eu/1019 has 5 parents http://eurovoc.europa.eu/4466 has 4 parents http://eurovoc.europa.eu/2058 has 4 parents http://eurovoc.europa.eu/4620 has 2 parents http://eurovoc.europa.eu/5965 has 6 parents http://eurovoc.europa.eu/205 has 3 parents http://eurovoc.europa.eu/4862 has 3 parents http://eurovoc.europa.eu/249 has 4 parents http://eurovoc.europa.eu/4841 has 7 parents http://eurovoc.europa.eu/314 has 6 parents http://eurovoc.europa.eu/8373 has 2 parents http://eurovoc.europa.eu/1220 has 2 parents http://eurovoc.europa.eu/5709 has 6 parents http://eurovoc.europa.eu/5713 has 5 parents http://eurovoc.europa.eu/3416 has 3 parents http://eurovoc.europa.eu/5469 has 4 parents http://eurovoc.europa.eu/5020 has 5 parents http://eurovoc.europa.eu/2138 has 4 parents http://eurovoc.europa.eu/2286 has 7 parents http://eurovoc.europa.eu/1318 has 6 parents http://eurovoc.europa.eu/1774 has 4 parents http://eurovoc.europa.eu/1899 has 7 parents http://eurovoc.europa.eu/1532 has 6 parents http://eurovoc.europa.eu/1819 has 4 parents 
http://eurovoc.europa.eu/1196 has 5 parents http://eurovoc.europa.eu/4580 has 6 parents http://eurovoc.europa.eu/3784 has 3 parents http://eurovoc.europa.eu/4320 has 5 parents http://eurovoc.europa.eu/2224 has 4 parents http://eurovoc.europa.eu/2055 has 5 parents http://eurovoc.europa.eu/3833 has 5 parents http://eurovoc.europa.eu/3763 has 6 parents http://eurovoc.europa.eu/4572 has 5 parents http://eurovoc.europa.eu/1611 has 3 parents http://eurovoc.europa.eu/1124 has 4 parents http://eurovoc.europa.eu/4782 has 2 parents http://eurovoc.europa.eu/1613 has 4 parents http://eurovoc.europa.eu/1649 has 3 parents http://eurovoc.europa.eu/1968 has 2 parents http://eurovoc.europa.eu/1518 has 3 parents http://eurovoc.europa.eu/4376 has 4 parents http://eurovoc.europa.eu/6036 has 5 parents http://eurovoc.europa.eu/247 has 5 parents http://eurovoc.europa.eu/6161 has 2 parents http://eurovoc.europa.eu/1559 has 4 parents http://eurovoc.europa.eu/4353 has 4 parents http://eurovoc.europa.eu/496 has 5 parents http://eurovoc.europa.eu/1519 has 6 parents http://eurovoc.europa.eu/5898 has 6 parents http://eurovoc.europa.eu/1729 has 2 parents http://eurovoc.europa.eu/4846 has 4 parents http://eurovoc.europa.eu/1504 has 4 parents http://eurovoc.europa.eu/4351 has 3 parents http://eurovoc.europa.eu/1557 has 4 parents http://eurovoc.europa.eu/5072 has 3 parents http://eurovoc.europa.eu/5859 has 7 parents http://eurovoc.europa.eu/1085 has 6 parents http://eurovoc.europa.eu/4148 has 3 parents http://eurovoc.europa.eu/6223 has 2 parents http://eurovoc.europa.eu/5619 has 7 parents http://eurovoc.europa.eu/863 has 6 parents http://eurovoc.europa.eu/841 has 5 parents http://eurovoc.europa.eu/4212 has 4 parents http://eurovoc.europa.eu/1768 has 3 parents http://eurovoc.europa.eu/5100 has 6 parents http://eurovoc.europa.eu/1767 has 3 parents http://eurovoc.europa.eu/3401 has 5 parents http://eurovoc.europa.eu/1291 has 2 parents http://eurovoc.europa.eu/3410 has 4 parents http://eurovoc.europa.eu/1182 has 6 parents http://eurovoc.europa.eu/1216 has 3 parents http://eurovoc.europa.eu/4772 has 3 parents http://eurovoc.europa.eu/4732 has 2 parents http://eurovoc.europa.eu/1355 has 4 parents http://eurovoc.europa.eu/2222 has 2 parents http://eurovoc.europa.eu/993 has 4 parents http://eurovoc.europa.eu/4966 has 2 parents http://eurovoc.europa.eu/1842 has 6 parents http://eurovoc.europa.eu/4570 has 6 parents http://eurovoc.europa.eu/3857 has 4 parents http://eurovoc.europa.eu/8300 has 2 parents http://eurovoc.europa.eu/4160 has 4 parents http://eurovoc.europa.eu/1500 has 6 parents http://eurovoc.europa.eu/3405 has 5 parents http://eurovoc.europa.eu/2088 has 3 parents http://eurovoc.europa.eu/4354 has 5 parents http://eurovoc.europa.eu/111 has 3 parents http://eurovoc.europa.eu/888 has 7 parents http://eurovoc.europa.eu/4465 has 4 parents http://eurovoc.europa.eu/1769 has 4 parents http://eurovoc.europa.eu/6278 has 4 parents http://eurovoc.europa.eu/1555 has 2 parents http://eurovoc.europa.eu/4583 has 4 parents http://eurovoc.europa.eu/1185 has 6 parents http://eurovoc.europa.eu/3814 has 3 parents http://eurovoc.europa.eu/1255 has 8 parents http://eurovoc.europa.eu/451486 has 2 parents http://eurovoc.europa.eu/1771 has 2 parents http://eurovoc.europa.eu/4161 has 3 parents http://eurovoc.europa.eu/1639 has 6 parents http://eurovoc.europa.eu/5694 has 2 parents http://eurovoc.europa.eu/1128 has 4 parents http://eurovoc.europa.eu/1219 has 6 parents http://eurovoc.europa.eu/336 has 6 parents http://eurovoc.europa.eu/5860 has 7 
parents http://eurovoc.europa.eu/4324 has 4 parents http://eurovoc.europa.eu/5454 has 3 parents http://eurovoc.europa.eu/33 has 4 parents http://eurovoc.europa.eu/2866 has 6 parents http://eurovoc.europa.eu/5108 has 4 parents http://eurovoc.europa.eu/1298 has 2 parents http://eurovoc.europa.eu/5892 has 4 parents http://eurovoc.europa.eu/5989 has 4 parents http://eurovoc.europa.eu/1561 has 2 parents http://eurovoc.europa.eu/1188 has 2 parents http://eurovoc.europa.eu/1509 has 7 parents http://eurovoc.europa.eu/1522 has 6 parents http://eurovoc.europa.eu/484 has 5 parents http://eurovoc.europa.eu/3830 has 2 parents http://eurovoc.europa.eu/4567 has 6 parents http://eurovoc.europa.eu/5040 has 6 parents http://eurovoc.europa.eu/1289 has 2 parents http://eurovoc.europa.eu/1207 has 2 parents http://eurovoc.europa.eu/5925 has 2 parents http://eurovoc.europa.eu/5063 has 6 parents http://eurovoc.europa.eu/7198 has 4 parents http://eurovoc.europa.eu/2229 has 4 parents http://eurovoc.europa.eu/3411 has 2 parents http://eurovoc.europa.eu/3858 has 4 parents http://eurovoc.europa.eu/1166 has 3 parents http://eurovoc.europa.eu/4621 has 5 parents http://eurovoc.europa.eu/4662 has 6 parents http://eurovoc.europa.eu/1876 has 2 parents http://eurovoc.europa.eu/1965 has 2 parents http://eurovoc.europa.eu/5930 has 3 parents http://eurovoc.europa.eu/6148 has 2 parents http://eurovoc.europa.eu/4779 has 2 parents http://eurovoc.europa.eu/4780 has 2 parents http://eurovoc.europa.eu/2331 has 6 parents http://eurovoc.europa.eu/1584 has 2 parents http://eurovoc.europa.eu/2563 has 6 parents http://eurovoc.europa.eu/5946 has 3 parents http://eurovoc.europa.eu/5029 has 3 parents http://eurovoc.europa.eu/1125 has 6 parents http://eurovoc.europa.eu/1874 has 2 parents http://eurovoc.europa.eu/4342 has 6 parents http://eurovoc.europa.eu/3774 has 5 parents http://eurovoc.europa.eu/1841 has 3 parents http://eurovoc.europa.eu/1168 has 2 parents http://eurovoc.europa.eu/5445 has 3 parents http://eurovoc.europa.eu/1560 has 5 parents http://eurovoc.europa.eu/5876 has 5 parents http://eurovoc.europa.eu/2057 has 4 parents http://eurovoc.europa.eu/2525 has 5 parents http://eurovoc.europa.eu/5746 has 5 parents http://eurovoc.europa.eu/1843 has 2 parents http://eurovoc.europa.eu/3816 has 2 parents http://eurovoc.europa.eu/1617 has 3 parents http://eurovoc.europa.eu/4648 has 4 parents http://eurovoc.europa.eu/2203 has 4 parents http://eurovoc.europa.eu/3403 has 5 parents http://eurovoc.europa.eu/891 has 2 parents http://eurovoc.europa.eu/1228 has 5 parents http://eurovoc.europa.eu/235 has 3 parents http://eurovoc.europa.eu/1208 has 5 parents http://eurovoc.europa.eu/4848 has 2 parents http://eurovoc.europa.eu/2027 has 3 parents http://eurovoc.europa.eu/4707 has 4 parents http://eurovoc.europa.eu/1712 has 7 parents http://eurovoc.europa.eu/2547 has 3 parents http://eurovoc.europa.eu/321 has 3 parents http://eurovoc.europa.eu/5049 has 4 parents http://eurovoc.europa.eu/5370 has 2 parents http://eurovoc.europa.eu/4410 has 3 parents http://eurovoc.europa.eu/4578 has 4 parents http://eurovoc.europa.eu/6280 has 3 parents http://eurovoc.europa.eu/5095 has 3 parents http://eurovoc.europa.eu/451912 has 2 parents http://eurovoc.europa.eu/4839 has 7 parents http://eurovoc.europa.eu/3402 has 2 parents http://eurovoc.europa.eu/2820 has 2 parents http://eurovoc.europa.eu/691 has 5 parents http://eurovoc.europa.eu/2089 has 4 parents http://eurovoc.europa.eu/5770 has 2 parents http://eurovoc.europa.eu/5617 has 2 parents http://eurovoc.europa.eu/451462 
has 2 parents http://eurovoc.europa.eu/1218 has 4 parents http://eurovoc.europa.eu/1217 has 5 parents http://eurovoc.europa.eu/6281 has 2 parents http://eurovoc.europa.eu/5349 has 3 parents http://eurovoc.europa.eu/2052 has 2 parents http://eurovoc.europa.eu/1524 has 3 parents http://eurovoc.europa.eu/2231 has 5 parents http://eurovoc.europa.eu/4453 has 3 parents http://eurovoc.europa.eu/8547 has 2 parents http://eurovoc.europa.eu/4217 has 4 parents http://eurovoc.europa.eu/333 has 2 parents http://eurovoc.europa.eu/1338 has 3 parents http://eurovoc.europa.eu/1986 has 3 parents http://eurovoc.europa.eu/2018 has 2 parents http://eurovoc.europa.eu/4461 has 3 parents http://eurovoc.europa.eu/2336 has 4 parents http://eurovoc.europa.eu/5458 has 2 parents http://eurovoc.europa.eu/5858 has 4 parents http://eurovoc.europa.eu/3415 has 4 parents http://eurovoc.europa.eu/5706 has 6 parents http://eurovoc.europa.eu/4375 has 2 parents http://eurovoc.europa.eu/3888 has 3 parents http://eurovoc.europa.eu/5695 has 2 parents http://eurovoc.europa.eu/1308 has 2 parents http://eurovoc.europa.eu/1253 has 5 parents http://eurovoc.europa.eu/4775 has 4 parents http://eurovoc.europa.eu/5969 has 2 parents http://eurovoc.europa.eu/4411 has 2 parents http://eurovoc.europa.eu/726 has 2 parents http://eurovoc.europa.eu/168 has 2 parents http://eurovoc.europa.eu/1254 has 2 parents http://eurovoc.europa.eu/6252 has 2 parents http://eurovoc.europa.eu/6131 has 2 parents http://eurovoc.europa.eu/1822 has 2 parents http://eurovoc.europa.eu/2368 has 3 parents http://eurovoc.europa.eu/2201 has 2 parents http://eurovoc.europa.eu/4164 has 3 parents http://eurovoc.europa.eu/3823 has 2 parents http://eurovoc.europa.eu/451885 has 2 parents http://eurovoc.europa.eu/5944 has 2 parents http://eurovoc.europa.eu/1501 has 3 parents http://eurovoc.europa.eu/1294 has 2 parents http://eurovoc.europa.eu/1299 has 2 parents http://eurovoc.europa.eu/1766 has 2 parents
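###Markdown The depth figures above come from a breadth-first walk up the skos:broader links, so the logic can be sanity-checked without downloading the full EuroVoc dump. Below is a minimal sketch on a hand-made toy hierarchy (the concept names are made up, not real EuroVoc URIs); like the loop above, it assumes the broader graph is acyclic. ###Code
from collections import deque

# Toy broader-relation: concept -> list of its broader (parent) concepts
toy_broaders = {
    "leaf_a": ["mid_1", "mid_2"],   # a concept with two parents, like the cases printed above
    "mid_1": ["top"],
    "mid_2": ["top"],
}
toy_concepts = ["top", "mid_1", "mid_2", "leaf_a"]

def max_depth(concept, broaders):
    """Longest chain of broader links starting from `concept` (depth 1 = no parents)."""
    best = 1
    queue = deque([(concept, 1)])
    while queue:
        node, depth = queue.popleft()
        best = max(best, depth)
        for parent in broaders.get(node, []):
            queue.append((parent, depth + 1))
    return best

for c in toy_concepts:
    print(c, max_depth(c, toy_broaders))
###Output _____no_output_____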
KDEExplorersApp_withweight2.ipynb
###Markdown Explore Kernel Density Estimate Fitting on WNS 'Endemic Area' ###Code import os import ipywidgets as widgets from ipywidgets import interact from osgeo import gdal import geopandas as gpd import numpy as np from scipy import stats import matplotlib.pyplot as plt from shapely.geometry import mapping, Polygon, MultiPolygon, LineString, Point target_crs = {'proj': 'aea', 'lat_1': 29.5, 'lat_2': 45.5, 'lat_0': 23, 'lon_0': -96, 'x_0': 0, 'y_0': 0, 'datum': 'NAD83', 'units': 'm', 'no_defs': True} wns_fname = os.path.join('data', "WNS_Status.shp") wns = gpd.GeoDataFrame.from_file(wns_fname).dropna(subset=['WNS_MAP_YR']) wns['year'] = wns.WNS_MAP_YR.str[:4].astype(int) wns = wns.to_crs(target_crs) wns.WNS_STATUS.unique() states_fname = os.path.join('data', "cb_2017_us_state_20m.shp") states = gpd.read_file(states_fname) states_aea = states.to_crs(target_crs) conus_states = states_aea[~states_aea.NAME.isin(['Alaska', 'Hawaii', 'Puerto Rico'])] bounds = conus_states.bounds x_bounds = [bounds.minx.min(), bounds.maxx.max()] y_bounds = [bounds.miny.min(), bounds.maxy.max()] centroids = wns.centroid centroids = centroids.to_crs(target_crs) wns['x'] = centroids.x wns['y'] = centroids.y wns['area'] = centroids.to_crs(target_crs).geometry.area wns['area_weight'] = wns.area/wns.area.max() # wns.head(5) def reclass_as_pcnt(Z): uniques = np.unique(Z)[::-1] z_reclass = np.copy(Z) total_dens = Z.sum() cum_area = 0.0 for u in uniques: cum_area += np.count_nonzero(Z==u)*u z_reclass[Z==u] = cum_area/total_dens return z_reclass def create_kernel(x, y, X, Y, factor=1.2, weights=None): positions = np.vstack([X.ravel(), Y.ravel()]) values = np.vstack([x, y]) kernel = gaussian_kde(values, weights=weights) kernel.set_bandwidth( factor) Z = np.reshape(kernel(positions).T, X.shape) return reclass_as_pcnt(Z) def create_kernel_contours(x, y, z, levels=[0.5, 0.75, 0.95]): cset = plt.contour(x, y, z, levels=levels, colors=['red', 'white', 'blue'], linewidths=4) return cset def plot_one(cset, title='', isopleth=0.75): fig = plt.figure(figsize=(25, 15)) ax = fig.add_subplot(111) ax.set_xlim([-3000000, 3000000]) ax.set_ylim([0, 3700000]) states_aea.plot(color='None', edgecolor='black', ax=ax, alpha=0.4) plt.title(title,fontsize=30) ax.set_aspect('equal') return ax # copied from https://gist.github.com/tillahoffmann/f844bce2ec264c1c8cb5 import numpy as np from scipy.spatial.distance import cdist class gaussian_kde(object): """Representation of a kernel-density estimate using Gaussian kernels. Kernel density estimation is a way to estimate the probability density function (PDF) of a random variable in a non-parametric way. `gaussian_kde` works for both uni-variate and multi-variate data. It includes automatic bandwidth determination. The estimation works best for a unimodal distribution; bimodal or multi-modal distributions tend to be oversmoothed. Parameters ---------- dataset : array_like Datapoints to estimate from. In case of univariate data this is a 1-D array, otherwise a 2-D array with shape (# of dims, # of data). bw_method : str, scalar or callable, optional The method used to calculate the estimator bandwidth. This can be 'scott', 'silverman', a scalar constant or a callable. If a scalar, this will be used directly as `kde.factor`. If a callable, it should take a `gaussian_kde` instance as only parameter and return a scalar. If None (default), 'scott' is used. See Notes for more details. weights : array_like, shape (n, ), optional, default: None An array of weights, of the same shape as `x`. 
Each value in `x` only contributes its associated weight towards the bin count (instead of 1). Attributes ---------- dataset : ndarray The dataset with which `gaussian_kde` was initialized. d : int Number of dimensions. n : int Number of datapoints. neff : float Effective sample size using Kish's approximation. factor : float The bandwidth factor, obtained from `kde.covariance_factor`, with which the covariance matrix is multiplied. covariance : ndarray The covariance matrix of `dataset`, scaled by the calculated bandwidth (`kde.factor`). inv_cov : ndarray The inverse of `covariance`. Methods ------- kde.evaluate(points) : ndarray Evaluate the estimated pdf on a provided set of points. kde(points) : ndarray Same as kde.evaluate(points) kde.pdf(points) : ndarray Alias for ``kde.evaluate(points)``. kde.set_bandwidth(bw_method='scott') : None Computes the bandwidth, i.e. the coefficient that multiplies the data covariance matrix to obtain the kernel covariance matrix. .. versionadded:: 0.11.0 kde.covariance_factor : float Computes the coefficient (`kde.factor`) that multiplies the data covariance matrix to obtain the kernel covariance matrix. The default is `scotts_factor`. A subclass can overwrite this method to provide a different method, or set it through a call to `kde.set_bandwidth`. Notes ----- Bandwidth selection strongly influences the estimate obtained from the KDE (much more so than the actual shape of the kernel). Bandwidth selection can be done by a "rule of thumb", by cross-validation, by "plug-in methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde` uses a rule of thumb, the default is Scott's Rule. Scott's Rule [1]_, implemented as `scotts_factor`, is:: n**(-1./(d+4)), with ``n`` the number of data points and ``d`` the number of dimensions. Silverman's Rule [2]_, implemented as `silverman_factor`, is:: (n * (d + 2) / 4.)**(-1. / (d + 4)). Good general descriptions of kernel density estimation can be found in [1]_ and [2]_, the mathematics for this multi-dimensional implementation can be found in [1]_. References ---------- .. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and Visualization", John Wiley & Sons, New York, Chicester, 1992. .. [2] B.W. Silverman, "Density Estimation for Statistics and Data Analysis", Vol. 26, Monographs on Statistics and Applied Probability, Chapman and Hall, London, 1986. .. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993. .. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel conditional density estimation", Computational Statistics & Data Analysis, Vol. 36, pp. 279-298, 2001. Examples -------- Generate some random two-dimensional data: >>> from scipy import stats >>> def measure(n): >>> "Measurement model, return two coupled measurements." >>> m1 = np.random.normal(size=n) >>> m2 = np.random.normal(scale=0.5, size=n) >>> return m1+m2, m1-m2 >>> m1, m2 = measure(2000) >>> xmin = m1.min() >>> xmax = m1.max() >>> ymin = m2.min() >>> ymax = m2.max() Perform a kernel density estimate on the data: >>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j] >>> positions = np.vstack([X.ravel(), Y.ravel()]) >>> values = np.vstack([m1, m2]) >>> kernel = stats.gaussian_kde(values) >>> Z = np.reshape(kernel(positions).T, X.shape) Plot the results: >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r, ... 
extent=[xmin, xmax, ymin, ymax]) >>> ax.plot(m1, m2, 'k.', markersize=2) >>> ax.set_xlim([xmin, xmax]) >>> ax.set_ylim([ymin, ymax]) >>> plt.show() """ def __init__(self, dataset, bw_method=None, weights=None): self.dataset = np.atleast_2d(dataset) if not self.dataset.size > 1: raise ValueError("`dataset` input should have multiple elements.") self.d, self.n = self.dataset.shape if weights is not None: self.weights = weights / np.sum(weights) else: self.weights = np.ones(self.n) / self.n # Compute the effective sample size # http://surveyanalysis.org/wiki/Design_Effects_and_Effective_Sample_Size#Kish.27s_approximate_formula_for_computing_effective_sample_size self.neff = 1.0 / np.sum(self.weights ** 2) self.set_bandwidth(bw_method=bw_method) def evaluate(self, points): """Evaluate the estimated pdf on a set of points. Parameters ---------- points : (# of dimensions, # of points)-array Alternatively, a (# of dimensions,) vector can be passed in and treated as a single point. Returns ------- values : (# of points,)-array The values at each point. Raises ------ ValueError : if the dimensionality of the input points is different than the dimensionality of the KDE. """ points = np.atleast_2d(points) d, m = points.shape if d != self.d: if d == 1 and m == self.d: # points was passed in as a row vector points = np.reshape(points, (self.d, 1)) m = 1 else: msg = "points have dimension %s, dataset has dimension %s" % (d, self.d) raise ValueError(msg) # compute the normalised residuals chi2 = cdist(points.T, self.dataset.T, 'mahalanobis', VI=self.inv_cov) ** 2 # compute the pdf result = np.sum(np.exp(-.5 * chi2) * self.weights, axis=1) / self._norm_factor return result __call__ = evaluate def scotts_factor(self): return np.power(self.neff, -1./(self.d+4)) def silverman_factor(self): return np.power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4)) # Default method to calculate bandwidth, can be overwritten by subclass covariance_factor = scotts_factor def set_bandwidth(self, bw_method=None): """Compute the estimator bandwidth with given method. The new bandwidth calculated after a call to `set_bandwidth` is used for subsequent evaluations of the estimated density. Parameters ---------- bw_method : str, scalar or callable, optional The method used to calculate the estimator bandwidth. This can be 'scott', 'silverman', a scalar constant or a callable. If a scalar, this will be used directly as `kde.factor`. If a callable, it should take a `gaussian_kde` instance as only parameter and return a scalar. If None (default), nothing happens; the current `kde.covariance_factor` method is kept. Notes ----- .. versionadded:: 0.11 Examples -------- >>> x1 = np.array([-7, -5, 1, 4, 5.]) >>> kde = stats.gaussian_kde(x1) >>> xs = np.linspace(-10, 10, num=50) >>> y1 = kde(xs) >>> kde.set_bandwidth(bw_method='silverman') >>> y2 = kde(xs) >>> kde.set_bandwidth(bw_method=kde.factor / 3.) >>> y3 = kde(xs) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo', ... 
label='Data points (rescaled)') >>> ax.plot(xs, y1, label='Scott (default)') >>> ax.plot(xs, y2, label='Silverman') >>> ax.plot(xs, y3, label='Const (1/3 * Silverman)') >>> ax.legend() >>> plt.show() """ if bw_method is None: pass elif bw_method == 'scott': self.covariance_factor = self.scotts_factor elif bw_method == 'silverman': self.covariance_factor = self.silverman_factor elif np.isscalar(bw_method): self._bw_method = 'use constant' self.covariance_factor = lambda: self.factor / bw_method elif callable(bw_method): self._bw_method = bw_method self.covariance_factor = lambda: self._bw_method(self) else: msg = "`bw_method` should be 'scott', 'silverman', a scalar " \ "or a callable." raise ValueError(msg) self._compute_covariance() def _compute_covariance(self): """Computes the covariance matrix for each Gaussian kernel using covariance_factor(). """ self.factor = self.covariance_factor() # Cache covariance and inverse covariance of the data if not hasattr(self, '_data_inv_cov'): # Compute the mean and residuals _mean = np.sum(self.weights * self.dataset, axis=1) _residual = (self.dataset - _mean[:, None]) # Compute the biased covariance self._data_covariance = np.atleast_2d(np.dot(_residual * self.weights, _residual.T)) # Correct for bias (http://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_covariance) self._data_covariance /= (1 - np.sum(self.weights ** 2)) self._data_inv_cov = np.linalg.inv(self._data_covariance) self.covariance = self._data_covariance * self.factor**2 self.inv_cov = self._data_inv_cov / self.factor**2 self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) #* self.n cache = {} def interactive_plot(factor, iso=0.75, year=2017, add_counties=False, add_centroids=True, add_surface=True, weight_by_county_area=True, weight_by_year=True): if year in cache: X, Y, data = cache[year]['data'] else: cache[year] = {} cache[year]['kernels'] = {} data = wns[wns.year <= year] pad=500000 X, Y = np.mgrid[x_bounds[0]-pad:x_bounds[1]+pad:100j, y_bounds[0]-pad:y_bounds[1]+pad:100j] cache[year]['data'] = (X, Y, data) if weight_by_year: weights = gen_weights(data.years_pres, factor=4)#weight_f.value else: weights = np.ones(data.area_weight.shape) weights /= np.sum(weights) if weight_by_county_area: county_weights = data.area_weight county_weights /= np.sum(county_weights) weights += county_weights weights /= np.sum(weights) weights = list(weights) Z = create_kernel(data.x, data.y, X, Y, factor, weights=weights) cache[year]['kernels'][factor] = Z cset = create_kernel_contours(X, Y, Z, levels=[iso]) ax = plot_one(cset, title=f"WNS {year}-{year+1}") if add_surface: ax.imshow(np.rot90(Z), cmap=plt.cm.Reds_r, extent=[X.min(), X.max(), Y.min(), Y.max()], alpha=0.6) if add_counties: data.to_crs(states_aea.crs).plot(column='year', cmap='plasma', ax=ax, legend=True) if add_centroids: data.to_crs(states_aea.crs).centroid.plot(color='grey', ax=ax) f = widgets.FloatSlider( value=1.2, min=0.1, max=4.0, step=0.1, description="BW Factor") f.continuous_update = False y = widgets.IntSlider( value=2014, min=wns.year.min()+1, max=wns.year.max(), step=1, description="Year") i = widgets.FloatSlider( value=0.75, min=0.05, max=.99, step=0.05, description="Isopleth") i.continuous_update = False c = widgets.Checkbox(value=False, description='Show Counties') c2 = widgets.Checkbox(value=False, description='Show Centroids') c3 = widgets.Checkbox(value=False, description='Show KDE Surface') c4 = widgets.Checkbox(value=True, description='Weight by county areas') c5 = 
widgets.Checkbox(value=True, description='Weight by years since detection') ###Output _____no_output_____ ###Markdown In order to weight counties we need to decide on a function to use for weighting. Let's start with a simple number of years raised to a power function, as it allows us to explore a range of curves *move the slider below to set the shape of this curve** 0.0 is no weight* 1.0 is linear***Note: Changes to this curve do not update the map until one of the widgets below is triggered*** ###Code weight_f = widgets.FloatSlider( value=2, min=0.0, max=10.0, step=0.1, description="Weight Exponent", layout={'width': '500px'}) def exponential(x, factor=2.0): return x**factor wns['years_pres'] = wns.year.max()-wns.year def gen_weights(years_pres, factor=2.0): weights = [exponential(weight, factor) for weight in years_pres] # weights = [w/np.max(weights) for w in weights] return weights def show_weight_curve(factor=2): print(factor) years = list(range(0, wns.years_pres.max())) weights = gen_weights(years, factor=factor) weights = weights / np.max(weights) f, ax1 = plt.subplots(1, 1, figsize=(5,5)) ax1.plot(years, weights) ax1.set_title('County Weights by years since detection') ax1.set_xlabel('Years since Detection (n)') ax1.set_ylabel('Weight') ax1.set_ylim(0,1.1) tex = r'$n^{' + str(factor) + '}$' ax1.text(1, 0.8, tex, fontsize=20, va='bottom') plt.tight_layout() _ = interact(show_weight_curve, factor=weight_f) ###Output _____no_output_____ ###Markdown Once the weight function is finished used the sliders below to see the effect on our KDE ###Code %matplotlib inline _ = interact(interactive_plot, factor=f, iso=i, year=y, add_counties=c, add_centroids=c2, add_surface=c3, weight_by_county_area=c4, weight_by_years=c5) i.continuous_update = False from shapely.geometry import mapping, Polygon, MultiPolygon, LineString, Point import fiona from fiona.crs import from_epsg def save_isos_to_shp(outfname, cset, epsg=26912, all_iso=None, territory=1, all_data_stats=1): # Define a polygon feature geometry with one attribute schema = { 'geometry': 'MultiPolygon', 'properties': {'Territory':'str', 'level': 'float'}, } # Write a new Shapefile with fiona.open(outfname, 'w', 'ESRI Shapefile', schema, crs=wns.crs) as c: ## If there are multiple geometries, put the "for" loop here for i in reversed(range(len(cset.collections))): level = cset.levels[i] polys = [] sum_area = 0 for p in cset.collections[i].get_paths(): v = p.vertices x = v[:,0] y = v[:,1] this_poly = Polygon([(i[0], i[1]) for i in zip(x,y)]) sum_area += this_poly.area polys.append(this_poly) print('iso'+ str(int(level*100))) multi = MultiPolygon(polys) props = {'Territory': territory, 'level': level*100} c.write({ 'geometry': mapping(multi), 'properties': props, }) factor = f.value iso=i.value year=y.value add_counties=False add_centroids=True add_surface=True weight_by_county_area=True weight_by_year=True if year in cache: X, Y, data = cache[year]['data'] else: cache[year] = {} cache[year]['kernels'] = {} data = wns[wns.year <= year] pad=500000 X, Y = np.mgrid[x_bounds[0]-pad:x_bounds[1]+pad:100j, y_bounds[0]-pad:y_bounds[1]+pad:100j] cache[year]['data'] = (X, Y, data) if weight_by_year: weights = gen_weights(data.years_pres, factor=4)#weight_f.value else: weights = np.ones(data.area_weight.shape) weights /= np.sum(weights) if weight_by_county_area: county_weights = data.area_weight county_weights /= np.sum(county_weights) weights += county_weights weights /= np.sum(weights) weights = list(weights) Z = create_kernel(data.x, data.y, X, Y, 
factor, weights=weights) cache[year]['kernels'][factor] = Z cset = create_kernel_contours(X, Y, Z, levels=[iso]) save_isos_to_shp(r"c:\temp\test3.shp", cset) cset weight_z = widgets.FloatSlider( value=2, min=0.0, max=10.0, step=0.1, description="Weight Exponent", layout={'width': '500px'}) weight_z ###Output _____no_output_____
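###Markdown The vendored `gaussian_kde` class above predates SciPy's own weighted-KDE support; since SciPy 1.2, `scipy.stats.gaussian_kde` accepts a `weights` argument directly. The sketch below shows the same weighted-KDE-plus-cumulative-isopleth idea on synthetic points (random stand-ins for the WNS county centroids, not the real shapefile data); the percentile reclassification is done per grid cell rather than per unique value, which is effectively equivalent for a continuous density surface. ###Code
import numpy as np
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
x = rng.normal(0, 1, 200)                       # synthetic centroid x coordinates
y = rng.normal(0, 1, 200)                       # synthetic centroid y coordinates
w = rng.uniform(0.1, 1.0, 200)                  # e.g. county-area or years-present weights
w /= w.sum()

X, Y = np.mgrid[-4:4:100j, -4:4:100j]
positions = np.vstack([X.ravel(), Y.ravel()])

kde = gaussian_kde(np.vstack([x, y]), weights=w)
kde.set_bandwidth(1.2)                          # same idea as the "BW Factor" slider
Z = np.reshape(kde(positions).T, X.shape)

def reclass_as_pcnt(Z):
    """Turn raw density into a cumulative-density fraction (the isopleth scale used above)."""
    order = np.argsort(Z, axis=None)[::-1]      # cells sorted from densest to sparsest
    cum = np.cumsum(Z.ravel()[order]) / Z.sum()
    out = np.empty(Z.size)
    out[order] = cum
    return out.reshape(Z.shape)

Zp = reclass_as_pcnt(Z)
print("grid cells inside the 75% isopleth:", int((Zp <= 0.75).sum()))
###Output _____no_output_____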
Custom Models, Layers, and Loss Functions with TensorFlow/Week 1 - Functional APIs/C1_W1_Lab_2_multi-output.ipynb
###Markdown Ungraded Lab: Build a Multi-output ModelIn this lab, we'll show how you can build models with more than one output. The dataset we will be working on is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Energy+efficiency). It is an Energy Efficiency dataset which uses the bulding features (e.g. wall area, roof area) as inputs and has two outputs: Cooling Load and Heating Load. Let's see how we can build a model to train on this data. Imports ###Code try: # %tensorflow_version only exists in Colab. %tensorflow_version 2.x except Exception: pass import tensorflow as tf import numpy as np import matplotlib.pyplot as plt import pandas as pd from tensorflow.keras.models import Model from tensorflow.keras.layers import Dense, Input from sklearn.model_selection import train_test_split ###Output _____no_output_____ ###Markdown UtilitiesWe define a few utilities for data conversion and visualization to make our code more neat. ###Code def format_output(data): y1 = data.pop('Y1') y1 = np.array(y1) y2 = data.pop('Y2') y2 = np.array(y2) return y1, y2 def norm(x): return (x - train_stats['mean']) / train_stats['std'] def plot_diff(y_true, y_pred, title=''): plt.scatter(y_true, y_pred) plt.title(title) plt.xlabel('True Values') plt.ylabel('Predictions') plt.axis('equal') plt.axis('square') plt.xlim(plt.xlim()) plt.ylim(plt.ylim()) plt.plot([-100, 100], [-100, 100]) plt.show() def plot_metrics(metric_name, title, ylim=5): plt.title(title) plt.ylim(0, ylim) plt.plot(history.history[metric_name], color='blue', label=metric_name) plt.plot(history.history['val_' + metric_name], color='green', label='val_' + metric_name) plt.show() ###Output _____no_output_____ ###Markdown Prepare the DataWe download the dataset and format it for training. ###Code # Get the data from UCI dataset URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00242/ENB2012_data.xlsx' # Use pandas excel reader df = pd.read_excel(URL) df = df.sample(frac=1).reset_index(drop=True) # Split the data into train and test with 80 train / 20 test train, test = train_test_split(df, test_size=0.2) train_stats = train.describe() # Get Y1 and Y2 as the 2 outputs and format them as np arrays train_stats.pop('Y1') train_stats.pop('Y2') train_stats = train_stats.transpose() train_Y = format_output(train) test_Y = format_output(test) # Normalize the training and test data norm_train_X = norm(train) norm_test_X = norm(test) ###Output _____no_output_____ ###Markdown Build the ModelHere is how we'll build the model using the functional syntax. Notice that we can specify a list of outputs (i.e. `[y1_output, y2_output]`) when we instantiate the `Model()` class. ###Code # Define model layers. input_layer = Input(shape=(len(train .columns),)) first_dense = Dense(units='128', activation='relu')(input_layer) second_dense = Dense(units='128', activation='relu')(first_dense) # Y1 output will be fed directly from the second dense y1_output = Dense(units='1', name='y1_output')(second_dense) third_dense = Dense(units='64', activation='relu')(second_dense) # Y2 output will come via the third dense y2_output = Dense(units='1', name='y2_output')(third_dense) # Define the model with the input layer and a list of output layers model = Model(inputs=input_layer, outputs=[y1_output, y2_output]) print(model.summary()) ###Output _____no_output_____ ###Markdown Configure parametersWe specify the optimizer as well as the loss and metrics for each output. 
###Code # Specify the optimizer, and compile the model with loss functions for both outputs optimizer = tf.keras.optimizers.SGD(lr=0.001) model.compile(optimizer=optimizer, loss={'y1_output': 'mse', 'y2_output': 'mse'}, metrics={'y1_output': tf.keras.metrics.RootMeanSquaredError(), 'y2_output': tf.keras.metrics.RootMeanSquaredError()}) ###Output _____no_output_____ ###Markdown Train the Model ###Code # Train the model for 500 epochs history = model.fit(norm_train_X, train_Y, epochs=500, batch_size=10, validation_data=(norm_test_X, test_Y)) ###Output _____no_output_____ ###Markdown Evaluate the Model and Plot Metrics ###Code # Test the model and print the loss and RMSE for both outputs loss, Y1_loss, Y2_loss, Y1_rmse, Y2_rmse = model.evaluate(x=norm_test_X, y=test_Y) print("Loss = {}, Y1_loss = {}, Y1_rmse = {}, Y2_loss = {}, Y2_rmse = {}".format(loss, Y1_loss, Y1_rmse, Y2_loss, Y2_rmse)) # Plot the true-vs-predicted values and the RMSE curves Y_pred = model.predict(norm_test_X) plot_diff(test_Y[0], Y_pred[0], title='Y1') plot_diff(test_Y[1], Y_pred[1], title='Y2') plot_metrics(metric_name='y1_output_root_mean_squared_error', title='Y1 RMSE', ylim=6) plot_metrics(metric_name='y2_output_root_mean_squared_error', title='Y2 RMSE', ylim=7) ###Output _____no_output_____
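###Markdown As a small extension (not part of the original lab), the same functional-API pattern also accepts per-output `loss_weights`, which is useful when the two targets sit on different scales. A self-contained sketch on random data (the array shapes are placeholders, not the energy dataset): ###Code
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model

# Random stand-in data: 8 features, two regression targets
X = np.random.rand(256, 8).astype('float32')
y1 = X.sum(axis=1, keepdims=True)
y2 = X.mean(axis=1, keepdims=True)

inputs = Input(shape=(8,))
h = Dense(32, activation='relu')(inputs)
y1_out = Dense(1, name='y1_output')(h)
y2_out = Dense(1, name='y2_output')(Dense(16, activation='relu')(h))

model = Model(inputs=inputs, outputs=[y1_out, y2_out])
model.compile(optimizer='adam',
              loss={'y1_output': 'mse', 'y2_output': 'mse'},
              loss_weights={'y1_output': 1.0, 'y2_output': 0.5})   # weight the second head less
model.fit(X, {'y1_output': y1, 'y2_output': y2}, epochs=2, batch_size=32, verbose=0)
print(model.evaluate(X, {'y1_output': y1, 'y2_output': y2}, verbose=0))
###Output _____no_output_____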
homework/nbs/lesson5.ipynb
###Markdown Create simple model ###Code model = Sequential([ Embedding(vocab_size, 32, input_length=seq_len), Flatten(), Dense(100, activation='relu'), Dropout(0.5), Dense(1, activation='sigmoid') ]) model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy']) model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64) model.summary() ###Output _____no_output_____ ###Markdown Single conv layer with max pooling ###Code ?Dense conv1 = Sequential([ Embedding(vocab_size, 32, input_length=seq_len), Convolution1D(64, 3, activation='relu'), MaxPooling1D(), Flatten(), Dense(100, activation='relu'), Dropout(0.5), Dense(1, activation='sigmoid') ]) conv1.summary() conv1.compile(loss="binary_crossentropy", optimizer=Adam(), metrics=["accuracy"]) conv1.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64) ###Output _____no_output_____ ###Markdown Pre-trained vectors ###Code dataset_path = '../data/glove/' def load_vectors(name): return ( load_array(dataset_path + name + '.dat'), pickle.load(open(dataset_path + name + "_words.pkl")), pickle.load(open(dataset_path + name + "_idx.pkl")), ) vecs, words, wordidx = load_vectors('6B.100d') def create_emb(): n_fact = vecs.shape[1] emb = np.zeros((vocab_size, n_fact)) for i in range(1,len(emb)): word = idx2word[i] if word and re.match(r"^[a-zA-Z0-9\-]*$", word): src_idx = wordidx[word] emb[i] = vecs[src_idx] else: # If we can't find the word in glove, randomly initialize emb[i] = normal(scale=0.6, size=(n_fact,)) # This is our "rare word" id - we want to randomly initialize emb[-1] = normal(scale=0.6, size=(n_fact,)) emb/=3 return emb emb = create_emb() conv2 = Sequential([ Embedding(vocab_size, 100, input_length=seq_len, weights=[emb], trainable=False), Convolution1D(64, 3, activation='relu'), ZeroPadding1D(1), MaxPooling1D(), Flatten(), Dense(100, activation='relu'), Dropout(0.6), Dense(1, activation='sigmoid') ]) conv1.summary() conv2.compile(loss="binary_crossentropy", optimizer=Adam(lr=0.0005), metrics=["accuracy"]) conv2.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64) conv2.optimizer.lr = 0.00001 conv3 = Sequential([ Embedding(vocab_size, 100, input_length=seq_len, dropout=0.2, weights=[emb], trainable=False), Dropout(0.25), Convolution1D(64, 5, border_mode='same', activation='relu'), Dropout(0.25), MaxPooling1D(), Flatten(), Dense(100, activation='relu'), Dropout(0.7), Dense(1, activation='sigmoid')]) conv3.compile(loss="binary_crossentropy", optimizer=Adam(lr=0.0005), metrics=["accuracy"]) conv3.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64) conv3.optimizer.lr=1e-4 conv3.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64) ###Output _____no_output_____ ###Markdown Multi-size CNN ###Code from keras.layers import Merge graph_in = Input ((vocab_size, 100)) convs = [ ] for fsz in range (3, 6): x = Convolution1D(64, fsz, border_mode='same', activation="relu")(graph_in) x = MaxPooling1D()(x) x = Flatten()(x) convs.append(x) out = Merge(mode="concat")(convs) graph = Model(graph_in, out) emb = create_emb() model = Sequential ([ Embedding(vocab_size, 100, input_length=seq_len, dropout=0.2, weights=[emb]), Dropout (0.2), graph, Dropout (0.5), Dense (100, activation="relu"), Dropout (0.7), Dense (1, activation='sigmoid') ]) model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy']) model.fit(trn, labels_train, validation_data=(test, 
labels_test), nb_epoch=1, batch_size=64) model.optimizer.lr = 1E-4 ###Output _____no_output_____ ###Markdown LSTM ###Code model = Sequential([ Embedding(vocab_size, 32, input_length=seq_len, mask_zero=True, W_regularizer=l2(1e-6), dropout=0.2), LSTM(100, consume_less='gpu'), Dense(1, activation='sigmoid')]) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary() model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=5, batch_size=64) ###Output _____no_output_____
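###Markdown The models above use the Keras 1 API (`Merge`, `consume_less`, the `dropout` argument on `Embedding`), which no longer exists in current Keras. Below is a rough tf.keras equivalent of the LSTM model; `vocab_size` and `seq_len` are placeholders standing in for the notebook's values, and `SpatialDropout1D` is the usual stand-in for the old embedding dropout. ###Code
import tensorflow as tf
from tensorflow.keras import layers, regularizers

vocab_size, seq_len = 5000, 500          # placeholders; use the notebook's actual values

model = tf.keras.Sequential([
    layers.Embedding(vocab_size, 32, input_length=seq_len, mask_zero=True,
                     embeddings_regularizer=regularizers.l2(1e-6)),
    layers.SpatialDropout1D(0.2),        # replaces Embedding(dropout=0.2)
    layers.LSTM(100),                    # `consume_less` is gone; tf.keras picks the implementation itself
    layers.Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
###Output _____no_output_____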
bcu/notebooks/scraping/Welcome_Week_Doc2Cal.ipynb
###Markdown Convert Welcome Week Schedule (.docx) into a calender file importable by google calendar (and others) ###Code import re from bs4 import BeautifulSoup import requests html_path = "/tmp/timetable.html" ics_path = "/tmp/timetable.ics" with open(html_path, 'r') as file: data = file.read() # Parse the html content soup = BeautifulSoup(data, "lxml") clean_time_re = re.compile("[^0-9]") def split_time(t): t = clean_time_re.sub("", t) t = t.replace("00", ":00 ") t = t.replace("30", ":30 ") times = [] for time in t.strip().split(" "): t1 = time.split(":") if int(t1[0]) < 7: times.append("%d:%s" % (int(t1[0])+12, t1[1])) else: times.append(time) return times def clean_text(t): t = t.replace("\n", " ") t = t.replace(" ", " ") return t events = [] for rowIndex, row in enumerate(soup.find("table").find_all("tr")): r = [] for colIndex, td in enumerate(row.find_all("td")): text = clean_text(td.text).strip() href = "" for link in td.find_all("a"): href = link.get("href") r.append(text) if rowIndex and colIndex and len(text): title = text.split("\n")[0] times = split_time(r[0]) event = {"date":heading[colIndex], "from": times[0].strip(), "to": times[1].strip(), "title": title, "text": text, "href": href} events.append(event) if not rowIndex: heading = r print("read %d events" % len(events)) print(events[0]) from ics import Calendar, Event from datetime import datetime from dateutil import tz date_format = '%Y-%m-%d %H:%M:%S' # Format the ics module expects the dates to be in def make_time(event, key): date_toks = event["date"].split("/") t = "20%s-%s-%s %s:00" % (date_toks[2], date_toks[1], date_toks[0], event[key]) dt = datetime.strptime(t,date_format).replace(tzinfo=tz.gettz('BST')) dt = dt.astimezone(tz.tzutc()) return dt.strftime(date_format) c = Calendar() for event in events: e = Event() e.name = event["text"] e.begin = make_time(event, "from") e.end = make_time(event, "to") e.url = event["href"] c.events.add(e) with open(ics_path, 'w') as my_file: my_file.writelines(c) with open(ics_path) as myfile: [print(next(myfile).strip()) for x in range(8)] ###Output BEGIN:VCALENDAR VERSION:2.0 PRODID:ics.py - http://git.io/lLljaA BEGIN:VEVENT DTEND:20200914T130000Z DTSTART:20200914T120000Z SUMMARY:Meet your Student Success Advisers (SSAs) SSAs are Arts\, Design and Media graduates who can provide lots of insider tips and support.   Link to the Activity on Teams UID:[email protected]
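###Markdown A quick, self-contained check of the date handling used above. The sample row below uses made-up values in the same `dd/mm/yy` and `HH:MM` shapes the parsed table produces (chosen to match the ICS output shown), and the IANA zone `Europe/London` is used instead of the ambiguous `'BST'` name — an assumption, but it gives the same UTC offset in September. ###Code
from datetime import datetime
from dateutil import tz

date_format = '%Y-%m-%d %H:%M:%S'
sample_event = {"date": "14/09/20", "from": "13:00"}     # hypothetical row

def make_time_checked(event, key):
    day, month, year = event["date"].split("/")
    local = datetime.strptime("20%s-%s-%s %s:00" % (year, month, day, event[key]), date_format)
    local = local.replace(tzinfo=tz.gettz("Europe/London"))   # IANA zone instead of 'BST'
    return local.astimezone(tz.tzutc()).strftime(date_format)

print(make_time_checked(sample_event, "from"))   # expect 2020-09-14 12:00:00 (UTC)
###Output _____no_output_____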
gmm_ml_synthetic_speech_detection.ipynb
###Markdown Spoofed Speech Detection via Maximum Likelihood Estimation of Gaussian Mixture ModelsThe goal of synthetic speech detection is to determine whether a speech segment $S$ is natural or synthetic/converted speeach.This notebook implements a Gaussian mixture model maximum likelihood (GMM-ML) classifier for synthetic (spoofed) speech detection. This approach uses regular mel frequency cepstral coefficients (MFCC) features and gives the best performance on the [ASVspoof 2015 dataset](https://www.idiap.ch/dataset/avspoof) among the standard classifiers (GMM-SV, GMM-UBM, ...). For more background information see: *Hanilçi, Cemal, Tomi Kinnunen, Md Sahidullah, and Aleksandr Sizov. "Classifiers for synthetic speech detection: a comparison." In INTERSPEECH 2015*. The scripts use the Python package [Bob.Bio.SPEAR 2.04](https://pypi.python.org/pypi/bob.bio.spear/2.0.4) for speaker recogntion.This work is part of the ["DDoS Resilient Emergency Dispatch Center"](https://www.dhs.gov/science-and-technology/news/2015/09/04/dhs-st-awards-university-houston-26m-cyber-security-research) project at the University of Houston, funded by the Department of Homeland Security (DHS).April 19, 2015Lorenzo Rossi(lorenzo **[dot]** rossi **[at]** gmail **[dot]** com) ###Code import os import time import numpy as np import pandas as pd from bob.bio.spear import preprocessor, extractor from bob.bio.gmm import algorithm from bob.io.base import HDF5File from bob.learn import em from sklearn.metrics import classification_report, roc_curve, roc_auc_score WAV_FOLDER = 'Wav/' #'ASV2015dataset/wav/' # Path to folder containing speakers .wav subfolders LABEL_FOLDER = 'CM_protocol/' #'ASV2015dataset/CM_protocol/' # Path to ground truth csv files EXT = '.wav' %matplotlib inline ###Output _____no_output_____ ###Markdown Loading the Ground TruthLoad the dataframes (tables) with the labels for the training, development and evaluation (hold out) sets. Each subfolder corresponds to a different speaker. For example, T1 and D4 indicate the subfolders associated to the utterances and spoofed segments of speakers T1 and D4, respectively in training and development sets. Note that number of evaluation samples >> number of development samples >> testing samples.You can either select the speakers in each set one by one, *e.g.*:```train_subfls = ['T1', 'T2']``` will only load segments from speakers T1 and T2 for training,or use all the available speakers in a certain subset by leaving the list empty, *e.g.*:```devel_subfls = [] ```will load all the available Dx speaker segments for the development stage. If you are running this notebook for the first time, you may want to start only with 2 or so speakers per set for sake of quick testing. All the scripts may take several hours to run on the full size datsets. 
###Code train_subfls = ['T1']#, 'T2', 'T3', 'T4', 'T5', 'T6', 'T7', 'T8', 'T9', 'T13'] #T13 used instead of T10 for gender balance devel_subfls = ['D1']#, 'D2', 'D3', 'D4', 'D5', 'D6', 'D7', 'D8', 'D9', 'D10'] evalu_subfls = ['E1']#, 'E2', 'E3', 'E4', 'E5', 'E6','E7', 'E8', 'E9', 'E10'] train = pd.read_csv(LABEL_FOLDER + 'cm_train.trn', sep=' ', header=None, names=['folder','file','method','source']) if len(train_subfls): train = train[train.folder.isin(train_subfls)] train.sort_values(['folder', 'file'], inplace=True) devel = pd.read_csv(LABEL_FOLDER + 'cm_develop.ndx', sep=' ', header=None, names=['folder','file','method','source']) if len(devel_subfls): devel = devel[devel.folder.isin(devel_subfls)] devel.sort_values(['folder', 'file'], inplace=True) evalu = pd.read_csv(LABEL_FOLDER +'cm_evaluation.ndx', sep=' ', header=None, names=['folder','file','method','source']) if len(evalu_subfls): evalu = evalu[evalu.folder.isin(evalu_subfls)] evalu.sort_values(['folder', 'file'], inplace=True) label_2_class = {'human':1, 'spoof':0} print('training samples:',len(train)) print('development samples:',len(devel)) print('evaluation samples:',len(evalu)) ###Output training samples: 655 development samples: 1525 evaluation samples: 4185 ###Markdown Speech Preprocessing and MFCC ExtractionSilence removal and MFCC feature extraction for training segments. More details about the bob.bio.spear involved libraries at:https://www.idiap.ch/software/bob/docs/latest/bioidiap/bob.bio.spear/master/implemented.htmlYou can also skip this stage and load a set of feaures (see **Loading features** cell). ###Code # Parameters n_ceps = 60 # number of ceptral coefficients (implicit in extractor) silence_removal_ratio = .1 subfolders = train_subfls ground_truth = train # initialize feature matrix features = [] y = np.zeros((len(ground_truth),)) print("Extracting features for training stage.") vad = preprocessor.Energy_Thr(ratio_threshold=silence_removal_ratio) cepstrum = extractor.Cepstral() k = 0 start_time = time.clock() for folder in subfolders[0:n_subfls]: print(folder, end=", ") folder = "".join(('Wav/',folder,'/')) f_list = os.listdir(folder) for f_name in f_list: # ground truth try: label = ground_truth[ground_truth.file==f_name[:-len(EXT)]].source.values[0] except IndexError: continue y[k] = label_2_class[label] # silence removal x = vad.read_original_data(folder+f_name) vad_data = vad(x) if not vad_data[2].max(): vad = preprocessor.Energy_Thr(ratio_threshold=silence_removal_ratio*.8) vad_data = vad(x) vad = preprocessor.Energy_Thr(ratio_threshold=silence_removal_ratio) # MFCC extraction mfcc = cepstrum(vad_data) features.append(mfcc) k += 1 Xf = np.array(features) print(k,"files processed in",(time.clock()-start_time)/60,"minutes.") ###Output Extracting features for training stage. T1, 655 files processed in 1.8766250499999992 minutes. ###Markdown Saving features ###Code np.save('X.npy',Xf) np.save('y.npy',y) print('Feature and label matrices saved to disk') ###Output _____no_output_____ ###Markdown Loading features ###Code # Load already extracter features to skip the preprocessing-extraction stage Xf = np.load('train_features_10.npy') y = np.load('y_10.npy') ###Output _____no_output_____ ###Markdown GMM - ML Classification GMM TrainingTrain the GMMs for natural and synthetic speach. For documentation on bob.bio k-means and GMM machines see:https://pythonhosted.org/bob.learn.em/guide.htmlYou can also skip the training stage and load an already trained GMM model (see cell **Loading GMM Model**). 
###Code # Parameters of the GMM machines n_gaussians = 128 # number of Gaussians max_iterats = 25 # maximum number of iterations ###Output _____no_output_____ ###Markdown GMM for natural speech ###Code # Initialize and train k-means machine: the means will initialize EM algorithm for GMM machine start_time = time.clock() kmeans_nat = em.KMeansMachine(n_gaussians,n_ceps) kmeansTrainer = em.KMeansTrainer() em.train(kmeansTrainer, kmeans_nat, np.vstack(Xf[y==1]), max_iterations = max_iterats, convergence_threshold = 1e-5) #kmeans_nat.means # initialize and train GMM machine gmm_nat = em.GMMMachine(n_gaussians,n_ceps) trainer = em.ML_GMMTrainer(True, True, True) gmm_nat.means = kmeans_nat.means em.train(trainer, gmm_nat, np.vstack(Xf[y==1]), max_iterations = max_iterats, convergence_threshold = 1e-5) #gmm_nat.save(HDF5File('gmm_nat.hdf5', 'w')) print("Done in:", (time.clock() - start_time)/60, "minutes") print(gmm_nat) ###Output Done in: 1.7726312666666666 minutes <bob.learn.em.GMMMachine object at 0x7fc500be12d0> ###Markdown GMM for synthetic speech ###Code # initialize and train k-means machine: the means will initialize EM algorithm for GMM machine start_time = time.clock() kmeans_synt = em.KMeansMachine(n_gaussians,n_ceps) kmeansTrainer = em.KMeansTrainer() em.train(kmeansTrainer, kmeans_synt, np.vstack(Xf[y==0]), max_iterations = max_iterats, convergence_threshold = 1e-5) # initialize and train GMM machine gmm_synt = em.GMMMachine(n_gaussians,n_ceps) trainer = em.ML_GMMTrainer(True, True, True) gmm_synt.means = kmeans_synt.means em.train(trainer, gmm_synt, np.vstack(Xf[y==0]), max_iterations = max_iterats, convergence_threshold = 1e-5) print("Done in:", (time.clock() - start_time)/60, "minutes") #gmm_synt.save(HDF5File('gmm_synt.hdf5', 'w')) print(gmm_synt) ###Output Done in: 6.424915316666667 minutes <bob.learn.em.GMMMachine object at 0x7fc500be1330> ###Markdown Loading GMM model ###Code gmm_nat = em.GMMMachine() gmm_nat.load(HDF5File('gmm_nat.hdf5', 'r')) gmm_synt = em.GMMMachine() gmm_synt.load(HDF5File('gmm_synt.hdf5','r')) np.save('p_gmm_ml_eval_10.npy',llr_score) np.save('z_gmm_ml_eval_est_10.npy',z_gmm) ###Output _____no_output_____ ###Markdown GMM-ML ScoringExtract the features for the testing data, compute the likelihood ratio test and compute ROC AUC and estimated EER scores. 
###Code status = 'devel' # 'devel'(= test) OR 'evalu'(= hold out) start_time = time.clock() if status == 'devel': subfolders = devel_subfls ground_truth = devel elif status == 'evalu': subfolders = evalu_subfls ground_truth = evalu n_subfls = len(subfolders) # initialize score and class arrays llr_gmm_score = np.zeros(len(ground_truth),) z_gmm = np.zeros(len(ground_truth),) print(status) vad = preprocessor.Energy_Thr(ratio_threshold=.1) cepstrum = extractor.Cepstral() k = 0 thr = .5 speaker_list = ground_truth.folder.unique() for speaker_id in speaker_list: #speaker = ground_truth[ground_truth.folder==speaker_id] f_list = list(ground_truth[ground_truth.folder==speaker_id].file) folder = "".join(['Wav/',speaker_id,'/']) print(speaker_id, end=',') for f in f_list: f_name = "".join([folder,f,'.wav']) x = vad.read_original_data(f_name) # voice activity detection vad_data = vad(x) if not vad_data[2].max(): vad = preprocessor.Energy_Thr(ratio_threshold=.08) vad_data = vad(x) vad = preprocessor.Energy_Thr(ratio_threshold=.1) # MFCC extraction mfcc = cepstrum(vad_data) # Log likelihood ratio computation llr_gmm_score[k] = gmm_nat(mfcc)-gmm_synt(mfcc) z_gmm[k] = int(llr_gmm_score[k]>0) k += 1 ground_truth['z'] = ground_truth.source.map(lambda x: int(x=='human')) ground_truth['z_gmm'] = z_gmm ground_truth['score_gmm'] = llr_gmm_score print(roc_auc_score(ground_truth.z, ground_truth.z_gmm)) print(k,"files processed in",(time.clock()-start_time)/60,"minutes.") # Performance evaluation humans = z_gmm[z_dvl==0] spoofed = z_gmm[z_dvl==1] fnr = 100*(1-(humans<thr).sum()/len(humans)) fpr = 100*(1-(spoofed>=thr).sum()/len(spoofed)) print("ROC AUC score:", roc_auc_score(z_dvl,z_gmm)) print("False negative rate %:", fnr) print("False positive rate %:", fpr) print("EER %: <=", (fnr+fpr)/2) ###Output devel D1,0.950263157895 1525 files processed in 5.7873589999999995 minutes. ROC AUC score: 0.950263157895 False negative rate %: 2.94736842105 False positive rate %: 7.0 EER %: <= 4.97368421053 ###Markdown EER computationAdjust the threshold $thr$ to reduce $FNR-FPR$ for a more accurate estimate of the $EER$.The Equal Error Rate ($EER$) is the value where the false negative rate ($FNR$) equals the false positive rate ($FPR$). It's an error metric commonly used to characterize biometric systems. ###Code thr = -.115 pz = llr_gmm_score spoofed = pz[np.array(ground_truth.z)==1] humans = pz[np.array(ground_truth.z)==0] fnr = 100*(humans>thr).sum()/len(humans) fpr = 100*(spoofed<=thr).sum()/len(spoofed) print("False negative vs positive rates %:", fnr, fpr) print("FNR - FPR %:", fnr-fpr) if np.abs(fnr-fpr) <.25: print("EER =", (fnr+fpr)/2,"%") else: print("EER ~", (fnr+fpr)/2,"%") ###Output False negative vs positive rates %: 3.36842105263 3.0 FNR - FPR %: 0.368421052632 EER ~ 3.18421052632 %
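###Markdown Rather than hand-tuning `thr`, the EER can also be read off the ROC curve directly. A sketch, assuming `ground_truth` (with `z` = 1 for human, as set in the scoring cell) and `llr_gmm_score` from the cells above are still in memory: ###Code
import numpy as np
from sklearn.metrics import roc_curve

fpr, tpr, thresholds = roc_curve(ground_truth.z, llr_gmm_score)
fnr = 1 - tpr                                  # false negative rate at each candidate threshold
idx = np.nanargmin(np.abs(fnr - fpr))          # operating point where FNR and FPR cross
eer = (fnr[idx] + fpr[idx]) / 2
print("EER ~ {:.2f}% at threshold {:.3f}".format(100 * eer, thresholds[idx]))
###Output _____no_output_____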
NLPAnalytics/NLP_Code_Along_Classification of Spam and Ham (NLP).ipynb
###Markdown NLP Code AlongFor this code along we will build a spam filter! We'll use the various NLP tools we learned about as well as a new classifier, Naive Bayes.We'll use a classic dataset for this - UCI Repository SMS Spam Detection: https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection ###Code from pyspark.sql import SparkSession spark = SparkSession.builder.appName('nlp').getOrCreate() data = spark.read.csv("smsspamcollection/SMSSpamCollection",inferSchema=True,sep='\t') data.show() data = data.withColumnRenamed('_c0','class').withColumnRenamed('_c1','text') data.show() ###Output +-----+--------------------+ |class| text| +-----+--------------------+ | ham|Go until jurong p...| | ham|Ok lar... Joking ...| | spam|Free entry in 2 a...| | ham|U dun say so earl...| | ham|Nah I don't think...| | spam|FreeMsg Hey there...| | ham|Even my brother i...| | ham|As per your reque...| | spam|WINNER!! As a val...| | spam|Had your mobile 1...| | ham|I'm gonna be home...| | spam|SIX chances to wi...| | spam|URGENT! You have ...| | ham|I've been searchi...| | ham|I HAVE A DATE ON ...| | spam|XXXMobileMovieClu...| | ham|Oh k...i'm watchi...| | ham|Eh u remember how...| | ham|Fine if that’s th...| | spam|England v Macedon...| +-----+--------------------+ only showing top 20 rows ###Markdown Clean and Prepare the Data ** Create a new length feature: ** ###Code from pyspark.sql.functions import length data = data.withColumn('length',length(data['text'])) data.show() # Pretty Clear Difference data.groupby('class').mean().show() ###Output +-----+-----------------+ |class| avg(length)| +-----+-----------------+ | ham|71.45431945307645| | spam|138.6706827309237| +-----+-----------------+ ###Markdown Feature Transformations ###Code from pyspark.ml.feature import Tokenizer,StopWordsRemover, CountVectorizer,IDF,StringIndexer tokenizer = Tokenizer(inputCol="text", outputCol="token_text") stopremove = StopWordsRemover(inputCol='token_text',outputCol='stop_tokens') count_vec = CountVectorizer(inputCol='stop_tokens',outputCol='c_vec') idf = IDF(inputCol="c_vec", outputCol="tf_idf") ham_spam_to_num = StringIndexer(inputCol='class',outputCol='label') from pyspark.ml.feature import VectorAssembler from pyspark.ml.linalg import Vector clean_up = VectorAssembler(inputCols=['tf_idf','length'],outputCol='features') ###Output _____no_output_____ ###Markdown The ModelWe'll use Naive Bayes, but feel free to play around with this choice! ###Code from pyspark.ml.classification import NaiveBayes # Use defaults nb = NaiveBayes() ###Output _____no_output_____ ###Markdown Pipeline ###Code from pyspark.ml import Pipeline data_prep_pipe = Pipeline(stages=[ham_spam_to_num,tokenizer,stopremove,count_vec,idf,clean_up]) cleaner = data_prep_pipe.fit(data) clean_data = cleaner.transform(data) ###Output _____no_output_____ ###Markdown Training and Evaluation! ###Code clean_data = clean_data.select(['label','features']) clean_data.show() (training,testing) = clean_data.randomSplit([0.7,0.3]) spam_predictor = nb.fit(training) data.printSchema() test_results = spam_predictor.transform(testing) test_results.show() from pyspark.ml.evaluation import MulticlassClassificationEvaluator acc_eval = MulticlassClassificationEvaluator() acc = acc_eval.evaluate(test_results) print("Accuracy of model at predicting spam was: {}".format(acc)) ###Output Accuracy of model at predicting spam was: 0.9248020435242028
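###Markdown The lab invites playing around with the classifier choice, so here is one hedged alternative: swapping Naive Bayes for logistic regression on the same cleaned features. It assumes `training` and `testing` from the split above are still in scope. ###Code
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import MulticlassClassificationEvaluator

# Fit a logistic regression model on the same TF-IDF + length features
lr = LogisticRegression(featuresCol='features', labelCol='label')
lr_model = lr.fit(training)
lr_results = lr_model.transform(testing)

# Ask explicitly for accuracy (the evaluator's default metric is f1)
acc_eval = MulticlassClassificationEvaluator(metricName='accuracy')
print("Accuracy of logistic regression at predicting spam was: {}".format(acc_eval.evaluate(lr_results)))
###Output _____no_output_____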
backtrader_dma/Part2-Collect-Data.ipynb
###Markdown Disclaimer Collect dataTo run the sample strategy we need 1-day bars for a single symbol. Any symbol will work, we will use AAPL. Create a database for free sample data (if it doesn't already exist): ###Code from quantrocket.history import create_usstock_db create_usstock_db("usstock-free-1d", bar_size="1 day", free=True) ###Output _____no_output_____ ###Markdown Then collect the data: ###Code from quantrocket.history import collect_history collect_history("usstock-free-1d") ###Output _____no_output_____ ###Markdown Use flightlog to monitor the progress:```quantrocket.history: INFO [usstock-free-1d] Collecting FREE history from 2007 to presentquantrocket.history: INFO [usstock-free-1d] Collecting updated FREE securities listingsquantrocket.history: INFO [usstock-free-1d] Collecting additional FREE history from 2020-04 to presentquantrocket.history: INFO [usstock-free-1d] Collected 160 monthly files in quantrocket.v2.history.usstock-free-1d.sqlite``` Next we look up the Sid for AAPL (using the CLI in this example): ###Code !quantrocket master get --symbol 'AAPL' --json | json2yml ###Output --- - Sid: "FIBBG000B9XRY4" Symbol: "AAPL" Exchange: "XNAS" Country: "US" Currency: "USD" SecType: "STK" Etf: 0 Timezone: "America/New_York" Name: "APPLE INC" PriceMagnifier: 1 Multiplier: 1 Delisted: 0 DateDelisted: null LastTradeDate: null RolloverDate: null
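###Markdown Once the collection has finished, we can pull the bars into pandas to sanity-check them. The sketch below assumes the `usstock-free-1d` database created above and the AAPL Sid returned by the master query; `get_prices` is the usual QuantRocket research helper, but check the docs for your installed version if the call or the returned index layout differs. ###Code
from quantrocket import get_prices

# Query daily closes for AAPL from the database we just filled
prices = get_prices("usstock-free-1d", sids="FIBBG000B9XRY4",
                    start_date="2020-01-01", fields=["Close"])
closes = prices.loc["Close"]     # rows are dates, columns are sids
print(closes.tail())
###Output _____no_output_____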
notebook/05_Feature_Extraction_with_Pretrained_Models.ipynb
###Markdown Feature Extraction with Pre-trained ModelsIn the earlier notebooks, we demonstrated how to build **autoencoder** as a feature extraction tool. The results of convolutional AE seemed promising for its reconstructions preserve most visible features. However, training a new model for Another way to do feature extraction is to leverage **pre-trained models** with **fine-tuning**. The [tensorflow model hub](https://tfhub.dev/) hosts many pre-trained models. For example, the [Feature vectors of ResNet-50 model pre-trained on BigEarthNet remote sensing dataset](https://tfhub.dev/google/remote_sensing/bigearthnet-resnet50/1) is a popular model for remote-sensing datasets.In this notebook, we are going to illustrate this process. Extra Data Preprocessing: ResizeThe data pre-processing functions developed earlier yields a (858,858) image. Here we add a *resize* step to change the resolutions for further processing. ([`opencv.resize`](https://pythonexamples.org/python-opencv-cv2-resize-image/) is used.) ###Code import matplotlib.pyplot as plt import numpy as np import pandas as pd import tensorflow as tf import os, argparse, logging import cv2 # Utility functions def list_noaagridsatb1_files(dir, suffix='.v02r01.nc', to_remove=['GRIDSAT-B1.','.v02r01.nc']): ''' To scan through the sapecified dir and get the corresponding file with suffix. ''' import os import pandas as pd xfiles = [] for root, dirs, files in os.walk(dir, followlinks=True): # Loop through the directory for fn in files: if fn.endswith(suffix): # Filter files with suffix timestamp = fn for s in to_remove: # Removing prefix and suffix to get time-stamp timestamp = timestamp.replace(s,'') xfiles.append({'timestamp':timestamp, 'xuri':os.path.join(root, fn)}) return(pd.DataFrame(xfiles).sort_values('timestamp').reset_index(drop=True)) # Binary reader def read_noaagridsatb1(furi, var='irwin_cdr', scale=0.01, offset=200, remove_na=True, crop_east_asia=True, rescale=512): ''' The method reads in a NOAA-GridSta-B1 image in netCDF4 format (.nc file). The brightness temperature data was stored in int16 as 'irwin_cdr', with a scal factor of 0.01 and offset of 200. The missing values is flagged as -31999. More details of the data is described in https://developers.google.com/earth-engine/datasets/catalog/NOAA_CDR_GRIDSAT-B1_V2. Since our analysis focuss on East Asia (0-60'N, 100-160'E), we used an option to crop the data to this region (index: lat:1000~1858, lon:4000~4858). The output is a 2-d numpy array of float32 with shape (858, 858). ''' import numpy as np import netCDF4 as nc import cv2 # Read in data data = nc.Dataset(furi) cdr = np.array(data.variables['irwin_cdr'])*scale+offset # Remove missing value if remove_na: cdr[cdr<0] = offset # Crop domain to East-Asia (0-60'N, 100-160'E) if crop_east_asia: tmp = cdr[0, 1000:1858, 4000:4858] #output = cv2.resize(tmp, (rescale,rescale), interpolation=cv2.INTER_CUBIC) return(tmp) else: return(cdr[0,:,:]) def read_multiple_noaagridsatb1(flist): ''' This method reads in a list of NOAA-GridSat-B1 images and returns a numpy array. ''' import numpy as np data = [] for f in flist: tmp = read_noaagridsatb1(f) #ny, nx = tmp.shape if tmp is not None: #tmp = tmp.reshape((ny, nx, 1)) data.append(tmp) return(np.array(data, dtype=np.float32)) def data_generator_ae(flist, batch_size, add_dim=False): ''' Data generator for batched processing. 
''' nSample = len(flist) # This line is just to make the generator infinite, keras needs that while True: batch_start = 0 batch_end = batch_size while batch_start < nSample: limit = min(batch_end, nSample) X = read_multiple_noaagridsatb1(flist[batch_start:limit]) if add_dim: X = np.expand_dims(X, axis=3) #print(X.shape) yield (X,X) #a tuple with two numpy arrays with batch_size samples batch_start += batch_size batch_end += batch_size # End of generator # Test # Define parameters datadir = 'D:/worksapce/2020Q3_representation_learning/data/noaa' finfo = list_noaagridsatb1_files(datadir) print(finfo.head()) # Load data data = read_noaagridsatb1(finfo['xuri'].iloc[180]) print(data.shape) data2 = cv2.resize(data, (512,512), interpolation=cv2.INTER_CUBIC) # Visualize the process %matplotlib inline import matplotlib.pyplot as plt plt.imshow(data) plt.title(finfo['timestamp'].iloc[180]) plt.gray() plt.show() plt.imshow(data2) plt.title(finfo['timestamp'].iloc[180]) plt.gray() plt.show() import tensorflow_hub as hub model_url = "https://tfhub.dev/google/remote_sensing/bigearthnet-resnet50/1" model = hub.load(model_url) image = np.stack([data2,data2,data2], axis=2) images = np.expand_dims(image, axis=0) # A batch of images with shape [batch_size, height, width, 3]. print(images.shape) features = model.signatures['default'](tf.convert_to_tensor(images)) # Features with shape [batch_size, num_features]. %matplotlib inline import matplotlib.pyplot as plt for k in features.keys(): print(k + ": \t" + str(features[k].shape)) print(features['default']) plt.plot(features['default'].numpy().flatten()) # Try a different input shape image1 = np.stack([data,data,data], axis=2) images = np.expand_dims(image1, axis=0) print(images.shape) features = model.signatures['default'](tf.convert_to_tensor(images)) for k in features.keys(): print(k + ": \t" + str(features[k].shape)) print(features['default']) plt.plot(features['default'].numpy().flatten()) plt.show() ###Output (1, 858, 858, 3) block2: (1, 108, 108, 512) default: (1, 2048) after_root: (1, 215, 215, 64) block3: (1, 54, 54, 1024) block4: (1, 27, 27, 2048) pre_logits: (1, 2048) logits: (1, 43) block1: (1, 215, 215, 256) tf.Tensor([[0. 2.306268 0. ... 0. 0. 0. ]], shape=(1, 2048), dtype=float32)
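###Markdown To use the hub model as a feature extractor over many files, the single-image call above can simply be looped and the vectors stacked into a matrix for later PCA or clustering. A sketch assuming `finfo`, `model`, and `read_noaagridsatb1` from the cells above; note that the TF Hub image convention is inputs scaled to [0, 1], so normalising the brightness temperatures before stacking may be worth trying as well. ###Code
import numpy as np
import cv2
import tensorflow as tf

def extract_features(furi, rescale=512):
    bt = read_noaagridsatb1(furi)                              # (858, 858) brightness temperatures
    bt = cv2.resize(bt, (rescale, rescale), interpolation=cv2.INTER_CUBIC)
    rgb = np.stack([bt, bt, bt], axis=2)                       # replicate to 3 channels for the RGB model
    batch = np.expand_dims(rgb, axis=0).astype(np.float32)     # shape (1, rescale, rescale, 3)
    out = model.signatures['default'](tf.convert_to_tensor(batch))
    return out['default'].numpy().flatten()                    # 2048-d feature vector

feature_matrix = np.array([extract_features(f) for f in finfo['xuri'].iloc[:8]])
print(feature_matrix.shape)                                    # expected: (8, 2048)
###Output _____no_output_____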
.ipynb_checkpoints/run_eeg_notebooks-checkpoint.ipynb
###Markdown Ownself SSVEPBelow are codes to generate the SSVEP stimulus, using the function calls as from NeuroTechX/eeg-notebooks ###Code counter = 0 import numpy as np from psychopy import visual, core, event import os import datetime # Initialize the SSVEP parameters counter = counter + 1 DURATION_s = 7 # Duration in seconds FRAME_RATE_Hz = 144 # Frame rate of PC, is 144Hz for my own laptop # Initialize the SSVEP Visual parameters SSVEP_FREQ_Hz = [14.4,16,18,20.57] # SSVEP Frequency in Hz. Set to factor of 144Hz SPATIAL_FREQ = [0.01,0.01,0.01,0.01] # How thick are the vertical lines. Smaller is thick lines, big value is very thin lines PHASE = [0.5, 0.5, 0.5, 0.5] # Phase difference between the 2 image. Takes value 0 to 1 SIZE = [8, 8, 8, 8] # Size of image X = [-14, 14, -14, 14] # X position of image, value from (-20, 20) Y = [ 8, 8, -7, -7] # Y position of image, value from (-10, 10) num_class = len(SSVEP_FREQ_Hz) # Initialize run time TOTAL_RUN = 20 # Get directory to save data # Directory and Data is saved in "E:\Github\gtec_Simulink\ssvep", together with the matlab .mat file CURRENT_DIR= os.getcwd() # Use os.getcwd() for running directly on .ipynb; os.path.dirname(__file__) on .py CURRENT_CKPT_REL = str(datetime.datetime.now()).replace('-','').replace(':','').replace('.','_').replace(' ','_') CURRENT_CKPT_DIR = os.path.join(CURRENT_DIR, '..', 'gtec_Simulink', 'ssvep', 'training_ssvep_'+ CURRENT_CKPT_REL[6:8] + '_' + CURRENT_CKPT_REL[4:6] + '_' + CURRENT_CKPT_REL[0:4] + '_' + CURRENT_CKPT_REL[9:11] + '_' + CURRENT_CKPT_REL[11:13] + '_' + str(counter)) os.mkdir(CURRENT_CKPT_DIR) print('Created directory {} to store experiment results'.format(CURRENT_CKPT_DIR)) # Initialize parameters and array frame_b4_switch = np.zeros((num_class), dtype=np.int32) for i in range(num_class): frame_b4_switch[i] = FRAME_RATE_Hz*100 // ( SSVEP_FREQ_Hz[i] *100) # Need x100 so that // on integer print('Class {} with Freq {} has frame_b4_switch = {}'.format(i, SSVEP_FREQ_Hz[i], frame_b4_switch[i])) if (100*FRAME_RATE_Hz) % ( 100*SSVEP_FREQ_Hz[i]) != 0: print('\tWarning, SSVEP_FREQ_Hz {} for class {} is not a factor of the screen refresh rate {}'.format(SSVEP_FREQ_Hz, i, FRAME_RATE_Hz)) print('\tWill be rounded off to {} Hz so that frame_b4_switch is {}'.format(144/frame_b4_switch[i], frame_b4_switch[i])) total_frames = DURATION_s * FRAME_RATE_Hz track_array = np.zeros((total_frames, num_class)) for i in range(num_class): for j in range(0, total_frames, frame_b4_switch[i]): track_array[j,i] = 1 if j+frame_b4_switch[i]//2 < total_frames: track_array[j+frame_b4_switch[i]//2,i] = 1 print('track_array has dimension ({},{})'.format(len(track_array.T), len(track_array))) # print(track_array.T) use_neg = [] for i in range(num_class): use_neg.append(False) # Set up graphics mywin = visual.Window([1536, 864], monitor="testMonitor", units="deg", fullscr=True) # ref_image = [] # for i in range(num_class): # ref_image.append( visual.Circle(win=mywin, radius=10, fillColor = 'RoyalBlue', autoDraw=True, pos = (X[i], Y[i]))) pos_image = [] for i in range(num_class): pos_image.append( visual.GratingStim(win=mywin, mask="circle", size=SIZE[i], sf=SPATIAL_FREQ[i], pos = (X[i], Y[i])) ) neg_image = [] for i in range(num_class): neg_image.append( visual.GratingStim(win=mywin, mask="circle", size=SIZE[i], sf=SPATIAL_FREQ[i], phase=PHASE[i], pos = (X[i], Y[i])) ) ref_image = [] for i in range(num_class): ref_image.append( visual.Circle(win=mywin, radius=0.12, fillColor='black', lineColor='black', pos = (X[i], Y[i])) ) # Randomize 
the target class # rand_mat = np.random.choice(TOTAL_RUN, TOTAL_RUN, replace=False) rand_mat = np.arange(0,TOTAL_RUN) rand_mat = rand_mat%len(SSVEP_FREQ_Hz) symbol = np.zeros(num_class) # Present instruction and wait for space bar for run in range(TOTAL_RUN): symbol = ['.', '.', '.', '.'] # Set all symbols to ' ' symbol[rand_mat[run]] = 'X' # Selected class denoted with X text = visual.TextStim(win=mywin, text= '{}\t\t\t\t\t\t\t\t{}\n\n \ \nLook at the corner with X \ \nPress space bar to start experiment run {} / {}\ {}\t\t\t\t\t\t\t\t{}'.format(symbol[0], symbol[1], run+1, TOTAL_RUN, symbol[2], symbol[3]), color=[-1, -1, -1]) text.draw() mywin.flip() event.waitKeys(keyList="space") track_flip_time = [] # Start flicker for j in range(total_frames): for i in range(num_class): if j == 0: pos_image[i].draw() ref_image[i].draw() else: if track_array[j,i] == 0: if use_neg[i] == True: # Is using negative image, continue negative neg_image[i].draw() else: pos_image[i].draw() ref_image[i].draw() else: if use_neg[i] == True: # Is using negative image, switch to positive pos_image[i].draw() use_neg[i] = False ref_image[i].draw() else: neg_image[i].draw() use_neg[i] = True ref_image[i].draw() track_flip_time.append(mywin.flip()) # Save the rand_mat to text file print('Saving rand_mat to text file') np.savetxt(CURRENT_CKPT_DIR + '.out' , rand_mat, fmt ='%u') # Close the SSVEP Stimulus window mywin.close() import numpy as np from psychopy import visual, core, event import os import datetime # Initialize the SSVEP parameters counter = counter + 1 DURATION_s = 10 # Duration in seconds FRAME_RATE_Hz = 144 # Frame rate of PC, is 144Hz for my own laptop # Initialize the SSVEP Visual parameters SSVEP_FREQ_Hz = [14.4,16,18,20.57] # SSVEP Frequency in Hz. Set to factor of 144Hz SSVEP_FREQ_Hz = [1,1,1,1] # SSVEP Frequency in Hz. Set to factor of 144Hz SPATIAL_FREQ = [0.01,0.01,0.01,0.01] # How thick are the vertical lines. Smaller is thick lines, big value is very thin lines PHASE = [0.5, 0.5, 0.5, 0.5] # Phase difference between the 2 image. 
Takes value 0 to 1 SIZE = [8, 8, 8, 8] # Size of image X = [-14, 14, -14, 14] # X position of image, value from (-20, 20) Y = [ 8, 8, -7, -7] # Y position of image, value from (-10, 10) num_class = len(SSVEP_FREQ_Hz) # Initialize run time TOTAL_RUN = 4 # Get directory to save data # Directory and Data is saved in "E:\Github\gtec_Simulink\ssvep", together with the matlab .mat file CURRENT_DIR= os.getcwd() # Use os.getcwd() for running directly on .ipynb; os.path.dirname(__file__) on .py CURRENT_CKPT_REL = str(datetime.datetime.now()).replace('-','').replace(':','').replace('.','_').replace(' ','_') CURRENT_CKPT_DIR = os.path.join(CURRENT_DIR, '..', 'gtec_Simulink', 'ssvep', 'training_ssvep_'+ CURRENT_CKPT_REL[6:8] + '_' + CURRENT_CKPT_REL[4:6] + '_' + CURRENT_CKPT_REL[0:4] + '_' + CURRENT_CKPT_REL[9:11] + '_' + CURRENT_CKPT_REL[11:13] + '_' + str(counter)) os.mkdir(CURRENT_CKPT_DIR) print('Created directory {} to store experiment results'.format(CURRENT_CKPT_DIR)) # Initialize parameters and array frame_b4_switch = np.zeros((num_class), dtype=np.int32) for i in range(num_class): frame_b4_switch[i] = FRAME_RATE_Hz*100 // ( SSVEP_FREQ_Hz[i] *100) # Need x100 so that // on integer print('Class {} with Freq {} has frame_b4_switch = {}'.format(i, SSVEP_FREQ_Hz[i], frame_b4_switch[i])) if (100*FRAME_RATE_Hz) % ( 100*SSVEP_FREQ_Hz[i]) != 0: print('\tWarning, SSVEP_FREQ_Hz {} for class {} is not a factor of the screen refresh rate {}'.format(SSVEP_FREQ_Hz, i, FRAME_RATE_Hz)) print('\tWill be rounded off to {} Hz so that frame_b4_switch is {}'.format(144/frame_b4_switch[i], frame_b4_switch[i])) total_frames = DURATION_s * FRAME_RATE_Hz track_array = np.zeros((total_frames, num_class)) for i in range(num_class): for j in range(0, total_frames, frame_b4_switch[i]): track_array[j,i] = 1 if j+frame_b4_switch[i]//2 < total_frames: track_array[j+frame_b4_switch[i]//2,i] = 1 print('track_array has dimension ({},{})'.format(len(track_array.T), len(track_array))) # print(track_array.T) use_neg = [] for i in range(num_class): use_neg.append(False) # Set up graphics mywin = visual.Window([1536, 864], monitor="testMonitor", units="deg", fullscr=True) # ref_image = [] # for i in range(num_class): # ref_image.append( visual.Circle(win=mywin, radius=10, fillColor = 'RoyalBlue', autoDraw=True, pos = (X[i], Y[i]))) pos_image = [] for i in range(num_class): pos_image.append( visual.GratingStim(win=mywin, mask="circle", size=SIZE[i], sf=SPATIAL_FREQ[i], pos = (X[i], Y[i])) ) neg_image = [] for i in range(num_class): neg_image.append( visual.GratingStim(win=mywin, mask="circle", size=SIZE[i], sf=SPATIAL_FREQ[i], phase=PHASE[i], pos = (X[i], Y[i])) ) ref_image = [] for i in range(num_class): ref_image.append( visual.Circle(win=mywin, radius=5, fillColor='RoyalBlue', pos = (X[i], Y[i])) ) # Randomize the target class # rand_mat = np.random.choice(TOTAL_RUN, TOTAL_RUN, replace=False) rand_mat = np.arange(0,TOTAL_RUN) rand_mat = rand_mat%len(SSVEP_FREQ_Hz) symbol = np.zeros(num_class) # Present instruction and wait for space bar for run in range(TOTAL_RUN): symbol = ['.', '.', '.', '.'] # Set all symbols to ' ' symbol[rand_mat[run]] = 'X' # Selected class denoted with X text = visual.TextStim(win=mywin, text= '{}\t\t\t\t\t\t\t\t{}\n\n \ \nLook at the corner with X \ \nPress space bar to start experiment run {} / {}\ {}\t\t\t\t\t\t\t\t{}'.format(symbol[0], symbol[1], run+1, TOTAL_RUN, symbol[2], symbol[3]), color=[-1, -1, -1]) text.draw() mywin.flip() event.waitKeys(keyList="space") track_flip_time = [] # Start flicker for 
j in range(total_frames): for i in range(num_class): if j == 0: pos_image[i].setAutoDraw(True) neg_image[i].setAutoDraw(False) ref_image[i].draw() else: if track_array[j,i] == 0: ref_image[i].draw() else: if use_neg[i] == True: # Is using negative image, switch to positive pos_image[i].setAutoDraw(True) neg_image[i].setAutoDraw(False) use_neg[i] = False ref_image[i].draw() else: pos_image[i].setAutoDraw(False) neg_image[i].setAutoDraw(True) use_neg[i] = True ref_image[i].draw() track_flip_time.append(mywin.flip()) # Save the rand_mat to text file print('Saving rand_mat to text file') np.savetxt(CURRENT_CKPT_DIR + '.out' , rand_mat, fmt ='%u') # Close the SSVEP Stimulus window mywin.close() mywin.close() ###Output _____no_output_____ ###Markdown Below try to use PsychoPy to create flickering stimulus that are image. Attempt to stimulate the Face Form Area (FFA). ###Code import numpy as np from psychopy import visual, core, event import os import datetime # Initialize the SSVEP parameters counter = counter + 1 DURATION_s = 5 # Duration in seconds FRAME_RATE_Hz = 144 # Frame rate of PC, is 144Hz for my own laptop # Initialize the SSVEP Visual parameters SSVEP_FREQ_Hz = [14.4,16,18,20.57] # SSVEP Frequency in Hz. Set to factor of 144Hz SPATIAL_FREQ = [0.01,0.01,0.01,0.01] # How thick are the vertical lines. Smaller is thick lines, big value is very thin lines PHASE = [0.5, 0.5, 0.5, 0.5] # Phase difference between the 2 image. Takes value 0 to 1 SIZE = [8, 8, 8, 8] # Size of image X = [-14, 14, -14, 14] # X position of image, value from (-20, 20) Y = [ 8, 8, -7, -7] # Y position of image, value from (-10, 10) num_class = len(SSVEP_FREQ_Hz) # Initialize run time TOTAL_RUN = 4 # Get directory to save data # Directory and Data is saved in "E:\Github\gtec_Simulink\ssvep", together with the matlab .mat file CURRENT_DIR= os.getcwd() # Use os.getcwd() for running directly on .ipynb; os.path.dirname(__file__) on .py CURRENT_CKPT_REL = str(datetime.datetime.now()).replace('-','').replace(':','').replace('.','_').replace(' ','_') CURRENT_CKPT_DIR = os.path.join(CURRENT_DIR, '..', 'gtec_Simulink', 'ssvep', 'training_ssvep_'+ CURRENT_CKPT_REL[6:8] + '_' + CURRENT_CKPT_REL[4:6] + '_' + CURRENT_CKPT_REL[0:4] + '_' + CURRENT_CKPT_REL[9:11] + '_' + CURRENT_CKPT_REL[11:13] + '_' + str(counter)) os.mkdir(CURRENT_CKPT_DIR) print('Created directory {} to store experiment results'.format(CURRENT_CKPT_DIR)) # Initialize parameters and array frame_b4_switch = np.zeros((num_class), dtype=np.int32) for i in range(num_class): frame_b4_switch[i] = FRAME_RATE_Hz*100 // ( SSVEP_FREQ_Hz[i] *100) # Need x100 so that // on integer print('Class {} with Freq {} has frame_b4_switch = {}'.format(i, SSVEP_FREQ_Hz[i], frame_b4_switch[i])) if (100*FRAME_RATE_Hz) % ( 100*SSVEP_FREQ_Hz[i]) != 0: print('\tWarning, SSVEP_FREQ_Hz {} for class {} is not a factor of the screen refresh rate {}'.format(SSVEP_FREQ_Hz, i, FRAME_RATE_Hz)) print('\tWill be rounded off to {} Hz so that frame_b4_switch is {}'.format(144/frame_b4_switch[i], frame_b4_switch[i])) total_frames = DURATION_s * FRAME_RATE_Hz track_array = np.zeros((total_frames, num_class)) for i in range(num_class): for j in range(0, total_frames, frame_b4_switch[i]): track_array[j,i] = 1 if j+frame_b4_switch[i]//2 < total_frames: track_array[j+frame_b4_switch[i]//2,i] = 1 print('track_array has dimension ({},{})'.format(len(track_array.T), len(track_array))) # print(track_array.T) use_neg = [] for i in range(num_class): use_neg.append(False) # Set up graphics mywin = 
visual.Window([1536, 864], monitor="testMonitor", units="deg", fullscr=True) # ref_image = [] # for i in range(num_class): # ref_image.append( visual.Circle(win=mywin, radius=10, fillColor = 'RoyalBlue', autoDraw=True, pos = (X[i], Y[i]))) pos_image = [] pos_image.append( visual.ImageStim(win=mywin, image='images\jaychou_1.jpg' , flipVert=True, pos = (X[0], Y[0])) ) pos_image.append( visual.ImageStim(win=mywin, image='images\KDEF_1.jpg' ,flipVert=False, pos = (X[1], Y[1])) ) pos_image.append( visual.GratingStim(win=mywin, mask="circle", size=SIZE[2], sf=SPATIAL_FREQ[2], pos = (X[2], Y[2])) ) pos_image.append( visual.GratingStim(win=mywin, mask="circle", size=SIZE[3], sf=SPATIAL_FREQ[3], pos = (X[3], Y[3])) ) neg_image = [] neg_image.append( visual.ImageStim(win=mywin, image='images\jaychou_2.jpg', flipVert=True, pos = (X[0], Y[0])) ) neg_image.append( visual.ImageStim(win=mywin, image='images\KDEF_2.jpg', flipVert=False, pos = (X[1], Y[1])) ) neg_image.append( visual.GratingStim(win=mywin, mask="circle", size=SIZE[2], sf=SPATIAL_FREQ[2], phase=PHASE[2], pos = (X[2], Y[2])) ) neg_image.append( visual.GratingStim(win=mywin, mask="circle", size=SIZE[3], sf=SPATIAL_FREQ[3], phase=PHASE[3], pos = (X[3], Y[3])) ) ref_image = [] ref_image.append( visual.Circle(win=mywin, radius=0.01, fillColor='black', lineColor='black', pos = (X[0], Y[0])) ) ref_image.append( visual.Circle(win=mywin, radius=0.01, fillColor='black', lineColor='black', pos = (X[1], Y[1])) ) ref_image.append( visual.Circle(win=mywin, radius=0.12, fillColor='black', lineColor='black', pos = (X[2], Y[2])) ) ref_image.append( visual.Circle(win=mywin, radius=0.12, fillColor='black', lineColor='black', pos = (X[3], Y[3])) ) # Randomize the target class # rand_mat = np.random.choice(TOTAL_RUN, TOTAL_RUN, replace=False) rand_mat = np.arange(0,TOTAL_RUN) rand_mat = rand_mat%len(SSVEP_FREQ_Hz) symbol = np.zeros(num_class) # Present instruction and wait for space bar for run in range(TOTAL_RUN): symbol = ['.', '.', '.', '.'] # Set all symbols to ' ' symbol[rand_mat[run]] = 'X' # Selected class denoted with X text = visual.TextStim(win=mywin, text= '{}\t\t\t\t\t\t\t\t{}\n\n \ \nLook at the corner with X \ \nPress space bar to start experiment run {} / {}\ {}\t\t\t\t\t\t\t\t{}'.format(symbol[0], symbol[1], run+1, TOTAL_RUN, symbol[2], symbol[3]), color=[-1, -1, -1]) text.draw() mywin.flip() event.waitKeys(keyList="space") track_flip_time = [] # Start flicker for j in range(total_frames): for i in range(num_class): if j == 0: pos_image[i].draw() ref_image[i].draw() else: if track_array[j,i] == 0: if use_neg[i] == True: # Is using negative image, continue negative neg_image[i].draw() else: pos_image[i].draw() ref_image[i].draw() else: if use_neg[i] == True: # Is using negative image, switch to positive pos_image[i].draw() use_neg[i] = False ref_image[i].draw() else: neg_image[i].draw() use_neg[i] = True ref_image[i].draw() track_flip_time.append(mywin.flip()) # Save the rand_mat to text file print('Saving rand_mat to text file') np.savetxt(CURRENT_CKPT_DIR + '.out' , rand_mat, fmt ='%u') # Close the SSVEP Stimulus window mywin.close() mywin.close() # Original code before able to make SSVEP stimulus with SSVEP frequencies that are not factors of 2 of 144Hz import numpy as np from psychopy import visual, core, event import os import datetime # Initialize the SSVEP parameters counter = counter + 1 DURATION_s = 1 # Duration in seconds FRAME_RATE_Hz = 144 # Frame rate of PC, is 144Hz for my own laptop # Initialize the SSVEP Visual parameters # 
SSVEP_FREQ_Hz = [14.4, 8, 12, 9.6] # SSVEP Frequency in Hz. Set to factor of 144Hz SSVEP_FREQ_Hz = [18,20,14.4,12] # SSVEP Frequency in Hz. Set to factor of 144Hz SPATIAL_FREQ = [0.01,0.01,0.01,0.01] # How thick are the vertical lines. Smaller is thick lines, big value is very thin lines PHASE = [0.5, 0.5, 0.5, 0.5] # Phase difference between the 2 image. Takes value 0 to 1 SIZE = [8, 8, 8, 8] # Size of image X = [-14, 14, -14, 14] # X position of image, value from (-20, 20) Y = [ 8, 8, -7, -7] # Y position of image, value from (-10, 10) num_class = len(SSVEP_FREQ_Hz) # Initialize run time TOTAL_RUN = 4 # Get directory to save data # Directory and Data is saved in "E:\Github\gtec_Simulink\ssvep", together with the matlab .mat file CURRENT_DIR= os.getcwd() # Use os.getcwd() for running directly on .ipynb; os.path.dirname(__file__) on .py CURRENT_CKPT_REL = str(datetime.datetime.now()).replace('-','').replace(':','').replace('.','_').replace(' ','_') CURRENT_CKPT_DIR = os.path.join(CURRENT_DIR, '..', 'gtec_Simulink', 'ssvep', 'training_ssvep_'+ CURRENT_CKPT_REL[6:8] + '_' + CURRENT_CKPT_REL[4:6] + '_' + CURRENT_CKPT_REL[0:4] + '_' + CURRENT_CKPT_REL[9:11] + '_' + CURRENT_CKPT_REL[11:13] + '_' + str(counter)) os.mkdir(CURRENT_CKPT_DIR) print('Created directory {} to store experiment results'.format(CURRENT_CKPT_DIR)) # Initialize parameters and array frame_b4_switch = np.zeros((num_class), dtype=np.int32) for i in range(num_class): frame_b4_switch[i] = FRAME_RATE_Hz // ( SSVEP_FREQ_Hz[i] * 2) print('Class {} has frame_b4_switch = {}'.format(i, frame_b4_switch[i])) if FRAME_RATE_Hz % ( SSVEP_FREQ_Hz[i] * 2) != 0: print('Warning, SSVEP_FREQ_Hz {} for class {} is not a factor of the screen refresh rate {}'.format(SSVEP_FREQ_Hz, i, FRAME_RATE_Hz)) total_frames = DURATION_s * FRAME_RATE_Hz track_array = np.zeros((total_frames, num_class)) for i in range(num_class): for j in range(0, total_frames, frame_b4_switch[i]): track_array[j,i] = 1 use_neg = [] for i in range(num_class): use_neg.append(False) # Set up graphics mywin = visual.Window([1536, 864], monitor="testMonitor", units="deg", fullscr=True) # ref_image = [] # for i in range(num_class): # ref_image.append( visual.Circle(win=mywin, radius=10, fillColor = 'RoyalBlue', autoDraw=True, pos = (X[i], Y[i]))) pos_image = [] for i in range(num_class): pos_image.append( visual.GratingStim(win=mywin, mask="circle", size=SIZE[i], sf=SPATIAL_FREQ[i], pos = (X[i], Y[i])) ) neg_image = [] for i in range(num_class): neg_image.append( visual.GratingStim(win=mywin, mask="circle", size=SIZE[i], sf=SPATIAL_FREQ[i], phase=PHASE[i], pos = (X[i], Y[i])) ) # Randomize the target class # rand_mat = np.random.choice(TOTAL_RUN, TOTAL_RUN, replace=False) # Randomize target class rand_mat = np.arange(0,TOTAL_RUN) # Target class in order rand_mat = rand_mat%len(SSVEP_FREQ_Hz) symbol = np.zeros(num_class) print(rand_mat.shape) # Present instruction and wait for space bar for run in range(TOTAL_RUN): symbol = ['.', '.', '.', '.'] # Set all symbols to ' ' symbol[rand_mat[run]] = 'X' # Selected class denoted with X text = visual.TextStim(win=mywin, text= '{}\t\t\t\t\t\t\t\t{}\n\n \ \nLook at the corner with X \ \nPress space bar to start experiment run {} / {}\ {}\t\t\t\t\t\t\t\t{}'.format(symbol[0], symbol[1], run+1, TOTAL_RUN, symbol[2], symbol[3]), color=[-1, -1, -1]) text.draw() mywin.flip() event.waitKeys(keyList="space") track_flip_time = [] # Start flicker for j in range(total_frames): for i in range(num_class): if j == 0: pos_image[i].setAutoDraw(True) 
neg_image[i].setAutoDraw(False) else: if track_array[j,i] == 0: pass else: if use_neg[i] == True: # Is using negative image, switch to positive pos_image[i].setAutoDraw(True) neg_image[i].setAutoDraw(False) use_neg[i] = False else: pos_image[i].setAutoDraw(False) neg_image[i].setAutoDraw(True) use_neg[i] = True track_flip_time.append(mywin.flip()) # Save the rand_mat to text file print(rand_mat) np.savetxt(CURRENT_CKPT_DIR + '.out' , rand_mat, fmt ='%u') # Close the SSVEP Stimulus window mywin.close() mywin.close() # PsychoPy for non-flickering stimulus, just instruction to perform action resulting in artifact import numpy as np from psychopy import visual, core, event import os import datetime # Initialize the SSVEP parameters DURATION_s = 2 # Duration in seconds FRAME_RATE_Hz = 144 # Frame rate of PC, is 144Hz for my own laptop # Initialize total runs TOTAL_RUN = 50 # Initialize the SSVEP Visual parameters ACTION = ['Clench Left', 'Clench Right', 'Double Clench', 'Long Clench', 'Dont Move'] SSVEP_FREQ_Hz = [10,11,12,13,14] # SSVEP Frequency in Hz. Set to factor of 144Hz SPATIAL_FREQ = [0.2, 0.2, 0.2, 0.2,0.2] # How thick are the vertical lines. Smaller is thick lines, big value is very thin lines PHASE = [0.5, 0.5, 0.5, 0.5,0.5] # Phase difference between the 2 image. Takes value 0 to 1 SIZE = [8, 8, 8, 8,8] # Size of image X = [-15, 15, -15, 15,15] # X position of image, value from (-20, 20) Y = [ 8, 8, -10, -10,10] # Y position of image, value from (-10, 10) num_class = len(SSVEP_FREQ_Hz) # Get directory to save data # Directory and Data is saved in "E:\Github\gtec_Simulink\ssvep", together with the matlab .mat file CURRENT_DIR= os.getcwd() # Use os.getcwd() for running directly on .ipynb; os.path.dirname(__file__) on .py CURRENT_CKPT_REL = str(datetime.datetime.now()).replace('-','').replace(':','').replace('.','_').replace(' ','_') CURRENT_CKPT_DIR = os.path.join(CURRENT_DIR, '..', 'gtec_Simulink', 'artifact', 'training_ssvep_'+ CURRENT_CKPT_REL[6:8] + '_' + CURRENT_CKPT_REL[4:6] + '_' + CURRENT_CKPT_REL[0:4] + '_' + CURRENT_CKPT_REL[9:11] + '_' + CURRENT_CKPT_REL[11:13]) os.mkdir(CURRENT_CKPT_DIR) print('Created directory {} to store experiment results'.format(CURRENT_CKPT_DIR)) # Initialize parameters and array frame_b4_switch = np.zeros((num_class), dtype=np.int32) for i in range(num_class): frame_b4_switch[i] = FRAME_RATE_Hz // ( SSVEP_FREQ_Hz[i] * 2) print('Class {} has frame_b4_switch = {}'.format(i, frame_b4_switch[i])) if FRAME_RATE_Hz % ( SSVEP_FREQ_Hz[i] * 2) != 0: print('Warning, SSVEP_FREQ_Hz {} for class {} is not a factor of the screen refresh rate {}'.format(SSVEP_FREQ_Hz, i, FRAME_RATE_Hz)) total_frames = DURATION_s * FRAME_RATE_Hz track_array = np.zeros((total_frames, num_class)) for i in range(num_class): for j in range(0, total_frames, frame_b4_switch[i]): track_array[j,i] = 1 use_neg = [] for i in range(num_class): use_neg.append(False) # Set up graphics mywin = visual.Window([1536, 864], monitor="testMonitor", units="deg", fullscr=True) pos_image = [] for i in range(num_class): pos_image.append( visual.GratingStim(win=mywin, mask="circle", size=SIZE[i], sf=SPATIAL_FREQ[i], pos = (X[i], Y[i])) ) neg_image = [] for i in range(num_class): neg_image.append( visual.GratingStim(win=mywin, mask="circle", size=SIZE[i], sf=SPATIAL_FREQ[i], phase=PHASE[i], pos = (X[i], Y[i])) ) # Randomize the target class rand_mat = np.random.choice(TOTAL_RUN, TOTAL_RUN, replace=False) rand_mat = rand_mat%len(SSVEP_FREQ_Hz) symbol = np.zeros(num_class) print(rand_mat.shape) # Present 
instruction and wait for space bar for run in range(TOTAL_RUN): # text = visual.TextStim(win=mywin, text= # '{}\t\t\t\t\t\t\t\t{}\n\n \ # \nLook at the corner with X \ # \nPress space bar to start experiment run {} / {}\ # {}\t\t\t\t\t\t\t\t{}'.format(symbol[0], symbol[1], run+1, TOTAL_RUN, symbol[2], symbol[3]), color=[-1, -1, -1]) text = visual.TextStim(win=mywin, text= '\nPerform Action = {} \ \nPress space bar to start experiment run {} / {} '.format(ACTION[rand_mat[run]], run+1, TOTAL_RUN), color=[-1, -1, -1]) text.draw() mywin.flip() event.waitKeys(keyList="space") track_flip_time = [] # Start flicker for j in range(total_frames): for i in range(num_class): if j == 0: pass # pos_image[i].setAutoDraw(True) # neg_image[i].setAutoDraw(False) else: if track_array[j,i] == 0: pass else: if use_neg[i] == True: # Is using negative image, switch to positive pass # pos_image[i].setAutoDraw(True) # neg_image[i].setAutoDraw(False) # use_neg[i] = False else: pass # pos_image[i].setAutoDraw(False) # neg_image[i].setAutoDraw(True) # use_neg[i] = True track_flip_time.append(mywin.flip()) # Save the rand_mat to text file print(rand_mat) np.savetxt(CURRENT_CKPT_DIR + '.out' , rand_mat, fmt ='%u') # Close the SSVEP Stimulus window mywin.close() """ Demo of the ElementArrayStim, a highly optimised stimulus for generating arrays of similar (but not identical) elements, such as in global form arrays or random dot stimuli. Elements must have the same basic texture and mask, but can differ in any other way (ori, sf, rgb...). This demo relies on numpy arrays to manipulate stimulus characteristics. Working with array vectors is fast, much faster than python for-loops, which would be too slow for a large array of stimuli like this. See also the starField demo. """ from __future__ import division from builtins import range from psychopy import visual, core, event from psychopy.tools.coordinatetools import cart2pol # We only need these two commands from numpy.random: from numpy.random import random, shuffle win = visual.Window([1024, 768], units='pix', monitor='testMonitor') N = 500 fieldSize = 500 elemSize = 40 coherence = 0.5 # build a standard (but dynamic!) 
global form stimulus xys = random([N, 2]) * fieldSize - fieldSize / 2.0 # numpy vector globForm = visual.ElementArrayStim(win, nElements=N, sizes=elemSize, sfs=3, xys=xys, colors=[180, 1, 1], colorSpace='hsv') # calculate the orientations for global form stimulus def makeCoherentOris(XYs, coherence, formAngle): # length along the first dimension: nNew = XYs.shape[0] # random orientations: newOris = random(nNew) * 180 # select some elements to be coherent possibleIndices = list(range(nNew)) # create an array of indices shuffle(possibleIndices) # shuffle it 'in-place' (no new array) coherentIndices = possibleIndices[0: int(nNew * coherence)] # use polar coordinates; set the ori of the coherent elements theta, radius = cart2pol(XYs[: , 0], XYs[: , 1]) newOris[coherentIndices] = formAngle - theta[coherentIndices] return newOris globForm.oris = makeCoherentOris(globForm.xys, coherence, 45) # Give each element a life of 10 frames, and give it a new position after that lives = random(N) * 10 # this will be the current life of each element while not event.getKeys(): # take a copy of the current xy and ori values newXYs = globForm.xys newOris = globForm.oris # find the dead elements and reset their life deadElements = (lives > 10) # numpy vector, not standard python lives[deadElements] = 0 # for the dead elements update the xy and ori # random array same shape as dead elements newXYs[deadElements, : ] = random(newXYs[deadElements, : ].shape) * fieldSize - fieldSize/2.0 # for new elements we still want same % coherent: new = makeCoherentOris(newXYs[deadElements, : ], coherence, 45) newOris[deadElements] = new # update the oris and xys of the new elements globForm.xys = newXYs globForm.oris = newOris globForm.draw() win.flip() lives = lives + 1 event.clearEvents('mouse') # only really needed for pygame windows win.close() core.quit() a = np.random.choice(40, 40, replace=False) # a.sort() a = a%4 print(a) b=np.zeros(4) for i in range(40): b = ['.', '.', '.', ','] # print(a[i]) b[a[i]] = 'X' print(b) from psychopy import visual, core # Initialize the SSVEP parameters DURATION_s = 10 # Duration in seconds SSVEP_FREQ_Hz = 8 # SSVEP Frequency in Hz. Set to factor of 144Hz FRAME_RATE_Hz = 144 # Frame rate of PC, is 144Hz for my own laptop # Initialize the SSVEP Visual parameters SPATIAL_FREQ = 0.2 # How thick are the vertical lines. Smaller is thick lines, big value is very thin lines PHASE = 0.1 # Phase difference between the 2 image. Takes value 0 to 1 FIRST_SIZE = 8 # Size of first image FIRST_X = -15 # X position of first image, value from (-20, 20) FIRST_Y = 8 # Y position of first image, value from (-10, 10) SECOND_SIZE = 20 # Size of second image SECOND_X = 15 # X position of second image, value from (-20, 20) SECOND_Y = -10 # Y position of second image, value from (-10, 10) def init_flicker_stim(frame_rate, cycle, soa): """Initialize flickering stimulus. Get parameters for a flickering stimulus, based on the screen refresh rate and the desired stimulation cycle. Args: frame_rate (float): screen frame rate, in Hz cycle (tuple or int): if tuple (on, off), represents the number of 'on' periods and 'off' periods in one flickering cycle. This supposes a "single graphic" stimulus, where the displayed object appears and disappears in the background. If int, represents the number of total periods in one cycle. This supposes a "pattern reversal" stimulus, where the displayed object appears and is replaced by its opposite. 
soa (float): stimulus duration, in s Returns: (dict): dictionary with keys 'cycle' -> tuple of (on, off) periods in a cycle 'freq' -> stimulus frequency 'n_cycles' -> number of cycles in one stimulus trial """ if isinstance(cycle, tuple): stim_freq = frame_rate / sum(cycle) n_cycles = int(soa * stim_freq) else: stim_freq = frame_rate / cycle cycle = (cycle, cycle) n_cycles = int(soa * stim_freq) / 2 return {"cycle": cycle, "freq": stim_freq, "n_cycles": n_cycles} # Generate stimulus pattern and confirm with printout the SSVEP stimulus frequency stim_patterns = [init_flicker_stim(FRAME_RATE_Hz, FRAME_RATE_Hz//SSVEP_FREQ_Hz, DURATION_s),] print("Flickering frequencies (Hz): {}\n".format([stim_patterns[0]["freq"]])) import timeit # To check on elapsed time tic=timeit.default_timer() # Set up graphics mywin = visual.Window([1536, 864], monitor="testMonitor", units="deg", fullscr=True) # First image grating = visual.GratingStim(win=mywin, mask="circle", size=FIRST_SIZE, sf=SPATIAL_FREQ, pos = (FIRST_X, FIRST_Y)) # Second image grating_neg = visual.GratingStim(win=mywin, mask="circle", size=SECOND_SIZE, sf=SPATIAL_FREQ, phase=PHASE, pos = (SECOND_X, SECOND_Y)) # Present flickering stim ind = 0 # ind is a index used if there is more than 1 SSVEP stimulus pattern for _ in range(int(stim_patterns[ind]["n_cycles"])): grating.setAutoDraw(True) for _ in range(int(stim_patterns[ind]["cycle"][0])): mywin.flip() grating.setAutoDraw(False) grating_neg.setAutoDraw(True) for _ in range(stim_patterns[ind]["cycle"][1]): mywin.flip() grating_neg.setAutoDraw(False) # Close the SSVEP Stimulus window mywin.close() # Print elapsed time toc=timeit.default_timer() print("Elapsed time is {}s".format(toc - tic)) mywin.close() mywin = visual.Window([1536, 864], monitor="testMonitor", units="deg", fullscr=True) for i in range(1440): a = mywin.flip(clearBuffer=True) print(a) mywin.close() 1183.3108047000005-1182.3108608000002 mywin.close() ###Output _____no_output_____ ###Markdown Running psychopy ###Code from psychopy import visual, core win = visual.Window() msg = visual.TextStim(win, text="Hello All") msg.draw() win.flip() core.wait(1) win.close() from psychopy import visual, core win = visual.Window([400,400]) message = visual.TextStim(win, text='hello') message.autoDraw = True # Automatically draw every frame win.flip() core.wait(2.0) message.text = 'world' # Change properties of existing stim win.flip() core.wait(2.0) win.close() from psychopy import visual, core # Setup stimulus win = visual.Window([400, 400]) gabor = visual.GratingStim(win, tex='sin', mask='gauss', sf=5, name='gabor', autoLog=False) fixation = visual.GratingStim(win, tex=None, mask='gauss', sf=0, size=0.02, name='fixation', autoLog=False) # Let's draw a stimulus for 200 frames, drifting for frames 50:100 for frameN in range(3000): # For exactly 200 frames if 10 <= frameN < 1500: # Present fixation for a subset of frames fixation.draw() if 50 <= frameN < 3000: # Present stim for a different subset gabor.phase += 0.1 # Increment by 10th of cycle gabor.draw() win.flip() win.close() from psychopy import event event.globalKeys.clear() import timeit tic=timeit.default_timer() mywin = visual.Window([800,600], monitor="testMonitor", units="deg") grating = visual.GratingStim(win=mywin, mask="circle", size=3, pos=[-4,0], sf=3) fixation = visual.GratingStim(win=mywin, size=0.5, pos=[0,0], sf=0, rgb=-1) grating.draw() fixation.draw() mywin.update() for frameN in range(1440): grating.setPhase(0.05, '+') # advance phase by 0.05 of a cycle 
fixation.setPhase(0.05, '+') grating.draw() fixation.draw() mywin.update() toc=timeit.default_timer() mywin.close() toc - tic #elapsed time in seconds 144 mywin = visual.Window([800,600], monitor="testMonitor", units="deg") grating = visual.GratingStim(win=mywin, mask="circle", size=3, pos=[-4,0], sf=3) mywin.update() mywin.close() %matplotlib inline ###Output _____no_output_____ ###Markdown SSVEP run experimentThis example demonstrates the initiation of an EEG stream with eeg-notebooks, and how to run an experiment. Codes are all extracted from https://github.com/NeuroTechX/eeg-notebooks, which makes use of psychopy ###Code # Code to directly run NeuroTechX/eeg-notebooks from eegnb.experiments.visual_ssvep import ssvep ssvep.present() mywin.close() import os from eegnb import generate_save_fn from eegnb.devices.eeg import EEG from eegnb.experiments.visual_ssvep import ssvep # Define some variables board_name = 'muse' experiment = 'visual_ssvep' subject = 'test' record_duration=120 ###Output _____no_output_____ ###Markdown Initiate EEG deviceStart EEG device ###Code eeg_device = EEG(device=board_name) # Create save file name save_fn = generate_save_fn(board_name, experiment, subject) print(save_fn) ###Output _____no_output_____ ###Markdown Run Experiment ###Code ssvep.present(duration=record_duration, eeg=eeg_device, save_fn=save_fn) ###Output _____no_output_____
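###Markdown The timing logic used throughout this notebook (both the hand-rolled `track_array` and eeg-notebooks' `init_flicker_stim`) reduces to picking an integer number of frames per cycle, so on a 144 Hz screen only frequencies of the form 144/n are rendered exactly; the four targets above are 144/10, 144/9, 144/8 and approximately 144/7. Below is a minimal sketch of that scheduling arithmetic in plain NumPy (no PsychoPy window needed); `build_toggle_schedule` is a hypothetical helper name, not something defined elsewhere in this notebook. It rebuilds the same pattern-reversal schedule as `track_array` and reports the frequency each class is actually shown at, which is the quantity behind the rounding warning printed during initialization. ###Code
import numpy as np

def build_toggle_schedule(freqs_hz, frame_rate_hz=144, duration_s=7):
    """Return a (frames x classes) 0/1 array marking the frames on which each
    pattern-reversal stimulus swaps images, plus the frequencies actually achieved."""
    total_frames = int(duration_s * frame_rate_hz)
    track = np.zeros((total_frames, len(freqs_hz)), dtype=np.int8)
    achieved = []
    for i, f in enumerate(freqs_hz):
        # same x100 floor-division trick as frame_b4_switch above
        frames_per_cycle = int(frame_rate_hz * 100 // (f * 100))
        track[::frames_per_cycle, i] = 1                        # switch to the positive image
        track[frames_per_cycle // 2::frames_per_cycle, i] = 1   # switch to the phase-shifted image
        achieved.append(frame_rate_hz / frames_per_cycle)
    return track, achieved

requested = [14.4, 16, 18, 20.57]
track_array, achieved = build_toggle_schedule(requested)
for want, got in zip(requested, achieved):
    print('requested {:.2f} Hz -> rendered {:.3f} Hz'.format(want, got))
###Output _____no_output_____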
Coursera/Introduction to Programming with MATLAB/Week-8/Assignment/Solution-7.ipynb
###Markdown problem integerize.m: ###Code function cl = integerize(A) cls = {'int8'; 'int16'; 'int32'; 'int64'}; cl = 'NONE'; mx = max(A(:)); mn = min(A(:)); for ii = 1:length(cls) if intmax(cls{ii}) >= mx && intmin(cls{ii}) <= mn cl = cls{ii}; break; end end end ###Output _____no_output_____ ###Markdown Or ###Code function Name = integerize (A) DataType = {'int8', 'int16', 'int32', 'int64', 'NONE'}; Limit = [2^7, 2^15, 2^31, 2^63, realmax]; A(A<0) = A(A<0) + 1; % must do this for negatives!!!!! Name = DataType{max(abs(A(:))) < Limit}; end ###Output _____no_output_____ ###Markdown problem year2016.m: ###Code function month = year2016(m) if ~isscalar(m) || m < 1 || m > 12 || m ~= floor(m) month = []; return; end days = ([31 29 31 30 31 30 31 31 30 31 30 31]); ms = {'January'; 'February'; 'March'; 'April'; 'May'; 'June'; ... 'July'; 'August'; 'September'; 'October'; 'November'; 'December'}; ds = {'Sun'; 'Mon'; 'Tue'; 'Wed'; 'Thu'; 'Fri'; 'Sat'}; start = 4; % Jan 1, 2016 was a Friday. US week starts on Sunday. % We'll add ii and 1 below because rem(n,7) returns numbers % 0-6 and we need indexes 1-7. start = start + sum(days(1:m-1)); for ii = 1:days(m) month(ii).month = ms{m}; month(ii).date = ii; month(ii).day = ds{rem(start+ii,7)+1}; end end ###Output _____no_output_____ ###Markdown problem palin_product.m: Naive implementation. Slow for many cases: ###Code function n = palin_product(dig, lim) n = 0; for ii = 10^(dig-1):10^dig-1 for jj = 10^(dig-1):ii p = ii*jj; if p >= lim continue; elseif palindrome(p) && p > n n = p; end end end end function isp = palindrome(p) txt = num2str(p); isp = strcmp(txt,txt(end:-1:1)); end ###Output _____no_output_____ ###Markdown Version optimized for speed. Most of the time, the inner loop ends early: ###Code function n = palin_product(dig, lim) n = 0; for ii = 10^dig-1 : -1 : 10^(dig-1) % going from large to small for jj = min(10^dig-1,floor((lim-1)/ii)) : -1 : 10^(dig-1) % check numbers under lim p = ii*jj; if p < n % gone under the current max continue; % no need to go further in the inner loop elseif palindrome(p) n = p; % found a larger one continue; % no need to go further in the inner loop end end end end function isp = palindrome(p) txt = num2str(p); isp = strcmp(txt,txt(end:-1:1)); end ###Output _____no_output_____ ###Markdown Vectorized solution, but needs lots of memory: ###Code function n = palin_product (dig,lim) % a is the smallest dig-digit number that can be formed. If the smallest possible % product (a^2) is smaller than the specified limit, we determine b, the largest % dig-digit number that can be formed. We then build the square outer product of a:b. % Logically indexing into to this matrix for elements less than lim creates a column % vector P of candidate products. We convert each of these to a string, reverse its % characters, and convert it back to a number, to form the column vector Q. Finally, % we return the maximum element in P which has the same value in both P and Q. 
n = 0; a = 10^(dig-1); if lim>a^2 b = 10^dig - 1; P = (a:b)' * (a:b); P = P(P<lim); Q = str2num(fliplr(num2str(P))); n = max(P(P==Q)); end end ###Output _____no_output_____ ###Markdown problem dial.m: Traditional solution: ###Code function num = dial(str) num = uint64(0); if length(str) > 16 return; end for ii = 1:length(str) if str(ii) >= 'A' && str(ii) <= 'Z' str(ii) = map(str(ii)); elseif ~(str(ii) >= '0' && str(ii) <= '9') return; end end num = uint64(str2num(str)); end function ch = map(ch) m = '22233344455566677778889999'; ch = m(ch - 'A' + 1); end ###Output _____no_output_____ ###Markdown Vectorized version: ###Code function ph = dial(str) code = '0123456789xxxxxxx22233344455566677778889999'; % x represents invalid character ph = '0'; n = str-'0'+1; % index into the vector code if ~((sum(str(n <= 0)) + sum(n > length(code))) || ... % checks for indexes out of range sum(code(n) == 'x') || ... % checks for any x-s length(str) > 16 ) % checks too long input ph = code(n); % mapping with a single command end ph = uint64(str2num(ph)); % convert string to number and uint64 end ###Output _____no_output_____ ###Markdown Another vectorized version: ###Code function n = dial (s) if ~all(ismember(s,['0':'9','A':'Z'])) || length(s) > 16 n = uint64(0); else map = '22233344455566677778889999'; s(s>='A') = map(s(s>='A')-64); n = uint64(str2double(s)) ; end end ###Output _____no_output_____ ###Markdown problem logi_unpack.m: ###Code function L = logiunpack(cv) n = length(cv); L = false(n); for ii = 1:n for jj = 1:length(cv{ii}) L(ii,cv{ii}(jj)) = true; end end end ###Output _____no_output_____ ###Markdown problem logi_pack.m: ###Code function cv = logipack(L) [r c] = size(L); cv = cell(1,r); for ii = 1:r cv{ii} = find(L(ii,:)); if isempty(cv{ii}) % find can return 1x0 empty arrays cv{ii} = []; % so we make sure it is 0x0 end end end ###Output _____no_output_____ ###Markdown problem centuries.m: Problem is small enough so there is no real need to get fancy: ###Code function c = centuries(n) if ~isscalar(n) || n < 1 || n > 3000 || n ~= floor(n) c = ''; else cents = {'I'; 'II'; 'III'; 'IV'; 'V'; 'VI'; 'VII'; 'VIII'; 'IX'; 'X'; 'XI'; 'XII'; 'XIII'; 'XIV'; 'XV'; 'XVI'; 'XVII'; 'XVIII'; 'XIX'; 'XX'; 'XXI'; 'XXII'; 'XXIII'; 'XXIV'; 'XXV'; 'XXVI'; 'XXVII'; 'XXVIII'; 'XXIX'; 'XXX'}; c = cents{ceil(n/100)}; end end ###Output _____no_output_____ ###Markdown Nevertheless, here is a general solution: ###Code function c = centuries (y) c = ''; if isscalar(y) && rem(y,1)==0 && y>0 && y<=3000 c = A2R(fix((y-1)/100)+1); end end function R = A2R (A) % Converts Arabic numbers to Roman strings. Roman = {'I' 'IV' 'V' 'IX' 'X' 'XL' 'L' 'XC' 'C' 'CD' 'D' 'CM' 'M'}; Arabic = {1 4 5 9 10 40 50 90 100 400 500 900 1000}; R = ''; k = 13; while k>0 % remove largest modulii first if A>=Arabic{k} % if value>current modulus A = A-Arabic{k}; % remove modulus from value R = [R Roman{k}]; % append Roman character else k = k-1; % else consider next smaller modulus end end end ###Output _____no_output_____ ###Markdown problem find_zero.m: ###Code function x = find_zero (f, x1,x2) x = (x1+x2)/2.0; % find interval midpoint while abs(f(x)) > 1e-10 % are we there yet? if f(x1)*f(x)>0 % if f(left) and f(mid) have the same sign x1 = x; % move left to mid else x2 = x; % move right to mid end x = (x1+x2)/2.0; % recalculate midpoint end end ###Output _____no_output_____
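###Markdown The two vectorized `dial` solutions above hinge on the same 26-character lookup string, in which positions 1 ('A') through 26 ('Z') hold the keypad digit for each letter. As a quick cross-check of that mapping outside MATLAB, here is a short Python sketch of the same logic; the function name `dial_py` and the sample inputs are illustrative only and are not part of the assignment. ###Code
def dial_py(s):
    """Mirror of dial.m: letters map onto keypad digits, digits pass through,
    and any other character or an input longer than 16 characters yields 0."""
    keypad = '22233344455566677778889999'   # digits for 'A'..'Z'
    if len(s) > 16:
        return 0
    out = []
    for ch in s:
        if 'A' <= ch <= 'Z':
            out.append(keypad[ord(ch) - ord('A')])
        elif '0' <= ch <= '9':
            out.append(ch)
        else:
            return 0                        # invalid character
    return int(''.join(out))

print(dial_py('1FUNDOG4YOU'))   # 13863644968
print(dial_py('CALL ME'))       # 0 -- the space is not a valid character
###Output _____no_output_____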
Basic_Quantum/Uncomputation.ipynb
###Markdown Uncomputation ###Code %matplotlib inline from qiskit import * ###Output _____no_output_____ ###Markdown To compute $a \wedge b \wedge c$, we need two working qubits. We need to split the computation into two parts: $a \wedge b \wedge c = (a \wedge b) \wedge c$Moreover, it is good practice to clean the working qubit that only contains an intermediate result: that is called uncomputation ###Code circuit = QuantumCircuit(5, 2) # Steps to change qubits to 1 for test purposes _ = circuit.x(0) _ = circuit.x(1) _ = circuit.x(2) _ = circuit.barrier() ###Output _____no_output_____ ###Markdown Firstly, we calculate $a \wedge b$ in the first working qubit. ###Code _ = circuit.ccx(0, 1, 3) circuit.draw(output="mpl") ###Output _____no_output_____ ###Markdown Secondly, we calculate $(a \wedge b) \wedge c$ using both the third qubit and the first working qubit. The second working qubit is the target. ###Code _ = circuit.ccx(2, 3, 4) circuit.draw(output="mpl") ###Output _____no_output_____ ###Markdown We now uncompute $a \wedge b$ in the first working qubit. That way, we can reuse it later if needed.To do so, we remember that the Toffoli gate is the inverse of itself: $(a \wedge b) \oplus (a \wedge b) = 0$ ###Code _ = circuit.ccx(0, 1, 3) circuit.draw(output="mpl") ###Output _____no_output_____ ###Markdown Lastly, we measure both working qubits: one should contain the AND and the other should always be zero ###Code # Measure (x^y^z) and working qubuit in classical bit _ = circuit.barrier() _ = circuit.measure(4, 0) _ = circuit.measure(3, 1) circuit.draw(output="mpl") # Simulate simulator = Aer.get_backend("qasm_simulator") job = execute(circuit, backend=simulator, shots=1024) result = job.result() counts = result.get_counts() # Plot results visualization.plot_histogram(counts) ###Output _____no_output_____
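###Markdown The run above only exercises the all-ones input because the first cell applies X to qubits 0, 1 and 2. Since the circuit is tiny, it is easy to sweep all eight basis inputs and confirm that classical bit 0 always equals $a \wedge b \wedge c$ while the uncomputed working qubit always measures 0. The sketch below repeats the explicit imports for clarity and otherwise reuses the same `QuantumCircuit`, `Aer` and `execute` calls as above; the shot count is arbitrary. ###Code
from itertools import product
from qiskit import QuantumCircuit, Aer, execute

simulator = Aer.get_backend("qasm_simulator")

for a, b, c in product([0, 1], repeat=3):
    qc = QuantumCircuit(5, 2)
    for qubit, bit in enumerate((a, b, c)):
        if bit:
            qc.x(qubit)        # prepare the basis state |abc>
    qc.ccx(0, 1, 3)            # working qubit 3 <- a AND b
    qc.ccx(2, 3, 4)            # qubit 4 <- (a AND b) AND c
    qc.ccx(0, 1, 3)            # uncompute working qubit 3 back to |0>
    qc.measure(4, 0)           # classical bit 0: the AND result
    qc.measure(3, 1)           # classical bit 1: should always read 0
    counts = execute(qc, backend=simulator, shots=128).result().get_counts()
    # expected single key: '0' + str(a & b & c), e.g. {'01': 128} when a = b = c = 1
    print(a, b, c, counts)
###Output _____no_output_____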
basic_usage.ipynb
###Markdown signedfacLet $X$ denote an $N\times M$ matrix. Optionally, let $C,D$ give covariate information about each row and/or each column. Let $k\in \mathbb{N}$.`signedfac` is a package for obtaining low-dimensional summaries of the rows and columns of the matrix $X$. It achieves this by fitting a model family $p(x;B_U,B_V,\mu_U,\Sigma_U,\mu_V,\sigma_V,\theta)$ to the observed matrix $X$. This model family is defined by the following generative story:$$U_i \sim \mathcal{N}(\mu_U + B_U C_i,\Sigma_U)\qquad U_i \in \mathbb{R}^k, i\in \{0\cdots N-1\}$$$$V_j \sim \mathcal{N}(\mu_V + B_V D_j,\Sigma_V)\qquad V_j \in \mathbb{R}^k, j\in \{0\cdots N-1\}$$$$X_{ij}|U,V \sim \mathrm{datamodel}(\cdot ; U_i^\top V_j, \theta_j)$$At the moment, $\mathrm{datamodel}$ can be one of the following:1. `normal` -- $p(x;w,\theta) = \mathcal{N}(x; w,\theta)$2. `negativebinomial` -- $p(x;w,\theta) = \mathrm{NegativeBinomial}(x;\ \mathrm{logit}(w),\theta)$3. `bernoulli` -- $p(x;w) = \mathrm{Bernoulli}(x;\ \mathrm{logit}(w))$Once this model is fit, the `signedfac` package estimates the posterior means, $\mathbb{E}[U,V|X]$. These low-dimensional posterior means can be understood as summaries of each row and each column. These representations can be used for visualization or downstream analysis. ###Code %load_ext autoreload %autoreload 2 import numpy as np import matplotlib.pylab as plt import numpy.random as npr import signedfac import scipy as sp import scipy.special ###Output The autoreload extension is already loaded. To reload it, use: %reload_ext autoreload ###Markdown example usageFirst we'll make some simulated data. Note we will not simulate directly from the true prior -- in practice the prior is basically never right. It is, however, essential to have this prior to avoid overfitting in data-starved situations. Even though we're not sampling from the prior, we'll be able to use `signedfac` to detect the overall structure of what's going on in the data. ###Code # we'll design it so that there are two different kinds of rows. the first 25 will be one way # the second 25 will be another way U=npr.randn(50,2); U[:25]+=8 # we'll do the same for columns. the first 10 will be one way, the second 10 another V=npr.randn(80,2); V[:10]+=8 # make data X=npr.randn(50,80)[email protected] # initialize model, using the "normal" datamodel -- this is just a standard factor analysis model. # if your data is really normal and you don't have covariates # and you don't want to use GPU-accleration, you could use # sklearn.linear_decomposition.FactorAnalysis for this instead model=signedfac.initialize(X,2,'normal') # send data to tensorflow (& to the GPU if you have one) X_tf = signedfac.to_tensorflow(X) # initialize trainer. 
this object fits the model trainer=signedfac.Trainer(X_tf,model) # train the model, using 10 iterations trainer.train_tqdm_notebook(maxiter=10) # look at losses over time trainer.status() # get low-dimensional summaries row_loadings=model.row_loadings col_loadings=model.col_loadings plt.subplot(1,2,1) plt.scatter(row_loadings[:,0],row_loadings[:,1],c=np.r_[0:len(row_loadings)]>=25) plt.title("we are able to detect the \n fact that there are two \n different kinds of rows") plt.subplot(1,2,2) plt.scatter(col_loadings[:,0],col_loadings[:,1],c=np.r_[0:len(col_loadings)]>=10) plt.title("same with columns") ###Output _____no_output_____ ###Markdown negative binomial example ###Code U=npr.randn(50,2)*.1; U[:25]+=.5 V=npr.randn(80,2)*.1; V[:10]+=3.5 thetas=npr.rand(80)*3+2 X=npr.negative_binomial(thetas[None,:],sp.special.expit([email protected])) plt.imshow(X) plt.colorbar() model=signedfac.initialize(X,2,'negativebinomial',diagsig=True) X_tf = signedfac.to_tensorflow(X) trainer=signedfac.Trainer(X_tf,model) trainer.train_tqdm_notebook(maxiter=500) trainer.status() plt.title("we do a reasonable job of estimating thetas \n (the estimator is negatively biased tho)") plt.scatter(thetas,model.thetas.numpy()) plt.plot([0,5],[0,5],'r-') plt.xlabel("true theta") plt.ylabel('estimate') row_loadings=model.row_loadings col_loadings=model.col_loadings plt.gcf().set_size_inches(10,5) plt.subplot(1,2,1) plt.scatter(row_loadings[:,0],row_loadings[:,1],c=np.r_[0:len(row_loadings)]>=25) plt.title("we are able to detect the \n fact that there are two \n different kinds of rows") plt.subplot(1,2,2) plt.scatter(col_loadings[:,0],col_loadings[:,1],c=np.r_[0:len(col_loadings)]>=10) plt.title("same with columns"); ###Output _____no_output_____ ###Markdown a dastardly caseWhat if some columns are identically zero? We don't want the algorithm to utterly fail... ###Code U=npr.randn(50,2); U[:25]+=8 V=npr.randn(80,2); V[:10]+=8 X=npr.randn(50,80)[email protected] X[:,25:30]=0 Xnb=npr.poisson(np.exp(X/30)) Xnb[:,25:30]=0 model=signedfac.initialize(X,2,'normal') X_tf = signedfac.to_tensorflow(X) trainer=signedfac.Trainer(X_tf,model) trainer.train_tqdm_notebook(maxiter=10) trainer.status() model.thetas.numpy().min() model=signedfac.initialize(Xnb,2,'negativebinomial') X_tf = signedfac.to_tensorflow(Xnb) trainer=signedfac.Trainer(X_tf,model) trainer.train_tqdm_notebook(maxiter=300) trainer.status() model.thetas.numpy().min() ###Output _____no_output_____ ###Markdown zfoo=model._get_zeta(X_tf).numpy() ###Code zfoo2=model._get_zeta(X_tf).numpy() zfoo zfoo2[25],zfoo[25] plt.scatter(zfoo,zfoo2) ###Output _____no_output_____
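###Markdown The examples above cover the `normal` and `negativebinomial` data models; the `bernoulli` model listed in the introduction should follow the same pattern. The sketch below simulates a binary matrix with the same two-cluster structure and reuses the `initialize` / `to_tensorflow` / `Trainer` calls demonstrated earlier; the datamodel string `'bernoulli'` and the maxiter value are assumptions based on the option list at the top, not something run in this notebook. ###Code
import numpy.random as npr
import scipy as sp
import scipy.special
import signedfac

# two kinds of rows and two kinds of columns, as in the earlier examples
U = npr.randn(50, 2) * .5; U[:25] += 2.
V = npr.randn(80, 2) * .5; V[:10] += 2.
X = npr.binomial(1, sp.special.expit(U @ V.T))   # binary observations

model = signedfac.initialize(X, 2, 'bernoulli')  # 'bernoulli' is an assumed option name
X_tf = signedfac.to_tensorflow(X)
trainer = signedfac.Trainer(X_tf, model)
trainer.train_tqdm_notebook(maxiter=100)
trainer.status()

row_loadings = model.row_loadings   # low-dimensional summaries of the rows
col_loadings = model.col_loadings   # low-dimensional summaries of the columns
###Output _____no_output_____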
code/PyTorch EfficientFPN Code.ipynb
###Markdown About this notebook- Starter using PyTorch Directory settings ###Code # ==================================================== # Directory settings # ==================================================== import os os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1' # specify GPUs locally OUTPUT_DIR = './submission' if not os.path.exists(OUTPUT_DIR): os.makedirs(OUTPUT_DIR) dataset_path = './data/data' anns_file_path = dataset_path + '/' + 'train.json' ###Output _____no_output_____ ###Markdown Data Loading ###Code import os import random import time import json import warnings warnings.filterwarnings('ignore') import torch import torch.nn as nn from torch.utils.data import Dataset, DataLoader from utils import label_accuracy_score import cv2 import numpy as np import pandas as pd # 전처리를 위한 라이브러리 from pycocotools.coco import COCO import torchvision import torchvision.transforms as transforms import albumentations as A from albumentations.pytorch import ToTensorV2 # 시각화를 위한 라이브러리 import matplotlib.pyplot as plt import seaborn as sns; sns.set() # Read annotations with open(anns_file_path, 'r') as f: dataset = json.loads(f.read()) categories = dataset['categories'] anns = dataset['annotations'] imgs = dataset['images'] nr_cats = len(categories) nr_annotations = len(anns) nr_images = len(imgs) # Load categories and super categories cat_names = [] super_cat_names = [] super_cat_ids = {} super_cat_last_name = '' nr_super_cats = 0 for cat_it in categories: cat_names.append(cat_it['name']) super_cat_name = cat_it['supercategory'] # Adding new supercat if super_cat_name != super_cat_last_name: super_cat_names.append(super_cat_name) super_cat_ids[super_cat_name] = nr_super_cats super_cat_last_name = super_cat_name nr_super_cats += 1 # Count annotations cat_histogram = np.zeros(nr_cats,dtype=int) for ann in anns: cat_histogram[ann['category_id']] += 1 # Convert to DataFrame df = pd.DataFrame({'Categories': cat_names, 'Number of annotations': cat_histogram}) df = df.sort_values('Number of annotations', 0, False) # category labeling sorted_temp_df = df.sort_index() # background = 0 에 해당되는 label 추가 후 기존들을 모두 label + 1 로 설정 sorted_df = pd.DataFrame(["Backgroud"], columns = ["Categories"]) sorted_df = sorted_df.append(sorted_temp_df, ignore_index=True) category_names = list(sorted_df.Categories) def get_classname(classID, cats): for i in range(len(cats)): if cats[i]['id']==classID: return cats[i]['name'] return "None" class CustomDataLoader(Dataset): """COCO format""" def __init__(self, data_dir, mode = 'train', transform = None): super().__init__() self.mode = mode self.transform = transform self.coco = COCO(data_dir) def __getitem__(self, index: int): # dataset이 index되어 list처럼 동작 image_id = self.coco.getImgIds(imgIds=index) image_infos = self.coco.loadImgs(image_id)[0] # cv2 를 활용하여 image 불러오기 images = cv2.imread(os.path.join(dataset_path, image_infos['file_name'])) images = cv2.cvtColor(images, cv2.COLOR_BGR2RGB).astype(np.float32) if (self.mode in ('train', 'val')): ann_ids = self.coco.getAnnIds(imgIds=image_infos['id']) anns = self.coco.loadAnns(ann_ids) # Load the categories in a variable cat_ids = self.coco.getCatIds() cats = self.coco.loadCats(cat_ids) # masks : size가 (height x width)인 2D # 각각의 pixel 값에는 "category id + 1" 할당 # Background = 0 masks = np.zeros((image_infos["height"], image_infos["width"])) # Unknown = 1, General trash = 2, ... 
, Cigarette = 11 for i in range(len(anns)): className = get_classname(anns[i]['category_id'], cats) pixel_value = category_names.index(className) masks = np.maximum(self.coco.annToMask(anns[i])*pixel_value, masks) masks = masks.astype(np.float32) # transform -> albumentations 라이브러리 활용 if self.transform is not None: transformed = self.transform(image=images, mask=masks) images = transformed["image"] masks = transformed["mask"] return images, masks if self.mode == 'test': # transform -> albumentations 라이브러리 활용 if self.transform is not None: transformed = self.transform(image=images) images = transformed["image"] return images, image_infos def __len__(self) -> int: # 전체 dataset의 size를 return return len(self.coco.getImgIds()) ###Output _____no_output_____ ###Markdown CFG ###Code # ==================================================== # CFG # ==================================================== class CFG: debug=False img_size=512 max_len=275 print_freq=1000 num_workers=4 model_name='timm-efficientnet-b5' #['timm-efficientnet-b4', 'tf_efficientnet_b0_ns'] size=512 # [512, 1024] freeze_epo = 0 warmup_epo = 1 cosine_epo = 39 #14 #19 warmup_factor=10 scheduler='GradualWarmupSchedulerV2' # ['ReduceLROnPlateau', 'CosineAnnealingLR', 'CosineAnnealingWarmRestarts', 'GradualWarmupSchedulerV2', 'get_linear_schedule_with_warmup'] epochs=freeze_epo + warmup_epo + cosine_epo # not to exceed 9h #[1, 5, 10] factor=0.2 # ReduceLROnPlateau patience=4 # ReduceLROnPlateau eps=1e-6 # ReduceLROnPlateau T_max=4 # CosineAnnealingLR T_0=4 # CosineAnnealingWarmRestarts encoder_lr=3e-5 #[1e-4, 3e-5] min_lr=1e-6 batch_size=24 + 0 #[64, 256 + 128, 512, 1024, 512 + 256 + 128, 2048] weight_decay=1e-6 gradient_accumulation_steps=1 max_grad_norm=5 dropout=0.5 seed=42 smoothing=0.05 n_fold=5 trn_fold=[0] trn_fold=[0, 1, 2, 3, 4] # [0, 1, 2, 3, 4] train=True apex=False log_day='0505' model_type=model_name version='v1-1' load_state=False cutmix=False pesudo=False #if CFG.apex: from torch.cuda.amp import autocast, GradScaler if CFG.debug: CFG.epochs = 2 train = train.sample(n=2, random_state=CFG.seed).reset_index(drop=True) import wandb ###Output _____no_output_____ ###Markdown Library ###Code # ==================================================== # Library # ==================================================== import sys #sys.path.append('../input/pytorch-image-models/pytorch-image-models-master') import os import gc import re import math import time import random import shutil import pickle from pathlib import Path from contextlib import contextmanager from collections import defaultdict, Counter import scipy as sp import numpy as np import pandas as pd from tqdm.auto import tqdm from sklearn import preprocessing from sklearn.model_selection import StratifiedKFold, GroupKFold, KFold from functools import partial import cv2 from PIL import Image import torch import torch.nn as nn import torch.nn.functional as F from torch.optim import Adam, SGD import torchvision.models as models from torch.nn.parameter import Parameter from torch.utils.data import DataLoader, Dataset from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, CosineAnnealingLR, ReduceLROnPlateau from warmup_scheduler import GradualWarmupScheduler # from transformers import get_linear_schedule_with_warmup from albumentations.pytorch import ToTensorV2 from albumentations import ImageOnlyTransform import albumentations as A import segmentation_models_pytorch as smp import warnings 
warnings.filterwarnings('ignore') device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') ###Output _____no_output_____ ###Markdown Utils ###Code # ==================================================== # Utils # ==================================================== def init_logger(log_file=OUTPUT_DIR+'train.log'): from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler logger = getLogger(__name__) logger.setLevel(INFO) handler1 = StreamHandler() handler1.setFormatter(Formatter("%(message)s")) handler2 = FileHandler(filename=log_file) handler2.setFormatter(Formatter("%(message)s")) logger.addHandler(handler1) logger.addHandler(handler2) return logger LOGGER = init_logger() def seed_torch(seed=42): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_torch(seed=CFG.seed) ###Output _____no_output_____ ###Markdown Dataset 정의 및 DataLoader 할당 ###Code from albumentations import ( Compose, OneOf, Normalize, Resize, RandomResizedCrop, RandomCrop, HorizontalFlip, VerticalFlip, RandomBrightness, RandomContrast, RandomBrightnessContrast, Rotate, ShiftScaleRotate, Cutout, IAAAdditiveGaussianNoise, Transpose, Blur, GaussNoise, MotionBlur, MedianBlur, OpticalDistortion, ElasticTransform, GridDistortion, IAAPiecewiseAffine, CLAHE, IAASharpen, IAAEmboss, HueSaturationValue, ToGray, JpegCompression ) # collate_fn needs for batch def collate_fn(batch): return tuple(zip(*batch)) train_transform = A.Compose([ A.VerticalFlip(p=.25), A.Cutout(num_holes=10, max_h_size=int(.1 * CFG.img_size), max_w_size=int(.1 * CFG.img_size), p=.25), A.ShiftScaleRotate(p=.25), A.RandomResizedCrop(CFG.size, CFG.size, scale = [0.75, 1], p=1), A.Normalize( mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225) ), ToTensorV2(transpose_mask=False) ]) val_transform = A.Compose([ A.Normalize( mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, p=1.0 ), ToTensorV2(transpose_mask=False) ]) test_transform = A.Compose([ A.Normalize( mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, p=1.0 ), ToTensorV2(transpose_mask=False) ]) ###Output _____no_output_____ ###Markdown MODEL ###Code class Encoder(nn.Module): def __init__(self, model_name='timm-efficientnet-b4', pretrained=False): super().__init__() self.encoder = smp.FPN(encoder_name=model_name, encoder_weights="noisy-student", classes=12) # [imagenet, noisy-student] #@autocast() def forward(self, x): x = self.encoder(x) return x ###Output _____no_output_____ ###Markdown Helper functions ###Code #!pip install -q git+https://github.com/ildoonet/pytorch-gradual-warmup-lr.git class GradualWarmupSchedulerV2(GradualWarmupScheduler): def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None): super(GradualWarmupSchedulerV2, self).__init__(optimizer, multiplier, total_epoch, after_scheduler) def get_lr(self): if self.last_epoch > self.total_epoch: if self.after_scheduler: if not self.finished: self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs] self.finished = True return self.after_scheduler.get_lr() return [base_lr * self.multiplier for base_lr in self.base_lrs] if self.multiplier == 1.0: return [base_lr * (float(self.last_epoch) / self.total_epoch) for base_lr in self.base_lrs] else: return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) 
for base_lr in self.base_lrs] #https://www.kaggle.com/bigironsphere/loss-function-library-keras-pytorch class DiceLoss(nn.Module): def __init__(self, weight=None, size_average=True): super(DiceLoss, self).__init__() def forward(self, inputs, targets, smooth=1): #comment out if your model contains a sigmoid or equivalent activation layer inputs = F.sigmoid(inputs) #flatten label and prediction tensors inputs = inputs.view(-1) targets = targets.view(-1) intersection = (inputs * targets).sum() dice = (2.*intersection + smooth)/(inputs.sum() + targets.sum() + smooth) return 1 - dice class DiceBCELoss(nn.Module): # Formula Given above. def __init__(self, weight=None, size_average=True): super(DiceBCELoss, self).__init__() def forward(self, inputs, targets, smooth=1): #comment out if your model contains a sigmoid or equivalent activation layer BCE = F.binary_cross_entropy_with_logits(inputs, targets, reduction='mean') inputs = F.sigmoid(inputs) #flatten label and prediction tensors inputs = inputs.view(-1) targets = targets.view(-1) intersection = (inputs * targets).mean() dice_loss = 1 - (2.*intersection + smooth)/(inputs.mean() + targets.mean() + smooth) Dice_BCE = 0.9*BCE + 0.1*dice_loss return Dice_BCE.mean() import numpy as np def _fast_hist(label_true, label_pred, n_class): mask = (label_true >= 0) & (label_true < n_class) hist = np.bincount(n_class * label_true[mask].astype(int) + label_pred[mask], minlength=n_class ** 2).reshape(n_class, n_class) return hist def label_accuracy_score(hist): """ Returns accuracy score evaluation result. - [acc]: overall accuracy - [acc_cls]: mean accuracy - [mean_iu]: mean IU - [fwavacc]: fwavacc """ acc = np.diag(hist).sum() / hist.sum() with np.errstate(divide='ignore', invalid='ignore'): acc_cls = np.diag(hist) / hist.sum(axis=1) acc_cls = np.nanmean(acc_cls) with np.errstate(divide='ignore', invalid='ignore'): iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)) mean_iu = np.nanmean(iu) freq = hist.sum(axis=1) / hist.sum() fwavacc = (freq[freq > 0] * iu[freq > 0]).sum() return acc, acc_cls, mean_iu, iu, fwavacc def add_hist(hist, label_trues, label_preds, n_class): """ stack hist(confusion matrix) """ for lt, lp in zip(label_trues, label_preds): hist += _fast_hist(lt.flatten(), lp.flatten(), n_class) return hist # ==================================================== # Helper functions # ==================================================== class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def asMinutes(s): m = math.floor(s / 60) s -= m * 60 return '%dm %ds' % (m, s) def timeSince(since, percent): now = time.time() s = now - since es = s / (percent) rs = es - s return '%s (remain %s)' % (asMinutes(s), asMinutes(rs)) def train_fn(train_loader, encoder, criterion, optimizer, epoch, scheduler, device): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() miou_score = AverageMeter() # switch to train mode encoder.train() scaler = torch.cuda.amp.GradScaler() start = end = time.time() global_step = 0 hist = np.zeros((12, 12)) for step, (images, targets) in enumerate(train_loader): # measure data loading time data_time.update(time.time() - end) #images = torch.stack(images) # (batch, channel, height, width) #targets = torch.stack(targets).long() # (batch, channel, 
height, width) images = images.to(device) targets = targets.to(device).long() batch_size = images.size(0) if CFG.cutmix: # generate mixed sample lam = np.random.beta(1., 1.) rand_index = torch.randperm(batch_size).cuda() bbx1, bby1, bbx2, bby2 = rand_bbox(images.size(), lam) images[:, :, bbx1:bbx2, bby1:bby2] = images[rand_index, :, bbx1:bbx2, bby1:bby2] targets[:, bbx1:bbx2, bby1:bby2] = targets[rand_index, bbx1:bbx2, bby1:bby2] # ========================= # zero_grad() # ========================= optimizer.zero_grad() if CFG.apex: with autocast(): y_preds = encoder(images) loss = criterion(y_preds, targets) scaler.scale(loss).backward() else: y_preds = encoder(images) loss = criterion(y_preds, targets) loss.backward() # record loss losses.update(loss.item(), batch_size) if CFG.gradient_accumulation_steps > 1: loss = loss / CFG.gradient_accumulation_steps #loss.backward() encoder_grad_norm = torch.nn.utils.clip_grad_norm_(encoder.parameters(), CFG.max_grad_norm) if (step + 1) % CFG.gradient_accumulation_steps == 0: if CFG.apex: scaler.step(optimizer) scaler.update() else: optimizer.step() global_step += 1 # record dice_coeff y_preds = torch.argmax(y_preds.squeeze(), dim=1).detach().cpu().numpy() hist = add_hist(hist, targets.detach().cpu().numpy(), y_preds, n_class=12) acc, acc_cls, mIoU, iu, fwavacc = label_accuracy_score(hist) miou_score.update(mIoU, batch_size) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if step % CFG.print_freq == 0 or step == (len(train_loader)-1): print('Epoch: [{0}][{1}/{2}] ' 'Data {data_time.val:.3f} ({data_time.avg:.3f}) ' 'Elapsed {remain:s} ' 'Loss: {loss.val:.4f}({loss.avg:.4f}) ' 'MioU: {miou.val:.4f}({miou.avg:.4f}) ' 'Encoder Grad: {encoder_grad_norm:.4f} ' 'Encoder LR: {encoder_lr:.6f} ' .format( epoch+1, step, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, miou=miou_score, remain=timeSince(start, float(step+1)/len(train_loader)), encoder_grad_norm=encoder_grad_norm, encoder_lr=scheduler.get_lr()[0], )) acc, acc_cls, mIoU, iu, fwavacc = label_accuracy_score(hist) return losses.avg, mIoU def valid_fn(valid_loader, encoder, criterion, device): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() miou_score = AverageMeter() # switch to evaluation mode encoder.eval() #trues = [] #preds = [] start = end = time.time() hist = np.zeros((12, 12)) for step, (images, targets) in enumerate(valid_loader): # measure data loading time data_time.update(time.time() - end) # images = torch.stack(images) # (batch, channel, height, width) # targets = torch.stack(targets).long() # (batch, channel, height, width) images = images.to(device) targets = targets.to(device).long() batch_size = images.size(0) with torch.no_grad(): y_preds = encoder(images) loss = criterion(y_preds, targets) losses.update(loss.item(), batch_size) # record dice_coeff y_preds = torch.argmax(y_preds.squeeze(), dim=1).detach().cpu().numpy() hist = add_hist(hist, targets.detach().cpu().numpy(), y_preds, n_class=12) acc, acc_cls, mIoU, iu, fwavacc = label_accuracy_score(hist) miou_score.update(mIoU, batch_size) #trues.append(labels.to('cpu').numpy()) #preds.append(y_preds.sigmoid().to('cpu').numpy()) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if step % CFG.print_freq == 0 or step == (len(valid_loader)-1): print('EVAL: [{0}/{1}] ' 'Data {data_time.val:.3f} ({data_time.avg:.3f}) ' 'Elapsed {remain:s} ' 'Loss: {loss.val:.4f}({loss.avg:.4f}) ' 'MioU: {miou.val:.4f}({miou.avg:.4f}) ' 
.format( step, len(valid_loader), batch_time=batch_time, data_time=data_time, loss=losses, miou=miou_score, remain=timeSince(start, float(step+1)/len(valid_loader)), )) #preds = np.concatenate(preds) acc, acc_cls, mIoU, iu, fwavacc = label_accuracy_score(hist) print(iu) return losses.avg, mIoU ###Output _____no_output_____ ###Markdown Train loop ###Code # from segmentation_models.losses import bce_jaccard_loss from losses.soft_ce import SoftCrossEntropyLoss from losses.lovasz import LovaszLoss from utils import rand_bbox, copyblob, FocalLoss criterion = SoftCrossEntropyLoss(smooth_factor=CFG.smoothing, ignore_index=1) #['SoftCrossEntropyLoss(smooth_factor=CFG.smoothing, ignore_index=1)', DiceBCELoss()', 'DiceLoss()', 'nn.BCEWithLogitsLoss()'] # ==================================================== # Train loop # ==================================================== def train_loop(fold): LOGGER.info(f"========== fold: {fold} training ==========") # ==================================================== # loader # ==================================================== # train.json / validation.json / test.json 디렉토리 설정 if CFG.pesudo: train_path = dataset_path + f'/train_data_pesudo{fold}.json' else: train_path = dataset_path + f'/train_data{fold}.json' val_path = dataset_path + f'/valid_data{fold}.json' # train dataset train_dataset = CustomDataLoader(data_dir=train_path, mode='train', transform=train_transform) # validation dataset val_dataset = CustomDataLoader(data_dir=val_path, mode='val', transform=val_transform) # DataLoader train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=CFG.batch_size, num_workers=CFG.num_workers, pin_memory=True, drop_last=True, shuffle=True) valid_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=CFG.batch_size, num_workers=CFG.num_workers, pin_memory=True, # drop_last=True, shuffle=False) # ==================================================== # scheduler # ==================================================== def get_scheduler(optimizer): if CFG.scheduler=='ReduceLROnPlateau': scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=CFG.factor, patience=CFG.patience, verbose=True, eps=CFG.eps) elif CFG.scheduler=='CosineAnnealingLR': scheduler = CosineAnnealingLR(optimizer, T_max=CFG.T_max, eta_min=CFG.min_lr, last_epoch=-1) elif CFG.scheduler=='CosineAnnealingWarmRestarts': scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=CFG.T_0, T_mult=1, eta_min=CFG.min_lr, last_epoch=-1) elif CFG.scheduler=='GradualWarmupSchedulerV2': scheduler_cosine=torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, CFG.cosine_epo) scheduler_warmup=GradualWarmupSchedulerV2(optimizer, multiplier=CFG.warmup_factor, total_epoch=CFG.warmup_epo, after_scheduler=scheduler_cosine) scheduler=scheduler_warmup return scheduler # ==================================================== # model & optimizer # ==================================================== encoder = Encoder(CFG.model_name, pretrained=True) encoder.to(device) if len(os.environ['CUDA_VISIBLE_DEVICES'].split(',')) > 1: #print('DataParallel') encoder = nn.DataParallel(encoder) optimizer = Adam(encoder.parameters(), lr=CFG.encoder_lr, weight_decay=CFG.weight_decay, amsgrad=False) scheduler = get_scheduler(optimizer) # Log the network weight histograms (optional) #wandb.watch(encoder, log='all') # ==================================================== # loop # ==================================================== #criterion = nn.BCEWithLogitsLoss() criterion = 
SoftCrossEntropyLoss(smooth_factor=CFG.smoothing, ignore_index=1) #['SoftCrossEntropyLoss(smooth_factor=CFG.smoothing, ignore_index=1)', DiceBCELoss()', 'DiceLoss()', 'nn.BCEWithLogitsLoss()'] # criterion = FocalLoss() best_score = 0 best_loss = np.inf for epoch in range(CFG.epochs): start_time = time.time() # train avg_loss, avg_tr_miou = train_fn(train_loader, encoder, criterion, optimizer, epoch, scheduler, device) # eval avg_val_loss, avg_val_miou = valid_fn(valid_loader, encoder, criterion, device) # scoring #score = get_score(valid_labels, text_preds) score = avg_val_miou if isinstance(scheduler, ReduceLROnPlateau): scheduler.step(score) elif isinstance(scheduler, CosineAnnealingLR): scheduler.step() elif isinstance(scheduler, CosineAnnealingWarmRestarts): scheduler.step() elif isinstance(scheduler, GradualWarmupSchedulerV2): scheduler.step(epoch) elapsed = time.time() - start_time LOGGER.info(f'Epoch {epoch+1} - avg_train_loss: {avg_loss:.4f} time: {elapsed:.0f}s') LOGGER.info(f'Epoch {epoch+1} - Score: {avg_val_miou:.4f}') wandb.log({ "avg_loss": avg_loss, "avg_val_loss": avg_val_loss, "Score": score, 'epoch': epoch, "lr": optimizer.param_groups[0]["lr"], }) model_to_save = encoder.module if hasattr(encoder, 'module') else encoder if score > best_score: best_score = score LOGGER.info(f'Epoch {epoch+1} - Save Best Score: {best_score:.4f} Model') torch.save({'encoder': model_to_save.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict(), #'text_preds': text_preds, }, OUTPUT_DIR+f'{CFG.log_day}_d{CFG.dropout}_s{CFG.seed}_{CFG.model_name}_{CFG.version}_fold{fold}_best.pth') best_oof = avg_val_miou # print(best_oof) # return best_oof #text_preds ###Output _____no_output_____ ###Markdown Main ###Code # ==================================================== # main # ==================================================== def main(rank=0, world_size=0): """ Prepare: 1.train 2.folds """ #rank = 2 #world_size = 2 if CFG.train: # train oof_df = pd.DataFrame() for fold in range(CFG.n_fold): if fold in CFG.trn_fold: # train seed_torch(seed=CFG.seed) wandb.init(project='Trash-Segmentation', name=f'{CFG.log_day}_{CFG.model_type}+FPN+AUG_{CFG.version}_fold{fold}', entity='choco_9966') config = wandb.config # Initialize config config.batch_size = CFG.batch_size config.encoder_lr = CFG.encoder_lr config.seed = CFG.seed config.weight_decay = CFG.weight_decay config.gradient_accumulation_steps = CFG.gradient_accumulation_steps config.scheduler = CFG.scheduler config.model_name = CFG.model_name config.apex = CFG.apex config.num_workers = CFG.num_workers config.img_size = CFG.size config.print_freq = CFG.print_freq config.n_fold = CFG.n_fold config.train = CFG.train config.epochs = CFG.epochs # config.inference = CFG.inference # config.swa = CFG.swa # config.swa_start = CFG.swa_start # config.swa_lr = CFG.swa_lr # config.swa = CFG.swa config.smoothing = CFG.smoothing train_loop(fold) wandb.join() if __name__ == '__main__': main() def test(models, data_loader, device): size = 256 transform = A.Compose([A.Resize(256, 256)]) print('Start prediction.') file_name_list = [] preds_array = np.empty((0, size*size), dtype=np.long) with torch.no_grad(): for step, (imgs, image_infos) in enumerate(test_loader): # inference (512 x 512) for n, model in enumerate(models): model = model.to(device) model.eval() if n == 0: outs = model(torch.stack(imgs).to(device)) else: outs += model(torch.stack(imgs).to(device)) oms = torch.argmax(outs.squeeze(), dim=1).detach().cpu().numpy() # resize (256 x 
256) temp_mask = [] for img, mask in zip(np.stack(imgs), oms): transformed = transform(image=img, mask=mask) mask = transformed['mask'] temp_mask.append(mask) oms = np.array(temp_mask) oms = np.around(oms.reshape([oms.shape[0], size*size])).astype(int) preds_array = np.vstack((preds_array, oms)) file_name_list.append([i['file_name'] for i in image_infos]) print("End prediction.") file_names = [y for x in file_name_list for y in x] return file_names, preds_array test_path = dataset_path + f'/test.json' # test dataset test_dataset = CustomDataLoader(data_dir=test_path, mode='test', transform=test_transform) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=CFG.batch_size, num_workers=CFG.num_workers, pin_memory=True, shuffle=False, collate_fn=collate_fn) class Encoder(nn.Module): def __init__(self, model_name='timm-efficientnet-b4', pretrained=False): super().__init__() self.encoder = smp.FPN(encoder_name=model_name, encoder_weights="noisy-student", classes=12) # [imagenet, noisy-student] #@autocast() def forward(self, x): x = self.encoder(x) return x # 추론을 실행하기 전에는 반드시 설정 (batch normalization, dropout 를 평가 모드로 설정) # model.eval() models = [] for fold in range(5): model_path = f'./submission{CFG.log_day}_d{CFG.dropout}_s{CFG.seed}_{CFG.model_name}_{CFG.version}_fold{fold}_best.pth' checkpoint = torch.load(model_path, map_location=device) model = Encoder(CFG.model_name, pretrained=False) model.load_state_dict(checkpoint['encoder']) models += [model] ###Output _____no_output_____ ###Markdown Multi Scale TTA (No ttach package) ###Code import argparse import scipy import os import numpy as np import json import torch import torch.nn as nn import torch.nn.functional as F from torchvision import transforms from scipy import ndimage from tqdm import tqdm from math import ceil from glob import glob from PIL import Image from collections import OrderedDict import numpy as np import pydensecrf.densecrf as dcrf from pydensecrf.utils import unary_from_softmax, create_pairwise_bilateral ''' # Default Values are apperance_kernel = [8, 164, 100] # PairwiseBilateral [sxy, srgb, compat] spatial_kernel = [3, 10] # PairwiseGaussian [sxy, compat] # or if you want to to specify seprately for each XY direction and RGB color channel then apperance_kernel = [(1.5, 1.5), (64, 64, 64), 100] # PairwiseBilateral [sxy, srgb, compat] spatial_kernel = [(0.5, 0.5), 10] # PairwiseGaussian [sxy, compat] ''' # https://www.programcreek.com/python/example/106424/pydensecrf.densecrf.DenseCRF2D h, w = 512, 512 def dense_crf(probs, img=None, n_classes=12, n_iters=10, scale_factor=1): c,h,w = probs.shape if img is not None: assert(img.shape[1:3] == (h, w)) img = np.transpose(img,(1,2,0)).copy(order='C') img = np.uint8(255 * img) d = dcrf.DenseCRF2D(w, h, n_classes) # Define DenseCRF model. 
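# Comments on the steps below (added for clarity): unary_from_softmax() converts the
# per-class probability map into unary potentials (negative log-probabilities),
# addPairwiseGaussian() adds a location-only smoothness kernel, and
# addPairwiseBilateral() adds a location+colour kernel so that label boundaries
# tend to follow image edges during d.inference().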
unary = unary_from_softmax(probs) unary = np.ascontiguousarray(unary) d.setUnaryEnergy(unary) d.addPairwiseGaussian(sxy=(3,3), compat=10) d.addPairwiseBilateral(sxy=10, srgb=5, rgbim=np.copy(img), compat=10) Q = d.inference(n_iters) preds = np.array(Q, dtype=np.float32).reshape((n_classes, h, w)) return preds scales = [0.75, 1.0, 1.25] def multi_scale_predict(model, image, scales, num_classes, device, flip=False): input_size = (image.size(2), image.size(3)) upsample = nn.Upsample(size=input_size, mode='bilinear', align_corners=True) total_predictions = np.zeros((image.size(0), num_classes, image.size(2), image.size(3))) image = image.data.data.cpu().numpy() for scale in scales: scaled_img = ndimage.zoom(image, (1.0, 1.0, float(scale), float(scale)), order=1, prefilter=False) scaled_img = torch.from_numpy(scaled_img).to(device) scaled_prediction = upsample(model(scaled_img).cpu()) if flip: fliped_img = scaled_img.flip(-1).to(device) fliped_predictions = upsample(model(fliped_img).cpu()) scaled_prediction = 0.5 * (fliped_predictions.flip(-1) + scaled_prediction) total_predictions += scaled_prediction.data.cpu().numpy() total_predictions /= len(scales) return total_predictions invTrans = transforms.Compose([ transforms.Normalize(mean = [ 0., 0., 0. ], std = [ 1/0.229, 1/0.224, 1/0.225 ]), transforms.Normalize(mean = [ -0.485, -0.456, -0.406 ], std = [ 1., 1., 1. ]), ]) def test(models, data_loader, device): size = 256 transform = A.Compose([A.Resize(256, 256)]) print('Start prediction.') file_name_list = [] preds_array = np.empty((0, size*size), dtype=np.long) with torch.no_grad(): for step, (imgs, image_infos) in enumerate(test_loader): # inference (512 x 512) for n, model in enumerate(models): model = model.to(device) model.eval() if n == 0: outs = multi_scale_predict(model, torch.stack(imgs).to(device), scales, 12, device, flip=True) else: outs += multi_scale_predict(model, torch.stack(imgs).to(device), scales, 12, device, flip=True) probs_array = [] for image, prob in zip(imgs, outs): prob = F.softmax(torch.from_numpy(prob), dim=0) prob = dense_crf(img=np.around(invTrans(image).cpu().numpy()).astype(float), probs=prob.cpu().numpy()) probs_array += [np.argmax(prob, axis=0)] oms = np.array(probs_array) # oms = np.argmax(outs.squeeze(), axis=1) # resize (256 x 256) temp_mask = [] for img, mask in zip(np.stack(imgs), oms): transformed = transform(image=img, mask=mask) mask = transformed['mask'] temp_mask.append(mask) oms = np.array(temp_mask) oms = np.around(oms.reshape([oms.shape[0], size*size])).astype(int) preds_array = np.vstack((preds_array, oms)) file_name_list.append([i['file_name'] for i in image_infos]) print("End prediction.") file_names = [y for x in file_name_list for y in x] return file_names, preds_array %%time # sample_submisson.csv 열기 submission = pd.read_csv('./submission/sample_submission.csv', index_col=None) # test set에 대한 prediction model = model.to(device) file_names, preds = test(models, test_loader, device) # PredictionString 대입 for file_name, string in zip(file_names, preds): submission = submission.append({"image_id" : file_name, "PredictionString" : ' '.join(str(e) for e in string.tolist())}, ignore_index=True) submission.to_csv("./submission/0505_EfficientFPNB4_5FOLD_FLIP_CRF2.csv", index=False) ###Output _____no_output_____ ###Markdown Multi Scale TTA (ttach package) ###Code import ttach as tta transforms = tta.Compose( [ tta.HorizontalFlip(), tta.VerticalFlip(), tta.Scale(scales=[0.75, 1, 1.25]), tta.Multiply(factors=[0.9, 1, 1.1]), ] ) models = [] for fold in 
range(5): model_path = f'./submission{CFG.log_day}_d{CFG.dropout}_s{CFG.seed}_{CFG.model_name}_{CFG.version}_fold{fold}_best.pth' checkpoint = torch.load(model_path, map_location=device) model = Encoder(CFG.model_name, pretrained=False) model.load_state_dict(checkpoint['encoder']) tta_model = tta.SegmentationTTAWrapper(model, transforms) models += [tta_model] def test(models, data_loader, device): size = 256 transform = A.Compose([A.Resize(256, 256)]) print('Start prediction.') file_name_list = [] preds_array = np.empty((0, size*size), dtype=np.long) with torch.no_grad(): for step, (imgs, image_infos) in enumerate(test_loader): # inference (512 x 512) for n, model in enumerate(models): model = model.to(device) model.eval() if n == 0: outs = model(torch.stack(imgs).to(device)) else: outs += model(torch.stack(imgs).to(device)) probs_array = [] for image, prob in zip(imgs, outs): prob = F.softmax(prob, dim=0) prob = dense_crf(img=np.around(invTrans(image).cpu().numpy()).astype(float), probs=prob.cpu().numpy()) probs_array += [np.argmax(prob, axis=0)] oms = np.array(probs_array) # oms = np.argmax(outs.squeeze(), axis=1) # resize (256 x 256) temp_mask = [] for img, mask in zip(np.stack(imgs), oms): transformed = transform(image=img, mask=mask) mask = transformed['mask'] temp_mask.append(mask) oms = np.array(temp_mask) oms = np.around(oms.reshape([oms.shape[0], size*size])).astype(int) preds_array = np.vstack((preds_array, oms)) file_name_list.append([i['file_name'] for i in image_infos]) print("End prediction.") file_names = [y for x in file_name_list for y in x] return file_names, preds_array %%time # sample_submisson.csv 열기 submission = pd.read_csv('./submission/sample_submission.csv', index_col=None) # test set에 대한 prediction model = model.to(device) file_names, preds = test(models, test_loader, device) # PredictionString 대입 for file_name, string in zip(file_names, preds): submission = submission.append({"image_id" : file_name, "PredictionString" : ' '.join(str(e) for e in string.tolist())}, ignore_index=True) submission.to_csv("./submission/0505_EfficientFPNB4_5FOLD_FLIP_CRF3.csv", index=False) ###Output _____no_output_____
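###Markdown Reference sketch: the CutMix branch in `train_fn` calls `rand_bbox(images.size(), lam)`, which is imported from the local `utils` module and is not shown in this notebook. The cell below is only a sketch following the standard CutMix formulation and the way the function is called above; the real `utils.rand_bbox` may differ, so it is deliberately named `rand_bbox_sketch` here. ###Code
import numpy as np

def rand_bbox_sketch(size, lam):
    # size: images.size() = (batch, channel, H, W); lam ~ Beta(1, 1) as drawn in train_fn().
    # bbx* index dim 2 and bby* index dim 3, matching images[:, :, bbx1:bbx2, bby1:bby2].
    H, W = size[2], size[3]
    cut_rat = np.sqrt(1.0 - lam)                          # patch side ratio, so patch area ~ (1 - lam) * H * W
    cut_h, cut_w = int(H * cut_rat), int(W * cut_rat)
    cx, cy = np.random.randint(H), np.random.randint(W)   # random patch centre
    bbx1 = np.clip(cx - cut_h // 2, 0, H)
    bby1 = np.clip(cy - cut_w // 2, 0, W)
    bbx2 = np.clip(cx + cut_h // 2, 0, H)
    bby2 = np.clip(cy + cut_w // 2, 0, W)
    return bbx1, bby1, bbx2, bby2
###Output _____no_output_____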
1 Longest Even Length Word.ipynb
###Markdown Longest Even Length WordConsider a string, sentence, of space-separated words where each word is a substring consisting of English alphabetic letters only.We want to find the first word in sentence having a length which is both an even number and greater than orequal to the length of any other word of even length in the sentence. For example, if sentence is `Time to write great code` , then the word we're looking for is Time .While code and Time are of maximal length, Time occurs first. If sentence is `Write code for a great time` , then the word we're looking for is code. ###Code data1 = 'One great way to make predictions about an unfamiliar nonfiction text is to take a walk through the book before reading.' data2 = 'photographs or other images, readers can start to get a sense about the topic. This scanning and skimming helps set the expectation for the reading.' data3 = ' testing very9 important' #Importing necessary packages import pandas as pd import numpy as np #Function for Splitting Words def splitword(data): temp={} data.lower() split_data=data.split() for i in range(0,len(split_data)): temp.update({split_data[i]:len(split_data[i])}) output=pd.DataFrame(temp.items(),columns=['word','count']) return output #Function for finding Longest Even word def q01_longest_even_word(sentence): test=splitword(sentence)#Call to the function created above if (test['count']%2==0).any(): output=test.word[test[test['count']%2==0]['count'].idxmax()] else: output=0 return output q01_longest_even_word(data3) ###Output _____no_output_____
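###Markdown For comparison, the same logic without pandas: a minimal pure-Python sketch that scans the words once and keeps the first word whose length is even and maximal among even-length words, returning 0 when there is none (matching the behaviour above). ###Code
def longest_even_word(sentence):
    # Keep the first word of maximal even length; strict '>' preserves the earliest winner.
    best = None
    for word in sentence.split():
        if len(word) % 2 == 0 and (best is None or len(word) > len(best)):
            best = word
    return best if best is not None else 0

longest_even_word(data1), longest_even_word(data3)
###Output _____no_output_____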
tutorial_part1/Signals and Systems.ipynb
###Markdown \tableofcontents% These TeX commands run at the start to remove section numbering\renewcommand{\thesection}{\hspace*{-1.0em}}\renewcommand{\thesubsection}{\hspace*{-1.0em}}\renewcommand{\thesubsubsection}{\hspace*{-1.0em}} ###Code %pylab inline #%matplotlib qt from __future__ import division # use so 1/2 = 0.5, etc. import sk_dsp_comm.sigsys as ss import sk_dsp_comm.pyaudio_helper as pah import sk_dsp_comm.iir_design_helper as iir_d import scipy.signal as signal import time import sys import imp # for module development and reload() from IPython.display import Audio, display from IPython.display import Image, SVG pylab.rcParams['savefig.dpi'] = 100 # default 72 #pylab.rcParams['figure.figsize'] = (6.0, 4.0) # default (6,4) #%config InlineBackend.figure_formats=['png'] # default for inline viewing %config InlineBackend.figure_formats=['svg'] # SVG inline viewing #%config InlineBackend.figure_formats=['pdf'] # render pdf figs for LaTeX #Image('filename.png',width='80%') def FT_approx(x,t,Nfft): ''' Approximate the Fourier transform of a finite duration signal using scipy.signal.freqz() Inputs ------ x = input signal array t = time array used to create x(t) Nfft = the number of frequency domain points used to approximate X(f) on the interval [fs/2,fs/2], where fs = 1/Dt. Dt being the time spacing in array t Return ------ f = frequency axis array in Hz X = the Fourier transform approximation (complex) Mark Wickert, January 2015 ''' fs = 1/(t[1] - t[0]) t0 = (t[-1]+t[0])/2 # time delay at center N0 = len(t)/2 # FFT center in samples f = arange(-1/2,1/2,1/Nfft) w, X = signal.freqz(x,1,2*pi*f) X /= fs # account for dt = 1/fs in integral X *= exp(-1j*2*pi*f*fs*t0)# time interval correction X *= exp(1j*2*pi*f*N0)# FFT time interval is [0,Nfft-1] F = f*fs return F, X ###Output _____no_output_____ ###Markdown Signals Using the signal primitives `ss.rect(t,tau)` and `ss.tri(t,tau)` synthesize the following signal and also plot the corresponding frequency spectra. ###Code Image('images/Pulse_Signal_Modeling.png',width='100%') ###Output _____no_output_____ ###Markdown * In working the examples below it worth recalling how re-mapping the argument of a function can be used to shift a function right or left ###Code Image('images/Shifting_Functions.png',width='100%') ###Output _____no_output_____ ###Markdown Example: $x_5(t)$ ###Code t = arange(-4,4,.01) x5 = ss.tri(t-1,1) - ss.tri(t+1,1) plot(t,x5) ylabel(r'Amplitude') xlabel(r'Times (s)') title(r'The Finte Energy Signal $x_5(t)$') grid(); ###Output _____no_output_____ ###Markdown The spectrum (Fourier transform)$$ X_5(f) = \int_{-\infty}^\infty x(t) e^{-j2\pi ft}\, dt $$is numerically computed using `FT_approx()`. ###Code F, X5 = FT_approx(x5,t,4096) plot(F, abs(X5)) xlim([-5,5]) title(r'Spectrum Magnitude $|X_5(f)|$') ylabel(r'Magnitude') xlabel(r'Frequency (Hz)') grid(); ###Output _____no_output_____ ###Markdown Example: The Bi-Phase PulseThis pulse is used in Manchester encoding of a bit stream.$$ p(t) = \Pi\left(\frac{t-T/4}{T/2}\right) - \Pi\left(\frac{t-3T/4}{T/2}\right)$$When each bit in a bit stream is mapped to the time axis using this pulse shape1. There is a transition for every bit making sychronization easier2. The spectrum does not contain and zero frequency components3. Unfortunately the spectrum is wide than the simple rectangle $\pm 1 \times \Pi\big(t/T\big)$ pulse shape ###Code # Complete this example by plottont $p(t)$ for $T = 1$ and plotting the spectrum. 
# Overlay the spectrum of $\Pi(t/T)$ for $T=1$ # Code here ###Output _____no_output_____ ###Markdown **Complete a Few Other Examples from the $3\times 2$ Plot Figure Above** (time permitting) Raised Cosine and Square-root Raised CosineThe module `digitalcom` utilizes this pulse shape in many of its functions. More later in **Part3** ###Code Ns = 8 # samples per bit period p_RC = ss.rc_imp(Ns,0.35) # 0.35 is a popular excess bandwidth factor p_SRC = ss.sqrt_rc_imp(Ns,0.35) n = arange(len(p_RC)) plot(n/Ns,p_RC) plot(n/Ns,p_SRC) legend((r'Raised Cosine',r'Square-root RC'),loc='best') title(r'RC and SRC Pulse Shapes (functions are discrete)') ylabel(r'Amplitude') xlabel(r'Bit Period ($n/N_s$)') grid() ###Output _____no_output_____ ###Markdown Plot the spectrum magnitude (Fourier transform) by repurposing the function `freqz_resp_list(b,a=np.array([1]),mode = 'dB',fs=1.0,Npts = 1024)` found in the module `iir_design_helper` ###Code import sk_dsp_comm.iir_design_helper as iir_d iir_d.freqz_resp_list([p_RC,p_SRC],[1,1],'dB',8.0) title(r'Spectral Comparison Between RC and SRC Pulse Shapes') ylabel(r'Spectral Density (dB)') xlabel(r'Bit Rate Normalized Frequency $f/R_b$') ylim([-80,20]) legend((r'RC',r'SRC'),loc='best') grid(); # Repeat the above pulse shape modeling for alpha = 0.5 and 0.25 # Overlay plots of just the SRC spectrum for alpha = 0.25, 0.35, 0.5 ###Output _____no_output_____ ###Markdown AliasingIn `sigsys` the function `f_prin = prin_alias(f,fs)` finds the *principal alias* of a sinusoidal signal relative to the sampling rate $f_s$: ###Code # Some examples (ss.prin_alias(25,100),ss.prin_alias(300,200),ss.prin_alias(18000,44100)) f = arange(64,72,.01) f_prin = ss.prin_alias(f,8) # Sampling frequency is 8 MHz plot(f,f_prin) title(r'Under Sampling a Signal Over 64-72 MHz with $f_s = 8$ MHz') ylabel(r'Output Frequency (MHz)') xlabel(r'Input Frequency (MHz)') grid(); ###Output _____no_output_____
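###Markdown One possible completion of the bi-phase pulse exercise above, sketched with the primitives already defined in this notebook (`ss.rect` and `FT_approx`); plot styling is kept minimal. ###Code
T = 1
t = arange(-2*T, 2*T, .001)
# p(t) = Pi((t - T/4)/(T/2)) - Pi((t - 3T/4)/(T/2)): +1 on [0, T/2), -1 on [T/2, T)
p_bi = ss.rect(t - T/4, T/2) - ss.rect(t - 3*T/4, T/2)
p_rect = ss.rect(t, T)          # the plain Pi(t/T) pulse, for the spectrum overlay
F, P_bi = FT_approx(p_bi, t, 4096)
F, P_rect = FT_approx(p_rect, t, 4096)

subplot(211)
plot(t, p_bi)
ylabel(r'$p(t)$')
grid();
subplot(212)
plot(F, abs(P_bi))
plot(F, abs(P_rect))
xlim([-10, 10])
legend((r'Bi-phase $p(t)$', r'$\Pi(t/T)$'), loc='best')
ylabel(r'Magnitude')
xlabel(r'Frequency (Hz)')
grid();
###Output _____no_output_____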
tabular-playground-series-feb-2022-2/test-estimators.ipynb
###Markdown With 'Native' only: 0.971659944. With 'Native' and 'MultipliedATGC': 0.96771621252403 ###Code estimators = [ ('rf', RandomForestClassifier(random_state=0, n_jobs=-1)), ('ert', ExtraTreesClassifier(random_state=0, n_jobs=-1)) ] final_estimator = VotingClassifier( estimators=[ ('rf', RandomForestClassifier(random_state=0, n_jobs=-1)), ('ert', ExtraTreesClassifier(random_state=0, n_jobs=-1)) ], voting='soft', n_jobs=-1 ) model = StackingClassifier( estimators=estimators, final_estimator=final_estimator, n_jobs=-1 ) cv_score = cross_val_score(model, X_train, y_train, n_jobs=-1, verbose=2) print(cv_score) print(np.mean(cv_score)) # model.fit(X_train, y_train) # y_pred=model.predict(X_test) # save_submission(y_pred, np.mean(cv_score), 'target','rf-ert-stacking-voting') n_stack = 3 estimatorss = [[ ] for i in range(3)] estimatorss ###Output _____no_output_____
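###Markdown A short note on the construction above: `StackingClassifier` feeds its final estimator with cross-validated predictions of the base estimators (5-fold `predict_proba` by default in current scikit-learn versions), so the soft-voting `final_estimator` here votes over out-of-fold probabilities rather than raw features. Below is a minimal sketch of fitting the stacked model and inspecting its fitted pieces, mirroring the commented-out lines above (`save_submission` is a project helper not shown here). ###Code
model.fit(X_train, y_train)
model.named_estimators_['rf']     # the refitted base random forest
model.final_estimator_            # the fitted soft-voting ensemble
y_pred = model.predict(X_test)
###Output _____no_output_____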
XGBoost-RFECV-RoF-Vancouver.ipynb
###Markdown In this note book the following steps are taken:1. Find the best hyper parameters for estimator2. Find the most important features by tunned random forest3. Comapring r2 of the tuuned full model and model with selected features4. Furthur step is finding tuned model with selected features and comparing the hyper parameters ###Code #import data Data=pd.read_csv("Vancouver-Transfomed-Data.csv") X = Data.iloc[:,:-1] y = Data.iloc[:,-1] #split test and training set. total number of data is 330 so the test size cannot be large np.random.seed(60) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 1000) regressors = {} regressors.update({"XGBoost": XGBRegressor(random_state=1000)}) FEATURE_IMPORTANCE = {"XGBoost"} #Define range of hyperparameters for estimator np.random.seed(60) parameters = {} parameters.update({"XGBoost": { "regressor__learning_rate":[0.001,0.01,0.02,0.1,0.25,0.5,1], "regressor__gamma":[0.001,0.01,0.02,0.1,0.25,0.5,1], "regressor__max_depth" : [5,10,15,20], "regressor__reg_alpha":[0.001,0.01,0.02,0.1], "regressor__reg_lambda":[0.001,0.01,0.02,0.1], "regressor__min_child_weight":[0.001,0.01,0.02,0.1]} }) # Make correlation matrix corr_matrix = X_train.corr(method = "spearman").abs() # Draw the heatmap sns.set(font_scale = 1.0) f, ax = plt.subplots(figsize=(11, 9)) sns.heatmap(corr_matrix, cmap= "YlGnBu", square=True, ax = ax) f.tight_layout() plt.savefig("correlation_matrix.png", dpi = 1080) # Select upper triangle of matrix upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k = 1).astype(np.bool)) # Find index of feature columns with correlation greater than 0.8 to_drop = [column for column in upper.columns if any(upper[column] > 0.8)] # Drop features X_train = X_train.drop(to_drop, axis = 1) X_test = X_test.drop(to_drop, axis = 1) X_train FEATURE_IMPORTANCE = {"XGBoost"} selected_regressor = "XGBoost" regressor = regressors[selected_regressor] results = {} for regressor_label, regressor in regressors.items(): # Print message to user print(f"Now tuning {regressor_label}.") scaler = StandardScaler() steps = [("scaler", scaler), ("regressor", regressor)] pipeline = Pipeline(steps = steps) #Define parameters that we want to use in gridsearch cv param_grid = parameters[selected_regressor] # Initialize GridSearch object for estimator gscv = RandomizedSearchCV(pipeline, param_grid, cv = 3, n_jobs= -1, verbose = 1, scoring = r2_score, n_iter=20) # Fit gscv (Tunes estimator) print(f"Now tuning {selected_regressor}. 
Go grab a beer or something.") gscv.fit(X_train, np.ravel(y_train)) #Getting the best hyperparameters best_params = gscv.best_params_ best_params #Getting the best score of model best_score = gscv.best_score_ best_score #Check overfitting of the estimator from sklearn.model_selection import cross_val_score mod = XGBRegressor(gamma= 0.01, learning_rate= 0.5, max_depth=10, min_child_weight= 0.001, reg_alpha=0.001, reg_lambda = 0.01 ,random_state=10000) scores_test = cross_val_score(mod, X_test, y_test, scoring='r2', cv=5) scores_test tuned_params = {item[11:]: best_params[item] for item in best_params} regressor.set_params(**tuned_params) #Find r2 of the model with all features (Model is tuned for all features) results={} model=regressor.set_params(gamma= 0.01, learning_rate= 0.5, max_depth=10, min_child_weight= 0.001, reg_alpha=0.001, reg_lambda = 0.01 ,random_state=10000) model.fit(X_train,y_train) y_pred = model.predict(X_test) R2 = metrics.r2_score(y_test, y_pred) results = {"classifier": model, "Best Parameters": best_params, "Training r2": best_score*100, "Test r2": R2*100} results # Select Features using RFECV class PipelineRFE(Pipeline): # Source: https://ramhiser.com/post/2018-03-25-feature-selection-with-scikit-learn-pipeline/ def fit(self, X, y=None, **fit_params): super(PipelineRFE, self).fit(X, y, **fit_params) self.feature_importances_ = self.steps[-1][-1].feature_importances_ return self steps = [("scaler", scaler), ("regressor", regressor)] pipe = PipelineRFE(steps = steps) np.random.seed(60) # Initialize RFECV object feature_selector = RFECV(pipe, cv = 5, step = 1, verbose = 1) # Fit RFECV feature_selector.fit(X_train, np.ravel(y_train)) # Get selected features feature_names = X_train.columns selected_features = feature_names[feature_selector.support_].tolist() performance_curve = {"Number of Features": list(range(1, len(feature_names) + 1)), "R2": feature_selector.grid_scores_} performance_curve = pd.DataFrame(performance_curve) # Performance vs Number of Features # Set graph style sns.set(font_scale = 1.75) sns.set_style({"axes.facecolor": "1.0", "axes.edgecolor": "0.85", "grid.color": "0.85", "grid.linestyle": "-", 'axes.labelcolor': '0.4', "xtick.color": "0.4", 'ytick.color': '0.4'}) colors = sns.color_palette("RdYlGn", 20) line_color = colors[3] marker_colors = colors[-1] # Plot f, ax = plt.subplots(figsize=(13, 6.5)) sns.lineplot(x = "Number of Features", y = "R2", data = performance_curve, color = line_color, lw = 4, ax = ax) sns.regplot(x = performance_curve["Number of Features"], y = performance_curve["R2"], color = marker_colors, fit_reg = False, scatter_kws = {"s": 200}, ax = ax) # Axes limits plt.xlim(0.5, len(feature_names)+0.5) plt.ylim(0.60, 1) # Generate a bolded horizontal line at y = 0 ax.axhline(y = 0.625, color = 'black', linewidth = 1.3, alpha = .7) # Turn frame off ax.set_frame_on(False) # Tight layout plt.tight_layout() #Define new training and test set based based on selected features by RFECV X_train_rfecv = X_train[selected_features] X_test_rfecv= X_test[selected_features] np.random.seed(60) regressor.fit(X_train_rfecv, np.ravel(y_train)) #Finding important features np.random.seed(60) feature_importance = pd.DataFrame(selected_features, columns = ["Feature Label"]) feature_importance["Feature Importance"] = regressor.feature_importances_ feature_importance = feature_importance.sort_values(by="Feature Importance", ascending=False) feature_importance # Initialize GridSearch object for model with selected features np.random.seed(60) gscv = 
RandomizedSearchCV(pipeline, param_grid, cv = 3, n_jobs= -1, verbose = 1, scoring = r2_score, n_iter=20) #Tuning random forest classifier with selected features np.random.seed(60) gscv.fit(X_train_rfecv,y_train) #Getting the best parameters of model with selected features best_params = gscv.best_params_ best_params #Getting the score of model with selected features best_score = gscv.best_score_ best_score #Check overfitting of the tuned model with selected features from sklearn.model_selection import cross_val_score mod = XGBRegressor(gamma= 0.001, learning_rate= 0.5, max_depth=15, min_child_weight= 0.001, reg_alpha=0.02, reg_lambda = 0.02 ,random_state=10000) scores_test = cross_val_score(mod, X_test_rfecv, y_test, scoring='r2', cv=5) scores_test results={} model=regressor.set_params(gamma= 0.001, learning_rate= 0.5, max_depth=15, min_child_weight= 0.001, reg_alpha=0.02, reg_lambda = 0.02 ,random_state=10000) model.fit(X_train_rfecv,y_train) y_pred = model.predict(X_test_rfecv) R2 = metrics.r2_score(y_test, y_pred) results = {"classifier": model, "Best Parameters": best_params, "Training r2": best_score*100, "Test r2": R2*100} results ###Output _____no_output_____
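###Markdown One detail worth double-checking in the searches above: `RandomizedSearchCV(..., scoring = r2_score, ...)` passes the metric function itself. scikit-learn expects either a scoring string such as `'r2'` or a scorer wrapped with `make_scorer`, because a scorer is called as `scorer(estimator, X, y)` while `r2_score` expects `(y_true, y_pred)`; depending on the version this raises an error when the search is fitted. A minimal sketch of the documented pattern, reusing the same `pipeline` and `param_grid` as above: ###Code
from sklearn.metrics import make_scorer, r2_score
from sklearn.model_selection import RandomizedSearchCV

gscv = RandomizedSearchCV(pipeline, param_grid, cv=3, n_jobs=-1, verbose=1,
                          scoring='r2',        # or scoring=make_scorer(r2_score)
                          n_iter=20)
###Output _____no_output_____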
PDA/jupyter/jupyterNotebooks/assignments/03Collections_Assignments.ipynb
###Markdown Programming and Data Analytics 1 2021/2022Sant'Anna School of Advanced Studies, Pisa, ItalyCourse responsibleAndrea Vandin [email protected] Daniele Licari [email protected] Assignments forLecture 3: Collections--- ###Code #@title RUN, BUT DO NOT MODIFY !curl -O https://raw.githubusercontent.com/EMbeDS-education/StatsAndComputing20212022/main/PDA/jupyter/jupyterNotebooks/assignments/auto_testing.py %reload_ext autoreload %autoreload 2 from auto_testing import * ###Output _____no_output_____ ###Markdown Assignment 03.01: Play with lists StatementWrite a program that - Reads two integers `a` and `b` - prints `a` and `b`- Creates a list (`lst1`) of 10 elements containing value 0 in each entry - sets to value `a` the entries in position 1 and 3 - sets to value `b` the entries in position 2 and 4 - prints this list- Creates a list (`lst2`) of 10 elements containing values from 0 to 9 included - prints this list- Prints `True` if the `lst1` is smaller than `lst2`, and `False` otherwise- Concatenates the two lists in list `lst3` - prints `lst3` Example input 1```1``````2``` Example output 1```12[0, 1, 2, 1, 2, 0, 0, 0, 0, 0][0, 1, 2, 3, 4, 5, 6, 7, 8, 9]True[0, 1, 2, 1, 2, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]``` Example input 2```2``````1``` Example output 2```21[0, 2, 1, 2, 1, 0, 0, 0, 0, 0][0, 1, 2, 3, 4, 5, 6, 7, 8, 9]False[0, 2, 1, 2, 1, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]``` Write your solution here* Do not change the first line (`def ...():`)* Maintain the given indentation* You can run some tests by yourself by decommenting the last line ###Code def asgn03_01Play_with_lists(): # This program reads a number and prints it a = int(input()) print(a) # Change it according to the assignment description #You can test independently your solution by executing the following line #asgn03_01Play_with_lists() ###Output _____no_output_____ ###Markdown Run the following cells to perform the provided tests ###Code #@title RUN and TEST ALL from IPython.display import display, Markdown inputs=[[0,0],[1,2],[2,1]] expected_outputs=[["0","0","[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]","[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]","True","[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]"],\ ["1","2","[0, 1, 2, 1, 2, 0, 0, 0, 0, 0]","[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]","True","[0, 1, 2, 1, 2, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]"], \ ["2","1","[0, 2, 1, 2, 1, 0, 0, 0, 0, 0]","[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]","False","[0, 2, 1, 2, 1, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]"]] for k in range(len(inputs)): display(Markdown(f'{k+1}. 
TEST {inputs[k]} = {",".join(expected_outputs[k])}')) print('-'*60) run_and_test(inputs[k],expected_outputs[k],asgn03_01Play_with_lists) ###Output _____no_output_____ ###Markdown Assignment 03.02: Play with lists and sets StatementWrite a program that - Reads a string `s` - Creates a list `lst` containing as elements the characters in `s` in the same order in which they appear in `s` - sets the string `'1'` as the value of the first entry of `lst` - prints `lst`- Creates a set `st` containing the characters in the list - prints the length of `st` - prints the minimum element in `st` - prints the maximum element in `st`- Creates a list `lst2` containing the elements in `st` - prints the length of `lst2` - prints the minimum element in `lst2` - prints the maximum element in `lst2` - sorts `lst2` - appends string `'!'` at the end of `lst2` - prints `lst2` Example input 1```abba``` Example output 1```['1', 'b', 'b', 'a']31b31b['1', 'a', 'b', '!']``` Example input 2```ciaomondo``` Example output 2```['1', 'i', 'a', 'o', 'm', 'o', 'n', 'd', 'o']71o71o['1', 'a', 'd', 'i', 'm', 'n', 'o', '!']``` Write your solution here* Do not change the first line (`def ...():`)* Maintain the given indentation* You can run some tests by yourself by decommenting the last line ###Code def asgn03_02Play_with_lists_and_sets(): # This program reads a number and prints it a = int(input()) print(a) # Change it according to the assignment description #You can test independently your solution by executing the following line #asgn03_02Play_with_lists_and_sets() ###Output _____no_output_____ ###Markdown Run the following cells to perform the provided tests ###Code #@title RUN and TEST ALL from IPython.display import display, Markdown inputs=[['abba'],['ciaomondo']] expected_outputs=[["['1', 'b', 'b', 'a']","3","1","b","3","1","b","['1', 'a', 'b', '!']"],\ ["['1', 'i', 'a', 'o', 'm', 'o', 'n', 'd', 'o']","7","1","o","7","1","o","['1', 'a', 'd', 'i', 'm', 'n', 'o', '!']"], \ ] for k in range(len(inputs)): display(Markdown(f'{k+1}. TEST {inputs[k]} = {",".join(expected_outputs[k])}')) print('-'*60) run_and_test(inputs[k],expected_outputs[k],asgn03_02Play_with_lists_and_sets) ###Output _____no_output_____ ###Markdown Assignment 03.03: Play with matrices StatementWrite a program that reads a string of digits `s`, and creates a 2-dimensional list (a matrix of size 2 times the length of s) containing- in the first row: the digits in `s`- in the second row: twice the value of each digit in `s` Finally, your program should print the list Example input 1```123``` Example output 1```[[1, 2, 3], [2, 4, 6]]``` Example input 2```12345``` Example output 2```[[1, 2, 3, 4, 5], [2, 4, 6, 8, 10]]``` Write your solution here* Do not change the first line (`def ...():`)* Maintain the given indentation* You can run some tests by yourself by decommenting the last line ###Code def asgn03_03Play_with_matrices(): # This program reads a number and prints it s = input() # Change it according to the assignment description #You can test independently your solution by executing the following line #asgn03_03Play_with_matrices() ###Output _____no_output_____ ###Markdown Run the following cells to perform the provided tests ###Code #@title RUN and TEST ALL from IPython.display import display, Markdown inputs=[['123'],['12345'],['56734']] expected_outputs=[["[[1, 2, 3], [2, 4, 6]]"],\ ["[[1, 2, 3, 4, 5], [2, 4, 6, 8, 10]]"], \ ["[[5, 6, 7, 3, 4], [10, 12, 14, 6, 8]]"]] for k in range(len(inputs)): display(Markdown(f'{k+1}. 
TEST {inputs[k]} = {",".join(expected_outputs[k])}')) print('-'*60) run_and_test(inputs[k],expected_outputs[k],asgn03_03Play_with_matrices) ###Output _____no_output_____ ###Markdown Assignment 03.04: Play with dictionaries StatementWrite a program that - Reads three strings `st1`, `st2`, and `st3` given in 3 lines- Creates a dictionary (`d1`) mapping each string to its upper-case version- Prints such dictionary `d1` Example input 1```Andrea``````Daniele``````Giulio``` Example output 1```{'Andrea': 'ANDREA', 'Daniele': 'DANIELE', 'Giulio': 'GIULIO'}``` Example input 2```Andrea``````Daniele``````Andrea``` Example output 2```{'Andrea': 'ANDREA', 'Daniele': 'DANIELE'}``` Write your solution here* Do not change the first line (`def ...():`)* Maintain the given indentation* You can run some tests by yourself by decommenting the last line ###Code def asgn03_04Play_with_dictionaries(): # This program reads a string and prints it s = input() print(s) # Change it according to the assignment description #You can test independently your solution by executing the following line #asgn03_04Play_with_dictionaries() ###Output _____no_output_____ ###Markdown Run the following cells to perform the provided tests ###Code #@title RUN and TEST ALL from IPython.display import display, Markdown inputs=[['Andrea','Daniele','Andrea'],['Andrea','Daniele','Giulio'],['ANDREA','DANIELE','GIULIO']] expected_outputs=[["{'Andrea': 'ANDREA', 'Daniele': 'DANIELE'}"],\ ["{'Andrea': 'ANDREA', 'Daniele': 'DANIELE', 'Giulio': 'GIULIO'}"], \ ["{'ANDREA': 'ANDREA', 'DANIELE': 'DANIELE', 'GIULIO': 'GIULIO'}"]] for k in range(len(inputs)): display(Markdown(f'{k+1}. TEST {inputs[k]} = {",".join(expected_outputs[k])}')) print('-'*60) run_and_test(inputs[k],expected_outputs[k],asgn03_04Play_with_dictionaries) ###Output _____no_output_____ ###Markdown Assignment 03.05: Dictionaries and numbers StatementWrite a program that - Reads a string containing only digits- Creates a dictionary (`d1`) casting each character in a string (a string representation of a digit) to its `int` counterpart- Prints this dictionary `d1`**Note**- The length of the input string is not known a prior. Your implementation should work with strings of any length. Example input 1```12345``` Example output 1```{'1': 1, '2': 2, '3': 3, '4': 4, '5': 5}``` HintIn class we have seen how to create dictionaries using list comprehension. With that in mind, solving this exercise requires just a few lines of code Write your solution here* Do not change the first line (`def ...():`)* Maintain the given indentation* You can run some tests by yourself by decommenting the last line ###Code def asgn03_05Dictionaries_and_numbers(): # This program reads a string and prints it s = input() print(s) # Change it according to the assignment description #You can test independently your solution by executing the following line #asgn03_05Dictionaries_and_numbers() ###Output _____no_output_____ ###Markdown Run the following cells to perform the provided tests ###Code #@title RUN and TEST ALL from IPython.display import display, Markdown inputs=[['123'],['12345'],['123456789']] expected_outputs=[["{'1': 1, '2': 2, '3': 3}"],\ ["{'1': 1, '2': 2, '3': 3, '4': 4, '5': 5}"], \ ["{'1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}"]] for k in range(len(inputs)): display(Markdown(f'{k+1}. 
TEST {inputs[k]} = {",".join(expected_outputs[k])}')) print('-'*60) run_and_test(inputs[k],expected_outputs[k],asgn03_05Dictionaries_and_numbers) ###Output _____no_output_____ ###Markdown Assignment 03.06: Play with class 3 StatementWrite a program that - Reads three integers: - `start` - `stop` - `step`- Creates a range `r` using these three parameters- Creates a list `x` containing the elements in `r`- Creates a list `y` containing the square of each element in `r`- Creates a dictionary `d` that maps each element in `x` with the corresponding one in `y`- Prints `d` Example input 1```0``````6``````1``` Example output 1```{0: 0, 1: 1, 2: 4, 3: 9, 4: 16, 5: 25}``` Example input 2```0``````100``````10``` Example output 2```{0: 0, 10: 100, 20: 400, 30: 900, 40: 1600, 50: 2500, 60: 3600, 70: 4900, 80: 6400, 90: 8100}``` Write your solution here* Do not change the first line (`def ...():`)* Maintain the given indentation* You can run some tests by yourself by decommenting the last line ###Code def asgn03_06Play_with_class_3(): # This program reads a string and prints it s = input() print(s) # Change it according to the assignment description #You can test independently your solution by executing the following line # asgn03_06Play_with_class_3() ###Output _____no_output_____ ###Markdown Run the following cells to perform the provided tests ###Code #@title RUN and TEST ALL from IPython.display import display, Markdown inputs=[[0,6,1],[0,30,2],[0,100,10]] expected_outputs=[["{0: 0, 1: 1, 2: 4, 3: 9, 4: 16, 5: 25}"],\ ["{0: 0, 2: 4, 4: 16, 6: 36, 8: 64, 10: 100, 12: 144, 14: 196, 16: 256, 18: 324, 20: 400, 22: 484, 24: 576, 26: 676, 28: 784}"], \ ["{0: 0, 10: 100, 20: 400, 30: 900, 40: 1600, 50: 2500, 60: 3600, 70: 4900, 80: 6400, 90: 8100}"]] for k in range(len(inputs)): display(Markdown(f'{k+1}. TEST {inputs[k]} = {",".join(expected_outputs[k])}')) print('-'*60) run_and_test(inputs[k],expected_outputs[k],asgn03_06Play_with_class_3) ###Output _____no_output_____
scripts/rebuttal/figR3-01.moisture_source_seperation.AR_boundary.ipynb
###Markdown only use the abs version AR see final script at :/pic/projects/hyperion/chen423/tools/paper_tools/AR-SST/step5.moisture_decomposition.py ###Code import numpy as np import xarray as xr import matplotlib.pyplot as plt scenario = 'HIST' year = 2003 month = 10 para_b = int(10) reffile = '/raid1/chen423/serdp/data/ref_data/wrf_ref/geo_em.d01.nc' landmask = xr.open_dataset(reffile).LANDMASK.values[0,para_b:(450-para_b),para_b:(450-para_b)] ETdir = '/home/chen423/.tmp/AR-SST/%s/moisture/ET/' % (scenario) uIVTdir = '/home/chen423/.tmp/AR-SST/%s/moisture/uIVT/' % (scenario) ARdir = '/home/chen423/.tmp/AR-SST/%s/AR_tagged/Gershunov/SERDP6km_adj/' % (scenario) ETfile = ETdir + 'WRF_NARR.%s.SFCEVP.%d.%d.nc' % (scenario, year, month) uIVTfile = uIVTdir + 'WRF_NARR.%s.uIVT.%d.%d.nc' % (scenario, year, month) ARfile = ARdir + 'WRF_ARtag_adj.%s.Gershunov.%d.%d.ARabs.nc' % (scenario, year, month) ETdata = xr.open_dataset(ETfile).SFCEVP.values[:,para_b:(450-para_b),para_b:(450-para_b)] uIVTdata = xr.open_dataset(uIVTfile).uIVT.values[:,para_b:(450-para_b),para_b:(450-para_b)] ARtag = xr.open_dataset(ARfile).AR_tag.values[:,para_b:(450-para_b),para_b:(450-para_b)] def compute_moisture_intensity(in_ARtag, in_uIVT, in_ET, ref_mask): uIVT_total = in_uIVT[:,0][in_ARtag[:,0]==1].sum()*6000*86400 ET_total = in_ET[(in_ARtag==1)&(ref_mask==0)].sum()*6000*6000 if (ET_total+uIVT_total)==0: out_ratio = -9999 else: out_ratio = ET_total/(ET_total+uIVT_total) return out_ratio nt = ARtag.shape[0] testdata = np.zeros(nt) for t in np.arange(nt): testdata[t] = compute_moisture_intensity(ARtag[t], uIVTdata[t], ETdata[int(np.floor(t/4))], landmask) print(t) testdata plt.scatter(np.arange(nt)[testdata>0], testdata[testdata>0]) fig_debug = plt.figure() plt.subplot(1,2,1) plt.pcolormesh(ARtag[9,0:430,0:200]) plt.subplot(1,2,2) plt.pcolormesh(uIVTdata[9,0:430,0:200]) plt.colorbar() plt.show() plt.close() del(fig_debug) plt.pcolormesh(landmask) plt.colorbar() ###Output _____no_output_____
notebook/Graphviz playground and note.ipynb
###Markdown Graphviz PlayGrounds ###Code %pylab inline --no-import-all import pygraphviz as pgv from IPython.display import Image as Image import numpy as np from numpy.random import * A = pgv.AGraph() # Create Graph n = 10 path="./assets/" for i in range(n): node_a = randint(n) node_b = randint(n)+1 A.add_node(node_a) A.add_node(node_b) # We can userriibute handring after pygraphviz-0.36 A.add_edge(node_a, node_b, label=randint(10)+1) A.layout(prog='neato') A.draw(path+'graph.png') A.draw(path+'graph.eps') # We can saved eps format. Image(filename=path+'graph.png') ###Output Populating the interactive namespace from numpy and matplotlib
pytorch_mnist/.ipynb_checkpoints/pytorch_mnist-checkpoint.ipynb
###Markdown MNIST Training using PyTorch Contents1. [Background](Background)1. [Setup](Setup)1. [Data](Data)1. [Train](Train)1. [Host](Host)--- BackgroundMNIST is a widely used dataset for handwritten digit classification. It consists of 70,000 labeled 28x28 pixel grayscale images of hand-written digits. The dataset is split into 60,000 training images and 10,000 test images. There are 10 classes (one for each of the 10 digits). This tutorial will show how to train and test an MNIST model on SageMaker using PyTorch.For more information about the PyTorch in SageMaker, please visit [sagemaker-pytorch-containers](https://github.com/aws/sagemaker-pytorch-containers) and [sagemaker-python-sdk](https://github.com/aws/sagemaker-python-sdk) github repositories.--- Setup_This notebook was created and tested on an ml.m4.xlarge notebook instance._Let's start by creating a SageMaker session and specifying:- The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting.- The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the `sagemaker.get_execution_role()` with a the appropriate full IAM role arn string(s). Upgrade sagemaker sdk to v2 ###Code !pip install sagemaker --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple import boto3 import sagemaker sagemaker_session = sagemaker.Session() bucket = sagemaker_session.default_bucket() prefix = "sagemaker/DEMO-pytorch-mnist" role = sagemaker.get_execution_role(sagemaker_session=sagemaker_session) print("Sagemaker SDK version: {0}".format(sagemaker.__version__)) print("Sagemaker Execute Role: {0}".format(role)) print("Bucket: {0}".format(bucket)) ###Output Sagemaker SDK version: 2.51.0 Sagemaker Execute role: arn:aws-cn:iam::188642756190:role/service-role/AmazonSageMaker-ExecutionRole-20200605T111655 Bucket: sagemaker-cn-northwest-1-188642756190 ###Markdown Data Getting the data ###Code from torchvision import datasets, transforms datasets.MNIST( "data", download=True, transform=transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] ), ) ###Output Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz to data/MNIST/raw/train-images-idx3-ubyte.gz ###Markdown Uploading the data to S3We are going to use the `sagemaker.Session.upload_data` function to upload our datasets to an S3 location. The return value inputs identifies the location -- we will use later when we start the training job. ###Code inputs = sagemaker_session.upload_data(path="data", bucket=bucket, key_prefix=prefix) print("input spec (in this case, just an S3 path): {}".format(inputs)) ###Output input spec (in this case, just an S3 path): s3://sagemaker-cn-northwest-1-188642756190/sagemaker/DEMO-pytorch-mnist ###Markdown Train Training scriptThe `mnist.py` script provides all the code we need for training and hosting a SageMaker model (`model_fn` function to load a model).The training script is very similar to a training script you might run outside of SageMaker, but you can access useful properties about the training environment through various environment variables, such as:* `SM_MODEL_DIR`: A string representing the path to the directory to write model artifacts to. 
These artifacts are uploaded to S3 for model hosting.* `SM_NUM_GPUS`: The number of gpus available in the current container.* `SM_CURRENT_HOST`: The name of the current container on the container network.* `SM_HOSTS`: JSON encoded list containing all the hosts .Supposing one input channel, 'training', was used in the call to the PyTorch estimator's `fit()` method, the following will be set, following the format `SM_CHANNEL_[channel_name]`:* `SM_CHANNEL_TRAINING`: A string representing the path to the directory containing data in the 'training' channel.For more information about training environment variables, please visit [SageMaker Containers](https://github.com/aws/sagemaker-containers).A typical training script loads data from the input channels, configures training with hyperparameters, trains a model, and saves a model to `model_dir` so that it can be hosted later. Hyperparameters are passed to your script as arguments and can be retrieved with an `argparse.ArgumentParser` instance.Because the SageMaker imports the training script, you should put your training code in a main guard (``if __name__=='__main__':``) if you are using the same script to host your model as we do in this example, so that SageMaker does not inadvertently run your training code at the wrong point in execution.For example, the script run by this notebook: ###Code !pygmentize mnist.py ###Output import argparse import json import logging import os import sys import sagemaker_containers import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.utils.data import torch.utils.data.distributed from torchvision import datasets, transforms logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler(sys.stdout)) # Based on https://github.com/pytorch/examples/blob/master/mnist/main.py class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(320, 50) self.fc2 = nn.Linear(50, 10) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = x.view(-1, 320) x = F.relu(self.fc1(x)) x = F.dropout(x, training=self.training) x = self.fc2(x) return F.log_softmax(x, dim=1) def _get_train_data_loader(batch_size, training_dir, is_distributed, **kwargs): logger.info("Get train data loader") dataset = datasets.MNIST( training_dir, train=True, transform=transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] ), ) train_sampler = ( torch.utils.data.distributed.DistributedSampler(dataset) if is_distributed else None ) return torch.utils.data.DataLoader( dataset, batch_size=batch_size, shuffle=train_sampler is None, sampler=train_sampler, **kwargs ) def _get_test_data_loader(test_batch_size, training_dir, **kwargs): logger.info("Get test data loader") return torch.utils.data.DataLoader( datasets.MNIST( training_dir, train=False, transform=transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] ), ), batch_size=test_batch_size, shuffle=True, **kwargs ) def _average_gradients(model): # Gradient averaging. 
size = float(dist.get_world_size()) for param in model.parameters(): dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM) param.grad.data /= size def train(args): is_distributed = len(args.hosts) > 1 and args.backend is not None logger.debug("Distributed training - {}".format(is_distributed)) use_cuda = args.num_gpus > 0 logger.debug("Number of gpus available - {}".format(args.num_gpus)) kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {} device = torch.device("cuda" if use_cuda else "cpu") if is_distributed: # Initialize the distributed environment. world_size = len(args.hosts) os.environ["WORLD_SIZE"] = str(world_size) host_rank = args.hosts.index(args.current_host) os.environ["RANK"] = str(host_rank) dist.init_process_group(backend=args.backend, rank=host_rank, world_size=world_size) logger.info( "Initialized the distributed environment: '{}' backend on {} nodes. ".format( args.backend, dist.get_world_size() ) + "Current host rank is {}. Number of gpus: {}".format(dist.get_rank(), args.num_gpus) ) # set the seed for generating random numbers torch.manual_seed(args.seed) if use_cuda: torch.cuda.manual_seed(args.seed) train_loader = _get_train_data_loader(args.batch_size, args.data_dir, is_distributed, **kwargs) test_loader = _get_test_data_loader(args.test_batch_size, args.data_dir, **kwargs) logger.debug( "Processes {}/{} ({:.0f}%) of train data".format( len(train_loader.sampler), len(train_loader.dataset), 100.0 * len(train_loader.sampler) / len(train_loader.dataset), ) ) logger.debug( "Processes {}/{} ({:.0f}%) of test data".format( len(test_loader.sampler), len(test_loader.dataset), 100.0 * len(test_loader.sampler) / len(test_loader.dataset), ) ) model = Net().to(device) if is_distributed and use_cuda: # multi-machine multi-gpu case model = torch.nn.parallel.DistributedDataParallel(model) else: # single-machine multi-gpu case or single-machine or multi-machine cpu case model = torch.nn.DataParallel(model) optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum) for epoch in range(1, args.epochs + 1): model.train() for batch_idx, (data, target) in enumerate(train_loader, 1): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() if is_distributed and not use_cuda: # average gradients manually for multi-machine cpu case only _average_gradients(model) optimizer.step() if batch_idx % args.log_interval == 0: logger.info( "Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}".format( epoch, batch_idx * len(data), len(train_loader.sampler), 100.0 * batch_idx / len(train_loader), loss.item(), ) ) test(model, test_loader, device) save_model(model, args.model_dir) def test(model, test_loader, device): model.eval() test_loss = 0 correct = 0 with torch.no_grad(): for data, target in test_loader: data, target = data.to(device), target.to(device) output = model(data) test_loss += F.nll_loss(output, target, size_average=False).item() # sum up batch loss pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability correct += pred.eq(target.view_as(pred)).sum().item() test_loss /= len(test_loader.dataset) logger.info( "Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format( test_loss, correct, len(test_loader.dataset), 100.0 * correct / len(test_loader.dataset) ) ) def model_fn(model_dir): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = torch.nn.DataParallel(Net()) with open(os.path.join(model_dir, "model.pth"), "rb") 
as f: model.load_state_dict(torch.load(f)) return model.to(device) def save_model(model, model_dir): logger.info("Saving the model.") path = os.path.join(model_dir, "model.pth") # recommended way from http://pytorch.org/docs/master/notes/serialization.html torch.save(model.cpu().state_dict(), path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Data and model checkpoints directories parser.add_argument( "--batch-size", type=int, default=64, metavar="N", help="input batch size for training (default: 64)", ) parser.add_argument( "--test-batch-size", type=int, default=1000, metavar="N", help="input batch size for testing (default: 1000)", ) parser.add_argument( "--epochs", type=int, default=10, metavar="N", help="number of epochs to train (default: 10)", ) parser.add_argument( "--lr", type=float, default=0.01, metavar="LR", help="learning rate (default: 0.01)" ) parser.add_argument( "--momentum", type=float, default=0.5, metavar="M", help="SGD momentum (default: 0.5)" ) parser.add_argument("--seed", type=int, default=1, metavar="S", help="random seed (default: 1)") parser.add_argument( "--log-interval", type=int, default=100, metavar="N", help="how many batches to wait before logging training status", ) parser.add_argument( "--backend", type=str, default=None, help="backend for distributed training (tcp, gloo on cpu and gloo, nccl on gpu)", ) # Container environment parser.add_argument("--hosts", type=list, default=json.loads(os.environ["SM_HOSTS"])) parser.add_argument("--current-host", type=str, default=os.environ["SM_CURRENT_HOST"]) parser.add_argument("--model-dir", type=str, default=os.environ["SM_MODEL_DIR"]) parser.add_argument("--data-dir", type=str, default=os.environ["SM_CHANNEL_TRAINING"]) parser.add_argument("--num-gpus", type=int, default=os.environ["SM_NUM_GPUS"]) train(parser.parse_args()) ###Markdown Run training in SageMakerThe `PyTorch` class allows us to run our training function as a training job on SageMaker infrastructure. We need to configure it with our training script, an IAM role, the number of training instances, the training instance type, and hyperparameters. In this case we are going to run our training job on 2 ```ml.c4.xlarge``` instances. But this example can be ran on one or multiple, cpu or gpu instances ([full list of available instances](https://aws.amazon.com/sagemaker/pricing/instance-types/)). The hyperparameters parameter is a dict of values that will be passed to your training script -- you can see how to access these values in the `mnist.py` script above. ###Code from sagemaker.pytorch import PyTorch estimator = PyTorch( sagemaker_session=sagemaker_session, entry_point="mnist.py", role=role, framework_version="1.6.0", py_version="py3", instance_count=1, instance_type='ml.m5.xlarge', use_spot_instances=True, max_run= 3600, max_wait=7200, hyperparameters={"epochs": 6, "backend": "gloo"}, ) ###Output _____no_output_____ ###Markdown After we've constructed our `PyTorch` object, we can fit it using the data we uploaded to S3. SageMaker makes sure our data is available in the local filesystem, so our training script can simply read the data from disk. ###Code estimator.fit({"training": "s3://sagemaker-cn-northwest-1-188642756190/sagemaker/DEMO-pytorch-mnist"}) ###Output 2021-08-04 02:49:52 Starting - Starting the training job... 2021-08-04 02:49:55 Starting - Launching requested ML instances... 2021-08-04 02:50:44 Starting - Preparing the instances for training............ 2021-08-04 02:52:37 Downloading - Downloading input data... 
2021-08-04 02:53:16 Training - Training image download completed. Training in progress..bash: cannot set terminal process group (-1): Inappropriate ioctl for device bash: no job control in this shell 2021-08-04 02:53:16,146 sagemaker-training-toolkit INFO Imported framework sagemaker_pytorch_container.training 2021-08-04 02:53:16,161 sagemaker-training-toolkit INFO No GPUs detected (normal if no gpus installed) 2021-08-04 02:53:16,169 sagemaker_pytorch_container.training INFO Block until all host DNS lookups succeed. 2021-08-04 02:53:19,246 sagemaker_pytorch_container.training INFO Invoking user training script. 2021-08-04 02:53:19,503 sagemaker-training-toolkit INFO No GPUs detected (normal if no gpus installed) 2021-08-04 02:53:19,513 sagemaker-training-toolkit INFO No GPUs detected (normal if no gpus installed) 2021-08-04 02:53:19,523 sagemaker-training-toolkit INFO No GPUs detected (normal if no gpus installed) 2021-08-04 02:53:19,532 sagemaker-training-toolkit INFO Invoking user script  Training Env:  { "additional_framework_parameters": {}, "channel_input_dirs": { "training": "/opt/ml/input/data/training" }, "current_host": "algo-1", "framework_module": "sagemaker_pytorch_container.training:main", "hosts": [ "algo-1" ], "hyperparameters": { "backend": "gloo", "epochs": 6 }, "input_config_dir": "/opt/ml/input/config", "input_data_config": { "training": { "TrainingInputMode": "File", "S3DistributionType": "FullyReplicated", "RecordWrapperType": "None" } }, "input_dir": "/opt/ml/input", "is_master": true, "job_name": "pytorch-training-2021-08-04-02-49-51-901", "log_level": 20, "master_hostname": "algo-1", "model_dir": "/opt/ml/model", "module_dir": "s3://sagemaker-cn-northwest-1-188642756190/pytorch-training-2021-08-04-02-49-51-901/source/sourcedir.tar.gz", "module_name": "mnist", "network_interface_name": "eth0", "num_cpus": 4, "num_gpus": 0, "output_data_dir": "/opt/ml/output/data", "output_dir": "/opt/ml/output", "output_intermediate_dir": "/opt/ml/output/intermediate", "resource_config": { "current_host": "algo-1", "hosts": [ "algo-1" ], "network_interface_name": "eth0" }, "user_entry_point": "mnist.py" }  Environment variables:  SM_HOSTS=["algo-1"] SM_NETWORK_INTERFACE_NAME=eth0 SM_HPS={"backend":"gloo","epochs":6} SM_USER_ENTRY_POINT=mnist.py SM_FRAMEWORK_PARAMS={} SM_RESOURCE_CONFIG={"current_host":"algo-1","hosts":["algo-1"],"network_interface_name":"eth0"} SM_INPUT_DATA_CONFIG={"training":{"RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"}} SM_OUTPUT_DATA_DIR=/opt/ml/output/data SM_CHANNELS=["training"] SM_CURRENT_HOST=algo-1 SM_MODULE_NAME=mnist SM_LOG_LEVEL=20 SM_FRAMEWORK_MODULE=sagemaker_pytorch_container.training:main SM_INPUT_DIR=/opt/ml/input SM_INPUT_CONFIG_DIR=/opt/ml/input/config SM_OUTPUT_DIR=/opt/ml/output SM_NUM_CPUS=4 SM_NUM_GPUS=0 SM_MODEL_DIR=/opt/ml/model SM_MODULE_DIR=s3://sagemaker-cn-northwest-1-188642756190/pytorch-training-2021-08-04-02-49-51-901/source/sourcedir.tar.gz 
SM_TRAINING_ENV={"additional_framework_parameters":{},"channel_input_dirs":{"training":"/opt/ml/input/data/training"},"current_host":"algo-1","framework_module":"sagemaker_pytorch_container.training:main","hosts":["algo-1"],"hyperparameters":{"backend":"gloo","epochs":6},"input_config_dir":"/opt/ml/input/config","input_data_config":{"training":{"RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"}},"input_dir":"/opt/ml/input","is_master":true,"job_name":"pytorch-training-2021-08-04-02-49-51-901","log_level":20,"master_hostname":"algo-1","model_dir":"/opt/ml/model","module_dir":"s3://sagemaker-cn-northwest-1-188642756190/pytorch-training-2021-08-04-02-49-51-901/source/sourcedir.tar.gz","module_name":"mnist","network_interface_name":"eth0","num_cpus":4,"num_gpus":0,"output_data_dir":"/opt/ml/output/data","output_dir":"/opt/ml/output","output_intermediate_dir":"/opt/ml/output/intermediate","resource_config":{"current_host":"algo-1","hosts":["algo-1"],"network_interface_name":"eth0"},"user_entry_point":"mnist.py"} SM_USER_ARGS=["--backend","gloo","--epochs","6"] SM_OUTPUT_INTERMEDIATE_DIR=/opt/ml/output/intermediate SM_CHANNEL_TRAINING=/opt/ml/input/data/training SM_HP_BACKEND=gloo SM_HP_EPOCHS=6 PYTHONPATH=/opt/ml/code:/opt/conda/bin:/opt/conda/lib/python36.zip:/opt/conda/lib/python3.6:/opt/conda/lib/python3.6/lib-dynload:/opt/conda/lib/python3.6/site-packages  Invoking script with the following command:  /opt/conda/bin/python3.6 mnist.py --backend gloo --epochs 6  Distributed training - False Number of gpus available - 0 Get train data loader Get test data loader Processes 60000/60000 (100%) of train data Processes 10000/10000 (100%) of test data [2021-08-04 02:53:20.857 algo-1:26 INFO utils.py:27] RULE_JOB_STOP_SIGNAL_FILENAME: None [2021-08-04 02:53:21.141 algo-1:26 INFO profiler_config_parser.py:102] Unable to find config at /opt/ml/input/config/profilerconfig.json. Profiler is disabled. [2021-08-04 02:53:21.141 algo-1:26 INFO json_config.py:91] Creating hook from json_config at /opt/ml/input/config/debughookconfig.json. [2021-08-04 02:53:21.141 algo-1:26 INFO hook.py:199] tensorboard_dir has not been set for the hook. SMDebug will not be exporting tensorboard summaries. [2021-08-04 02:53:21.142 algo-1:26 INFO hook.py:253] Saving to /opt/ml/output/tensors [2021-08-04 02:53:21.142 algo-1:26 INFO state_store.py:67] The checkpoint config file /opt/ml/input/config/checkpointconfig.json does not exist. 
[2021-08-04 02:53:21.167 algo-1:26 INFO hook.py:550] name:module.conv1.weight count_params:250 [2021-08-04 02:53:21.167 algo-1:26 INFO hook.py:550] name:module.conv1.bias count_params:10 [2021-08-04 02:53:21.167 algo-1:26 INFO hook.py:550] name:module.conv2.weight count_params:5000 [2021-08-04 02:53:21.167 algo-1:26 INFO hook.py:550] name:module.conv2.bias count_params:20 [2021-08-04 02:53:21.167 algo-1:26 INFO hook.py:550] name:module.fc1.weight count_params:16000 [2021-08-04 02:53:21.167 algo-1:26 INFO hook.py:550] name:module.fc1.bias count_params:50 [2021-08-04 02:53:21.167 algo-1:26 INFO hook.py:550] name:module.fc2.weight count_params:500 [2021-08-04 02:53:21.168 algo-1:26 INFO hook.py:550] name:module.fc2.bias count_params:10 [2021-08-04 02:53:21.168 algo-1:26 INFO hook.py:552] Total Trainable Params: 21840 [2021-08-04 02:53:21.168 algo-1:26 INFO hook.py:413] Monitoring the collections: losses [2021-08-04 02:53:21.170 algo-1:26 INFO hook.py:476] Hook is writing from the hook with pid: 26  Train Epoch: 1 [6400/60000 (11%)] Loss: 1.935915 Train Epoch: 1 [12800/60000 (21%)] Loss: 1.211672 Train Epoch: 1 [19200/60000 (32%)] Loss: 0.880198 Train Epoch: 1 [25600/60000 (43%)] Loss: 0.596671 Train Epoch: 1 [32000/60000 (53%)] Loss: 0.537024 Train Epoch: 1 [38400/60000 (64%)] Loss: 0.696666 Train Epoch: 1 [44800/60000 (75%)] Loss: 0.492262 Train Epoch: 1 [51200/60000 (85%)] Loss: 0.595390 Train Epoch: 1 [57600/60000 (96%)] Loss: 0.437477 Test set: Average loss: 0.2071, Accuracy: 9391/10000 (94%)  Train Epoch: 2 [6400/60000 (11%)] Loss: 0.739252 Train Epoch: 2 [12800/60000 (21%)] Loss: 0.396193 Train Epoch: 2 [19200/60000 (32%)] Loss: 0.404517 Train Epoch: 2 [25600/60000 (43%)] Loss: 0.655186 Train Epoch: 2 [32000/60000 (53%)] Loss: 0.604028 Train Epoch: 2 [38400/60000 (64%)] Loss: 0.431388 Train Epoch: 2 [44800/60000 (75%)] Loss: 0.297259 Train Epoch: 2 [51200/60000 (85%)] Loss: 0.556284 Train Epoch: 2 [57600/60000 (96%)] Loss: 0.294674 Test set: Average loss: 0.1323, Accuracy: 9598/10000 (96%)  Train Epoch: 3 [6400/60000 (11%)] Loss: 0.417698 Train Epoch: 3 [12800/60000 (21%)] Loss: 0.220523 Train Epoch: 3 [19200/60000 (32%)] Loss: 0.150998 Train Epoch: 3 [25600/60000 (43%)] Loss: 0.305187 Train Epoch: 3 [32000/60000 (53%)] Loss: 0.180535 Train Epoch: 3 [38400/60000 (64%)] Loss: 0.359533 Train Epoch: 3 [44800/60000 (75%)] Loss: 0.195364 Train Epoch: 3 [51200/60000 (85%)] Loss: 0.204428 Train Epoch: 3 [57600/60000 (96%)] Loss: 0.303194 Test set: Average loss: 0.1028, Accuracy: 9683/10000 (97%)  Train Epoch: 4 [6400/60000 (11%)] Loss: 0.385129 Train Epoch: 4 [12800/60000 (21%)] Loss: 0.314084 Train Epoch: 4 [19200/60000 (32%)] Loss: 0.307006 Train Epoch: 4 [25600/60000 (43%)] Loss: 0.157440 Train Epoch: 4 [32000/60000 (53%)] Loss: 0.201386 Train Epoch: 4 [38400/60000 (64%)] Loss: 0.124630 Train Epoch: 4 [44800/60000 (75%)] Loss: 0.276066 Train Epoch: 4 [51200/60000 (85%)] Loss: 0.226980 Train Epoch: 4 [57600/60000 (96%)] Loss: 0.340483 Test set: Average loss: 0.0904, Accuracy: 9712/10000 (97%)  Train Epoch: 5 [6400/60000 (11%)] Loss: 0.175094 Train Epoch: 5 [12800/60000 (21%)] Loss: 0.276447 Train Epoch: 5 [19200/60000 (32%)] Loss: 0.300416 Train Epoch: 5 [25600/60000 (43%)] Loss: 0.089050 Train Epoch: 5 [32000/60000 (53%)] Loss: 0.224019 Train Epoch: 5 [38400/60000 (64%)] Loss: 0.080593 Train Epoch: 5 [44800/60000 (75%)] Loss: 0.133747 Train Epoch: 5 [51200/60000 (85%)] Loss: 0.381174 Train Epoch: 5 [57600/60000 (96%)] Loss: 0.106297 Test set: Average loss: 0.0761, Accuracy: 9761/10000 (98%) 
 Train Epoch: 6 [6400/60000 (11%)] Loss: 0.224328 Train Epoch: 6 [12800/60000 (21%)] Loss: 0.235598 Train Epoch: 6 [19200/60000 (32%)] Loss: 0.126070 Train Epoch: 6 [25600/60000 (43%)] Loss: 0.195561 Train Epoch: 6 [32000/60000 (53%)] Loss: 0.213003 Train Epoch: 6 [38400/60000 (64%)] Loss: 0.156561 Train Epoch: 6 [44800/60000 (75%)] Loss: 0.159231 Train Epoch: 6 [51200/60000 (85%)] Loss: 0.156286 Train Epoch: 6 [57600/60000 (96%)] Loss: 0.276445 Test set: Average loss: 0.0696, Accuracy: 9790/10000 (98%)  Saving the model. INFO:__main__:Train Epoch: 1 [6400/60000 (11%)] Loss: 1.935915 INFO:__main__:Train Epoch: 1 [12800/60000 (21%)] Loss: 1.211672 INFO:__main__:Train Epoch: 1 [19200/60000 (32%)] Loss: 0.880198 INFO:__main__:Train Epoch: 1 [25600/60000 (43%)] Loss: 0.596671 INFO:__main__:Train Epoch: 1 [32000/60000 (53%)] Loss: 0.537024 INFO:__main__:Train Epoch: 1 [38400/60000 (64%)] Loss: 0.696666 INFO:__main__:Train Epoch: 1 [44800/60000 (75%)] Loss: 0.492262 INFO:__main__:Train Epoch: 1 [51200/60000 (85%)] Loss: 0.595390 INFO:__main__:Train Epoch: 1 [57600/60000 (96%)] Loss: 0.437477 /opt/conda/lib/python3.6/site-packages/torch/nn/_reduction.py:44: UserWarning: size_average and reduce args will be deprecated, please use reduction='sum' instead. warnings.warn(warning.format(ret)) INFO:__main__:Test set: Average loss: 0.2071, Accuracy: 9391/10000 (94%)  INFO:__main__:Train Epoch: 2 [6400/60000 (11%)] Loss: 0.739252 INFO:__main__:Train Epoch: 2 [12800/60000 (21%)] Loss: 0.396193 INFO:__main__:Train Epoch: 2 [19200/60000 (32%)] Loss: 0.404517 INFO:__main__:Train Epoch: 2 [25600/60000 (43%)] Loss: 0.655186 INFO:__main__:Train Epoch: 2 [32000/60000 (53%)] Loss: 0.604028 INFO:__main__:Train Epoch: 2 [38400/60000 (64%)] Loss: 0.431388 INFO:__main__:Train Epoch: 2 [44800/60000 (75%)] Loss: 0.297259 INFO:__main__:Train Epoch: 2 [51200/60000 (85%)] Loss: 0.556284 INFO:__main__:Train Epoch: 2 [57600/60000 (96%)] Loss: 0.294674 INFO:__main__:Test set: Average loss: 0.1323, Accuracy: 9598/10000 (96%)  INFO:__main__:Train Epoch: 3 [6400/60000 (11%)] Loss: 0.417698 INFO:__main__:Train Epoch: 3 [12800/60000 (21%)] Loss: 0.220523 INFO:__main__:Train Epoch: 3 [19200/60000 (32%)] Loss: 0.150998 INFO:__main__:Train Epoch: 3 [25600/60000 (43%)] Loss: 0.305187 INFO:__main__:Train Epoch: 3 [32000/60000 (53%)] Loss: 0.180535 INFO:__main__:Train Epoch: 3 [38400/60000 (64%)] Loss: 0.359533 INFO:__main__:Train Epoch: 3 [44800/60000 (75%)] Loss: 0.195364 INFO:__main__:Train Epoch: 3 [51200/60000 (85%)] Loss: 0.204428 INFO:__main__:Train Epoch: 3 [57600/60000 (96%)] Loss: 0.303194 INFO:__main__:Test set: Average loss: 0.1028, Accuracy: 9683/10000 (97%)  INFO:__main__:Train Epoch: 4 [6400/60000 (11%)] Loss: 0.385129 INFO:__main__:Train Epoch: 4 [12800/60000 (21%)] Loss: 0.314084 INFO:__main__:Train Epoch: 4 [19200/60000 (32%)] Loss: 0.307006 INFO:__main__:Train Epoch: 4 [25600/60000 (43%)] Loss: 0.157440 INFO:__main__:Train Epoch: 4 [32000/60000 (53%)] Loss: 0.201386 INFO:__main__:Train Epoch: 4 [38400/60000 (64%)] Loss: 0.124630 INFO:__main__:Train Epoch: 4 [44800/60000 (75%)] Loss: 0.276066 INFO:__main__:Train Epoch: 4 [51200/60000 (85%)] Loss: 0.226980 INFO:__main__:Train Epoch: 4 [57600/60000 (96%)] Loss: 0.340483 INFO:__main__:Test set: Average loss: 0.0904, Accuracy: 9712/10000 (97%)  INFO:__main__:Train Epoch: 5 [6400/60000 (11%)] Loss: 0.175094 INFO:__main__:Train Epoch: 5 [12800/60000 (21%)] Loss: 0.276447 INFO:__main__:Train Epoch: 5 [19200/60000 (32%)] Loss: 0.300416 INFO:__main__:Train Epoch: 5 
[25600/60000 (43%)] Loss: 0.089050 INFO:__main__:Train Epoch: 5 [32000/60000 (53%)] Loss: 0.224019 INFO:__main__:Train Epoch: 5 [38400/60000 (64%)] Loss: 0.080593 INFO:__main__:Train Epoch: 5 [44800/60000 (75%)] Loss: 0.133747 INFO:__main__:Train Epoch: 5 [51200/60000 (85%)] Loss: 0.381174 INFO:__main__:Train Epoch: 5 [57600/60000 (96%)] Loss: 0.106297 INFO:__main__:Test set: Average loss: 0.0761, Accuracy: 9761/10000 (98%)  INFO:__main__:Train Epoch: 6 [6400/60000 (11%)] Loss: 0.224328 INFO:__main__:Train Epoch: 6 [12800/60000 (21%)] Loss: 0.235598 INFO:__main__:Train Epoch: 6 [19200/60000 (32%)] Loss: 0.126070 INFO:__main__:Train Epoch: 6 [25600/60000 (43%)] Loss: 0.195561 INFO:__main__:Train Epoch: 6 [32000/60000 (53%)] Loss: 0.213003 INFO:__main__:Train Epoch: 6 [38400/60000 (64%)] Loss: 0.156561 INFO:__main__:Train Epoch: 6 [44800/60000 (75%)] Loss: 0.159231 INFO:__main__:Train Epoch: 6 [51200/60000 (85%)] Loss: 0.156286 INFO:__main__:Train Epoch: 6 [57600/60000 (96%)] Loss: 0.276445 INFO:__main__:Test set: Average loss: 0.0696, Accuracy: 9790/10000 (98%)  INFO:__main__:Saving the model.  2021-08-04 02:55:00,337 sagemaker-training-toolkit INFO Reporting training SUCCESS 2021-08-04 02:55:11 Uploading - Uploading generated training model 2021-08-04 02:55:11 Completed - Training job completed Training seconds: 154 Billable seconds: 43 Managed Spot Training savings: 72.1% ###Markdown Host Create endpointAfter training, we use the `PyTorch` estimator object to build and deploy a `PyTorchPredictor`. This creates a Sagemaker Endpoint -- a hosted prediction service that we can use to perform inference.As mentioned above we have implementation of `model_fn` in the `mnist.py` script that is required. We are going to use default implementations of `input_fn`, `predict_fn`, `output_fn` and `transform_fm` defined in [sagemaker-pytorch-containers](https://github.com/aws/sagemaker-pytorch-containers).The arguments to the deploy function allow us to set the number and type of instances that will be used for the Endpoint. These do not need to be the same as the values we used for the training job. For example, you can train a model on a set of GPU-based instances, and then deploy the Endpoint to a fleet of CPU-based instances, but you need to make sure that you return or save your model as a cpu model similar to what we did in `mnist.py`. Here we will deploy the model to a single ```ml.m4.xlarge``` instance. ###Code predictor = estimator.deploy(initial_instance_count=1, instance_type="ml.m5.xlarge") ###Output -------------! ###Markdown EvaluateWe can now use this predictor to classify hand-written digits. Drawing into the image box loads the pixel data into a `data` variable in this notebook, which we can then pass to the `predictor`. ###Code from IPython.display import HTML HTML(open("input.html").read()) import numpy as np image = np.array([data], dtype=np.float32) response = predictor.predict(image) prediction = response.argmax(axis=1)[0] print(prediction) ###Output 5 ###Markdown CleanupAfter you have finished with this example, remember to delete the prediction endpoint to release the instance(s) associated with it ###Code estimator.delete_endpoint() ###Output _____no_output_____
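###Markdown A note on serving: the hosting section above relies on the default `input_fn`, `predict_fn` and `output_fn` supplied by the SageMaker PyTorch serving container. Purely as a hedged sketch (not part of the original notebook), the cell below shows roughly what explicit overrides in `mnist.py` could look like if you wanted to customize request handling. The hook names and signatures follow the SageMaker PyTorch inference conventions; the `application/x-npy` content type is an assumption about what the client sends.
###Code
# Hypothetical serving overrides for mnist.py -- a sketch, not the notebook's actual code.
# The SageMaker PyTorch container calls these hooks if they are defined in the entry point.
import io
import numpy as np
import torch


def input_fn(request_body, content_type):
    # Assume the client sends a serialized NumPy array (application/x-npy).
    if content_type == "application/x-npy":
        array = np.load(io.BytesIO(request_body), allow_pickle=False)
        return torch.from_numpy(array).float()
    raise ValueError("Unsupported content type: {}".format(content_type))


def predict_fn(input_data, model):
    # Run the loaded model (returned by model_fn) on the deserialized input.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.eval()
    with torch.no_grad():
        return model(input_data.to(device))


def output_fn(prediction, accept):
    # Return the predicted class index for each input row as plain text.
    return str(prediction.argmax(dim=1).cpu().tolist())
###Output _____no_output_____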
notebook/4_classification/6_CFU/sampling_test /c_undersampling.ipynb
###Markdown Balance the dataset ###Code df["IsBadBuy"].value_counts() df[df["IsBadBuy"]==0]["IsBadBuy"].value_counts() remove_n = 41411 drop_indices = np.random.choice(df[df["IsBadBuy"]==0].index, remove_n, replace=False) df_subset = df.drop(drop_indices) print(df_subset["IsBadBuy"].value_counts()) ###Output 0 6854 1 6672 Name: IsBadBuy, dtype: int64 ###Markdown Label Encoder and one hot encoder ###Code df_subset = pd.get_dummies(df_subset) df_subset = pd.get_dummies(df_subset, columns=['WheelTypeID']) df_subset.columns ###Output _____no_output_____ ###Markdown Train/Test partitioning ###Code attributes = [col for col in df_subset.columns if col != 'IsBadBuy'] x = df_subset[attributes].values y = df_subset['IsBadBuy'] x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.3, random_state=1, stratify=y) ###Output _____no_output_____ ###Markdown Grid Search ###Code def report(results, n_top=3): configurations = {} c_i = 0 for i in range(1, n_top + 1): candidates = np.flatnonzero(results['rank_test_score'] == i) for candidate in candidates: print("Model with rank: {0}".format(i)) print("Mean validation score: {0:.3f} (std: {1:.3f})".format( results['mean_test_score'][candidate], results['std_test_score'][candidate])) print("Parameters: {0}".format(results['params'][candidate])) print("") configurations[c_i] = results['params'][candidate] c_i += 1 return configurations param_list = {'criterion': ['gini', 'entropy'], 'max_depth': [None] + list(np.arange(2, 10)), 'min_samples_split': list(np.arange(2, 40)), 'min_samples_leaf': list(np.arange(1, 40)), } clf = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1) random_search = RandomizedSearchCV(clf, param_distributions=param_list, n_iter=10000, scoring='roc_auc', n_jobs = 4, verbose = 1) random_search.fit(x, y) cnfs = report(random_search.cv_results_, n_top=3) ###Output Fitting 5 folds for each of 10000 candidates, totalling 50000 fits ###Markdown Perform Clustering ###Code models = [] y_pred_vals = [] y_pred_trains = [] hyper_ps = random_search.cv_results_ for cnf in cnfs.values(): criterion = cnf['criterion'] max_depth = cnf['max_depth'] min_samples_split = cnf['min_samples_split'] min_samples_leaf = cnf['min_samples_leaf'] clf = DecisionTreeClassifier(criterion=criterion, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf) clf = clf.fit(x_train, y_train) models.append(clf) y_pred = clf.predict(x_val) y_pred_tr = clf.predict(x_train) y_pred_vals.append(y_pred) y_pred_trains.append(y_pred_tr) ###Output _____no_output_____ ###Markdown Analyze the classification results ###Code for i in range(0,5): print("model {}".format(i)) print('Train Accuracy %s' % accuracy_score(y_train, y_pred_trains[i])) print('Train F1-score %s' % f1_score(y_train, y_pred_trains[i], average=None)) print() print('Test Accuracy %s' % accuracy_score(y_val, y_pred_vals[i])) print('Test F1-score %s' % f1_score(y_val, y_pred_vals[i], average=None)) print(classification_report(y_val, y_pred_vals[i])) confusion_matrix(y_val, y_pred_vals[i]) ###Output model 0 Train Accuracy 0.6158639628221377 Train F1-score [0.62739473 0.60359673] Test Accuracy 0.6153277476589453 Test F1-score [0.63035757 0.59902389] precision recall f1-score support 0 0.61 0.65 0.63 2056 1 0.62 0.58 0.60 2002 accuracy 0.62 4058 macro avg 0.62 0.61 0.61 4058 weighted avg 0.62 0.62 0.61 4058 model 1 Train Accuracy 0.6158639628221377 Train F1-score [0.62739473 0.60359673] Test Accuracy 0.6153277476589453 Test F1-score 
[0.63035757 0.59902389] precision recall f1-score support 0 0.61 0.65 0.63 2056 1 0.62 0.58 0.60 2002 accuracy 0.62 4058 macro avg 0.62 0.61 0.61 4058 weighted avg 0.62 0.62 0.61 4058 model 2 Train Accuracy 0.6158639628221377 Train F1-score [0.62739473 0.60359673] Test Accuracy 0.6153277476589453 Test F1-score [0.63035757 0.59902389] precision recall f1-score support 0 0.61 0.65 0.63 2056 1 0.62 0.58 0.60 2002 accuracy 0.62 4058 macro avg 0.62 0.61 0.61 4058 weighted avg 0.62 0.62 0.61 4058 model 3 Train Accuracy 0.6158639628221377 Train F1-score [0.62739473 0.60359673] Test Accuracy 0.6153277476589453 Test F1-score [0.63035757 0.59902389] precision recall f1-score support 0 0.61 0.65 0.63 2056 1 0.62 0.58 0.60 2002 accuracy 0.62 4058 macro avg 0.62 0.61 0.61 4058 weighted avg 0.62 0.62 0.61 4058 model 4 Train Accuracy 0.6158639628221377 Train F1-score [0.62739473 0.60359673] Test Accuracy 0.6153277476589453 Test F1-score [0.63035757 0.59902389] precision recall f1-score support 0 0.61 0.65 0.63 2056 1 0.62 0.58 0.60 2002 accuracy 0.62 4058 macro avg 0.62 0.61 0.61 4058 weighted avg 0.62 0.62 0.61 4058 ###Markdown Analyze the classification performance ###Code roc_auc_models = [] for i in range(0,5): fpr, tpr, _ = roc_curve(y_val, y_pred_vals[i]) roc_auc = auc(fpr, tpr) roc_auc = roc_auc_score(y_val, y_pred_vals[i], average=None) print("model {} - roc_auc: {}".format(i, roc_auc)) roc_auc_models.append(roc_auc) ###Output model 0 - roc_auc: 0.6148955616368067 model 1 - roc_auc: 0.6148955616368067 model 2 - roc_auc: 0.6148955616368067 model 3 - roc_auc: 0.6148955616368067 model 4 - roc_auc: 0.6148955616368067 ###Markdown Choose the best model Visualize the decision tree ###Code dot_data = tree.export_graphviz(models[0], out_file=None, feature_names=attributes, class_names=['BadBuy' if x == 1 else 'GoodBuy' for x in clf.classes_], filled=True, rounded=True, special_characters=True, max_depth=4) graph = pydotplus.graph_from_dot_data(dot_data) Image(graph.create_png()) ###Output _____no_output_____ ###Markdown Evaluate the performance ###Code plt.figure(figsize=(8, 5)) plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % (roc_auc_models[0])) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.tick_params(axis='both', which='major') plt.legend(loc="lower right", fontsize=14, frameon=False) plt.show() ###Output _____no_output_____ ###Markdown Features Importance ###Code for col, imp in zip(attributes, models[2].feature_importances_): print(col, imp) ###Output _____no_output_____ ###Markdown Model evaluation on test set ###Code df_test = pd.read_csv('../../data/test.csv') cl_test.cleaning(df_test) df_test.columns df_test = pd.get_dummies(df_test) df_test = pd.get_dummies(df_test, columns=['WheelTypeID']) df_test.columns attributes = [col for col in df_test.columns if col != 'IsBadBuy'] x_test = df_test[attributes].values y_test = df_test['IsBadBuy'] y_pred_test = models[0].predict(x_test) print('Test Accuracy %s' % accuracy_score(y_test, y_pred_test)) print('Test F1-score %s' % f1_score(y_test, y_pred_test, average=None)) print(classification_report(y_test, y_pred_test)) # Plot non-normalized confusion matrix titles_options = [("Confusion matrix, without normalization", None), ("Normalized confusion matrix", 'true')] for title, normalize in titles_options: disp = plot_confusion_matrix(models[0], x_test, y_test, cmap=plt.cm.Blues, normalize=normalize) disp.ax_.set_title(title) print(title) 
print(disp.confusion_matrix) plt.show() fpr, tpr, _ = roc_curve(y_test, y_pred_test) roc_auc = auc(fpr, tpr) roc_auc = roc_auc_score(y_test, y_pred_test, average=None) print("model {} - roc_auc: {}".format(0, roc_auc)) roc_auc_models.append(roc_auc) plt.figure(figsize=(8, 5)) plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % (roc_auc)) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.tick_params(axis='both', which='major') plt.legend(loc="lower right", fontsize=14, frameon=False) plt.show() ###Output _____no_output_____
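###Markdown The class balancing at the top of this notebook is done by hand with `np.random.choice`. As a sketch of an alternative — assuming the `imbalanced-learn` package is available, which this notebook does not import — the same random undersampling of the majority class can be expressed with `RandomUnderSampler`:
###Code
# Sketch only: random undersampling with imbalanced-learn instead of np.random.choice.
from imblearn.under_sampling import RandomUnderSampler

X = df.drop(columns=["IsBadBuy"])
y = df["IsBadBuy"]

# sampling_strategy=1.0 keeps a 1:1 ratio between the two classes.
rus = RandomUnderSampler(sampling_strategy=1.0, random_state=42)
X_res, y_res = rus.fit_resample(X, y)
print(y_res.value_counts())
###Output _____no_output_____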
10.The Requests-HTML Package/Section 10 - Exploring the package capabilities.ipynb
###Markdown Exploring the requests-html capabilities ###Code # Official documentation of the requests-html package: https://requests-html.readthedocs.io/en/latest/ ###Output _____no_output_____ ###Markdown Initial setup ###Code # Loading the necessary packages from requests_html import HTMLSession # establish/open a session session = HTMLSession() # submitting a GET request r = session.get("https://en.wikipedia.org/wiki/Association_football") r.status_code # The html response to the GET request is contained in the '.html' method r.html ###Output _____no_output_____ ###Markdown Links ###Code # We can extract all link addresses directly with '.links' urls = r.html.links urls # Note that those are the relative URLs # To get absolute URLs we can use '.absolute_links' instead of '.links' full_path_urls = r.html.absolute_links full_path_urls # An important thing to note is that these links (given by both methods) are returned in a SET, not a LIST type(urls) ###Output _____no_output_____ ###Markdown Searching for elements ###Code # A quick note: requests-html uses CSS selectors for searching # We will cover them in the next section, # but here is a more thorough look into it: https://www.w3schools.com/cssref/css_selectors.asp # We can search for elements similarly to Beautiful Soup using the find() method # It behaves as find_all() # find all 'a' tags links = r.html.find("a") links links[4] # To get the raw HTML of an element use the '.html' method links[4].html type(links[4].html) # To extract the text inside an element, use ".text", just like in Beautiful Soup links[4].text # To obtain a dictionary of the element's attributes, use '.attrs' (exactly as in Beautiful Soup) links[10].attrs # This package offers a couple of ways to filter tags based off text # Choose only those tags that contain the string 'wikipedia' in their text (not in the 'href' attribute) # Note: this is not case-sensitive r.html.find("a", containing = "wikipedia") # display the text of those tags [tag.text for tag in r.html.find("a", containing = "wikipedia")] # If we wish to find only the first element (similarly to Beautiful Soup .find()) we need to specify the 'first' parameter r.html.find("p", first = True) ###Output _____no_output_____
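###Markdown As noted above, `find()` accepts CSS selectors. Two quick illustrations on the same response object — the selector strings below are only examples, not anything the original notebook runs:
###Code
# CSS selector examples with requests-html's find() -- illustrative only.
# Anchor tags whose href starts with "/wiki/" (attribute selector):
wiki_links = r.html.find('a[href^="/wiki/"]')
print(len(wiki_links))

# The first bold element inside a paragraph (descendant selector):
first_bold = r.html.find("p b", first=True)
if first_bold is not None:
    print(first_bold.text)
###Output _____no_output_____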
notebooks/OscoNet introduction.ipynb
###Markdown OscoNet: Inferring oscillatory gene networksThis notebook provides an introduction to the bootstrap hypothesis test described in more detaild in the [paper](https://www.biorxiv.org/content/10.1101/600049v1). ###Code %matplotlib inline from OscopeBootstrap import qvalue from OscopeBootstrap.create_edge_network_represention import create_edge_network_representation from OscopeBootstrap.SyntheticDataset import GetSimISyntheticData, true_adj_matrix from OscopeBootstrap.oscope_tf import bootstrap_hypothesis_test, get_accuracy, get_metrics_for_different_qvalue_thresholds from matplotlib import pyplot as plt import numpy as np ###Output _____no_output_____ ###Markdown First create a synthetic dataset containing one group of co-oscillating genes ###Code NG = 10 # half gene cluster size so we have a total of 10 co-oscillating genes G = 80 # number of genes N = 1000 # cells ngroups = 2 # one cluster data_df, phaseG, angularSpeed = GetSimISyntheticData(NG=NG, G=G, ngroups=ngroups, N=N, noiseLevel=0) adjacency_matrix_true = true_adj_matrix(G, angularSpeed) # True adjacency matrix ppp=data_df.iloc NG ###Output _____no_output_____ ###Markdown Show an example of each type of gene expression in the synthetic data1. Strong oscillator which has little noise1. Weak oscillator with higher noise1. White noise gene (no oscillation)For more information on how these are generate see the docstring for `GetSimISyntheticData` and the supplementary materialof the original [Oscope paper](https://www.nature.com/articles/nmeth.3549). ###Code _, ax = plt.subplots(3, sharex=True, sharey=True) ax[0].plot(np.arange(N), data_df.iloc[0, :], 'bo'); ax[0].set_title('strong oscillator') ax[1].plot(np.arange(N), data_df.iloc[NG, :], 'bo'); ax[1].set_title('weak-noisy oscillator') ax[2].plot(np.arange(N), data_df.iloc[2*NG, :], 'bo'); ax[2].set_title('white noise gene') ###Output _____no_output_____ ###Markdown Apply the hypothesis testThis can take 30seconds - 1 minute depending on your hardware. ###Code n_bootstrap = 100 # number of bootstrap samples grid_points_in_search = 10 # grid size for phase shift parameter estimation., alpha = 0.001 # significance level adjacency_matrix, qvalues, cost_matrix = bootstrap_hypothesis_test(n_bootstrap, data_df.values, alpha=alpha, grid_points_in_search=grid_points_in_search) ###Output /Users/luisacutillo/GitProjects/Elli/FullOscoNet/OscopeBootstrap/oscope_tf.py:222: RuntimeWarning: invalid value encountered in multiply psi_ng = np.zeros((G, G)) * np.inf ###Markdown Calculate diagnostics ###Code correct_ratio = get_accuracy(adjacency_matrix, adjacency_matrix_true) print(f'Ratio of correctly identified pairs {correct_ratio:.2f}') TPR, FDR, _ = get_metrics_for_different_qvalue_thresholds(qvalues, adjacency_matrix_true, np.array([alpha])) print(f'True positive rate {float(TPR):.2f}, False discovery rate {float(FDR):.2f}') ###Output Ratio of correctly identified pairs 0.99 True positive rate 1.00, False discovery rate 0.05 ###Markdown Show true and estimated adjacency matricesThe matrix shown 20 X 20 as we have G=20 genes in our example.Each entry (i, j) shows if gene i is found to be co-oscillating with gene j. ###Code _, ax = plt.subplots(1, 2, figsize=(20, 7)) ax[0].imshow(adjacency_matrix_true); ax[0].set_title('true adjacency matrix') ax[1].imshow(adjacency_matrix); ax[1].set_title('estimated adjacency matrix') ###Output _____no_output_____ ###Markdown We can see the hypothesis test correctly identifies the cluster whilst also having false positives. 
Calculate edge network representation needed for network analysisThis creates a dataframe with 3 columns, the gene names of all significant pairs and the cost of that interaction. The cost is defined in Equation (1) in the paper - the smaller, the stronger the interaction. This edge network can then be passed in to the network analysis code. The cost value may be interpreted as a weight in the graph and hence we pass in the reciprocal of the `cost_matrix`. The user could also pass in `qvalues` instead of the `cost_matrix` to obtain a weight based on value. ###Code gene_names = [f'gene{i}' for i in range(G)] edge_network = create_edge_network_representation(adjacency_matrix, 1/cost_matrix, gene_names) edge_network ###Output _____no_output_____
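###Markdown The cell above produces the three-column edge list. As a hedged sketch of the "network analysis" step mentioned in the text — NetworkX is an assumption here, not something this notebook imports — the data frame could be loaded into a graph as follows. The columns are taken positionally, so no particular column names are assumed:
###Code
# Sketch: turn the edge list into a NetworkX graph for downstream analysis.
import networkx as nx

src_col, dst_col, weight_col = edge_network.columns[:3]
G_net = nx.from_pandas_edgelist(edge_network, source=src_col, target=dst_col, edge_attr=weight_col)

print(G_net.number_of_nodes(), "nodes,", G_net.number_of_edges(), "edges")
# Genes with the most co-oscillation partners:
print(sorted(G_net.degree, key=lambda kv: kv[1], reverse=True)[:5])
###Output _____no_output_____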
Crash Course on Python/WEEK 6/Final Project - Word Cloud/utf-8''C1M6L2_Final_Project_V3.ipynb
###Markdown Final Project - Word Cloud For this project, you'll create a "word cloud" from a text by writing a script. This script needs to process the text, remove punctuation, ignore case and words that do not contain all alphabets, count the frequencies, and ignore uninteresting or irrelevant words. A dictionary is the output of the `calculate_frequencies` function. The `wordcloud` module will then generate the image from your dictionary. For the input text of your script, you will need to provide a file that contains text only. For the text itself, you can copy and paste the contents of a website you like. Or you can use a site like [Project Gutenberg](https://www.gutenberg.org/) to find books that are available online. You could see what word clouds you can get from famous books, like a Shakespeare play or a novel by Jane Austen. Save this as a .txt file somewhere on your computer.Now you will need to upload your input file here so that your script will be able to process it. To do the upload, you will need an uploader widget. Run the following cell to perform all the installs and imports for your word cloud script and uploader widget. It may take a minute for all of this to run and there will be a lot of output messages. But, be patient. Once you get the following final line of output, the code is done executing. Then you can continue on with the rest of the instructions for this notebook.**Enabling notebook extension fileupload/extension...****- Validating: OK** ###Code # Here are all the installs and imports you will need for your word cloud script and uploader widget !pip install wordcloud !pip install fileupload !pip install ipywidgets !jupyter nbextension install --py --user fileupload !jupyter nbextension enable --py fileupload import wordcloud import numpy as np from matplotlib import pyplot as plt from IPython.display import display import fileupload import io import sys ###Output Requirement already satisfied: wordcloud in /opt/conda/lib/python3.6/site-packages (1.7.0) Requirement already satisfied: numpy>=1.6.1 in /opt/conda/lib/python3.6/site-packages (from wordcloud) (1.15.4) Requirement already satisfied: pillow in /opt/conda/lib/python3.6/site-packages (from wordcloud) (5.4.1) Requirement already satisfied: matplotlib in /opt/conda/lib/python3.6/site-packages (from wordcloud) (3.0.3) Requirement already satisfied: cycler>=0.10 in /opt/conda/lib/python3.6/site-packages (from matplotlib->wordcloud) (0.10.0) Requirement already satisfied: kiwisolver>=1.0.1 in /opt/conda/lib/python3.6/site-packages (from matplotlib->wordcloud) (1.0.1) Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /opt/conda/lib/python3.6/site-packages (from matplotlib->wordcloud) (2.3.1) Requirement already satisfied: python-dateutil>=2.1 in /opt/conda/lib/python3.6/site-packages (from matplotlib->wordcloud) (2.8.0) Requirement already satisfied: six in /opt/conda/lib/python3.6/site-packages (from cycler>=0.10->matplotlib->wordcloud) (1.12.0) Requirement already satisfied: setuptools in /opt/conda/lib/python3.6/site-packages (from kiwisolver>=1.0.1->matplotlib->wordcloud) (40.8.0) Requirement already satisfied: fileupload in /opt/conda/lib/python3.6/site-packages (0.1.5) Requirement already satisfied: ipywidgets>=5.1 in /opt/conda/lib/python3.6/site-packages (from fileupload) (7.4.2) Requirement already satisfied: traitlets>=4.2 in /opt/conda/lib/python3.6/site-packages (from fileupload) (4.3.2) Requirement already satisfied: notebook>=4.2 in 
/opt/conda/lib/python3.6/site-packages (from fileupload) (5.7.5) Requirement already satisfied: widgetsnbextension~=3.4.0 in /opt/conda/lib/python3.6/site-packages (from ipywidgets>=5.1->fileupload) (3.4.2) Requirement already satisfied: ipython>=4.0.0; python_version >= "3.3" in /opt/conda/lib/python3.6/site-packages (from ipywidgets>=5.1->fileupload) (7.4.0) Requirement already satisfied: ipykernel>=4.5.1 in /opt/conda/lib/python3.6/site-packages (from ipywidgets>=5.1->fileupload) (5.1.0) Requirement already satisfied: nbformat>=4.2.0 in /opt/conda/lib/python3.6/site-packages (from ipywidgets>=5.1->fileupload) (4.4.0) Requirement already satisfied: ipython_genutils in /opt/conda/lib/python3.6/site-packages (from traitlets>=4.2->fileupload) (0.2.0) Requirement already satisfied: six in /opt/conda/lib/python3.6/site-packages (from traitlets>=4.2->fileupload) (1.12.0) Requirement already satisfied: decorator in /opt/conda/lib/python3.6/site-packages (from traitlets>=4.2->fileupload) (4.3.2) Requirement already satisfied: prometheus-client in /opt/conda/lib/python3.6/site-packages (from notebook>=4.2->fileupload) (0.6.0) Requirement already satisfied: jupyter-client>=5.2.0 in /opt/conda/lib/python3.6/site-packages (from notebook>=4.2->fileupload) (5.2.4) Requirement already satisfied: nbconvert in /opt/conda/lib/python3.6/site-packages (from notebook>=4.2->fileupload) (5.4.1) Requirement already satisfied: jinja2 in /opt/conda/lib/python3.6/site-packages (from notebook>=4.2->fileupload) (2.10) Requirement already satisfied: pyzmq>=17 in /opt/conda/lib/python3.6/site-packages (from notebook>=4.2->fileupload) (18.0.1) Requirement already satisfied: jupyter-core>=4.4.0 in /opt/conda/lib/python3.6/site-packages (from notebook>=4.2->fileupload) (4.4.0) Requirement already satisfied: Send2Trash in /opt/conda/lib/python3.6/site-packages (from notebook>=4.2->fileupload) (1.5.0) Requirement already satisfied: terminado>=0.8.1 in /opt/conda/lib/python3.6/site-packages (from notebook>=4.2->fileupload) (0.8.1) Requirement already satisfied: tornado<7,>=4.1 in /opt/conda/lib/python3.6/site-packages (from notebook>=4.2->fileupload) (6.0.2) Requirement already satisfied: setuptools>=18.5 in /opt/conda/lib/python3.6/site-packages (from ipython>=4.0.0; python_version >= "3.3"->ipywidgets>=5.1->fileupload) (40.8.0) Requirement already satisfied: jedi>=0.10 in /opt/conda/lib/python3.6/site-packages (from ipython>=4.0.0; python_version >= "3.3"->ipywidgets>=5.1->fileupload) (0.13.3) Requirement already satisfied: pickleshare in /opt/conda/lib/python3.6/site-packages (from ipython>=4.0.0; python_version >= "3.3"->ipywidgets>=5.1->fileupload) (0.7.5) Requirement already satisfied: prompt_toolkit<2.1.0,>=2.0.0 in /opt/conda/lib/python3.6/site-packages (from ipython>=4.0.0; python_version >= "3.3"->ipywidgets>=5.1->fileupload) (2.0.9) Requirement already satisfied: pygments in /opt/conda/lib/python3.6/site-packages (from ipython>=4.0.0; python_version >= "3.3"->ipywidgets>=5.1->fileupload) (2.3.1) Requirement already satisfied: backcall in /opt/conda/lib/python3.6/site-packages (from ipython>=4.0.0; python_version >= "3.3"->ipywidgets>=5.1->fileupload) (0.1.0) Requirement already satisfied: pexpect in /opt/conda/lib/python3.6/site-packages (from ipython>=4.0.0; python_version >= "3.3"->ipywidgets>=5.1->fileupload) (4.6.0) Requirement already satisfied: jsonschema!=2.5.0,>=2.4 in /opt/conda/lib/python3.6/site-packages (from nbformat>=4.2.0->ipywidgets>=5.1->fileupload) (3.0.1) Requirement already satisfied: 
python-dateutil>=2.1 in /opt/conda/lib/python3.6/site-packages (from jupyter-client>=5.2.0->notebook>=4.2->fileupload) (2.8.0) Requirement already satisfied: mistune>=0.8.1 in /opt/conda/lib/python3.6/site-packages (from nbconvert->notebook>=4.2->fileupload) (0.8.4) Requirement already satisfied: entrypoints>=0.2.2 in /opt/conda/lib/python3.6/site-packages (from nbconvert->notebook>=4.2->fileupload) (0.3) Requirement already satisfied: bleach in /opt/conda/lib/python3.6/site-packages (from nbconvert->notebook>=4.2->fileupload) (3.1.0) Requirement already satisfied: pandocfilters>=1.4.1 in /opt/conda/lib/python3.6/site-packages (from nbconvert->notebook>=4.2->fileupload) (1.4.2) Requirement already satisfied: testpath in /opt/conda/lib/python3.6/site-packages (from nbconvert->notebook>=4.2->fileupload) (0.4.2) Requirement already satisfied: defusedxml in /opt/conda/lib/python3.6/site-packages (from nbconvert->notebook>=4.2->fileupload) (0.5.0) Requirement already satisfied: MarkupSafe>=0.23 in /opt/conda/lib/python3.6/site-packages (from jinja2->notebook>=4.2->fileupload) (1.1.1) Requirement already satisfied: parso>=0.3.0 in /opt/conda/lib/python3.6/site-packages (from jedi>=0.10->ipython>=4.0.0; python_version >= "3.3"->ipywidgets>=5.1->fileupload) (0.3.4) Requirement already satisfied: wcwidth in /opt/conda/lib/python3.6/site-packages (from prompt_toolkit<2.1.0,>=2.0.0->ipython>=4.0.0; python_version >= "3.3"->ipywidgets>=5.1->fileupload) (0.1.7) Requirement already satisfied: ptyprocess>=0.5 in /opt/conda/lib/python3.6/site-packages (from pexpect->ipython>=4.0.0; python_version >= "3.3"->ipywidgets>=5.1->fileupload) (0.6.0) Requirement already satisfied: attrs>=17.4.0 in /opt/conda/lib/python3.6/site-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.2.0->ipywidgets>=5.1->fileupload) (19.1.0) Requirement already satisfied: pyrsistent>=0.14.0 in /opt/conda/lib/python3.6/site-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.2.0->ipywidgets>=5.1->fileupload) (0.14.11) Requirement already satisfied: webencodings in /opt/conda/lib/python3.6/site-packages (from bleach->nbconvert->notebook>=4.2->fileupload) (0.5.1) Requirement already satisfied: ipywidgets in /opt/conda/lib/python3.6/site-packages (7.4.2) Requirement already satisfied: ipython>=4.0.0; python_version >= "3.3" in /opt/conda/lib/python3.6/site-packages (from ipywidgets) (7.4.0) Requirement already satisfied: traitlets>=4.3.1 in /opt/conda/lib/python3.6/site-packages (from ipywidgets) (4.3.2) Requirement already satisfied: nbformat>=4.2.0 in /opt/conda/lib/python3.6/site-packages (from ipywidgets) (4.4.0) Requirement already satisfied: widgetsnbextension~=3.4.0 in /opt/conda/lib/python3.6/site-packages (from ipywidgets) (3.4.2) Requirement already satisfied: ipykernel>=4.5.1 in /opt/conda/lib/python3.6/site-packages (from ipywidgets) (5.1.0) Requirement already satisfied: setuptools>=18.5 in /opt/conda/lib/python3.6/site-packages (from ipython>=4.0.0; python_version >= "3.3"->ipywidgets) (40.8.0) Requirement already satisfied: jedi>=0.10 in /opt/conda/lib/python3.6/site-packages (from ipython>=4.0.0; python_version >= "3.3"->ipywidgets) (0.13.3) Requirement already satisfied: decorator in /opt/conda/lib/python3.6/site-packages (from ipython>=4.0.0; python_version >= "3.3"->ipywidgets) (4.3.2) ###Markdown Whew! That was a lot. All of the installs and imports for your word cloud script and uploader widget have been completed. 
**IMPORTANT!** If this was your first time running the above cell containing the installs and imports, you will need save this notebook now. Then under the File menu above, select Close and Halt. When the notebook has completely shut down, reopen it. This is the only way the necessary changes will take affect.To upload your text file, run the following cell that contains all the code for a custom uploader widget. Once you run this cell, a "Browse" button should appear below it. Click this button and navigate the window to locate your saved text file. ###Code # This is the uploader widget def _upload(): _upload_widget = fileupload.FileUploadWidget() def _cb(change): global file_contents decoded = io.StringIO(change['owner'].data.decode('utf-8')) filename = change['owner'].filename print('Uploaded `{}` ({:.2f} kB)'.format( filename, len(decoded.read()) / 2 **10)) file_contents = decoded.getvalue() _upload_widget.observe(_cb, names='data') display(_upload_widget) _upload() ###Output _____no_output_____ ###Markdown Write a function in the cell below that iterates through the words in *file_contents*, removes punctuation, and counts the frequency of each word. Oh, and be sure to make it ignore word case, words that do not contain all alphabets and boring words like "and" or "the". Then use it in the `generate_from_frequencies` function to generate your very own word cloud!**Hint:** Try storing the results of your iteration in a dictionary before passing them into wordcloud via the `generate_from_frequencies` function. ###Code def calculate_frequencies(file_contents): # Here is a list of punctuations and uninteresting words you can use to process your text punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~''' uninteresting_words = ["the", "a", "to", "if", "is", "it", "of", "and", "or", "an", "as", "i", "me", "my", \ "we", "our", "ours", "you", "your", "yours", "he", "she", "him", "his", "her", "hers", "its", "they", "them", \ "their", "what", "which", "who", "whom", "this", "that", "am", "are", "was", "were", "be", "been", "being", \ "have", "has", "had", "do", "does", "did", "but", "at", "by", "with", "from", "here", "when", "where", "how", \ "all", "any", "both", "each", "few", "more", "some", "such", "no", "nor", "too", "very", "can", "will", "just"] # LEARNER CODE START HERE file_contents2 = "" for index, char in enumerate(file_contents): if char.isalpha() == True or char.isspace(): file_contents2 += char file_contents2 = file_contents2.split() file_without_uninteresting_words = [] for word in file_contents2: if word.lower() not in uninteresting_words and word.isalpha() == True: file_without_uninteresting_words.append(word) frequencies = {} for word in file_without_uninteresting_words: if word.lower() not in frequencies: frequencies[word.lower()] = 1 else: frequencies[word.lower()] += 1 #wordcloud cloud = wordcloud.WordCloud() cloud.generate_from_frequencies(frequencies) return cloud.to_array() ###Output _____no_output_____ ###Markdown If you have done everything correctly, your word cloud image should appear after running the cell below. Fingers crossed! ###Code # Display your wordcloud image myimage = calculate_frequencies(file_contents) plt.imshow(myimage, interpolation = 'nearest') plt.axis('off') plt.show() ###Output _____no_output_____
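###Markdown If you also want to keep the generated image as a file — an optional extra, not required by the project — the array returned by `calculate_frequencies` can be written to disk with Matplotlib:
###Code
# Optional: save the word cloud image to a PNG file next to this notebook.
plt.imsave("wordcloud.png", myimage)
###Output _____no_output_____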
notebooks/CaptumBERTSequenceClassification.ipynb
###Markdown Interpretation of BertForSequenceClassification in captum ###Code !pip install transformers !pip install captum from transformers import BertTokenizer, BertForSequenceClassification, BertConfig from captum.attr import visualization as viz from captum.attr import IntegratedGradients, LayerConductance, LayerIntegratedGradients from captum.attr import configure_interpretable_embedding_layer, remove_interpretable_embedding_layer import torch device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # load model model = BertForSequenceClassification.from_pretrained("checkpoints/bert_disc_model", return_dict=False) model.to(device) model.eval() model.zero_grad() # load tokenizer tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') def predict(inputs): return model(inputs)[0] ref_token_id = tokenizer.pad_token_id # A token used for generating token reference sep_token_id = tokenizer.sep_token_id # A token used as a separator between question and text and it is also added to the end of the text. cls_token_id = tokenizer.cls_token_id # A token used for prepending to the concatenated question-text word sequence def construct_input_ref_pair(text, ref_token_id, sep_token_id, cls_token_id): text_ids = tokenizer.encode(text, add_special_tokens=False) # construct input token ids input_ids = [cls_token_id] + text_ids + [sep_token_id] # construct reference token ids ref_input_ids = [cls_token_id] + [ref_token_id] * len(text_ids) + [sep_token_id] return torch.tensor([input_ids], device=device), torch.tensor([ref_input_ids], device=device), len(text_ids) def construct_input_ref_token_type_pair(input_ids, sep_ind=0): seq_len = input_ids.size(1) token_type_ids = torch.tensor([[0 if i <= sep_ind else 1 for i in range(seq_len)]], device=device) ref_token_type_ids = torch.zeros_like(token_type_ids, device=device)# * -1 return token_type_ids, ref_token_type_ids def construct_input_ref_pos_id_pair(input_ids): seq_length = input_ids.size(1) position_ids = torch.arange(seq_length, dtype=torch.long, device=device) # we could potentially also use random permutation with `torch.randperm(seq_length, device=device)` ref_position_ids = torch.zeros(seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) ref_position_ids = ref_position_ids.unsqueeze(0).expand_as(input_ids) return position_ids, ref_position_ids def construct_attention_mask(input_ids): return torch.ones_like(input_ids) def custom_forward(inputs): preds = predict(inputs) return torch.softmax(preds, dim = 1)[0][0].unsqueeze(-1) lig = LayerIntegratedGradients(custom_forward, model.bert.embeddings) text = "These tests do not work as expected." 
input_ids, ref_input_ids, sep_id = construct_input_ref_pair(text, ref_token_id, sep_token_id, cls_token_id) token_type_ids, ref_token_type_ids = construct_input_ref_token_type_pair(input_ids, sep_id) position_ids, ref_position_ids = construct_input_ref_pos_id_pair(input_ids) attention_mask = construct_attention_mask(input_ids) indices = input_ids[0].detach().tolist() all_tokens = tokenizer.convert_ids_to_tokens(indices) model(input_ids) predict(input_ids) custom_forward(input_ids) attributions, delta = lig.attribute(inputs=input_ids, baselines=ref_input_ids, return_convergence_delta=True) score = predict(input_ids) print('Question: ', text) print('Predicted Answer: ' + str(torch.argmax(score[0]).cpu().numpy()) + ', prob ungrammatical: ' + str(torch.softmax(score, dim = 1)[0][0].cpu().detach().numpy())) def summarize_attributions(attributions): attributions = attributions.sum(dim=-1).squeeze(0) attributions = attributions / torch.norm(attributions) return attributions attributions_sum = summarize_attributions(attributions) # storing couple samples in an array for visualization purposes score_vis = viz.VisualizationDataRecord( attributions_sum, torch.softmax(score, dim = 1)[0][0], torch.argmax(torch.softmax(score, dim = 1)[0]), 0, text, attributions_sum.sum(), all_tokens, delta) print('\033[1m', 'Visualization For Score', '\033[0m') viz.visualize_text([score_vis]) attributions_sum ###Output _____no_output_____
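###Markdown Since all the building blocks above are plain functions, they can be bundled into a small helper that scores and attributes any sentence in one call. This is only a convenience sketch that reuses the objects already defined in this notebook (`lig`, `tokenizer`, `construct_input_ref_pair`, `summarize_attributions`, `predict`); the example sentence is arbitrary.
###Code
# Convenience sketch: run the whole attribution pipeline for a single sentence.
def interpret_sentence(sentence):
    input_ids, ref_input_ids, _ = construct_input_ref_pair(
        sentence, ref_token_id, sep_token_id, cls_token_id
    )
    attributions, delta = lig.attribute(
        inputs=input_ids, baselines=ref_input_ids, return_convergence_delta=True
    )
    tokens = tokenizer.convert_ids_to_tokens(input_ids[0].detach().tolist())
    scores = summarize_attributions(attributions)
    prob_ungrammatical = torch.softmax(predict(input_ids), dim=1)[0][0].item()
    return tokens, scores, prob_ungrammatical, delta


tokens, scores, prob, delta = interpret_sentence("These tests work as expected.")
print(prob)
print(list(zip(tokens, scores.detach().cpu().numpy().round(3))))
###Output _____no_output_____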
sample_windows3.ipynb
###Markdown Chapter 8 - Pointers 1.1 Memory Addresses and the Address Operator & The concept of an address **Every byte of memory space has its own unique address.** This is like every unit in an apartment building having its own number. Just as you find a home by its unit number, you can locate a position in memory by its address. Memory addresses start at 0 and increase by 1 for each byte. **A memory address is another way, alongside the variable name, to refer to a storage location.** Using address values lets you write more convenient and flexible programs. However, mishandling memory addresses can cause serious problems for the system, and addresses can be somewhat difficult for first-time learners. ###Code co1 = CompileOutputOnly('exer8_1') cio1 = CompileInputOuput('exer8_9') saq1 = ShortAnswerQuestion('(1) Every byte of memory space has its own unique ____.', ['주소', '주소값', 'address', 'Address'], ' It is the address.', ' The address is used to locate a position in memory.') cq1 = ChoiceQuestion("""(2) Given the array declaration double a[] = {2, 4, 5, 7, 8, 9};, what are the values referenced by *a and *(a+2)? """, ['4, 7', '2, 5', '5, 8', '2, 4'], 1, ' Indexing starts at 0.', ' *a is 2 and *(a+2) is 5.') cq2 = ChoiceQuestion("""The statements below describe several pointer declarations. Which one is incorrect?""", ['Declaring a double pointer: double *pd;', 'Declaring an array of four int pointers: int *p[4];', 'Declaring an array pointer for the one-dimensional array int a[3]: int *p;', 'Declaring an array pointer for the two-dimensional array int b[3][4]: int *p[3][4];'], 3, ' A pointer for a two-dimensional array is declared with two *.', ' Declare it as int **p.') rate = AchieveRate() add_link_buttons(1, 'sample_windows2.ipynb') ###Output _____no_output_____
Stock Market Analysis of Big Techs.ipynb
###Markdown Stock Market Analysis for Tech StocksIn this project, we'll analyse data from the stock market for some technology stocks. Again, we'll use Pandas to extract and analyse the information, visualise it, and look at different ways to analyse the risk of a stock, based on its performance history. Here are the questions we'll try to answer:- What was the change in a stock's price over time?- What was the daily return average of a stock?- What was the moving average of various stocks?- What was the correlation between daily returns of different stocks?- How much value do we put at risk by investing in a particular stock?- How can we attempt to predict future stock behaviour? ###Code #Python Data Analysis imports import pandas as pd from pandas import Series,DataFrame import numpy as np #Visualisation imports import matplotlib.pyplot as plt import seaborn as sns sns.set_style('whitegrid') %matplotlib inline #To grab stock data from pandas_datareader import DataReader from datetime import datetime #To handle floats in Python 2 #from __future__ import division ###Output _____no_output_____ ###Markdown We're going to analyse some tech stocks, and it seems like a good idea to look at their performance over the last year. We can create a list with the stock names, for future looping. ###Code #We're going to analyse stock info for Apple, Google, Microsoft, and Amazon tech_list = ['AAPL','GOOG','MSFT','AMZN','YHOO'] #Setting the end date to today end = datetime.now() #Start date set to 1 year back start = datetime(end.year-1,end.month,end.day) #Using Yahoo Finance to grab the stock data for stock in tech_list: globals()[stock] = DataReader('GOOGL','yahoo',start,end) #The globals method sets the stock name to a global variable ###Output _____no_output_____ ###Markdown Thanks to the globals method, Apple's stock data will be stored in the AAPL global variable dataframe. Let's see if that worked. ###Code AAPL.head() #Basic stats for Apple's Stock AAPL.describe() ###Output _____no_output_____ ###Markdown And that easily, we can make out what the stock's minimum, maximum, and average price was for the last year. ###Code #Some basic info about the dataframe AAPL.info() ###Output <class 'pandas.core.frame.DataFrame'> DatetimeIndex: 253 entries, 2020-06-29 to 2021-06-29 Data columns (total 6 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 High 253 non-null float64 1 Low 253 non-null float64 2 Open 253 non-null float64 3 Close 253 non-null float64 4 Volume 253 non-null int64 5 Adj Close 253 non-null float64 dtypes: float64(5), int64(1) memory usage: 13.8 KB ###Markdown No missing info in the dataframe above, so we can go about our business. What's the change in stock's price over time? ###Code #Plotting the stock's adjusted closing price using pandas AAPL['Adj Close'].plot(legend=True,figsize=(12,5)) ###Output _____no_output_____ ###Markdown Similarily, we can plot change in a stock's volume being traded, over time. ###Code #Plotting the total volume being traded over time AAPL['Volume'].plot(legend=True,figsize=(12,5)) ###Output _____no_output_____ ###Markdown What was the moving average of various stocks? Let's check out the moving average for stocks over a 10, 20 and 50 day period of time. We'll add that information to the stock's dataframe. 
###Code ma_day = [10,20,50] for ma in ma_day: column_name = "MA for %s days" %(str(ma)) AAPL[column_name] = AAPL['Adj Close'].rolling(window=ma,center=False).mean() AAPL.tail() AAPL[['Adj Close','MA for 10 days','MA for 20 days','MA for 50 days']].plot(subplots=False,figsize=(12,5)) ###Output _____no_output_____ ###Markdown Moving averages for more days have a smoother plot, as they're less reliable on daily fluctuations. So even though, Apple's stock has a slight dip near the start of September, it's generally been on an upward trend since mid-July. What was the daily return average of a stock? ###Code #The daily return column can be created by using the percentage change over the adjusted closing price AAPL['Daily Return'] = AAPL['Adj Close'].pct_change() AAPL['Daily Return'].tail() #Plotting the daily return AAPL['Daily Return'].plot(figsize=(14,5),legend=True,linestyle='--',marker='o') sns.distplot(AAPL['Daily Return'].dropna(),bins=100,color='red') ###Output _____no_output_____ ###Markdown Positive daily returns seem to be slightly more frequent than negative returns for Apple. What was the correlation between daily returns of different stocks? ###Code #Reading just the 'Adj Close' column this time close_df = DataReader(tech_list,'yahoo',start,end)['Adj Close'] close_df.tail() ###Output _____no_output_____ ###Markdown Everything works as expected. Just as we did earlier, we can use Pandas' pct_change method to get the daily returns of our stocks. ###Code rets_df = close_df.pct_change() rets_df.tail() ###Output _____no_output_____ ###Markdown Let's try creating a scatterplot to visualise any correlations between different stocks. First we'll visualise a scatterplot for the relationship between the daily return of a stock to itself. ###Code sns.jointplot('GOOG','GOOG',rets_df,kind='scatter',color='green') ###Output _____no_output_____ ###Markdown As expected, the relationship is perfectly linear because we're trying to correlate something with itself. Now, let's check out the relationship between Google and Apple's daily returns. ###Code sns.jointplot('GOOG','AAPL',rets_df,kind='scatter') ###Output _____no_output_____ ###Markdown There seems to be a minor correlation between the two stocks, looking at the figure above. The Pearson R Correlation Coefficient value of 0.45 echoes that sentiment.But what about other combinations of stocks? Quick and dirty overarching visualisation of the scatterplots and histograms of daily returns of our stocks. To see the actual numbers for the correlation coefficients, we can use seaborn's corrplot method. Google and Microsoft seem to have the highest correlation. But another interesting thing to note is that all tech companies that we explored are positively correlated. How much value do we put at risk by investing in a particular stock? A basic way to quantify risk is to compare the expected return (which can be the mean of the stock's daily returns) with the standard deviation of the daily returns. ###Code rets = rets_df.dropna() ###Output _____no_output_____ ###Markdown We'd want a stock to have a high expected return and a low risk; Google and Microsoft seem to be the safe options for that. Meanwhile, Yahoo and Amazon stocks have higher expected returns, but also have a higher risk Value at RiskWe can treat _Value at risk_ as the amount of money we could expect to lose for a given confidence interval. 
We'll use the 'Bootstrap' method and the 'Monte Carlo' method to extract this value.__Bootstrap Method__Using this method, we calculate the empirical quantiles from a histogram of daily returns. The quantiles help us define our confidence interval. ###Code sns.distplot(AAPL['Daily Return'].dropna(),bins=100,color='purple') ###Output _____no_output_____ ###Markdown To recap, our histogram for Apple's stock looked like the above. And our daily returns dataframe looked like: ###Code rets.head() #Using Pandas' built-in quantile method rets['AAPL'].quantile(0.05) ###Output _____no_output_____ ###Markdown The 0.05 empirical quantile of daily returns is at -0.019. This means that with 95% confidence, the worst daily loss will not exceed 1.9% (the magnitude of that quantile) of the investment. How can we attempt to predict future stock behaviour?__Monte Carlo Method__Check out this [link](http://www.investopedia.com/articles/07/montecarlo.asp) for more info on the Monte Carlo method. In short: in this method, we run simulations to predict the future many times, and aggregate the results in the end for some quantifiable value. ###Code days = 365 #delta t dt = 1/365 mu = rets.mean()['GOOG'] sigma = rets.std()['GOOG'] #Function takes in stock price, number of days to run, mean and standard deviation values def stock_monte_carlo(start_price,days,mu,sigma): price = np.zeros(days) price[0] = start_price shock = np.zeros(days) drift = np.zeros(days) for x in range(1,days): #Shock and drift formulas taken from the Monte Carlo formula shock[x] = np.random.normal(loc=mu*dt,scale=sigma*np.sqrt(dt)) drift[x] = mu * dt #New price = Old price + Old price*(shock+drift) price[x] = price[x-1] + (price[x-1] * (drift[x]+shock[x])) return price ###Output _____no_output_____ ###Markdown We're going to run the simulation for Google's stock. Let's check out its opening value. ###Code GOOG.head() ###Output _____no_output_____ ###Markdown Let's do a simulation of 100 runs, and plot them. ###Code start_price = 622.049 #Taken from above for run in range(100): plt.plot(stock_monte_carlo(start_price,days,mu,sigma)) plt.xlabel('Days') plt.ylabel('Price') plt.title('Monte Carlo Analysis for Google') runs = 10000 simulations = np.zeros(runs) for run in range(runs): simulations[run] = stock_monte_carlo(start_price,days,mu,sigma)[days-1] q = np.percentile(simulations,1) plt.hist(simulations,bins=200) plt.figtext(0.6,0.8,s="Start price: $%.2f" %start_price) plt.figtext(0.6,0.7,"Mean final price: $%.2f" % simulations.mean()) plt.figtext(0.6,0.6,"VaR(0.99): $%.2f" % (start_price -q,)) plt.figtext(0.15,0.6, "q(0.99): $%.2f" % q) plt.axvline(x=q, linewidth=4, color='r') plt.title(u"Final price distribution for Google Stock after %s days" %days, weight='bold') ###Output _____no_output_____
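###Markdown As a short coda to the bootstrap approach (an addition, not in the original notebook): the same 5% empirical quantile can be pulled for every ticker at once, which makes the cross-stock risk comparison explicit. ###Code
# 5% empirical quantile of daily returns per ticker; a more negative value implies
# a larger potential one-day loss at the 95% confidence level.
var_95 = rets.quantile(0.05)
print(var_95.sort_values())
###Output _____no_output_____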
modules/module_20/vowpal-wabbit.ipynb
###Markdown Source: https://analyticsindiamag.com/guide-to-vowpal-wabbit-a-state-of-the-art-library-for-interactive-machine-learning/ ###Code import numpy as np import pandas as pd import sklearn from vowpalwabbit import pyvw training_data =[{'action': 1, 'cost': 2, 'prob': 0.3, 'f1': 'a', 'f2': 'c', 'f3': ''}, {'action': 3, 'cost': 1, 'prob': 0.2, 'f1': 'b', 'f2': 'd', 'f3': ''}, {'action': 4, 'cost': 0, 'prob': 0.6, 'f1': 'a', 'f2': 'b', 'f3': ''}, {'action': 2, 'cost': 1, 'prob': 0.4, 'f1': 'a', 'f2': 'b', 'f3': 'c'}, {'action': 3, 'cost': 2, 'prob': 0.7, 'f1': 'a', 'f2': 'd', 'f3': ''}] training_df = pd.DataFrame(training_data) # create a column named ‘index’ training_df['index'] = range(1, len(training_df) + 1) # set the newly created column as the index column training_df = training_df.set_index("index") testing_data = [{'f1': 'b', 'f2': 'c', 'f3': ''}, {'f1': 'a', 'f2': '', 'f3': 'b'}, {'f1': 'b', 'f2': 'b', 'f3': ''}, {'f1': 'a', 'f2': '', 'f3': 'b'}] testing_df = pd.DataFrame(testing_data) # Add index to data frame testing_df['index'] = range(1, len(testing_df) + 1) testing_df = testing_df.set_index("index") vw = pyvw.vw("--cb 4") #Extract action, its cost, probability and features of each training sample for i in training_df.index: action = training_df.loc[i, "action"] cost = training_df.loc[i, "cost"] probability = training_df.loc[i, "prob"] feature1 = training_df.loc[i, "f1"] feature2 = training_df.loc[i, "f2"] feature3 = training_df.loc[i, "f3"] # Construct the ith example in the required vw format. learn_ex = (str(action) + ":" + str(cost) + ":" + str(probability) + " | " + str(feature1) + " " + str(feature2) + " " + str(feature3)) #Perform actual learning by calling learn() on the ith example vw.learn(learn_ex) ###Output 1.145833 0.000000 8 8.0 3:0:0.6 3:0... 12
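###Markdown The notebook builds `testing_df` but never scores it. A hedged sketch of the prediction step is below; it follows standard pyvw contextual-bandit usage, where a test example carries only the context features and `predict()` returns the action the trained policy would choose (1 to 4 here). Treat the exact return type as an assumption if your vowpalwabbit version differs. ###Code
# Score the held-out rows with the trained --cb 4 model defined above.
for j in testing_df.index:
    test_ex = ("| " + str(testing_df.loc[j, "f1"]) + " "
               + str(testing_df.loc[j, "f2"]) + " " + str(testing_df.loc[j, "f3"]))
    chosen_action = vw.predict(test_ex)
    print(test_ex, "->", chosen_action)
###Output _____no_output_____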
src/04_word2vec/05_w2v_results_socdiv.ipynb
###Markdown Visualizing Word2Vec Models Trained on Biomedical Abstracts in PubMed A Comparison of Race and Diversity Over Time Brandon Kramer - University of Virginia's Biocomplexity Institute This notebook explores two Word2Vec models trained the PubMed database taken from January 2021. Overall, I am interested in testing whether diversity and racial terms are becoming more closely related over time. To do this, I [trained](https://github.com/brandonleekramer/diversity/blob/master/src/04_word_embeddings/03_train_word2vec.ipynb) two models (one from 1990-2000 data and then a random sample of the 2010-2020 data). Now, I will visualize the results of these models to see which words are similar to race/diversity as well as plotting some comparisons of these two terms over time.For those unfamiliar with Word2Vec, it might be worth reading [this post from Connor Gilroy](https://ccgilroy.github.io/community-discourse/introduction.html) - a sociologist that details how word embeddings can help us better understand the concept of "community." The post contains information on how Word2Vec and other word embedding approaches can teach us about word/document similarity, opposite words, and historical changes in words. Basically, Word2Vec turns all of the words in the corpus into a number based on how they are used in the context of 5-word windows (a parameter I defined in this model), making all of the words directly compariable to one another within a vector space. The end result is that we are able to compare how similar or different words are or, as we will see below, how similar or different words become over time. As we will come to see, this approach is useful but not perfect for dealing with our case due to the polysemy of 'diversity.' Import packages and ingest data Let's load all of our packages and the `.bin` files that hold our models. ###Code # load packages import os from itertools import product import pandas.io.sql as psql import pandas as pd from pandas import DataFrame from gensim.models import Word2Vec from gensim.models import KeyedVectors import numpy as np from sklearn.manifold import TSNE import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib.patches import Rectangle import seaborn as sns import warnings warnings.filterwarnings("ignore") # load data os.chdir("/sfs/qumulo/qhome/kb7hp/git/diversity/data/word_embeddings/") earlier_model = Word2Vec.load("word2vec_1990_2000_socdiv_0821.bin") later_model_original = Word2Vec.load("word2vec_2010_2020_socdiv_0821.bin") ###Output _____no_output_____ ###Markdown Normalizing Our Results ###Code # http://www-personal.umich.edu/~tdszyman/misc/InsightSIGNLP16.pdf # https://github.com/williamleif/histwords # https://gist.github.com/zhicongchen/9e23d5c3f1e5b1293b16133485cd17d8 <<<<<< # https://github.com/nikhgarg/EmbeddingDynamicStereotypes/blob/master/dataset_utilities/normalize_vectors.py def intersection_align_gensim(m1, m2, words=None): """ Intersect two gensim word2vec models, m1 and m2. Only the shared vocabulary between them is kept. If 'words' is set (as list or set), then the vocabulary is intersected with this list as well. Indices are re-organized from 0..N in order of descending frequency (=sum of counts from both m1 and m2). 
These indices correspond to the new syn0 and syn0norm objects in both gensim models: -- so that Row 0 of m1.syn0 will be for the same word as Row 0 of m2.syn0 -- you can find the index of any word on the .index2word list: model.index2word.index(word) => 2 The .vocab dictionary is also updated for each model, preserving the count but updating the index. """ # Get the vocab for each model vocab_m1 = set(m1.wv.index_to_key) vocab_m2 = set(m2.wv.index_to_key) # Find the common vocabulary common_vocab = vocab_m1 & vocab_m2 if words: common_vocab &= set(words) # If no alignment necessary because vocab is identical... if not vocab_m1 - common_vocab and not vocab_m2 - common_vocab: return (m1,m2) # Otherwise sort by frequency (summed for both) common_vocab = list(common_vocab) common_vocab.sort(key=lambda w: m1.wv.get_vecattr(w, "count") + m2.wv.get_vecattr(w, "count"), reverse=True) # print(len(common_vocab)) # Then for each model... for m in [m1, m2]: # Replace old syn0norm array with new one (with common vocab) indices = [m.wv.key_to_index[w] for w in common_vocab] old_arr = m.wv.vectors new_arr = np.array([old_arr[index] for index in indices]) m.wv.vectors = new_arr # Replace old vocab dictionary with new one (with common vocab) # and old index2word with new one new_key_to_index = {} new_index_to_key = [] for new_index, key in enumerate(common_vocab): new_key_to_index[key] = new_index new_index_to_key.append(key) m.wv.key_to_index = new_key_to_index m.wv.index_to_key = new_index_to_key print(len(m.wv.key_to_index), len(m.wv.vectors)) return (m1,m2) def smart_procrustes_align_gensim(base_embed, other_embed, words=None): """ Original script: https://gist.github.com/quadrismegistus/09a93e219a6ffc4f216fb85235535faf Procrustes align two gensim word2vec models (to allow for comparison between same word across models). Code ported from HistWords <https://github.com/williamleif/histwords> by William Hamilton <[email protected]>. First, intersect the vocabularies (see `intersection_align_gensim` documentation). Then do the alignment on the other_embed model. Replace the other_embed model's syn0 and syn0norm numpy matrices with the aligned version. Return other_embed. If `words` is set, intersect the two models' vocabulary with the vocabulary in words (see `intersection_align_gensim` documentation). """ # patch by Richard So [https://twitter.com/richardjeanso) (thanks!) to update this code for new version of gensim # base_embed.init_sims(replace=True) # other_embed.init_sims(replace=True) # make sure vocabulary and indices are aligned in_base_embed, in_other_embed = intersection_align_gensim(base_embed, other_embed, words=words) # get the (normalized) embedding matrices base_vecs = in_base_embed.wv.get_normed_vectors() other_vecs = in_other_embed.wv.get_normed_vectors() # just a matrix dot product with numpy m = other_vecs.T.dot(base_vecs) # SVD method from numpy u, _, v = np.linalg.svd(m) # another matrix operation ortho = u.dot(v) # Replace original array with modified one, i.e. multiplying the embedding matrix by "ortho" other_embed.wv.vectors = (other_embed.wv.vectors).dot(ortho) return other_embed later_model = smart_procrustes_align_gensim(earlier_model, later_model_original) ###Output 78100 78100 78100 78100 ###Markdown Analyzing Most Similar Words **What words are most similar to "racial," "ethnicity", and "diversity"?** As we can see below, "racial" and "ethnicity" is mostly similar to other racialized and/or gendered terms in both the 1990-2000 and 2010-20 periods. 
"Diversity", on the other hand, is most similar to heterogeneity, divergence and complexity in 1990-2000 and then richness, divergence and diversification in 2010-2020. Overall, this tells us a different version of the same story we saw when analyzing Hypothesis 1: "diversity" rarely refers to social diversity along racial or classed lines. Diversity is mostly used as a biological term. Even here, richness, along with evenness, are measure within Simpson's Index for measuring ecological biodiversity (e.g. [Stirling et al. 2001](https://www.journals.uchicago.edu/doi/abs/10.1086/321317?casa_token=Fb4sojZm9XgAAAAA:BV-t4e5f3SZ05gTJZRUydcQvHTYg47f1qRu51CixgF-b_HnGVXuPQFaqf_Lp88Tvy51Gnp7iw4yG)). ###Code # average of earlier model earlier_race = earlier_model.wv.most_similar(positive=['race', 'racial', 'racially'], topn=50) earlier_race = pd.DataFrame(earlier_race).rename(columns={0: "term", 1: "score"}) earlier_race['year'] = '1990-2000' earlier_race.reset_index(inplace=True) earlier_race = earlier_race.rename(columns = {'index':'rank'}) # average of later model later_race = later_model.wv.most_similar(positive=['race', 'racial', 'racially'], topn=50) later_race = pd.DataFrame(later_race).rename(columns={0: "term", 1: "score"}) later_race['year'] = '2010-2020' later_race.reset_index(inplace=True) later_race = later_race.rename(columns = {'index':'rank'}) # merge the tables for comparison top_race_vectors = pd.merge(earlier_race, later_race, on=["rank"]) top_race_vectors # average of earlier model earlier_ethnicity = earlier_model.wv.most_similar(positive=['ethnic', 'ethnicity', 'ethnically'], topn=50) earlier_ethnicity = pd.DataFrame(earlier_ethnicity).rename(columns={0: "term", 1: "score"}) earlier_ethnicity['year'] = '1990-2000' earlier_ethnicity.reset_index(inplace=True) earlier_ethnicity = earlier_ethnicity.rename(columns = {'index':'rank'}) # average of later model later_ethnicity = later_model.wv.most_similar(positive=['ethnic', 'ethnicity', 'ethnically'], topn=50) later_ethnicity = pd.DataFrame(later_ethnicity).rename(columns={0: "term", 1: "score"}) later_ethnicity['year'] = '2010-2020' later_ethnicity.reset_index(inplace=True) later_ethnicity = later_ethnicity.rename(columns = {'index':'rank'}) # merge the tables for comparison top_ethnicity_vectors = pd.merge(earlier_ethnicity, later_ethnicity, on=["rank"]) top_ethnicity_vectors # average of earlier model earlier_diversity = earlier_model.wv.most_similar(positive=['socialdiverse', 'socialdiversity'], topn=50) earlier_diversity = pd.DataFrame(earlier_diversity).rename(columns={0: "term", 1: "score"}) earlier_diversity['year'] = '1990-2000' earlier_diversity.reset_index(inplace=True) earlier_diversity = earlier_diversity.rename(columns = {'index':'rank'}) # average of later model later_diversity = later_model.wv.most_similar(positive=['socialdiverse', 'socialdiversity'], topn=50) later_diversity = pd.DataFrame(later_diversity).rename(columns={0: "term", 1: "score"}) later_diversity['year'] = '2010-2020' later_diversity.reset_index(inplace=True) later_diversity = later_diversity.rename(columns = {'index':'rank'}) # merge the tables for comparison top_diversity_vectors = pd.merge(earlier_diversity, later_diversity, on=["rank"]) top_diversity_vectors ###Output _____no_output_____ ###Markdown Comparing Race and Diversity That makes it a little difficult to directly compare the terms, so let's use the `wv.similarity()` function to directly look at that. 
This basically allows you to directly compare the two words to see how close they are in the vector space. To make this process a little more efficient, we are going to make our own function named `w2v_similarities_over_time()` and then compare all the relevant terms. Following [Garg et al. (2018)](https://www.pnas.org/content/115/16/E3635.short), we also decided to average some of the terms in our dictionaries since it gets a little cumbersome trying to interpret the multiple outcomes of very similiary terms like diversity/diverse, race/racial, ethnic/ethnicity, etc. ###Code def w2v_similarities_over_time(df, w2v_m1, w2v_m2): ''' function compares several word2vec vectors from two different years and then examines how those several comparisons change over time ---------------------------------------------------------------- 1) first it takes a dictionary of words and creates its product 2) compares all of those words within the vector space of w2v_m1 3) compares all of those words within the vector space of w2v_m2 4) examines changes in the comparisons of w2v_m1 and w2v_m2 over time ''' df = list(product(df['term'], df['term'])) df = pd.DataFrame(df, columns=['term1','term2']) cos_sim_m1 = [] for index, row in df.iterrows(): cos_sim_m1.append(w2v_m1.wv.similarity(row[0],row[1])) cos_sim_m1 = DataFrame(cos_sim_m1, columns=['cos_sim_m1']) df = df.merge(cos_sim_m1, left_index=True, right_index=True) cos_sim_m2 = [] for index, row in df.iterrows(): cos_sim_m2.append(w2v_m2.wv.similarity(row[0],row[1])) cos_sim_m2 = DataFrame(cos_sim_m2, columns=['cos_sim_m2']) df = df.merge(cos_sim_m2, left_index=True, right_index=True) df["cos_sim_diffs"] = df["cos_sim_m1"] - df["cos_sim_m2"] df_matrix = df.pivot("term1", "term2", "cos_sim_diffs") return df_matrix ###Output _____no_output_____ ###Markdown Let's pull in our dictionaries but filter to only the race and diversity entries: ###Code race_diversity_early = earlier_model.wv.similarity('race','socialdiversity') race_diversity_later = later_model.wv.similarity('race','socialdiversity') racial_diversity_early = earlier_model.wv.similarity('racial','socialdiversity') racial_diversity_later = later_model.wv.similarity('racial','socialdiversity') ethnic_diversity_early = earlier_model.wv.similarity('ethnic','socialdiversity') ethnic_diversity_later = later_model.wv.similarity('ethnic','socialdiversity') ethnicity_diversity_early = earlier_model.wv.similarity('ethnicity','socialdiversity') ethnicity_diversity_later = later_model.wv.similarity('ethnicity','socialdiversity') black_div_early = earlier_model.wv.similarity('black','socialdiversity') black_div_later = later_model.wv.similarity('black','socialdiversity') afam_div_early = earlier_model.wv.similarity('africanamerican','socialdiversity') afam_div_later = later_model.wv.similarity('africanamerican','socialdiversity') white_div_early = earlier_model.wv.similarity('white','socialdiversity') white_div_later = later_model.wv.similarity('white','socialdiversity') caucasian_div_early = earlier_model.wv.similarity('caucasian','socialdiversity') caucasian_div_later = later_model.wv.similarity('caucasian','socialdiversity') hisp_div_early = earlier_model.wv.similarity('hispanic','socialdiversity') hisp_div_later = later_model.wv.similarity('hispanic','socialdiversity') asian_div_early = earlier_model.wv.similarity('asian','socialdiversity') asian_div_later = later_model.wv.similarity('asian','socialdiversity') latino_div_early = earlier_model.wv.similarity('latino','socialdiversity') latino_div_later = 
later_model.wv.similarity('latino','socialdiversity') native_div_early = earlier_model.wv.similarity('native','socialdiversity') native_div_later = later_model.wv.similarity('native','socialdiversity') print('Overall Comparisons of Racial and Diversity Terms:') print('Race and socialdiversity: 1990-2000 score:', race_diversity_early, ' 2010-2020 score:', race_diversity_later, ' Difference is:', race_diversity_later-race_diversity_early ) print('Racial and socialdiversity: 1990-2000 score:', racial_diversity_early, ' 2010-2020 score:', racial_diversity_later, ' Difference is:', racial_diversity_later-racial_diversity_early) print('Ethnic and socialdiversity: 1990-2000 score:', ethnic_diversity_early, ' 2010-2020 score:', ethnic_diversity_later, ' Difference is:', ethnic_diversity_later-ethnic_diversity_early) print('Ethnicity and socialdiversity: 1990-2000 score:', ethnicity_diversity_early, ' 2010-2020 score:', ethnicity_diversity_later, ' Difference is:', ethnicity_diversity_later-ethnicity_diversity_early) print('Black and socialdiversity: 1990-1995 score:', black_div_early, ' 2015-2020 score:', black_div_later, ' Difference is:', black_div_later-black_div_early) print('African American and socialdiversity: 1990-1995 score:', afam_div_early, ' 2015-2020 score:', afam_div_later, ' Difference is:', afam_div_later-afam_div_early) print('White and socialdiversity: 1990-1995 score:', white_div_early, ' 2015-2020 score:', white_div_later, ' Difference is:', white_div_later-white_div_early) print('Caucasian and socialdiversity: 1990-1995 score:', caucasian_div_early, ' 2015-2020 score:', caucasian_div_later, ' Difference is:', caucasian_div_later-caucasian_div_early) print('Hispanic and socialdiversity: 1990-1995 score:', hisp_div_early, ' 2015-2020 score:', hisp_div_later, ' Difference is:', hisp_div_later-hisp_div_early) print('Latino and socialdiversity: 1990-1995 score:', latino_div_early, ' 2015-2020 score:', latino_div_later, ' Difference is:', latino_div_later-latino_div_early) print('Asian and socialdiversity: 1990-1995 score:', asian_div_early, ' 2015-2020 score:', asian_div_later, ' Difference is:', asian_div_later-asian_div_early) print('Native and socialdiversity: 1990-1995 score:', native_div_early, ' 2015-2020 score:', native_div_later, ' Difference is:', native_div_early-native_div_later) ###Output Overall Comparisons of Racial and Diversity Terms: Race and socialdiversity: 1990-2000 score: 0.1456064 2010-2020 score: 0.09730333 Difference is: -0.048303068 Racial and socialdiversity: 1990-2000 score: 0.3118446 2010-2020 score: 0.20273337 Difference is: -0.10911122 Ethnic and socialdiversity: 1990-2000 score: 0.3323195 2010-2020 score: 0.23735352 Difference is: -0.09496598 Ethnicity and socialdiversity: 1990-2000 score: 0.17747341 2010-2020 score: 0.17145237 Difference is: -0.0060210377 Black and socialdiversity: 1990-1995 score: 0.14626867 2015-2020 score: 0.10616236 Difference is: -0.040106304 African American and socialdiversity: 1990-1995 score: 0.19208874 2015-2020 score: 0.099406496 Difference is: -0.09268224 White and socialdiversity: 1990-1995 score: 0.10028377 2015-2020 score: 0.08158496 Difference is: -0.018698812 Caucasian and socialdiversity: 1990-1995 score: 0.31263688 2015-2020 score: 0.09052864 Difference is: -0.22210824 Hispanic and socialdiversity: 1990-1995 score: 0.24034366 2015-2020 score: 0.110881895 Difference is: -0.12946177 Latino and socialdiversity: 1990-1995 score: 0.22484964 2015-2020 score: 0.16707414 Difference is: -0.057775497 Asian and 
socialdiversity: 1990-1995 score: 0.36033434 2015-2020 score: 0.1751638 Difference is: -0.18517053 Native and socialdiversity: 1990-1995 score: 0.075064 2015-2020 score: 0.092080116 Difference is: -0.017016113 ###Markdown To interpret these scores, we have to know that a value of 1 means that two words have a perfect relationship, 0 means the two words have no relationship, and -1 means that they are perfect opposites ([Stack Overflow 2017](https://stackoverflow.com/questions/42381902/interpreting-negative-word2vec-similarity-from-gensim), [Google Groups 2019](https://groups.google.com/g/gensim/c/SZ1yct-7CuU)). Thus, when we compare all of the race, racial, ethnic and ethnicity vectors to diverse and diversity, we actually see that they are becoming *less* similar over time. Thus, despite our earlier hypotheses indicating that diversity is rising while racial terms decline, it does not seem to be the case that the two are being used in similar ways over time. It is worth noting that a number of things could complicate this interpretation, including the polysemy of diversity. Next, we will create a plot for this. We have to keep this grey scale, because sociologists are still living in the late 1900s. Before moving on to plots of these vectors, let's take a look at specific racial terms and see how they compare to diversity. ###Code plt.figure(figsize=(6, 4)) sns.set_style("white") d = {'group': [ 'Asian', 'Latino', 'Hispanic', 'Caucasian', 'White', 'Black', 'African American', 'Ethnicity', 'Ethnic', 'Racial', 'Race' ], '1990-2000': [ asian_div_early, latino_div_early, hisp_div_early, caucasian_div_early, white_div_early, black_div_early, afam_div_early, ethnicity_diversity_early, ethnic_diversity_early, racial_diversity_early, race_diversity_early ], '2010-2020': [ asian_div_later, latino_div_later, hisp_div_later, caucasian_div_later, white_div_later, black_div_later, afam_div_later, ethnicity_diversity_later, ethnic_diversity_later, racial_diversity_later, race_diversity_later ]} df = pd.DataFrame(data=d) ordered_df = df my_range=range(1,len(df.index)+1) plt.hlines(y=my_range, xmin=ordered_df['1990-2000'], xmax=ordered_df['2010-2020'], color='lightgrey', alpha=0.4) plt.scatter(ordered_df['1990-2000'], my_range, color='red', alpha=1, label='1990-2000') plt.scatter(ordered_df['2010-2020'], my_range, color='skyblue', alpha=0.8 , label='2010-2020') #plt.scatter(ordered_df['1990-2000'], my_range, color='black', alpha=1, label='1990-2000') #plt.scatter(ordered_df['2010-2020'], my_range, color='dimgrey', alpha=0.4 , label='2010-2020') plt.legend() # Add title and axis names plt.yticks(my_range, ordered_df['group']) plt.title("Figure 4. Comparison of Racial/Ethnic Word Vectors Relative \n to the Social Diversity Vector for 1990-2000 and 2010-2020 Word2Vec Models", loc='center') plt.xlabel('Cosine Similarity Scores') #plt.ylabel('All Terms Compared to Diversity') top_race_vectors.to_csv('/sfs/qumulo/qhome/kb7hp/git/diversity/data/final_data/race_vectors_socdiv_0921.csv') top_ethnicity_vectors.to_csv('/sfs/qumulo/qhome/kb7hp/git/diversity/data/final_data/ethnicity_vectors_socdiv_0921.csv') top_diversity_vectors.to_csv('/sfs/qumulo/qhome/kb7hp/git/diversity/data/final_data/diversity_vectors_socdiv_0921.csv') ordered_df.to_csv('/sfs/qumulo/qhome/kb7hp/git/diversity/data/final_data/select_wv_comps_socdiv_0921.csv') ###Output _____no_output_____ ###Markdown Overall, this approach gave us some mixed results. 
Asian and diversity/diverse become significantly more dissimilar while white and diversity/diverse become more similar. Once could argue that this supports Berrey's argument about diversity being used to reinforce whiteness, but it also might just be diverse/diversity being more to describe variation in neuroscience where a common term is 'white matter.' In the end, it might just be the case that the Word2Vec model's inability to deal with polysemy does not help us answer our research question. Before concluding that, let's look at visual plots of our vectors. Singular Value Decomposition In order to do that, we have to reduce the 512 dimension model into just 2 dimensions using the `TSNE` package. We will do this for both models, which will take around 30 minutes to run. Scroll down to see the results... ###Code %%capture earlier_vocab = list(earlier_model.wv.vocab) earlier_x = earlier_model[earlier_vocab] earlier_tsne = TSNE(n_components=2) earlier_tsne_x = earlier_tsne.fit_transform(earlier_x) df_earlier = pd.DataFrame(earlier_tsne_x, index=earlier_vocab, columns=['x', 'y']) later_vocab = list(later_model.wv.vocab) later_x = later_model[later_vocab] later_tsne = TSNE(n_components=2) later_tsne_x = later_tsne.fit_transform(later_x) df_later = pd.DataFrame(later_tsne_x, index=later_vocab, columns=['x', 'y']) keys = ['race', 'racial', 'ethnic', 'ethnicity', 'diverse', 'diversity'] earlier_embedding_clusters = [] earlier_word_clusters = [] for word in keys: earlier_embeddings = [] earlier_words = [] for similar_word, _ in earlier_model.wv.most_similar(word, topn=30): earlier_words.append(similar_word) earlier_embeddings.append(earlier_model[similar_word]) earlier_embedding_clusters.append(earlier_embeddings) earlier_word_clusters.append(earlier_words) earlier_embedding_clusters = np.array(earlier_embedding_clusters) n, m, k = earlier_embedding_clusters.shape e_tsne_model_en_2d = TSNE(perplexity=15, n_components=2, init='pca', n_iter=3500, random_state=32) e_embeddings_en_2d = np.array(e_tsne_model_en_2d.fit_transform(earlier_embedding_clusters.reshape(n * m, k))).reshape(n, m, 2) later_embedding_clusters = [] later_word_clusters = [] for word in keys: later_embeddings = [] later_words = [] for similar_word, _ in later_model.wv.most_similar(word, topn=30): later_words.append(similar_word) later_embeddings.append(later_model[similar_word]) later_embedding_clusters.append(later_embeddings) later_word_clusters.append(later_words) later_embedding_clusters = np.array(later_embedding_clusters) n, m, k = later_embedding_clusters.shape l_tsne_model_en_2d = TSNE(perplexity=15, n_components=2, init='pca', n_iter=3500, random_state=32) l_embeddings_en_2d = np.array(l_tsne_model_en_2d.fit_transform(later_embedding_clusters.reshape(n * m, k))).reshape(n, m, 2) ###Output _____no_output_____ ###Markdown Plotting the Results of the Word2Vec Models (1990-95 vs 2015-20) ###Code def tsne_plot_similar_words(title, labels, earlier_embedding_clusters, earlier_word_clusters, a, filename=None): plt.figure(figsize=(16, 9)) colors = cm.rainbow(np.linspace(0, 1, len(labels))) for label, earlier_embeddings, earlier_words, color in zip(labels, earlier_embedding_clusters, earlier_word_clusters, colors): x = earlier_embeddings[:, 0] y = earlier_embeddings[:, 1] plt.scatter(x, y, c=color, alpha=a, label=label) for i, word in enumerate(earlier_words): plt.annotate(word, alpha=0.5, xy=(x[i], y[i]), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom', size=10) plt.legend(loc=4) plt.title(title) plt.grid(True) 
if filename: plt.savefig(filename, format='png', dpi=150, bbox_inches='tight') plt.show() early_plot = tsne_plot_similar_words('Comparing the Use of Race, Ethnicity and Diversity (Word2Vec Model of 1990-2000 PubMed Data)', keys, e_embeddings_en_2d, earlier_word_clusters, 0.7, 'earlier_comparison.png') early_plot ###Output _____no_output_____ ###Markdown Now we can look at how the words in each vector of race, racial, ethnic, ethnicity, diversity and diverse. When we start to look at the specific terms of interest, we find racial and ethnic are the far left or toward the bottom-center. Other variants of these terms are more centered in the plot. On the other hand, diversity and diverse are both clustered toward the top-right, which means that race and diversity are fairly far away in the vector space. ###Code def tsne_plot_similar_words(title, labels, later_embedding_clusters, later_word_clusters, a, filename=None): plt.figure(figsize=(16, 9)) colors = cm.rainbow(np.linspace(0, 1, len(labels))) for label, later_embeddings, later_words, color in zip(labels, later_embedding_clusters, later_word_clusters, colors): x = later_embeddings[:, 0] y = later_embeddings[:, 1] plt.scatter(x, y, c=color, alpha=a, label=label) for i, word in enumerate(later_words): plt.annotate(word, alpha=0.5, xy=(x[i], y[i]), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom', size=10) plt.legend(loc=4) plt.title(title) plt.grid(True) if filename: plt.savefig(filename, format='png', dpi=150, bbox_inches='tight') plt.show() later_plot = tsne_plot_similar_words('Comparing the Use of Race, Ethnicity and Diversity (Word2Vec Model of 2010-2020 PubMed Data)', keys, l_embeddings_en_2d, later_word_clusters, 0.7, 'later_comparison.png') os.chdir("/sfs/qumulo/qhome/kb7hp/git/diversity/data/word_embeddings/") plt.savefig('later_comparison.png') later_plot ###Output _____no_output_____ ###Markdown When we look at the same vectors in the 2015-20 model, it seems like the vectors are more closely related overall. However, when we look closer we see that the 'race' and 'ethnicity' are up in the top-left corner while 'racial' and 'ethnic' are in the top-right corner. Both sets are still fairly separated from the red and orange diversity vectors. Although these plots do not show this as clear as one might want, our analyses above do suggest that diverse and diversity as well as race, racial, ethnic, and ethinicity are being used more dissimilarly over time. The challenging thing about this analysis disentangling the polysemy from how diversity is used. If we were able to 'disentange' the use of diveristy in its more general sense compared to its usage in the context of equity, inclusion and justice discussions, would we find that the two words are becoming more similar over time? Does diversity replace race/ethnicity?: Contextualizing Word Vectors with Heat MapsAfter consulting some colleagues, we thought about two potential ways to test this. The first would be to turn to BERT or ELMo ([Fonteyn 2019](https://laurenthelinguist.files.wordpress.com/2019/08/sle_2019_bert.pdf); [Rakhmanberdieva 2019](https://towardsdatascience.com/word-representation-in-natural-language-processing-part-iii-2e69346007f)), which would allow us to identify the contexual variations in how diversity is used. The problem is that BERT, for example, is trained on Wikipedia data that is not historical. 
There are BERT options like PubMedBERT and BioBERT, but they are trained on the entirety of the PubMed abstracts, which fails to help us identify historical variations in how the terms change. Moreover, it would not make much sense to fine tune a BERT model on the same data in which it was already trained on. Thus, we ruled out BERT as an option. Instead, we decided to continue using Word2Vec and instead compare the diveristy vector to a myriad of other vectors that we measured in H1. Our logic was that if we see diversity become more semantically similar to other diversity-related vectors over time time, while also moving further away from the racial/ethic vectors, we could infer that diversity is actually replacing race/ethnicity in biomedical abstracts over time. To do this, I developed a function named `w2v_similarities_over_time()` that calculates the difference between all the words witin a dictionary of terms and then compares how they have changed relative to one another over time. Specifically, I will be comparing how diverse and diversity change relative to the terms in our race/ethnicity, sex/gender, sexuality, social class, and cultural/equity categories from [Hypotheses 1 and 2](https://growthofdiversity.netlify.app/methods/). Then, I will visualize the results of these models comparisons using some heat maps.First step is importing our H1 library so we can pluck out all of the vectors for a heat map in a relatively automated manner. ###Code # load dictionary of words os.chdir("/sfs/qumulo/qhome/kb7hp/git/diversity/data/dictionaries/") h1_dictionary = pd.read_csv("diversity_project - tree_data.csv") h1_dictionary = h1_dictionary[h1_dictionary['viz_embeddings'] == 1].drop(['hypothesis', 'viz_embeddings', 'mean_embeddings'], axis=1) h1_dictionary = h1_dictionary.replace({'category': {'asian|black|hispanic_latinx|white': 'race_ethnicity'}}, regex=True) h1_dictionary = h1_dictionary.replace({'category': {'sex_gender|sexuality': 'gender_sexuality'}}, regex=True) h1_dictionary = h1_dictionary.replace({'category': {'cultural|equity': 'cultural_equity'}}, regex=True) h1_dictionary = h1_dictionary.replace({'category': {'minority': 'social_class'}}, regex=True).sort_values(by=['category', 'term']) #recode for social diversity h1_dictionary = h1_dictionary.replace({'term': {'diverse': 'socialdiverse'}}, regex=True).sort_values(by=['category', 'term']) h1_dictionary = h1_dictionary.replace({'term': {'diversely': 'socialdiversely'}}, regex=True).sort_values(by=['category', 'term']) h1_dictionary = h1_dictionary.replace({'term': {'diversified': 'socialdiversified'}}, regex=True).sort_values(by=['category', 'term']) h1_dictionary = h1_dictionary.replace({'term': {'diversification': 'socialdiversification'}}, regex=True).sort_values(by=['category', 'term']) h1_dictionary = h1_dictionary.replace({'term': {'diversifying': 'socialdiversifying'}}, regex=True).sort_values(by=['category', 'term']) h1_dictionary = h1_dictionary.replace({'term': {'diversity': 'socialdiversity'}}, regex=True).sort_values(by=['category', 'term']) # manual deletion after chatting with catherine h1_dictionary = h1_dictionary[~h1_dictionary['term'].isin(['socialdiverse', 'negro', 'ethnic', 'racist', 'racial', 'homosexual', 'men', 'women', 'inequality', 'equality'])] h1_dictionary #h1_dictionary[h1_dictionary['category'].isin(['diversity'])] ###Output _____no_output_____ ###Markdown Next, we will define our `w2v_similarities_over_time()` function. 
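###Markdown (A small housekeeping aside before that function: the long chain of regex `.replace()` calls above can be collapsed into a single exact-match mapping, which also avoids regex substring hits that can prefix a term twice; 'diversely', for instance, already contains 'diverse'. The sketch below assumes each dictionary entry stores the term as a single exact token.) ###Code
# Hypothetical compact recode (an aside, not a change to the original pipeline):
socdiv_map = {'diverse': 'socialdiverse', 'diversely': 'socialdiversely',
              'diversified': 'socialdiversified', 'diversification': 'socialdiversification',
              'diversifying': 'socialdiversifying', 'diversity': 'socialdiversity'}
h1_dictionary['term'] = h1_dictionary['term'].replace(socdiv_map)  # exact-value match (regex=False)
###Output _____no_output_____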
###Code def w2v_similarities_over_time(df, w2v_m1, w2v_m2): ''' function compares several word2vec vectors from two different years and then examines how those several comparisons change over time ---------------------------------------------------------------- 1) first it takes a dictionary of words and creates its product 2) compares all of those words within the vector space of w2v_m1 3) compares all of those words within the vector space of w2v_m2 4) examines changes in the comparisons of w2v_m1 and w2v_m2 over time ''' df = list(product(df['term'], df['term'])) df = pd.DataFrame(df, columns=['term1','term2']) cos_sim_m1 = [] for index, row in df.iterrows(): cos_sim_m1.append(w2v_m1.wv.similarity(row[0],row[1])) cos_sim_m1 = DataFrame(cos_sim_m1, columns=['cos_sim_m1']) df = df.merge(cos_sim_m1, left_index=True, right_index=True) cos_sim_m2 = [] for index, row in df.iterrows(): cos_sim_m2.append(w2v_m2.wv.similarity(row[0],row[1])) cos_sim_m2 = DataFrame(cos_sim_m2, columns=['cos_sim_m2']) df = df.merge(cos_sim_m2, left_index=True, right_index=True) df["cos_sim_diffs"] = df["cos_sim_m2"] - df["cos_sim_m1"] df_matrix = df.pivot("term1", "term2", "cos_sim_diffs") return df_matrix ###Output _____no_output_____ ###Markdown And check to make sure we get the same results as above... ###Code race_ethnicity = h1_dictionary[(h1_dictionary['category'] == 'race_ethnicity') | (h1_dictionary['category'] == 'diversity')] race_ethnicity_matrix = w2v_similarities_over_time(race_ethnicity, earlier_model, later_model) race_ethnicity_matrix ###Output _____no_output_____ ###Markdown These do look similar to the basic plot we created above.So now we will create each of our four heat maps and combine them into a joint figure for publication (again in grey scale for the sociologists)... ###Code sns.set(rc={'figure.figsize':(8,5)}) sns.set_style("whitegrid") cultural_equity = h1_dictionary[(h1_dictionary['category'] == 'cultural_equity') | (h1_dictionary['category'] == 'diversity')] cultural_equity = cultural_equity[~cultural_equity['term'].isin(['interlinguistic', 'oppressive', 'religion', 'religiosity'])] cultural_equity_matrix = w2v_similarities_over_time(cultural_equity, earlier_model, later_model) cmap = sns.diverging_palette(20, 230, as_cmap=True) #cmap = sns.cubehelix_palette(200, hue=0.05, rot=0, light=0, dark=0.9) #corr_cultural = cultural_equity_matrix.corr() #mask_cultural = np.triu(np.ones_like(corr_cultural, dtype=bool)) cultural_equity_heatmap = sns.heatmap(cultural_equity_matrix, center=0, #mask=mask_cultural, cmap=cmap) race_ethnicity = h1_dictionary[(h1_dictionary['category'] == 'race_ethnicity') | (h1_dictionary['category'] == 'diversity')] race_ethnicity_matrix = w2v_similarities_over_time(race_ethnicity, earlier_model, later_model) #corr_race = race_ethnicity_matrix.corr() #mask_race = np.triu(np.ones_like(corr_race, dtype=bool)) race_ethnicity_heatmap = sns.heatmap(race_ethnicity_matrix, #mask=mask_race, center=0, cmap=cmap).set_title("Figure 4B. Racial and Ethnic Vectors") gender_sexuality = h1_dictionary[(h1_dictionary['category'] == 'gender_sexuality') | (h1_dictionary['category'] == 'diversity')] gender_sexuality_matrix = w2v_similarities_over_time(gender_sexuality, earlier_model, later_model) #corr_gender = gender_sexuality_matrix.corr() #mask_gender = np.triu(np.ones_like(corr_gender, dtype=bool)) gender_sexuality_heatmap = sns.heatmap(gender_sexuality_matrix, center=0, #mask=mask_gender, cmap=cmap).set_title("Figure 4C. 
Gender and Sexuality Vectors") social_class = h1_dictionary[(h1_dictionary['category'] == 'social_class') | (h1_dictionary['category'] == 'diversity')] social_class_matrix = w2v_similarities_over_time(social_class, earlier_model, later_model) #corr_class = social_class_matrix.corr() #mask_class = np.triu(np.ones_like(corr_class, dtype=bool)) social_class_heatmap = sns.heatmap(social_class_matrix, center=0, #mask=mask_class, cmap=cmap).set_title("Figure 4D. Socio-Economic Vectors") sns.set(rc={'figure.figsize':(14,9)}) sns.set_style("whitegrid") fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, constrained_layout=False) # race/ethnicity race_ethnicity_g = sns.heatmap(race_ethnicity_matrix, center=0, cmap=cmap, ax=ax1) race_ethnicity_labels = race_ethnicity['term'].sort_values().tolist() N = len(race_ethnicity_labels) race_label = 'socialdiversity' race_index = race_ethnicity_labels.index(race_label) x, y, w, h = 0, race_index, N, 1 for _ in range(2): race_ethnicity_g.add_patch(Rectangle((x, y), w, h, fill=False, edgecolor='black', lw=2, clip_on=False)) x, y, w, h = y, x, h, w race_ethnicity_g.set_title("Figure 5A. Racial and Ethnic Vectors") race_ethnicity_g.set(xlabel=None) race_ethnicity_g.set(ylabel=None) race_ethnicity_g.set_xticklabels(race_ethnicity_g.get_xticklabels(), rotation=40, horizontalalignment='right') ## cultural cultural_equity_g = sns.heatmap(cultural_equity_matrix, center=0, cmap=cmap, ax=ax2) cultural_labels = cultural_equity['term'].sort_values().tolist() N = len(cultural_labels) cultural_label = 'socialdiversity' cultural_index = cultural_labels.index(cultural_label) x, y, w, h = 0, cultural_index, N, 1 for _ in range(2): cultural_equity_g.add_patch(Rectangle((x, y), w, h, fill=False, edgecolor='black', lw=2, clip_on=False)) x, y, w, h = y, x, h, w cultural_equity_g.set_title("Figure 5B. Cultural and Equity/Justice Vectors") cultural_equity_g.set(xlabel=None) cultural_equity_g.set(ylabel=None) cultural_equity_g.set_xticklabels(cultural_equity_g.get_xticklabels(), rotation=40, horizontalalignment='right') # socio-economic social_class_g = sns.heatmap(social_class_matrix, center=0, cmap=cmap, ax=ax3) ses_labels = social_class['term'].sort_values().tolist() N = len(ses_labels) ses_label = 'socialdiversity' ses_index = ses_labels.index(ses_label) x, y, w, h = 0, ses_index, N, 1 for _ in range(2): social_class_g.add_patch(Rectangle((x, y), w, h, fill=False, edgecolor='black', lw=2, clip_on=False)) x, y, w, h = y, x, h, w social_class_g.set_title("Figure 5C. Socio-Economic Vectors") social_class_g.set(xlabel=None) social_class_g.set(ylabel=None) social_class_g.set_xticklabels(social_class_g.get_xticklabels(), rotation=40, horizontalalignment='right') # sex/gender gender_sexuality_g = sns.heatmap(gender_sexuality_matrix, center=0, cmap=cmap, ax=ax4) gender_labels = gender_sexuality['term'].sort_values().tolist() N = len(gender_labels) gender_label = 'socialdiversity' gender_index = gender_labels.index(gender_label) x, y, w, h = 0, gender_index, N, 1 for _ in range(2): gender_sexuality_g.add_patch(Rectangle((x, y), w, h, fill=False, edgecolor='black', lw=2, clip_on=False)) x, y, w, h = y, x, h, w gender_sexuality_g.set_title("Figure 5D. 
Gender and Sexuality Vectors") gender_sexuality_g.set(xlabel=None) gender_sexuality_g.set(ylabel=None) gender_sexuality_g.set_xticklabels(gender_sexuality_g.get_xticklabels(), rotation=40, horizontalalignment='right') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5) ###Output _____no_output_____ ###Markdown The final product provides some interesting results. It sure looks like diversity is generally more similar to most of the vectors apart from the race/ethnicity vectors, which could suggest that diversity is replacing race/ethnicity within in the context of articles that are examining other historically underrepresented populations in biomedical research. ###Code # load dictionary of words os.chdir("/sfs/qumulo/qhome/kb7hp/git/diversity/data/dictionaries/") national_terms = pd.read_csv("diversity_project - national_embeddings.csv") national_terms = national_terms.replace({'term': {'diversity': 'socialdiversity'}}, regex=True) national_terms.head() sns.set(rc={'figure.figsize':(8,5)}) sns.set_style("whitegrid") national_asian = national_terms[(national_terms['category'] == 'asian') | (national_terms['category'] == 'diversity')] national_asian_matrix = w2v_similarities_over_time(national_asian, earlier_model, later_model) cmap = sns.diverging_palette(20, 230, as_cmap=True) national_asian_heatmap = sns.heatmap(national_asian_matrix, center=0, cmap=cmap) sns.set(rc={'figure.figsize':(8,5)}) sns.set_style("whitegrid") national_europe = national_terms[(national_terms['category'] == 'europe') | (national_terms['category'] == 'diversity')] national_europe_matrix = w2v_similarities_over_time(national_europe, earlier_model, later_model) cmap = sns.diverging_palette(20, 230, as_cmap=True) national_europe_heatmap = sns.heatmap(national_europe_matrix, center=0, cmap=cmap) sns.set(rc={'figure.figsize':(8,5)}) sns.set_style("whitegrid") national_americas = national_terms[(national_terms['category'] == 'americas') | (national_terms['category'] == 'diversity')] national_americas_matrix = w2v_similarities_over_time(national_americas, earlier_model, later_model) cmap = sns.diverging_palette(20, 230, as_cmap=True) national_americas_heatmap = sns.heatmap(national_americas_matrix, center=0, cmap=cmap) sns.set(rc={'figure.figsize':(8,5)}) sns.set_style("whitegrid") national_africa = national_terms[(national_terms['category'] == 'africa') | (national_terms['category'] == 'diversity')] national_africa_matrix = w2v_similarities_over_time(national_africa, earlier_model, later_model) cmap = sns.diverging_palette(20, 230, as_cmap=True) national_africa_heatmap = sns.heatmap(national_africa_matrix, center=0, cmap=cmap) sns.set(rc={'figure.figsize':(14,9)}) sns.set_style("whitegrid") fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, constrained_layout=False) ## cultural national_asian_g = sns.heatmap(national_asian_matrix, center=0, cmap=cmap, ax=ax1) national_asian_labels = national_asian['term'].sort_values().tolist() N = len(national_asian_labels) national_asian_label = 'socialdiversity' national_asian_index = national_asian_labels.index(national_asian_label) x, y, w, h = 0, national_asian_index, N, 1 for _ in range(2): national_asian_g.add_patch(Rectangle((x, y), w, h, fill=False, edgecolor='black', lw=2, clip_on=False)) x, y, w, h = y, x, h, w national_asian_g.set_title("Figure 6A. 
Top-10 Asian Vectors") national_asian_g.set(xlabel=None) national_asian_g.set(ylabel=None) national_asian_g.set_xticklabels(national_asian_g.get_xticklabels(), rotation=40, horizontalalignment='right') # race/ethnicity national_europe_g = sns.heatmap(national_europe_matrix, center=0, cmap=cmap, ax=ax2) national_europe_labels = national_europe['term'].sort_values().tolist() N = len(national_europe_labels) national_europe_label = 'socialdiversity' national_europe_index = national_europe_labels.index(national_europe_label) x, y, w, h = 0, national_europe_index, N, 1 for _ in range(2): national_europe_g.add_patch(Rectangle((x, y), w, h, fill=False, edgecolor='black', lw=2, clip_on=False)) x, y, w, h = y, x, h, w national_europe_g.set_title("Figure 6B. Top-10 European Vectors") national_europe_g.set(xlabel=None) national_europe_g.set(ylabel=None) national_europe_g.set_xticklabels(national_europe_g.get_xticklabels(), rotation=40, horizontalalignment='right') # sex/gender national_americas_g = sns.heatmap(national_americas_matrix, center=0, cmap=cmap, ax=ax3) national_americas_labels = national_americas['term'].sort_values().tolist() N = len(national_americas_labels) national_americas_label = 'socialdiversity' national_americas_index = national_americas_labels.index(national_americas_label) x, y, w, h = 0, national_americas_index, N, 1 for _ in range(2): national_americas_g.add_patch(Rectangle((x, y), w, h, fill=False, edgecolor='black', lw=2, clip_on=False)) x, y, w, h = y, x, h, w national_americas_g.set_title("Figure 6C. Top-10 American Vectors") national_americas_g.set(xlabel=None) national_americas_g.set(ylabel=None) national_americas_g.set_xticklabels(national_americas_g.get_xticklabels(), rotation=40, horizontalalignment='right') # socio-economic national_africa_g = sns.heatmap(national_africa_matrix, center=0, cmap=cmap, ax=ax4) national_africa_labels = national_africa['term'].sort_values().tolist() N = len(national_africa_labels) national_africa_label = 'socialdiversity' national_africa_index = national_africa_labels.index(national_africa_label) x, y, w, h = 0, national_africa_index, N, 1 for _ in range(2): national_africa_g.add_patch(Rectangle((x, y), w, h, fill=False, edgecolor='black', lw=2, clip_on=False)) x, y, w, h = y, x, h, w national_africa_g.set_title("Figure 6D. Top-10 African Vectors") national_africa_g.set(xlabel=None) national_africa_g.set(ylabel=None) national_africa_g.set_xticklabels(national_africa_g.get_xticklabels(), rotation=40, horizontalalignment='right') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5) ###Output _____no_output_____ ###Markdown Mean Differences Let's define a function that compares the mean differences for all terms within each category so that we can make better sense of the visualizations. 
###Code def w2v_sim_mean_comps(df, w2v_m1, w2v_m2): df = list(product(df['term'], df['term'])) df = pd.DataFrame(df, columns=['term1','term2']) cos_sim_m1 = [] for index, row in df.iterrows(): cos_sim_m1.append(w2v_m1.wv.similarity(row[0],row[1])) cos_sim_m1 = DataFrame(cos_sim_m1, columns=['cos_sim_m1']) df = df.merge(cos_sim_m1, left_index=True, right_index=True) cos_sim_m2 = [] for index, row in df.iterrows(): cos_sim_m2.append(w2v_m2.wv.similarity(row[0],row[1])) cos_sim_m2 = DataFrame(cos_sim_m2, columns=['cos_sim_m2']) df = df.merge(cos_sim_m2, left_index=True, right_index=True) df["cos_sim_diffs"] = df["cos_sim_m2"] - df["cos_sim_m1"] return df os.chdir("/sfs/qumulo/qhome/kb7hp/git/diversity/data/dictionaries/") h1_allterms = pd.read_csv("diversity_project - tree_data.csv") h1_allterms = h1_allterms[h1_allterms['term'].isin(list(earlier_model.wv.key_to_index))] h1_allterms['term'] = h1_allterms['term'].str.replace('_', '', regex=True) h1_allterms = h1_allterms[~h1_allterms['term'].isin(['intersexual'])] h1_allterms = h1_allterms.replace({'term': {'diverse': 'socialdiverse'}}, regex=True).sort_values(by=['category', 'term']) h1_allterms = h1_allterms.replace({'term': {'diversely': 'socialdiversely'}}, regex=True).sort_values(by=['category', 'term']) h1_allterms = h1_allterms.replace({'term': {'diversified': 'socialdiversified'}}, regex=True).sort_values(by=['category', 'term']) h1_allterms = h1_allterms.replace({'term': {'diversification': 'socialdiversification'}}, regex=True).sort_values(by=['category', 'term']) h1_allterms = h1_allterms.replace({'term': {'diversifying': 'socialdiversifying'}}, regex=True).sort_values(by=['category', 'term']) h1_allterms = h1_allterms.replace({'term': {'diversity': 'socialdiversity'}}, regex=True).sort_values(by=['category', 'term']) h1_allterms os.chdir("/sfs/qumulo/qhome/kb7hp/git/diversity/data/dictionaries/") h1_allterms = pd.read_csv("diversity_project - tree_data.csv") h1_allterms = h1_allterms[h1_allterms['term'].isin(list(earlier_model.wv.key_to_index))] h1_allterms['term'] = h1_allterms['term'].str.replace('_', '', regex=True) h1_allterms = h1_allterms[~h1_allterms['term'].isin(['intersexual'])] h1_allterms = h1_allterms.replace({'term': {'diverse': 'socialdiverse'}}, regex=True).sort_values(by=['category', 'term']) h1_allterms = h1_allterms.replace({'term': {'diversely': 'socialdiversely'}}, regex=True).sort_values(by=['category', 'term']) h1_allterms = h1_allterms.replace({'term': {'diversified': 'socialdiversified'}}, regex=True).sort_values(by=['category', 'term']) h1_allterms = h1_allterms.replace({'term': {'diversification': 'socialdiversification'}}, regex=True).sort_values(by=['category', 'term']) h1_allterms = h1_allterms.replace({'term': {'diversifying': 'socialdiversifying'}}, regex=True).sort_values(by=['category', 'term']) h1_allterms = h1_allterms.replace({'term': {'diversity': 'socialdiversity'}}, regex=True).sort_values(by=['category', 'term']) list_of_terms = ['cultural', 'disability', 'equity', 'lifecourse', 'migration', 'minority', 'race_ethnicity', 'sex_gender', 'sexuality', 'social_class'] aggregated_means = pd.DataFrame(columns = ['term1', 'term2', 'cos_sim_m1', 'cos_sim_m2', 'cos_sim_diffs', 'group']) for term in list_of_terms: tmp_dictionary = h1_allterms[(h1_allterms['category'] == term) | (h1_allterms['term'] == 'socialdiversity')] comp_outcomes = w2v_sim_mean_comps(tmp_dictionary, earlier_model, later_model) comp_outcomes = comp_outcomes[(comp_outcomes['term1'] == 'socialdiversity') & (comp_outcomes['term2'] != 
'socialdiversity')] comp_outcomes['group'] = [term] * len(comp_outcomes) aggregated_means = pd.concat([aggregated_means, comp_outcomes]) aggregated_means = aggregated_means.groupby(by=["group"]).mean().round(3) aggregated_means.to_csv('/sfs/qumulo/qhome/kb7hp/git/diversity/data/final_data/socdiv_mean_h1_comps_0921.csv') aggregated_means os.chdir("/sfs/qumulo/qhome/kb7hp/git/diversity/data/dictionaries/") h3_dictionary = pd.read_csv("diversity_project - h3_dictionary.csv") h3_dictionary = h3_dictionary[h3_dictionary['term'].isin(list(earlier_model.wv.key_to_index))] h3_dictionary = h3_dictionary.drop(['str_type','regional','subclass','source','date_added'], axis=1) category_analysis = h3_dictionary.drop(['continental'], axis=1) #category_analysis = category_analysis[category_analysis['mean_embeddings'] == 1] #category_analysis = category_analysis.drop(['mean_embeddings'], axis=1) #category_analysis = category_analysis[~category_analysis['term'].str.contains("s$")] category_analysis = category_analysis[~category_analysis['term'].str.contains("_")] os.chdir("/sfs/qumulo/qhome/kb7hp/git/diversity/data/dictionaries/") h3_div_subset = pd.read_csv("diversity_project - tree_data.csv") h3_div_subset = h3_div_subset.drop(['viz_embeddings','mean_embeddings','hypothesis'], axis=1) h3_div_subset = h3_div_subset[h3_div_subset['category'] == 'diversity'] h3_div_subset = h3_div_subset[['term', 'category']] h3_div_subset = h3_div_subset.replace({'term': {'diverse': 'socialdiverse'}}, regex=True).sort_values(by=['category', 'term']) h3_div_subset = h3_div_subset.replace({'term': {'diversely': 'socialdiversely'}}, regex=True).sort_values(by=['category', 'term']) h3_div_subset = h3_div_subset.replace({'term': {'diversified': 'socialdiversified'}}, regex=True).sort_values(by=['category', 'term']) h3_div_subset = h3_div_subset.replace({'term': {'diversification': 'socialdiversification'}}, regex=True).sort_values(by=['category', 'term']) h3_div_subset = h3_div_subset.replace({'term': {'diversifying': 'socialdiversifying'}}, regex=True).sort_values(by=['category', 'term']) h3_div_subset = h3_div_subset.replace({'term': {'diversity': 'socialdiversity'}}, regex=True).sort_values(by=['category', 'term']) category_analysis = pd.concat([category_analysis, h3_div_subset]) category_analysis list_of_terms = ['continental', 'directional', 'national', 'omb/us census', 'race/ethnicity', 'subcontinental', 'subnational'] aggregated_means = pd.DataFrame(columns = ['term1', 'term2', 'cos_sim_m1', 'cos_sim_m2', 'cos_sim_diffs', 'group']) for term in list_of_terms: tmp_dictionary = category_analysis[(category_analysis['category'] == term) | (category_analysis['term'] == 'socialdiversity')] comp_outcomes = w2v_sim_mean_comps(tmp_dictionary, earlier_model, later_model) comp_outcomes = comp_outcomes[(comp_outcomes['term1'] == 'socialdiversity') & (comp_outcomes['term2'] != 'socialdiversity')] comp_outcomes['group'] = [term] * len(comp_outcomes) aggregated_means = pd.concat([aggregated_means, comp_outcomes]) aggregated_means = aggregated_means.groupby(by=["group"]).mean().round(3) aggregated_means.to_csv('/sfs/qumulo/qhome/kb7hp/git/diversity/data/final_data/socdiv_mean_h3_comps_0921.csv') aggregated_means os.chdir("/sfs/qumulo/qhome/kb7hp/git/diversity/data/dictionaries/") h3_dictionary = pd.read_csv("diversity_project - h3_dictionary.csv") h3_dictionary = h3_dictionary[h3_dictionary['term'].isin(list(earlier_model.wv.key_to_index))] h3_dictionary = h3_dictionary.drop(['str_type','regional','subclass','source','date_added'], 
axis=1) national_means = h3_dictionary[h3_dictionary['category'] == 'national'] #national_means = national_means[national_means['mean_embeddings'] == 1] national_means = national_means.drop(['category','mean_embeddings'], axis=1) national_means = national_means.rename(columns={'continental': 'category'}) national_means = national_means[~national_means['term'].str.contains("s$")] national_means = national_means[~national_means['term'].str.contains("_")] national_means = pd.concat([national_means, h3_div_subset]) national_means list_of_terms = ['africa', 'asia', 'europe', 'north america', 'oceania', 'south america'] aggregated_means = pd.DataFrame(columns = ['term1', 'term2', 'cos_sim_m1', 'cos_sim_m2', 'cos_sim_diffs', 'group']) for term in list_of_terms: tmp_dictionary = national_means[(national_means['category'] == term) | (national_means['term'] == 'socialdiversity')] comp_outcomes = w2v_sim_mean_comps(tmp_dictionary, earlier_model, later_model) comp_outcomes = comp_outcomes[(comp_outcomes['term1'] == 'socialdiversity') & (comp_outcomes['term2'] != 'socialdiversity')] comp_outcomes['group'] = [term] * len(comp_outcomes) aggregated_means = pd.concat([aggregated_means, comp_outcomes]) aggregated_means = aggregated_means.groupby(by=["group"]).mean().round(3) aggregated_means.to_csv('/sfs/qumulo/qhome/kb7hp/git/diversity/data/final_data/socdiv_mean_pop_comps_0921.csv') aggregated_means ###Output _____no_output_____
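###Markdown The three aggregation loops above (the H1 categories, the H3 categories, and the national groupings) repeat the same pattern, so as a closing note here is a sketch of a single helper that wraps it. It assumes `w2v_sim_mean_comps`, `earlier_model`, and `later_model` are still in scope and that 'socialdiversity' is the anchor term. ###Code
def mean_divergence_by_group(dictionary_df, categories, m1, m2, anchor='socialdiversity'):
    """Average change in cosine similarity between `anchor` and each category's terms."""
    frames = []
    for cat in categories:
        subset = dictionary_df[(dictionary_df['category'] == cat) | (dictionary_df['term'] == anchor)]
        comps = w2v_sim_mean_comps(subset, m1, m2)
        comps = comps[(comps['term1'] == anchor) & (comps['term2'] != anchor)]
        comps['group'] = cat
        frames.append(comps)
    # numeric_only keeps pandas from trying to average the string columns
    return pd.concat(frames).groupby('group').mean(numeric_only=True).round(3)

# e.g. the national-level table from the last cell, rebuilt in one call:
# mean_divergence_by_group(national_means,
#                          ['africa', 'asia', 'europe', 'north america', 'oceania', 'south america'],
#                          earlier_model, later_model)
###Output _____no_output_____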
Assignment-day_1.ipynb
###Markdown Question 1: Given the following jumbled word, OBANWRI, guess the correct English word. A. RANIBOW B. RAINBOW C. BOWRANI D. ROBWANI ###Code input_word =input("Enter Your choice for jumbled word OBANWRI : A. RANIBOW , B. RAINBOW, C. BOWRANI, D. ROBWANI : " ) if input_word.title() == 'A': print("Your choice is Wrong") elif input_word.title() =='B': print("Your choice is Right") elif input_word.title() =='C': print("Your choice is Wrong") elif input_word.title() =='D': print("Your choice is Wrong") else: print("You entered a wrong choice for the given jumbled word") ###Output Enter Your choice for jumbled word OBANWRI : A. RANIBOW , B. RAINBOW, C. BOWRANI, D. ROBWANI : b Your choice is Right ###Markdown Question 2: Write a program which prints “LETS UPGRADE”. (Please note that you have to print in ALL CAPS as given) ###Code firstWord = 'lets' lastWord = 'upgrade' print(firstWord.upper() +' '+ lastWord.upper()) ###Output LETS UPGRADE ###Markdown Question 3: Write a program that takes cost price and selling price as input and displays whether the transaction is a Profit or a Loss or Neither. ###Code print("Enter the Cost price of commodity : ") costPrice = int(input()) print("Enter the selling price of commodity : ") sellPrice = int(input()) transaction = sellPrice - costPrice if transaction > 0 : print("You Earned a profit of : " , transaction) elif transaction < 0 : print("You are in loss of : ", transaction) else: print("You are neither in loss nor in profit") ###Output Enter the Cost price of commodity : 50 Enter the selling price of commodity : 100 You Earned a profit of : 50 ###Markdown Question 4: Write a program that takes cost price and selling price as input and displays whether the transaction is a Profit or a Loss or Neither. INPUT FORMAT: The first line contains the cost price. The second line contains the selling price. OUTPUT FORMAT: Print "Profit" if the transaction is a profit or "Loss" if it is a loss. If it is neither profit nor loss, print "Neither". (You must not have quotes in your output) NOTE: Please stick to the input and output format. Don't add anything extra like 'Enter cost price', 'Enter selling price', etc. ###Code costPrice = int(input()) sellPrice = int(input()) transaction = sellPrice - costPrice if transaction > 0 : print("Profit") elif transaction < 0 : print("Loss") else: print("Neither") ###Output 23 43 Profit ###Markdown Question 5: Write a program that takes an amount in Indian Rupees as input. You need to find its equivalent in Euro and display it. Assume 1 Euro equals Rs. 80. Please note that you are expected to stick to the given input and output format as in the sample test cases. Please don't add any extra lines such as 'Enter a number', etc. Your program should take only one number as input and display the output. ###Code indianRu = int(input()) euro = indianRu/80 print(euro) ###Output 500 6.25
11 - Introduction to Python/8_Iteration/6_Iterating over Dictionaries (6:21)/Iterating over Dictionaries - Exercise_Py3.ipynb
###Markdown Iterating over Dictionaries In this exercise, you will use the same dictionaries as the ones we used in the lesson - "prices" and "quantity". This time, don't just calculate all the money Jan spent. Calculate how much she spent on products with a price of 5 dollars or more. ###Code prices = { "box_of_spaghetti" : 4, "lasagna" : 5, "hamburger" : 2 } quantity = { "box_of_spaghetti" : 6, "lasagna" : 10, "hamburger" : 0 } money_spent = 0 ###Output _____no_output_____ ###Markdown And how much did Jan spend on products that cost less than 5 dollars? ###Code prices = { "box_of_spaghetti" : 4, "lasagna" : 5, "hamburger" : 2 } quantity = { "box_of_spaghetti" : 6, "lasagna" : 10, "hamburger" : 0 } money_spent = 0 ###Output _____no_output_____
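###Markdown One possible solution sketch (not part of the original exercise files): loop over the prices dictionary and only add up the products that meet the price condition. ###Code
money_spent = 0
for product in prices:
    if prices[product] >= 5:
        money_spent += prices[product] * quantity[product]
print(money_spent)  # only lasagna qualifies: 5 * 10 = 50

money_spent_under_5 = 0
for product in prices:
    if prices[product] < 5:
        money_spent_under_5 += prices[product] * quantity[product]
print(money_spent_under_5)  # spaghetti 4 * 6 = 24, hamburger 2 * 0 = 0
###Output _____no_output_____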
d2l-en/chapter_attention-mechanism/seq2seq-attention.ipynb
###Markdown Sequence to Sequence with Attention Mechanism In this section, we add the attention mechanism to the sequence to sequence model introduced in :numref:`chapter_seq2seq` to explicitly select encoder states. :numref:`fig_s2s_attention` shows the model architecture for a decoding time step. As can be seen, the memory of the attention layer consists of the encoder outputs of each time step. During decoding, the decoder output from the previous time step is used as the query, and the attention output is then fed into the decoder with the input to provide attentional context information. ![The second time step in decoding for the sequence to sequence model with attention mechanism.](../img/seq2seq_attention.svg):label:`fig_s2s_attention` The layer structure in the encoder and the decoder is shown in :numref:`fig_s2s_attention_details`. ![The layers in the sequence to sequence model with attention mechanism.](../img/seq2seq-attention-details.svg):label:`fig_s2s_attention_details` ###Code import d2l from mxnet import nd from mxnet.gluon import rnn, nn ###Output _____no_output_____ ###Markdown Decoder Now let's implement the decoder of this model. We add an MLP attention layer which has the same hidden size as the LSTM layer. The state passed from the encoder to the decoder contains three items: - the encoder outputs of all time steps, which are used as the attention layer's memory with identical keys and values - the hidden state of the encoder's last time step, which is used to initialize the decoder's hidden state - the valid lengths of the encoder inputs, so the attention layer will not consider encoder outputs for padding tokens. In each time step of decoding, we use the output of the last RNN layer as the query for the attention layer. Its output is then concatenated with the input embedding vector to feed into the RNN layer. Although the RNN layer's hidden state also contains history information from the decoder, the attention output explicitly selects the encoder outputs that are correlated to the query and suppresses other, non-correlated information.
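###Markdown Before looking at the implementation, the additive (MLP) attention scoring described above can be sketched with plain NumPy, independent of the d2l/MXNet API. This is an illustrative sketch only; the weight matrices, shapes, and function name below are made up for the example and are not the ones d2l.MLPAttention uses. ###Code
import numpy as np

def mlp_attention(query, keys, values, W_q, W_k, v):
    # query: (h,), keys/values: (T, h). Scores come from a one-hidden-layer MLP with tanh.
    scores = np.tanh(query @ W_q + keys @ W_k) @ v   # (T,)
    weights = np.exp(scores - scores.max())
    weights /= weights.sum()                         # softmax over the T encoder time steps
    return weights @ values                          # context vector, shape (h,)

T, h = 5, 16
rng = np.random.default_rng(0)
context = mlp_attention(rng.normal(size=h), rng.normal(size=(T, h)), rng.normal(size=(T, h)),
                        rng.normal(size=(h, h)), rng.normal(size=(h, h)), rng.normal(size=h))
print(context.shape)  # (16,)
###Output _____no_output_____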
###Code class Seq2SeqAttentionDecoder(d2l.Decoder): def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0, **kwargs): super(Seq2SeqAttentionDecoder, self).__init__(**kwargs) self.attention_cell = d2l.MLPAttention(num_hiddens, dropout) self.embedding = nn.Embedding(vocab_size, embed_size) self.rnn = rnn.LSTM(num_hiddens, num_layers, dropout=dropout) self.dense = nn.Dense(vocab_size, flatten=False) def init_state(self, enc_outputs, enc_valid_len, *args): outputs, hidden_state = enc_outputs # Transpose outputs to (batch_size, seq_len, hidden_size) return (outputs.swapaxes(0,1), hidden_state, enc_valid_len) def forward(self, X, state): enc_outputs, hidden_state, enc_valid_len = state X = self.embedding(X).swapaxes(0, 1) outputs = [] for x in X: # query shape: (batch_size, 1, hidden_size) query = hidden_state[0][-1].expand_dims(axis=1) # context has same shape as query context = self.attention_cell( query, enc_outputs, enc_outputs, enc_valid_len) # concatenate on the feature dimension x = nd.concat(context, x.expand_dims(axis=1), dim=-1) # reshape x to (1, batch_size, embed_size+hidden_size) out, hidden_state = self.rnn(x.swapaxes(0, 1), hidden_state) outputs.append(out) outputs = self.dense(nd.concat(*outputs, dim=0)) return outputs.swapaxes(0, 1), [enc_outputs, hidden_state, enc_valid_len] ###Output _____no_output_____ ###Markdown Use the same hyper-parameters to create an encoder and decoder as in :numref:`chapter_seq2seq`, we get the same decoder output shape, but the state structure is changed. ###Code encoder = d2l.Seq2SeqEncoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2) encoder.initialize() decoder = Seq2SeqAttentionDecoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2) decoder.initialize() X = nd.zeros((4, 7)) state = decoder.init_state(encoder(X), None) out, state = decoder(X, state) out.shape, len(state), state[0].shape, len(state[1]), state[1][0].shape ###Output _____no_output_____ ###Markdown TrainingAgain, we use the same training hyper-parameters as in:numref:`chapter_seq2seq`. The training loss is similar to the seq2seq model, because thesequences in the training dataset are relative short. The additional attentionlayer doesn't lead to a significant different. But due to both attention layercomputational overhead and we unroll the time steps in the decoder, this modelis much slower than the seq2seq model. ###Code embed_size, num_hiddens, num_layers, dropout = 32, 32, 2, 0.0 batch_size, num_steps = 64, 10 lr, num_epochs, ctx = 0.005, 200, d2l.try_gpu() src_vocab, tgt_vocab, train_iter = d2l.load_data_nmt(batch_size, num_steps) encoder = d2l.Seq2SeqEncoder( len(src_vocab), embed_size, num_hiddens, num_layers, dropout) decoder = Seq2SeqAttentionDecoder( len(tgt_vocab), embed_size, num_hiddens, num_layers, dropout) model = d2l.EncoderDecoder(encoder, decoder) d2l.train_s2s_ch8(model, train_iter, lr, num_epochs, ctx) ###Output loss 0.031, 4992 tokens/sec on gpu(0) ###Markdown Lastly, we predict several sample examples. ###Code for sentence in ['Go .', 'Wow !', "I'm OK .", 'I won !']: print(sentence + ' => ' + d2l.predict_s2s_ch8( model, sentence, src_vocab, tgt_vocab, num_steps, ctx)) ###Output Go . => va !
Examples/Google Drive Integration - Workbook.ipynb
###Markdown Information * Test Project ID: optimum-column-273718* Project Name: PozzoIntegrationT1* Organization: uw.edu* Location: uw.edu Goals * integration of Gdrive API and Python * done - use of a client_secrets.json file a Group Project request for API access* Movement through Gdrive * done - relys on file/folder ID's - need to read and save the ones you want* Uploading a new folder - or updating an existing folder from python information * Done - will download it to the current folder but will rewrite every time it is downloaded and can then be read into python* Pulling information from Gdrive into Python - saving as a dataframe * Done - can be pulled into Python and edited as a dataframe. * pickling useful fileID's and saving the file paths for ease of use* * searching title names - within the Gdrive folder itself * search loops that loop through all folders/files until there is a match* searching the information within the Gdrive folder itself* * Modifying metadata*?* to update information* * Optimizing Gdrive structure for searching - * Branching of structure? by instrument? by person? by experiment? by materials? by date? * Project - instrument - person - date? * Instrument - person - experiment - date? * Pozzo Group - All Data - ()* Organize HDF5 files in Gdrive? how does the connections work? Imports ###Code import pandas as pd from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive gauth = GoogleAuth() #creates browser to ask for consent to modify files in the name of the API project (Pozzo Lab Group) gauth.LocalWebserverAuth() drive = GoogleDrive(gauth) ###Output Your browser has been opened to visit: https://accounts.google.com/o/oauth2/auth?client_id=166164154392-i4l5heveqn5jq5gjsr4pcdn53m8ql5r3.apps.googleusercontent.com&redirect_uri=http%3A%2F%2Flocalhost%3A8080%2F&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&access_type=offline&response_type=code Authentication successful. 
###Markdown View all folders and files in your Google Drive ###Code filelist = drive.ListFile({'q':"'root' in parents and trashed = false"}).GetList() for file in filelist: print('Title: %s, ID: %s' % (file['title'], file['id'])) if(file['title'] == 'Pozzo Group'): fileID = file['id'] print(fileID) ###Output Title: Recipes, ID: 1Sl_R4-T_R507dmiYZfWCng5cSJwc-Olc Title: Lit Review, ID: 1ItbY0Lbz_O8pS59yl8GfkbWZ2LAFz5zs Title: Polymer Lit Review, ID: 17c-kCk3jvKu1qcGmnzDwoOvfClV27Hf0 Title: Polymer Blends, ID: 1AsYXaij5sFtIpFWoxDtYjmvkUB7ut8zY Title: Drafts.pdf, ID: 1io6O364YXNlzgkdWyOzUxMNbjVxZ1u4K Title: Science as Art, ID: 1H-4hSRnJyOlo8UzQs5aSkP9JNfZa6493 Title: Blends Paper, ID: 1M08wCd1PWl-yKvil89IMhxUJEa6VnvZE Title: FacultyMeetings2020-ToFaculty, ID: 10JotGiYWTRWsaA3uVhCSq3O8WaInQEZIIs-e1YAcKfI Title: FacultyMeetings2020-ToFaculty.xlsx, ID: 1b4949qLsUTxbW_JZ6GJ-EcmgKZOy1GSQ Title: McAleer Money: Pallicera, ID: 1J08pSvqxWz4hfNzz6L7_cAJs1OQg7-Ju Title: ACES, ID: 0BxITy4q3C1SYWmg5YlNHcjlfVEE Title: Note Jan 21, 2020.pdf, ID: 1w_Zk8a8L63OFNilooGU0cN70OqO8FyTr Title: FY20_ACES Budget_APPROVED, ID: 1eGUoKTSLGn1WMsCTGSP6o2ubmI8AeFQcckREKFOY-g4 Title: FY20_ACES Budget_APPROVED.pdf, ID: 0BwGCEaO99URQYjlySG9jS04tRUo2SF9ta1BWWjRFWWFzSHFN Title: NCEC2020 schedule_Sageedits_v3, ID: 1eK_QrqrW7KyBG-2DeS0OIoOKPs1gysR6vwWV5CbmMOw Title: NCEC2020 schedule_ASMedits_v2.docx, ID: 0BwGCEaO99URQSWVhV0tlUUdlYXVVVnF3TzVQcHU2N0NUZlM4 Title: Pozzo Group, ID: 0BxuawDMj8CGPV2cyU0lvcHQ3clU Title: Controlled Systems, ID: 1vBh0kg7DeAgSfDYpEN8rAwSHSLnIIaPm Title: NCEC2020_Poster_v2, ID: 1FodfTycb7uHkj8cnhIJk6ldJgLkgJeA_aeNRRkG5QKc Title: NCEC2020_Poster_v2.pdf, ID: 0BwGCEaO99URQOFpGeWhNR2RKWThfYzBuc2ZxNExwdnRIcjNV Title: Calibration of Opentrons Data, ID: 1r2dxUNLJlFGxSpL3NlJucerffeclqKbH_x6i8mR369c Title: VSANS Contrast Matching Drug Movement For Caitlyn, ID: 1DwSiSAKj0FAAacnsgcmOiIkHb7rbq6RzejO6zQtOaKA Title: VSANS Contrast Matching Drug Movement For Caitlyn.docx, ID: 0BwGCEaO99URQdDFoTU40OHNCVFZjU2sxaHJNd2ZyVVdJYXdF Title: NCNR_SANS_20190416_LDP_CMW, ID: 10AQYBPhFTsWK6fFHH6_PIxlWgG0IfjOLRnwd4nMIIDA Title: NCNR_SANS_20190416_LDP_CMW, ID: 1hnOcDedQepVlYCQofr2zH5YymlhQ5zt8r4JLpgg8waA Title: NCNR_SANS_20190416_LDP_CMW, ID: 1qVKEecQXYkSwu-8v-tFjwDkuKeM_rFT5OGdmP9xCLRU Title: NCNR_SANS_20190416_LDP_CMW.docx, ID: 0BwGCEaO99URQZXh6QWNyd1hJRU9BMk9LaTU5c3c4TUlfWnlB Title: Contact Tracker, ID: 1zH9D2oKqYchnoWkDmGXmfK_Aw4UhPnpAK3GX0fShtX0 Title: Contact Tracker.xlsx, ID: 0BwGCEaO99URQd2RubmVSaFJaLVZ2bDVWZ29Bb3lWR0Fhd2pn Title: Photos, ID: 12Rbd9qjBvkDQJ0nw3bSrev-KcIzb6Mk7 Title: Schools Information list v2, ID: 1WoxXlSKpcIsFQDG123FzXStdgo1fLVsVQx8MYVrOoV4 Title: Candy and Snacks that are Iconic (TM) to American culture, ID: 1Nu4eaz2ofE1n1Ejta-FpBmBPNMUAwLONcnmhdy1qhRc Title: Drafts, ID: 10KS9yBhBawNot4zCtHzK8c932RRKJFNM6lbM59L9j5I Title: Jones Foster Accelerator, ID: 1-Afd6Mkthtmoczw5jIdVh06ome1EQIg0 Title: NIST VSANS (8/12-8/16), ID: 10Wyh1CsHoJ0BgCFCImLSu0dyoleS9zx1 Title: July 18th 2019, ID: 1Ie1_clIVbD3rb_I-ZegHF-QzFZtmvP0o17QrAENePu8 Title: Scattering Sample Prep, ID: 1zvNJHpkX_H6KBiXO4vZUXSrUyBNxsTb9dS_qmZ1pfH8 Title: Scattering sample preparation, ID: 17KuqEiPxi0KqRVRHWvrSZ9otya5nUIPG8Y0CbDpxKiI Title: 20190630_134413.mp4, ID: 1-3fRUP1m3ketPA3D3VxgA4xMRmd88GWI Title: PeerEval19pABET, ID: 1gGRU_Rk7mf5wHVHtrvoCJ6Wzcxy9ytUDWUsxaoMTFa4 Title: PeerEval19pABET.docx, ID: 0BwGCEaO99URQTVlzbVJjSm9HVk1mN1pzaVBNNURtdlYwQmdV Title: for sage, ID: 1q-UyE-b2LHK4newahChghxY7wt5M1_TjxYn_09elSDU Title: for sage.pdf, ID: 
0BwGCEaO99URQdDBQeU05QWJZbXBhZm9XT0VDQ2hHNllXdzJJ Title: Colloidal Systems, ID: 1JVOA-RkU51slQLVgyXTmFK8h-tfIVOB- Title: Colloids Lab, ID: 1idjem9fwXDZpYWBUxN7rZ0ELytPPtIKk Title: ACES officer bios, ID: 1eekJ40dqR5LD6kvDrEVx7wuSHaDxN1DnJyUBoifiPK4 Title: [Rafael_C._Gonzalez,_Richard_E._Woods,_Steven_L._E(b-ok.cc) (1), ID: 15r1jHh7fp2YIAV_SDnOUV4Q0mDE4RB4V36T91PX7OrY Title: Colloids HW3.docx, ID: 1O-0sCPbQr3j7T2IZAkPvp39v-LIaSxyT Title: Math, ID: 11qSoymfacIHzxrkzWfdE8WCtyXlapDcp Title: D&D, ID: 1fRfdOKQlJbGnBnu3a-6cKsb6Zygh20eh Title: Research Notes, ID: 1eb533JwOp6Mbn2B0J48Drfis9391pPvH Title: Trainings, ID: 1FlmMP4_tXo080NFsJwXwSqN6aKNfQkeO Title: Pozzo Group Safety Training Handout, ID: 1R4gwZJlwHX6WAWAtUGWQz31HneA7klfta6PsjKz2kio Title: Pozzo Group Safety Training Handout.docx, ID: 0BwGCEaO99URQalRYYjFja0lxVF9uTXlMMXRtdU9SQXNPOVdN Title: NIFTI and UWIN Drafts, ID: 1lw1ze50RgqFWSmeLXSWR7lty4rjwWqjsI1ukmQpA5jI Title: NIFTI and UWIN Drafts, ID: 1L8Z41xRZCEQFGeh17kmQXodlQf2UqUuN_Uv9ZhzpjgQ Title: NIFTI and UWIN Drafts.docx, ID: 1hS2OQxX2vRn0NnUYP2TDSP6V_nkI0vLe Title: Thermodynamics, ID: 13VQpInwheo7Xbo3hlAJxbyPcHCeWh5Nr Title: Transport, ID: 1t3qRg6h2mHoAf1B7K6OZJnelneEbtmIu Title: Orientation, ID: 1Vznfwxxo0kIVwbE-nVn5J0nZNb7s4XSo 0BxuawDMj8CGPV2cyU0lvcHQ3clU ###Markdown File ID's are the method of Gdrive for working through file paths - you have to know the ID of the file you want Into a folder within the root folder- replace the 'root' in the 'q': "...." with the fileID of the folder you wish to go into ###Code filelist2 = drive.ListFile({'q':"'0BxuawDMj8CGPV2cyU0lvcHQ3clU' in parents and trashed = false"}).GetList() for file in filelist2: #print('Title: %s, ID: %s' % (file['title'], file['id'])) if(file['title'] == 'EscalateTesting'): fileID2 = file['id'] print(fileID2) folder_mime = drive.CreateFile({'id': fileID2}) mime = folder_mime['mimeType'] string = 'the mimetype is {}'.format(mime) print(string) ###Output the mimetype is application/vnd.google-apps.folder ###Markdown Three folders deep ###Code filelist3 = drive.ListFile({'q':"'1jTd692l4o_8TnHpIXg3uHMGRIdlCSIYd' in parents and trashed = false"}).GetList() for file in filelist3: print('Title: %s, ID: %s' % (file['title'], file['id'])) if(file['title'] == 'Wfracs.csv'): filedown = file['id'] print(filedown) print( id_chem) print(id_stock) insert = str('9984039423') variable_string = "'q': '{} in parents and trashed = false'".format(insert) print(variable_string) ###Output _____no_output_____ ###Markdown Search all folders and find the file ID with the title as specified ###Code goal = 'Wfracs.csv' goal_id = 0 filelist = drive.ListFile({'q':"'root' in parents and trashed = false"}).GetList() # Try separating into separate functions - one that will enter a folder and get the file list # the other that will find the goal? - needs to be able to search all the layers of all the # folders - currently can go three deep and thats it. 
def filesearch(filelist, goal): for file in filelist: hold_name = file['title'] if file['title'] == goal: goal_id = file['id'] return goal_id elif file['mimeType'] == 'application/vnd.google-apps.folder': hold_id = file['id'] hold_name = file['title'] filelist2 = drive.ListFile({'q':"'{}' in parents and trashed = false".format(hold_id)}).GetList() for file in filelist2: if file['title'] == goal: goal_id = file['id'] return goal_id elif file['mimeType'] == 'application/vnd.google-apps.folder': hold_id = file['id'] hold_name = file['title'] filelist3 = drive.ListFile({'q':"'{}' in parents and trashed = false".format(hold_id)}).GetList() for file in filelist3: if file['title'] == goal: goal_id = file['id'] return goal_id else: pass #print('file not found in folder {}'.format(hold_name)) else: pass #print('file not found in folder {}'.format(hold_name)) else: pass #print('file not found in folder {}'.format(hold_name)) #print('file not found within three folders of starting location') filesearch(filelist, goal) ###Output _____no_output_____ ###Markdown Upload a file into a folder ###Code #Creation of the desired file in the place you desire - #can change location using the folder ID in the parents section file1 = drive.CreateFile({"mimeType": "text/csv", "parents": [{"kind": "drive#fileLink", "id": fileID2}], "title": 'test_file.csv'}) #uploading the file file1.Upload() print('Created file %s with mimeType %s' % (file1['title'], file1['mimeType'])) ###Output Created file test_file.csv with mimeType text/csv ###Markdown Download a file (to your computer and then) into python ###Code print(filedown) file_tdown = drive.CreateFile({'id': filedown}) content = file_tdown.GetContentFile('Wfracs.csv') df = pd.read_csv('Wfracs.csv') ###Output _____no_output_____ ###Markdown All together: Creation, upload, download, edit, re-uploadExample is done with `chemical_inventory.xlsx` and `stock_specification.csv` ###Code # For files that are already created, read files into python (need to be in folder # or have a specified path) they can then be edited or changed in python as needed stock_spec = pd.read_csv('stock_specification.csv') chem_inv = pd.read_excel('chemical_inventory.xlsx') # Uploading files to the specified folder of the Gdrive - the mime type needs # to be changed to reflect the correct file and the "parents" need to have the # file ID of the contianing folder. Files are directly uploaded from file location # so if there are edits done in the notebook they will need to be re-downloaded file_up = drive.CreateFile({"mimeType": "text/csv", "parents": [{"kind": "drive#fileLink", "id": fileID2}], "title": 'stock_specifications.csv'}) file_up.SetContentFile('stock_specification.csv') file_up.Upload() print('Created file %s with mimeType %s' % (file_up['title'], file1['mimeType'])) #pydrive will also assume mimeType and the title from the file being uploaded file_up = drive.CreateFile({"parents": [{"kind": "drive#fileLink", "id": fileID2}]}) file_up.SetContentFile('chemical_inventory.xlsx') file_up.Upload() print('Created file %s with mimeType %s' % (file_up['title'], file1['mimeType'])) # # FILESEARCH IS CURRENTLY INEFFICENT: NEED TO REFINE - works. 
kind-of # # using the filesearch function - find the ID's of the files you are looking to # # download to python one at a time # filelist = drive.ListFile({'q':"'root' in parents and trashed = false"}).GetList() # goal = 'stock_specification.csv' # id_stock = filesearch(filelist, goal) # goal = 'chemical_inventory.xlsx' # id_chem = filesearch(filelist, goal) id_stock = '1iBFJ-JHr8Fx1do-85QF5To1fPur85Yz1' id_chem = '1hLY4iEEao5LXzqkQqsfDgm5f_a-rUwKB' file_down = drive.CreateFile({'id': id_stock}) down_stock = file_down.GetContentFile('stock_specification.csv') df_stock = pd.read_csv('stock_specification.csv') file_down = drive.CreateFile({'id': id_chem}) down_chem = file_down.GetContentFile('chemical_inventory.xlsx') df_chem = pd.read_excel('chemical_inventory.xlsx') # Edit as desired and then SAVE FILE TO COMPUTER # Upload (manually) to the same file ID as before # This will overwrite the file that is currently on the drive. file_up = drive.CreateFile({ "parents": [{"kind": "drive#fileLink", "id": fileID2}], "id": id_stock}) file_up.SetContentFile('stock_specification.csv') file_up.Upload() print('Created file %s with mimeType %s' % (file_up['title'], file1['mimeType'])) file_up = drive.CreateFile({"parents": [{"kind": "drive#fileLink", "id": fileID2}], "id": id_chem}) file_up.SetContentFile('chemical_inventory.xlsx') file_up.Upload() print('Created file %s with mimeType %s' % (file_up['title'], file1['mimeType'])) ###Output Created file stock_specifications.csv with mimeType text/csv Created file chemical_inventory.xlsx with mimeType text/csv
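###Markdown The filesearch function above only descends three folder levels and repeats the same loop at each depth. A recursive variant built on the same PyDrive calls (a sketch, assuming the authenticated drive object created earlier) can search arbitrarily deep: ###Code
def filesearch_recursive(drive, goal, parent_id='root'):
    """Depth-first search for a file title anywhere under parent_id; returns its ID or None."""
    query = "'{}' in parents and trashed = false".format(parent_id)
    for file in drive.ListFile({'q': query}).GetList():
        if file['title'] == goal:
            return file['id']
        if file['mimeType'] == 'application/vnd.google-apps.folder':
            found = filesearch_recursive(drive, goal, file['id'])
            if found is not None:
                return found
    return None

# Example usage:
# print(filesearch_recursive(drive, 'Wfracs.csv'))
###Output _____no_output_____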
_build/jupyter_execute/notebooks/Lesson_2.ipynb
###Markdown 2. Τιμές, τύποι και μεταβλητές. Συμβολοσειρές Σταθερές (Constants)H Python δεν διαθέτει προκαθορισμένες *σταθερές* όπως άλλες γλώσσες προγραμματισμού.Όμως κατά σύμβαση και όχι κατά κανόνα έχει συμφωνηθεί οι *σταθερές* να ονοματίζονται με κεφαλαίους χαρακτήρες.Η αδυναμία της Python στην περίπτωση της δήλωσης *σταθερών* είναι ότι επιτρέπεται η αλλαγή των τιμών τουςΠαρακάτω παρατίθεται ένα παράδειγμα δήλωσης *σταθερών*. ###Code RATIO_FEET_TO_METERS = 3.281 RATIO_LB_TO_KG = 2.205 PI = 3.14 ###Output _____no_output_____ ###Markdown Κυριολεκτικές σταθερές (literal constants)Η κυριολεκτική *σταθερά* ή τιμή είναι ένας αριθμός, ή χαρακτήρας ή μιά συμβολοσειρά. Για παράδειγμα τα παρακάτωαποτελούν τιμές: *3.25* (στην python η υποδιαστολή ορίζεται με . και όχι ,), *"ένα τυχαίο κείμενο"*, *5.25e-1*.Αυτές οι τιμές δεν μεταβάλλονται κατά τη διάρκεια εκτέλεσης του προγράμματος γι' αυτό και λέγονται σταθερές. Μπορούν να εκχωρηθούν σε μεταβλητέςκαι να χρησιμοποιηθούν σαν τελεστέοι σε λογικές εκφράσεις ή σαν παραμέτροι σε συναρτήσεις. Τύποι δεδομένωνΟι τιμές ανήκουν σε τρεις τύπους δεδομένων (data types) ή κλάσσεις (class):- τους ακέραιους αριθμούς (integer) π.χ. το 15 - τους αριθμούς κινητής υποδιαστολής (floating point) π.χ. το 201.25)- τις συμβολοσειρές (string) π.χ. το "Time is money" Με την εντολή `type` ο διερμηνευτής μας απαντάει με τον τύπο της τιμής, όπως παρακάτω: ###Code type("No news, good news.") ###Output _____no_output_____ ###Markdown Η Python είναι *Dynamic typing* δηλαδή δεν ο τύπος των μεταβλητών δεν προκαθορίζεται κατά την συγγραφή αλλά κατά την εκτέλεση. Κανόνες ονοματοδοσίας μεταβλητώνΤα ονόματα των μεταβλητών στην Python υπακούουν στους παρακάτω κανόνες:- Το όνομα μίας μεταβλητής μπορεί να ξεκινά από ένα γράμμα ή από κάτω πάυλα.- Το όνομα μίας μεταβλητής δεν μπορεί με αριθμό.- Το όνομα μίας μεταβλητής μπορεί να περιέχει μόνο αλφαριθμητικούς χαρακτήρες.- Στα ονόματα των μεταβλήτών γίνεται διάκριση ανάμεσα σε πεζά και κεφαλαία (case sensitive).- Οι δεσμευμένες λέξεις της Python (keywords) δεν μπορούν να χρησιμοποιηθούν σε ονόματα μεταβλητών. Συμβολοσειρές (Strings) Μια συμβολοσειρά είναι μια ακολουθία από χαρακτήρες όπως το `"Το πεπρωμένον φυγείν αδύνατον."`.Μπορεί να είναι σε κάθε γλώσσα που υποστηρίζεται από το πρώτυπου Unicode. Οι συμβολοσειρές περικλείονται σε μονά, διπλά ή τριπλά εισαγωγικά. Με τριπλά εισαγωγικά μπορούν να ενσωματωθούν με ευκολία συμβολοσειρές σε πολλές γραμμές και πολλαπλά εισαγωγικά εντός αυτόν.Ακολουθούν παραδείγματα συμβολοσειρά. ###Code "My name is Bond, James Bond." 'There is no smoke without fire' ''' No bees no honey, no work no money. A little is better than none What’s done cannot be undone ''' ###Output _____no_output_____ ###Markdown Χαρακτήρες διαφυγής,κενά, νέες γραμμές Μπορούμε να σπάσουμε μια συμβολοσειρά κατά την συγγραφή σε νέα γραμμή με τον χαρακτήρα `\` και κατά την εκτέλεση με τον χαρακτήρα `\n` π.χ. ###Code message = 'There is no smoke \ without fire' print(message) message = 'There is no smoke \nwithout fire' print(message) ###Output There is no smoke without fire ###Markdown Ή να ορίσουμε κενά με το `\t` ###Code message = 'There is no smoke \twithout fire' print(message) ###Output There is no smoke without fire ###Markdown Ο χαρακτήρας `\` είναι χαρακτήρας διαφυγής που απενεργοποιεί την ειδική λειτουργία των παραπάνω ή την παράθεση εισαγωγικών μεσα σε εισαγωγικά. 
###Code print('There is no smoke \\n without fire') print('Where there\'s a will, there\'s a way') ###Output Where there's a will, there's a way ###Markdown Ανεπεξέργαστες συμβολοσειρές (Raw Strings)Παρόμοιο αποτέλεσμα με τα παραπάνω πετυχαίνουμε τις ανεπεξέργαστες συμβολοσειρές οι οποίες ορίζονται με ένα r σαν πρόθεμα ###Code print(r"It was made by \n συνέχεια") ###Output It was made by \n συνέχεια ###Markdown Αφαίρεση κενώνΣε αρκετές περιπτώσεις οι συμβολοσειρές περιέχουν κενά είτε στην αρχή είτε στο τέλος.Για παράδειγμα οι παρακάτω συμβολοσειρές δεν είναι το ίδιες για την Python. Και επιβεβαιώνεται σε μέσω ελέγχου ισότητας. ###Code departmentA='ΤΜΧΠΑ' departmentB = ' ΤΜΧΠΑ ' print(departmentA == departmentB) #not equal ###Output False ###Markdown Για την αφαίρεση των κένων αριστερά, δεξιά ή ταυτόχρονα και στις δύο πλευρές της συμβολοσειρας χρησιμοποιούμε την μέθοδο `strip` και τις παραλλαγές της `rstrip` και `lstrip` ###Code print(departmentB.rstrip()) print(departmentB.lstrip()) print(departmentB.strip()) ###Output ΤΜΧΠΑ ΤΜΧΠΑ ΤΜΧΠΑ ###Markdown Συνένωση (Concatenation) συμβολοσειρώνΗ απλή παράθεση συμβολοσειρών οδηγεί στην συνενωσή τους δηλ. ###Code message = "Curiosity " "killed " 'the ' '''cat''' print(message) ###Output Curiosity killed the cat ###Markdown Συνένωση συμβολοσειρών και μεταβλητώνΗ συνένωση μεταβλητών και συμβολοσειρών γίνεται με τον τελεστη `+`. ###Code city='Βόλος' perifereia='Θεσσαλία' print('O '+city+' είναι πόλη της Ελλάδα στην ' +perifereia) ###Output O Βόλος είναι πόλη της Ελλάδα στην Θεσσαλία ###Markdown Η μέθοδος formatΆλλη μια πιο πρακτική μέθοδος κατά την συννένωση μεταβλητών και συμβολοσειρών είναι η μέθοδος format. ###Code print('O {0} έχει υψόμετρο {1} μέτρα'.format("Όλυμπος", 2918)) print('O {} έχει υψόμετρο {} μέτρα'.format("Όλυμπος", 2918)) print('O {name} έχει υψόμετρο {height} μέτρα'.format(name="Σμόλικας", height= 2637 )) ###Output O Όλυμπος έχει υψόμετρο 2918 μέτρα O Όλυμπος έχει υψόμετρο 2918 μέτρα O Σμόλικας έχει υψόμετρο 2637 μέτρα ###Markdown Δεσμευμένες λέξεις (reserved words)Ορισμένες λέξεις έχουν ιδιαίτερη σημασία για την python και δεν μπορούν να χρησιμοποιηθούν σαν ονόματα μεταβλητών. Τα παρακάτω κομμάτια κώδικα θα εκδηλώσουν σφάλμα μεταγλώττισης. ###Code class="Πρώτο εξάμηνο" break='Πότε θα κάνουμε διάλειμμα;' ###Output _____no_output_____ ###Markdown Πρόκειται για 33 λέξεις στην τρέχουσα έκδοση της Python.Μπορούμε να δούμε ποιές είναι αυτές οι δεσμεύνες λέξεις με την παρακάτω εντολή: ###Code help("keywords") ###Output Here is a list of the Python keywords. Enter any keyword to get more help. False class from or None continue global pass True def if raise and del import return as elif in try assert else is while async except lambda with await finally nonlocal yield break for not ###Markdown Η εντολή help Γενικά με την εντολή `help` καλούμε για βοήθεια και πληροφορίες την Python: ###Code help(print) help(abs) help(max) ###Output Help on built-in function max in module builtins: max(...) max(iterable, *[, default=obj, key=func]) -> value max(arg1, arg2, *args, *[, key=func]) -> value With a single iterable argument, return its biggest item. The default keyword-only argument specifies an object to return if the provided iterable is empty. With two or more arguments, return the largest argument. 
###Markdown Αλλαγή Πεζών Κεφαλαίων (Convert case)Μπορούμε να κάνουμε αλλαγή ανάμεσα σε κεφαλαία και πεζά με τις παρακάτω μεθόδους συμβολοσειρών:`upper()`, `title()`, `lower()`.Αξίζει να σημειώσουμε ότι οι μέθοδοι αυτές δεν έχουν επίδραση στην μεταβλητή που τις καλούμε αλλά πρέπει να επαναεκχωρήσουμε το αποτέλεσμα της μεθόδου στην μεταβλητή με το ίδιο όνομα. ###Code agios="άγιος νικόλαος" print(agios.upper()) print(agios) # ο agios παραμένει "άγιος νικόλαος" print(agios.title()) print('ΑΓΊΑ ΕΛΈΝΗ'.lower()) agios = agios.upper() print(agios) # ο agios μετά την εκχώρηση στην ίδια μεταβλητή γινεται ΆΓΙΟΣ ΝΙΚΌΛΑΟΣ ###Output ΆΓΙΟΣ ΝΙΚΌΛΑΟΣ άγιος νικόλαος Άγιος Νικόλαος αγία ελένη ΆΓΙΟΣ ΝΙΚΌΛΑΟΣ ###Markdown Οι συμβολοσειρές είναι μη μεταβαλλόμενη δομή δεδομένωνΟι συμβολοσειρές αποτελούνται από ακολουθίες χαρακτήρων με σταθερό μέγεθοςκαι μη μεταβαλλόμενα περιεχόμενα. Αυτό σημαίνει ότι δεν είναι δυνατόν να προστίθενται ή να αφαιρούνταιχαρακτήρες, ούτε να τροποποιούνται τα περιεχόμενα του αλφαριθμητικού. Πρόκειται για μια μη μεταβαλλόμενη (immutable) δομή της Python.Η αρίθμηση των χαρακτήρων σε ένα αλφαριθμητικό ξεκινάει από το 0. Έτσι στην συμβολοσειρά `country = Ελλάδα` έχουμε:`country[0]` → Ε (η αρίθμηση ξεκινά από το 0)`country[1]` → λ`country[2]` → λ`country[3]` → ά`country[4]` → δ`country[5]` → αΗ παραπάνω συμβολοσειρά έχει μήκος 6 χαρακτήρες. Μήκος συμβολοσειράςΜέσω της συνάρτησης `len` η Python μας επιστρέφει το μήκος συμβολοσειράς δηλαδή το πλήθος των χαρακτήρων (μαζί με τα κενά) από τους οποιούς αποτελείται. ###Code message = 'Ή τώρα ή ποτέ.' len(message) ###Output _____no_output_____ ###Markdown Η μέθοδος findΗ μέθοδος `find` μας επιτρέπει να αναζητήσουμε μια συμβολοσειρά μέσα σε μια άλλη συμβολοσειρά. Η μέθοδος μας επιστρέφει την τοποθεσία από την ξεκινάει η αναζητούμενη συμβολοσειρά δηλαδή τον δείκτη (index) στην οποία εντοπίζεταιο πρώτος χαρακτηρας της αναζητούμενης συμβολοσειράς μέσα στα περιεχόμενα της αρχικής συμβολοσειράς.Στην παρακάτω συμβολοσειρά θα αναζητήσουμε την λέξη `ποτέ`. ###Code stixos = 'Η Ελλάδα ποτέ δεν πεθαίνει' index = stixos.find('ποτέ') ###Output _____no_output_____ ###Markdown Κανονικά αν πάμε στον χαρακτήρα με ευρετηρίο (index) 9 πρέπει να εντοπίσουμε τον πρώτο χαρακτήρα της συμβολοσειράς που είναι το `π`.Πράγματι: ###Code stixos[index] ###Output _____no_output_____ ###Markdown Αν δεν εντοπιστεί η λέξη που αναζητούμε στην συμβολοσειρά η Python θα επιστρέψει: `-1` ###Code stixos.find('πάντα') ###Output _____no_output_____ ###Markdown Η αναζήτηση είναι case sensitive δηλαδή γίνεται διάκριση ανάμεσα σε πεζά και κεφαλαία. ###Code stixos.find('Ελλάδα') # επιστρέφει τον δείκτη 2 γιατί εντοπίστηκε η λέξη κλειδί stixos.find('ΕΛΛΆΔΑ') # επιστρέφει -1 γιατί δεν εντοπίστηκε η λέξη κλειδί ###Output _____no_output_____ ###Markdown Μια άλλη σημαντική μέθοδος των συμβολοσειρών είναι η μέθοδος `replace` κατά την οποία μπορούμε να αντικαταστήσουμε τα περιεχόμενα μιας συμβολοσειράς. Στην πρώτη παράμετρο ορίζουμε την συμβολοσειρά που θέλουμε να αντικαταστήσουμε με την δεύτερη παράμετρο. ###Code stixos.replace('ποτέ', 'πάντα') ###Output _____no_output_____
DistribuicaoMedicamentos.ipynb
###Markdown Distribution of Covid-19 medications https://opendatasus.saude.gov.br/dataset/distribuicao-de-medicamentos-covid-19 ###Code #import libraries import pandas as pd df = pd.read_excel(r'atualizacao-de-medicamentos.xlsx') df.head() #types of the records df.dtypes df.info() #basic descriptive statistics of the dataframe df.describe() #inspect a few sample rows df.sample(5) #types of medications df['ITEM'].unique() df['ITEM'].nunique() len(df['ITEM']) df['ITEM'].value_counts() #quantity of medications distributed by state (UF) df.groupby(['UF', 'ITEM'])['QUANTIDADE'].sum() #view the string (object) columns df.select_dtypes('object') #view the names of the string columns df.select_dtypes('object').columns #strip whitespace from the text columns for registro in df.select_dtypes('object').columns: df[registro] = df[registro].str.strip() print("Whitespace removed from the text columns") #number of medication records per state (UF) df['UF'].value_counts() #plot the distribution of medications by UF %matplotlib inline df['UF'].value_counts().plot.barh() #analysis of the COVID-19 health program df[df['PROGRAMA DE SAÚDE'] == 'COVID-19']['UF'].value_counts() df[df['PROGRAMA DE SAÚDE'] == 'COVID-19']['UF'].value_counts().plot.barh() #check where the quantity is null df[df['QUANTIDADE'].isnull()] ###Output _____no_output_____
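###Markdown A possible next step, not in the original notebook: the value_counts calls above count rows per state, so aggregating the QUANTIDADE column instead gives the total quantity actually distributed per UF. ###Code
# Total distributed quantity by state (UF), shown as a horizontal bar chart.
totals_by_uf = df.groupby('UF')['QUANTIDADE'].sum().sort_values()
totals_by_uf.plot.barh(figsize=(8, 10), title='Total quantity distributed by UF')
###Output _____no_output_____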
.ipynb_checkpoints/OptionsPricingEvaluation-checkpoint.ipynb
###Markdown Black-Scholes European Option Pricing Script ###Code # File Contains: Python code containing closed-form solutions for the valuation of European Options, # for backward compatibility with Python 2.7 from __future__ import division # import necessary libraries import math import numpy as np from scipy.stats import norm from scipy.stats import mvn # Plotting import matplotlib.pylab as pl ###Output _____no_output_____ ###Markdown Option Pricing Theory: Black-Scholes model Black Scholes genre option models are widely used to value European options. The original “Black Scholes” model was published in 1973 for non-dividend paying stocks. Since that time, a wide variety of extensions to the original Black Scholes model have been created. Modifications of the formula are used to price other financial instruments like dividend paying stocks, commodity futures, and FX forwards. Mathematically, these formulas are nearly identical. The primary difference between these models is whether the asset has a carrying cost (if the asset has a cost or benefit associated with holding it) and how the asset gets present valued. To illustrate this relationship, a “generalized” form of the Black Scholes equation is shown below. The Black Scholes model is based on a number of assumptions about how financial markets operate. Black Scholes style models assume: 1. **Arbitrage Free Markets**. Black Scholes formulas assume that traders try to maximize their personal profits and don't allow arbitrage opportunities (riskless opportunities to make a profit) to persist. 2. **Frictionless, Continuous Markets**. This assumption of frictionless markets assumes that it is possible to buy and sell any amount of the underlying at any time without transaction costs. 3. **Risk Free Rates**. It is possible to borrow and lend money at a risk-free interest rate. 4. **Log-normally Distributed Price Movements**. Prices are log-normally distributed and described by Geometric Brownian Motion. 5. **Constant Volatility**. The Black Scholes genre options formulas assume that volatility is constant across the life of the option contract. In practice, these assumptions are not particularly limiting. The primary limitation imposed by these models is that it is possible to (reasonably) describe the dispersion of prices at some point in the future in a mathematical equation. An important concept of Black Scholes models is that the actual way that the underlying asset drifts over time isn't important to the valuation. Since European options can only be exercised when the contract expires, it is only the distribution of possible prices on that date that matters - the path that the underlying took to that point doesn't affect the value of the option. This is why the primary limitation of the model is being able to describe the dispersion of prices at some point in the future, not that the dispersion process is simplistic. The generalized Black-Scholes formula can be found below (see *Figure 1 – Generalized Black Scholes Formula*). While these formulas may look complicated at first glance, most of the terms can be found as part of an options contract or are prices readily available in the market. The only term that is difficult to calculate is the implied volatility (σ).
Implied volatility is typically calculated using prices of other options that have recently been traded.>*Call Price*>\begin{equation}C = Fe^{(b-r)T} N(D_1) - Xe^{-rT} N(D_2)\end{equation}>*Put Price*>\begin{equation}P = Xe^{-rT} N(-D_2) - Fe^{(b-r)T} N(-D_1)\end{equation}>*with the following intermediate calculations*>\begin{equation}D_1 = \frac{ln\frac{F}{X} + (b+\frac{V^2}{2})T}{V*\sqrt{T}}\end{equation}>\begin{equation}D_2 = D_1 - V\sqrt{T}\end{equation}>*and the following inputs*>| Symbol | Meaning |>|--------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|>| F or S | **Underlying Price**. The price of the underlying asset on the valuation date. S is used commonly used to represent a spot price, F a forward price |>| X | **Strike Price**. The strike, or exercise, price of the option. |>| T | **Time to expiration**. The time to expiration in years. This can be calculated by comparing the time between the expiration date and the valuation date. T = (t_1 - t_0)/365 |>| t_0 | **Valuation Date**. The date on which the option is being valued. For example, it might be today’s date if the option we being valued today. |>| t_1 | **Expiration Date**. The date on which the option must be exercised. |>| V | **Volatility**. The volatility of the underlying security. This factor usually cannot be directly observed in the market. It is most often calculated by looking at the prices for recent option transactions and back-solving a Black Scholes style equation to find the volatility that would result in the observed price. This is commonly abbreviated with the greek letter sigma,σ, although V is used here for consistency with the code below. |>| q | **Continuous Yield**. Used in the Merton model, this is the continuous yield of the underlying security. Option holders are typically not paid dividends or other payments until they exercise the option. As a result, this factor decreases the value of an option. |>| r | **Risk Free Rate**. This is expected return on a risk-free investment. This is commonly a approximated by the yield on a low-risk government bond or the rate that large banks borrow between themselves (LIBOR). The rate depends on tenor of the cash flow. For example, a 10-year risk-free bond is likely to have a different rate than a 20-year risk-free bond.[DE1] |>| rf | **Foreign Risk Free Rate**. Used in the Garman Kohlhagen model, this is the risk free rate of the foreign currency. Each currency will have a risk free rate. |>*Figure 1 - Generalized Black Scholes Formula* Model ImplementationThese functions encapsulate a generic version of the pricing formulas. They are primarily intended to be called by the other functions within this libary. The following functions will have a fixed interface so that they can be called directly for academic applicaitons that use the cost-of-carry (b) notation: _GBS() A generalized European option model _GBS_ImpliedVol() A generalized European option implied vol calculator The other functions in this libary are called by the four main functions and are not expected to be interface safe (the implementation and interface may change over time). 
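###Markdown To make the role of the cost-of-carry term b concrete before the library code that follows, here is a small standalone sketch of the call-price formula from Figure 1. The cost-of-carry conventions listed in the comments are the standard ones for this family of models (they are not stated explicitly in this notebook), and the numeric inputs are purely illustrative. ###Code
import math
from scipy.stats import norm

def generalized_bs_call(fs, x, t, r, b, v):
    d1 = (math.log(fs / x) + (b + v * v / 2) * t) / (v * math.sqrt(t))
    d2 = d1 - v * math.sqrt(t)
    return fs * math.exp((b - r) * t) * norm.cdf(d1) - x * math.exp(-r * t) * norm.cdf(d2)

# Common cost-of-carry choices:
#   b = r      -> Black-Scholes (non-dividend-paying stock)
#   b = r - q  -> Merton (continuous dividend yield q)
#   b = 0      -> Black-76 (options on futures/forwards)
#   b = r - rf -> Garman-Kohlhagen (FX options, foreign rate rf)
fs, x, t, r, v = 60, 65, 0.25, 0.08, 0.30
for label, b in [('stock (b=r)', r), ('dividend stock, q=2%', r - 0.02),
                 ('futures (b=0)', 0.0), ('FX, rf=3%', r - 0.03)]:
    print(label, round(generalized_bs_call(fs, x, t, r, b, v), 4))
###Output _____no_output_____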
Implementation for European Options ###Code # The primary class for calculating Generalized Black Scholes option prices and deltas # It is not intended to be part of this module's public interface # Inputs: option_type = "p" or "c", fs = price of underlying, x = strike, t = time to expiration, r = risk free rate # b = cost of carry, v = implied volatility # Outputs: value, delta, gamma, theta, vega, rho def _gbs(option_type, fs, x, t, r, b, v): _debug("Debugging Information: _gbs()") # ----------- # Create preliminary calculations t__sqrt = math.sqrt(t) d1 = (math.log(fs / x) + (b + (v * v) / 2) * t) / (v * t__sqrt) d2 = d1 - v * t__sqrt if option_type == "c": # it's a call _debug(" Call Option") value = fs * math.exp((b - r) * t) * norm.cdf(d1) - x * math.exp(-r * t) * norm.cdf(d2) delta = math.exp((b - r) * t) * norm.cdf(d1) gamma = math.exp((b - r) * t) * norm.pdf(d1) / (fs * v * t__sqrt) theta = -(fs * v * math.exp((b - r) * t) * norm.pdf(d1)) / (2 * t__sqrt) - (b - r) * fs * math.exp( (b - r) * t) * norm.cdf(d1) - r * x * math.exp(-r * t) * norm.cdf(d2) vega = math.exp((b - r) * t) * fs * t__sqrt * norm.pdf(d1) rho = x * t * math.exp(-r * t) * norm.cdf(d2) else: # it's a put _debug(" Put Option") value = x * math.exp(-r * t) * norm.cdf(-d2) - (fs * math.exp((b - r) * t) * norm.cdf(-d1)) delta = -math.exp((b - r) * t) * norm.cdf(-d1) gamma = math.exp((b - r) * t) * norm.pdf(d1) / (fs * v * t__sqrt) theta = -(fs * v * math.exp((b - r) * t) * norm.pdf(d1)) / (2 * t__sqrt) + (b - r) * fs * math.exp( (b - r) * t) * norm.cdf(-d1) + r * x * math.exp(-r * t) * norm.cdf(-d2) vega = math.exp((b - r) * t) * fs * t__sqrt * norm.pdf(d1) rho = -x * t * math.exp(-r * t) * norm.cdf(-d2) _debug(" d1= {0}\n d2 = {1}".format(d1, d2)) _debug(" delta = {0}\n gamma = {1}\n theta = {2}\n vega = {3}\n rho={4}".format(delta, gamma, theta, vega, rho)) return value, delta, gamma, theta, vega, rho ###Output _____no_output_____ ###Markdown Implementation: Implied VolatilityThis section implements implied volatility calculations. It contains implementation of a **Newton-Raphson Search.** This is a fast implied volatility search that can be used when there is a reliable estimate of Vega (i.e., European options) ###Code # ---------- # Find the Implied Volatility of an European (GBS) Option given a price # using Newton-Raphson method for greater speed since Vega is available #def _gbs_implied_vol(option_type, fs, x, t, r, b, cp, precision=.00001, max_steps=100): # return _newton_implied_vol(_gbs, option_type, x, fs, t, b, r, cp, precision, max_steps) ###Output _____no_output_____ ###Markdown Public Interface for valuation functionsThis section encapsulates the functions that user will call to value certain options. These function primarily figure out the cost-of-carry term (b) and then call the generic version of the function (like _GBS() or _American). All of these functions return an array containg the premium and the greeks. ###Code # --------------------------- # Black Scholes: stock Options (no dividend yield) # Inputs: # option_type = "p" or "c" # fs = price of underlying # x = strike # t = time to expiration # v = implied volatility # r = risk free rate # q = dividend payment # b = cost of carry # Outputs: # value = price of the option # delta = first derivative of value with respect to price of underlying # gamma = second derivative of value w.r.t price of underlying # theta = first derivative of value w.r.t. time to expiration # vega = first derivative of value w.r.t. 
implied volatility # rho = first derivative of value w.r.t. risk free rates def BlackScholes(option_type, fs, x, t, r, v): b = r return _gbs(option_type, fs, x, t, r, b, v) ###Output _____no_output_____ ###Markdown Public Interface for implied Volatility Functions ###Code # Inputs: # option_type = "p" or "c" # fs = price of underlying # x = strike # t = time to expiration # v = implied volatility # r = risk free rate # q = dividend payment # b = cost of carry # Outputs: # value = price of the option # delta = first derivative of value with respect to price of underlying # gamma = second derivative of value w.r.t price of underlying # theta = first derivative of value w.r.t. time to expiration # vega = first derivative of value w.r.t. implied volatility # rho = first derivative of value w.r.t. risk free rates #def euro_implied_vol(option_type, fs, x, t, r, q, cp): # b = r - q # return _gbs_implied_vol(option_type, fs, x, t, r, b, cp) ###Output _____no_output_____ ###Markdown Implementation: Helper FunctionsThese functions aren't part of the main code but serve as utility function mostly used for debugging ###Code # --------------------------- # Helper Function for Debugging # Prints a message if running code from this module and _DEBUG is set to true # otherwise, do nothing # Developer can toggle _DEBUG to True for more messages # normally this is set to False _DEBUG = False def _debug(debug_input): if (__name__ is "__main__") and (_DEBUG is True): print(debug_input) ###Output _____no_output_____ ###Markdown Real Calculations of Options Prices ###Code bs = BlackScholes('c', fs=60, x=65, t=0.25, r=0.08, v=0.30) optionPrice = bs[0] optionPrice ###Output _____no_output_____ ###Markdown Option prices charts ###Code stockPrices = np.arange(50, 100, 1) prices = stockPrices * 0 stockPrice = 60 strike = 65 timeToExpiration = 0.25 impliedVolatility = 0.30 riskFreeRate = 0.05 pl.title('Stock Option Price') for i in range(len(stockPrices)): prices[i] = BlackScholes('c', stockPrices[i], strike, t = timeToExpiration, r = riskFreeRate, v = impliedVolatility)[0] pl.plot(stockPrices, prices, label = 'Option Price') pl.xlabel("Stock Price") pl.ylabel("Option Price") pl.grid(True) pl.show() timeToExpiration = np.arange(0.1, 1, 0.05) prices = timeToExpiration * 0 stockPrice = 60 strike = 65 #timeToExpiration = 0.25 impliedVolatility = 0.30 riskFreeRate = 0.05 pl.title('Stock Option Price') for i in range(len(prices)): prices[i] = BlackScholes('c', stockPrice, strike, t = timeToExpiration[i], r = riskFreeRate, v = impliedVolatility)[0] pl.plot(timeToExpiration, prices, label = 'Option Price') pl.xlabel("Time to Expiry") pl.ylabel("Option Price") pl.grid(True) pl.show() strikes = np.arange(50, 80, 1) prices = strikes * 0 stockPrice = 60 strike = 65 timeToExpiration = 0.25 impliedVolatility = 0.30 riskFreeRate = 0.05 pl.title('Stock Option Price') for i in range(len(prices)): prices[i] = BlackScholes('c', stockPrice, strikes[i], t = timeToExpiration, r = riskFreeRate, v = impliedVolatility)[0] pl.plot(strikes, prices, label = 'Option Price') pl.xlabel("Striking Price") pl.ylabel("Option Price") pl.grid(True) pl.show() strikes = np.arange(50, 80, 1) prices = strikes * 0 stockPrice = 60 strike = 65 timeToExpiration = 0.25 impliedVolatility = 0.30 riskFreeRate = 0.05 pl.title('Stock Put Option Price') for i in range(len(prices)): prices[i] = BlackScholes('p', stockPrice, strikes[i], t = timeToExpiration, r = riskFreeRate, v = impliedVolatility)[0] pl.plot(strikes, prices, label = 'Option Price') pl.xlabel("Striking 
Price") pl.ylabel("Option Price") pl.grid(True) pl.show() ###Output _____no_output_____
tips/.ipynb_checkpoints/RealNVP_tutorial-checkpoint.ipynb
###Markdown RealNVP RealNVP builds on NICE as prior work and proposes a higher-performance flow-based model. Its first contribution is the coupling layer, a transformation whose Jacobian (the central quantity in flow models) can be computed cheaply. Coupling Layer The coupling layer performs the following transformation. Forward\begin{eqnarray*}x_{1:d}, x_{d+1:D} &=& split(x) \\y_{1:d}&=&x_{1:d} \\y_{d+1:D}&=&s \odot x_{d+1:D} + t \\y &=& concat(y_{1:d}, y_{d+1:D}) \\where && \log{s}, t = NN(x_{1:d}) \\\end{eqnarray*} Inverse\begin{eqnarray*}y_{1:d}, y_{d+1:D} &=& split(y) \\x_{1:d}&=&y_{1:d} \\x_{d+1:D}&=& (y_{d+1:D} - t) / s \\x &=& concat(x_{1:d},x_{d+1:D}) \\where && \log{s}, t = NN(y_{1:d}) \\\end{eqnarray*} Forward Jacobian First we compute the Jacobian matrix.\begin{eqnarray*}\frac{\partial y}{\partial x} = \begin{bmatrix}\frac{\partial y_{1:d}}{\partial x_{1:d}} & \frac{\partial y_{1:d}}{\partial x_{d+1:D}} \\\frac{\partial y_{d+1:D}}{\partial x_{1:d}} & \frac{\partial y_{d+1:D}}{\partial x_{d+1:D}}\end{bmatrix}=\begin{bmatrix}\mathbb{I}_d & 0 \\\frac{\partial y_{d+1:D}}{\partial x_{1:d}} & diag(s)\end{bmatrix}\end{eqnarray*}The Jacobian determinant is the determinant of this matrix; since the matrix is triangular, its determinant is the product of the diagonal entries:\begin{eqnarray*}\det\left(\cfrac{\partial y}{\partial x}\right) = \prod_i{s_i}\end{eqnarray*}The determinant enters the change-of-variables formula through its absolute value, and for numerical convenience it is expressed in log form:\begin{eqnarray*}\log{\left|\det\cfrac{\partial y}{\partial x}\right|} = \sum_i{\log{s_i}}\end{eqnarray*} Inverse Jacobian The Jacobian matrix of the inverse transformation is obtained the same way.\begin{eqnarray*}\frac{\partial x}{\partial y} = \begin{bmatrix}\frac{\partial x_{1:d}}{\partial y_{1:d}} & \frac{\partial x_{1:d}}{\partial y_{d+1:D}} \\\frac{\partial x_{d+1:D}}{\partial y_{1:d}} & \frac{\partial x_{d+1:D}}{\partial y_{d+1:D}}\end{bmatrix}=\begin{bmatrix}\mathbb{I}_d & 0 \\\frac{\partial x_{d+1:D}}{\partial y_{1:d}} & diag(1/s)\end{bmatrix}\end{eqnarray*}and likewise\begin{eqnarray*}\log{\left|\det\cfrac{\partial x}{\partial y}\right|} = - \sum_i{\log{s_i}}\end{eqnarray*} Problem setup We learn a transformation between a 2-D double moon distribution and a 2-D multivariate standard normal distribution (a bivariate standard normal). ref: [RealNVP in PyTorch](https://qiita.com/cross32768/items/87036cc35c5367050b04) Solving it with TensorFlow ###Code import numpy as np import matplotlib.pyplot as plt import tensorflow as tf import tensorflow_probability as tfp tfd = tfp.distributions tfb = tfp.bijectors print('tensorflow: ', tf.__version__) print('tensorflow-probability: ', tfp.__version__) ###Output tensorflow: 2.0.0-rc0 tensorflow-probability: 0.8.0-rc0 ###Markdown Creating the bivariate normal distribution ###Code mvn = tfd.MultivariateNormalDiag(loc=[0., 0.], scale_diag=[1., 1.]) mvn_samples = mvn.sample(5000) plt.figure(figsize=(5,5)) plt.xlim([-4, 4]) plt.ylim([-4, 4]) plt.scatter(mvn_samples[:, 0], mvn_samples[:, 1], s=15) ###Output _____no_output_____ ###Markdown Creating the double moon distribution ###Code def gen_double_moon_samples(num_samples): assert num_samples % 2 == 0, "[Requirement] num_samples % 2 == 0" x1_1 = tfd.Normal(loc=4.0, scale=4.0) x1_1_samples = x1_1.sample(num_samples // 2) x1_2 = tfd.Normal( loc=0.25 * (x1_1_samples - 4) ** 2 - 20, scale=tf.ones_like(num_samples / 2) * 2 ) x1_2_samples = x1_2.sample() x2_1 = tfd.Normal(loc=4.0, scale=4.0) x2_1_samples = x2_1.sample(num_samples // 2) x2_2 = tfd.Normal( loc=-0.25 * (x2_1_samples - 4) ** 2 + 20, scale=tf.ones_like(num_samples / 2) * 2, ) x2_2_samples = x2_2.sample() x1_samples = tf.stack([x1_1_samples * 0.2, x1_2_samples * 0.1], axis=1) x2_samples = tf.stack([x2_1_samples * 0.2 - 2, x2_2_samples * 0.1], axis=1) x_samples = tf.concat([x1_samples, x2_samples], axis=0) return x_samples base_samples = gen_double_moon_samples(50000) base_samples = tf.random.shuffle(base_samples) plt.figure(figsize=(5, 5)) plt.xlim([-4, 4]) plt.ylim([-4, 4]) plt.scatter(base_samples[:, 0], base_samples[:, 1], s=15) ###Output _____no_output_____ ###Markdown Creating the dataset ###Code SHUFFLE_BUFFER_SIZE =
10000 BATCH_SIZE = 10000 train_dataset = ( tf.data.Dataset.from_tensor_slices(base_samples) .shuffle(SHUFFLE_BUFFER_SIZE) .batch(BATCH_SIZE) ) for i in train_dataset.take(1): print('data samples: ', len(i)) plt.figure(figsize=(5, 5)) plt.xlim([-4, 4]) plt.ylim([-4, 4]) plt.scatter(i[:, 0], i[:, 1], s=15) ###Output data samples: 10000 ###Markdown NN レイヤーの作成 ###Code from tensorflow.keras.layers import Layer, Dense, BatchNormalization, ReLU from tensorflow.keras import Model class NN(Layer): def __init__(self, input_shape, n_hidden=[512, 512], activation="relu", name="nn"): super(NN, self).__init__(name="nn") layer_list = [] for i, hidden in enumerate(n_hidden): layer_list.append(Dense(hidden, activation=activation, name='dense_{}_1'.format(i))) layer_list.append(Dense(hidden, activation=activation, name='dense_{}_2'.format(i))) self.layer_list = layer_list self.log_s_layer = Dense(input_shape, activation="tanh", name='log_s') self.t_layer = Dense(input_shape, name='t') def call(self, x): y = x for layer in self.layer_list: y = layer(y) log_s = self.log_s_layer(y) t = self.t_layer(y) return log_s, t def nn_test(): nn = NN(1, [512, 512]) x = tf.keras.Input([1]) log_s, t = nn(x) # Non trainable params: -> Batch Normalization's params tf.keras.Model(x, [log_s, t], name="nn_test").summary() nn_test() nn = NN(1, [512, 512]) x = tf.random.normal([100, 1]) with tf.GradientTape() as t: t.watch(x) y = nn(x) loss = - tf.reduce_mean(y - x) optimizer = tf.optimizers.Adam(learning_rate=0.0001) grads = t.gradient(loss, nn.trainable_variables) optimizer.apply_gradients(zip(grads, nn.trainable_variables)) print(loss) ###Output tf.Tensor(0.097440206, shape=(), dtype=float32) ###Markdown RealNVP レイヤーの作成 ###Code class RealNVP(tfb.Bijector): def __init__( self, input_shape, n_hidden=[512, 512], # this bijector do vector wise quantities. forward_min_event_ndims=1, validate_args: bool = False, name="real_nvp", ): """ Args: input_shape: input_shape, ex. 
[28, 28, 3] (image) [2] (x-y vector) """ super(RealNVP, self).__init__( validate_args=validate_args, forward_min_event_ndims=forward_min_event_ndims, name=name ) assert input_shape[-1] % 2 == 0 self.input_shape = input_shape nn_layer = NN(input_shape[-1] // 2, n_hidden) nn_input_shape = input_shape.copy() nn_input_shape[-1] = input_shape[-1] // 2 x = tf.keras.Input(nn_input_shape) log_s, t = nn_layer(x) self.nn = Model(x, [log_s, t], name="nn") def _forward(self, x): x_a, x_b = tf.split(x, 2, axis=-1) y_b = x_b log_s, t = self.nn(x_b) s = tf.exp(log_s) y_a = s * x_a + t y = tf.concat([y_a, y_b], axis=-1) return y def _inverse(self, y): y_a, y_b = tf.split(y, 2, axis=-1) x_b = y_b log_s, t = self.nn(y_b) s = tf.exp(log_s) x_a = (y_a - t) / s x = tf.concat([x_a, x_b], axis=-1) return x def _forward_log_det_jacobian(self, x): _, x_b = tf.split(x, 2, axis=-1) log_s, t = self.nn(x_b) return log_s def realnvp_test(): realnvp = RealNVP(input_shape=[2], n_hidden=[512, 512]) x = tf.keras.Input([2]) y = realnvp.forward(x) print('trainable_variables :', len(realnvp.trainable_variables)) Model(x, y, name="realnvp_test").summary() realnvp_test() ###Output trainable_variables : 12 Model: "realnvp_test" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input_9 (InputLayer) [(None, 2)] 0 __________________________________________________________________________________________________ tf_op_layer_split_1 (TensorFlow [(None, 1), (None, 1 0 input_9[0][0] __________________________________________________________________________________________________ nn (Model) [(None, 1), (None, 1 790018 tf_op_layer_split_1[0][1] __________________________________________________________________________________________________ tf_op_layer_Exp_1 (TensorFlowOp [(None, 1)] 0 nn[1][0] __________________________________________________________________________________________________ tf_op_layer_mul_1 (TensorFlowOp [(None, 1)] 0 tf_op_layer_Exp_1[0][0] tf_op_layer_split_1[0][0] __________________________________________________________________________________________________ tf_op_layer_add_1 (TensorFlowOp [(None, 1)] 0 tf_op_layer_mul_1[0][0] nn[1][1] __________________________________________________________________________________________________ tf_op_layer_concat_1 (TensorFlo [(None, 2)] 0 tf_op_layer_add_1[0][0] tf_op_layer_split_1[0][1] ================================================================================================== Total params: 790,018 Trainable params: 790,018 Non-trainable params: 0 __________________________________________________________________________________________________ ###Markdown TransformedDistribution の作成 ###Code num_realnvp = 4 bijector_chain = [] for i in range(num_realnvp): bijector_chain.append(RealNVP(input_shape=[2], n_hidden=[256, 256])) bijector_chain.append(tfp.bijectors.Permute([1, 0])) flow = tfd.TransformedDistribution( distribution=mvn, bijector=tfb.Chain(list(reversed(bijector_chain))) ) print('trainable_variables: ', len(flow.bijector.trainable_variables)) ###Output trainable_variables: 48 ###Markdown 学習前のサンプリング結果 ###Code samples = flow.sample(10000) plt.figure(figsize=(5, 5)) plt.xlim([-4, 4]) plt.ylim([-4, 4]) plt.scatter(samples[:, 0], samples[:, 1], s=15) for targets in train_dataset.take(1): targets = targets #print(flow.bijector.inverse(targets)) 
print(tf.reduce_sum(flow.bijector.inverse_log_det_jacobian(targets, event_ndims=1))) res = flow.bijector.inverse(targets) print(tf.reduce_mean(flow.log_prob(res))) print(flow.log_prob(res).shape) targets.shape plt.scatter(targets[:,0], targets[:,1]) plt.scatter(res[:, 0], res[:,1]) ###Output tf.Tensor(-35.19041, shape=(), dtype=float32) tf.Tensor(-4.382125, shape=(), dtype=float32) (10000, 10000) ###Markdown 学習 ###Code !rm -r checkpoints @tf.function def loss(targets): return - tf.reduce_mean(flow.log_prob(targets)) optimizer = tf.optimizers.Adam(learning_rate=0.001) log = tf.summary.create_file_writer('checkpoints') avg_loss = tf.keras.metrics.Mean(name='loss', dtype=tf.float32) n_epochs = 200 for epoch in range(n_epochs): for targets in train_dataset: with tf.GradientTape() as tape: log_prob_loss = loss(targets) grads = tape.gradient(log_prob_loss, flow.trainable_variables) optimizer.apply_gradients(zip(grads, flow.trainable_variables)) avg_loss.update_state(log_prob_loss) if tf.equal(optimizer.iterations % 1000, 0): with log.as_default(): tf.summary.scalar("loss", avg_loss.result(), step=optimizer.iterations) print( "Step {} Loss {:.6f}".format( optimizer.iterations, avg_loss.result() ) ) avg_loss.reset_states() ###Output Step <tf.Variable 'Adam/iter:0' shape=() dtype=int64, numpy=1000> Loss 1.837895 ###Markdown 推論 順方向 ###Code base = flow.distribution.sample(10000) targets = flow.sample(10000) plt.scatter(base[:, 0], base[:, 1], s=15) plt.scatter(targets[:, 0], targets[:, 1], s=15) ###Output _____no_output_____ ###Markdown 逆方向 ###Code targets = gen_double_moon_samples(10000) base = flow.bijector.inverse(targets) targets.shape plt.scatter(base[:, 0], base[:,1], s=15) plt.scatter(targets[:,0], targets[:,1], s=15) ###Output _____no_output_____
doc2vec_training.ipynb
###Markdown Doc2vec TrainingInstall packages to environment. ###Code !pip install -U -q PyDrive !pip install gensim # For Gensim !pip install testfixtures !pip install scikit-learn ###Output _____no_output_____ ###Markdown Run the following snippet to load article json files from Google Drive. Utility function save_to_drive can be used to save files to Google Drive. Useful for training models in Google Collab. ###Code # from pydrive.auth import GoogleAuth # from pydrive.drive import GoogleDrive # from google.colab import auth # from oauth2client.client import GoogleCredentials # from google.colab import auth # import io # from googleapiclient.http import MediaIoBaseDownload # from googleapiclient.http import MediaFileUpload # # auth.authenticate_user() # from googleapiclient.discovery import build # drive_service = build('drive', 'v3') # # # # files_to_load = list() # # files_to_load.append({ # "file_name": "all_articles.json", # "id": "19ErkUdKHwJO46T3u_LnoUhU-Ol4Om90W", # "is_binary": 0 # }) # # # def download_from_drive(file_id): # # request = drive_service.files().get_media(fileId=file_id) # downloaded = io.BytesIO() # downloader = MediaIoBaseDownload(downloaded, request) # done = False # while done is False: # # _ is a placeholder for a progress object that we ignore. # # (Our file is small, so we skip reporting progress.) # _, done = downloader.next_chunk() # # downloaded.seek(0) # read = downloaded.read() # return read # # # def save_to_drive(filename): # # file_metadata = { # 'name': filename, # 'mimeType': 'text/plain' # } # media = MediaFileUpload(filename, # mimetype='text/plain', # resumable=True) # created = drive_service.files().create(body=file_metadata, # media_body=media, # fields='id').execute() # print('File ID: {}'.format(created.get('id'))) # # # Download all the Google Drive files # for file in files_to_load: # print(file) # # # load document # doc = download_from_drive(file["id"]) # # text_file = str() # if file["is_binary"]: # text_file = open(file["file_name"], "wb") # # else: # doc = doc.decode("utf-8") # text_file = open(file["file_name"], "w") # # text_file.write(doc) # text_file.close() # # print("loaded: " + file["file_name"]) print("Files loaded") ###Output _____no_output_____ ###Markdown Finding optimal Doc2vec model for IMDB article sentiment predictionDownload the [Large Movie Review Dataset](http://ai.stanford.edu/~amaas/data/sentiment/).Load dataset of articles. 
###Code from gensim.utils import to_unicode from nltk.tokenize import word_tokenize import collections import tarfile import re import random from random import shuffle random.seed(10) number_of_articles = 100000 SentimentDocument = collections.namedtuple('SentimentDocument', 'words tags split sentiment') def create_sentiment_document(name, text, index): # Split the name of a movie review file into train/test, and +/- sentiment _, split, sentiment_str, _ = name.split('/') sentiment = {'pos': 1.0, 'neg': 0.0, 'unsup': None}[sentiment_str] if sentiment is None: split = 'extra' tokens = word_tokenize(to_unicode(text)) return SentimentDocument(tokens, [index], split, sentiment) def extract_documents(imdb_file): index = 0 with tarfile.open(imdb_file, mode='r:gz') as tar: for member in tar.getmembers(): if re.match(r'aclImdb/(train|test)/(pos|neg|unsup)/\d+_\d+.txt$', member.name): member_bytes = tar.extractfile(member).read() member_text = member_bytes.decode('utf-8', errors='replace') assert member_text.count('\n') == 0 yield create_sentiment_document(member.name, member_text, index) index += 1 print("Loading documents ...") alldocs = list(extract_documents('aclImdb_v1.tar.gz')) shuffle(alldocs) alldocs = alldocs[:number_of_articles] print(f"Total docs {len(alldocs)}") ###Output _____no_output_____ ###Markdown Based on the [“Distributed Representations of Sentences and Documents”](http://cs.stanford.edu/~quocle/paragraph_vector.pdf) paper and [Radim Řehůřek's](https://radimrehurek.com/gensim/auto_examples/howtos/run_doc2vec_imdb.html) reproduction of the experiment, the `Doc2Vec(dbow,d100,n5,mc2,t8)` model produces the lowest error rate in sentiment classification (10.3%).This model is a concatenation of the Distributed Bag of Words model and the DM/mean model.To analyze whether further preprocessing of the text improves the quality of the embeddings, we will train a model for text that went through a preprocessing pipeline containing:- Frequency based summarization, lematization, stopword removal, and contraction expansion- Lematization, stopword removal- Contraction expansion, stopword removal- Raw textTo evaluate the quality of the embeddings, we will check the accuracy of the sentiment analysis model trained to predict the sentiment based on the document embedding.Define all of the preprocessing functions.
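###Markdown Before defining them, a brief aside: the model name `Doc2Vec(dbow,d100,n5,mc2,t8)` used in that reproduction simply encodes its hyperparameters — distributed bag of words, 100-dimensional vectors, 5 negative samples, a minimum word count of 2 and 8 worker threads. The cell below is only an illustrative sketch of how that string maps onto gensim's `Doc2Vec` constructor; the epoch count is an assumption here, since the model name does not encode it. ###Code
from gensim.models.doc2vec import Doc2Vec

# Illustrative only: decode "Doc2Vec(dbow,d100,n5,mc2,t8)" into constructor arguments
reference_dbow = Doc2Vec(
    dm=0,             # "dbow": distributed bag of words (ignores word order)
    vector_size=100,  # "d100": 100-dimensional document vectors
    negative=5,       # "n5": 5 negative samples per positive example
    hs=0,             # negative sampling instead of hierarchical softmax
    min_count=2,      # "mc2": ignore words appearing fewer than 2 times
    workers=8,        # "t8": 8 worker threads
    epochs=20,        # assumption; not part of the encoded name
)
print(reference_dbow)
###Output _____no_output_____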
###Code from string import punctuation import nltk from nltk.tokenize import word_tokenize from nltk.stem import WordNetLemmatizer from nltk.corpus import stopwords nltk.download('stopwords') def substitute_contraction(word): """ Substitutes the contraction with expanded form Substates non-ascii quotes :param word: :return: The substituted contraction or the original token """ # Replace unicode commas punctuation = {0x2018: 0x27, 0x2019: 0x27, 0x201C: 0x22, 0x201D: 0x22} w = word.translate(punctuation) contractions = get_contraction_dict() if w in contractions.keys(): subbed = contractions[w] return subbed else: return w def get_contraction_dict(): return { "ain't": "is not", "aren't": "are not", "can't": "cannot", "can't've": "cannot have", "'cause": "because", "could've": "could have", "couldn't": "could not", "couldn't've": "could not have", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hadn't've": "had not have", "hasn't": "has not", "haven't": "have not", "he'd": "he would", "he'd've": "he would have", "he'll": "he will", "he'll've": "he will have", "he's": "he has", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I shall have", "I'm": "I am", "I've": "I have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it shall have", "it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have", "mightn't": "might not", "mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have", "o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have", "so's": "so is", "that'd": "that had", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would", "y'all'd've": "you all would have", "y'all're": "you all are", "y'all've": "you all have", "you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have" } class LanguageProcessor: def __init__(self): """ All of the 
natural processing functionality """ self.lemmatizer = WordNetLemmatizer() self.punctuation_list = [c for c in punctuation] # TODO: make this a set self.STOPWORDS = stopwords.words() # TODO: Get more complete list and read it from file MORESTOP = ['will', 'thing', 'n\'t', '\'\'', '\'s', '``', '\'re', '\'', 'mr', 'mr.', '--', '...', '..', '->', '\'.', '\' \'', ' .', '’', '“', '”', "", "\n"] self.STOPWORDS.extend(MORESTOP) def substitute_contractions(self, words): """ Loop through words and sub contractions :param text: :return: """ subbed = [] for word in words: subbed.append(substitute_contraction(word)) return subbed def get_non_stopwords(self, words, substitute_contractions=True, stem=True): """ Returns a list of lowercase non-stopwords in the text. non-stopwords are anything that is not punctuation or stopwords Numerical values are NOT FILTERED OUT :param text: :param stem: :return: """ if substitute_contractions: words = self.substitute_contractions(words) non_stop_words = [] # Loop through tokens for word in words: # Slowing things down token = self.remove_punctuation(word.lower()) if token not in self.STOPWORDS: # Check if token contains punctuation if token not in self.punctuation_list: if stem: non_stop_words.append(self.get_word_lemma(token)) else: non_stop_words.append(token) return non_stop_words def get_word_lemma(self, word): """ Helper to allows customization to stemming process, like checking for trailing e's :param word: :return: """ lema = self.lemmatizer.lemmatize(word) return lema def remove_punctuation(self, text): """ Helper function to remove all non-acsii charcters :param text: :return: """ return ''.join([i if ord(i) < 128 else '' for i in text]) def is_text_token(self, token): """ Checks if not punc or numerical, or non-acsii :param token: :return: """ if len(token) == 1: if ord(token) < 128 and token not in punctuation and not token.isdigit(): return True else: return False else: if not token.isdigit(): return True else: return False # Perform each type of preprocesing corpora = ["lem+stop+con", "stop+con", "stop", "none"] processed_texts = {cor:[] for cor in corpora} processor = LanguageProcessor() # # Reload processed text # import pickle # # Loading files from pickle # processed_texts = pickle.load(open("processed_text.pkl", "rb")) ###Output [nltk_data] Downloading package stopwords to [nltk_data] /Users/milanarezina/nltk_data... [nltk_data] Package stopwords is already up-to-date! ###Markdown Prepare the documents for each type of preprocessing.- lem = lematization- stop = stopword removal- con = expanded contractions ###Code print("Pre-processing Text...") print("Processing lem+stop+con") for doc in alldocs: words = processor.get_non_stopwords(doc.words, substitute_contractions=True, stem=True) doc2 = SentimentDocument(words, doc.tags, doc.split, doc.sentiment) processed_texts["lem+stop+con"].append(doc2) print("Processing stop+con") for doc in alldocs: words = processor.get_non_stopwords(doc.words, substitute_contractions=True, stem=False) doc2 = SentimentDocument(words, doc.tags, doc.split, doc.sentiment) processed_texts["stop+con"].append(doc2) print("Processing stop") for doc in alldocs: words = processor.get_non_stopwords(doc.words, substitute_contractions=False, stem=False) doc2 = SentimentDocument(words, doc.tags, doc.split, doc.sentiment) processed_texts["stop"].append(doc2) processed_texts["none"] = alldocs print("Completed Pre-processing") ###Output _____no_output_____ ###Markdown Setup for evaluating models. 
###Code from sklearn.linear_model import LogisticRegression import numpy as np from gensim.models.doc2vec import Doc2Vec from gensim.test.test_doc2vec import ConcatenatedDoc2Vec import multiprocessing # Keep track of the error rates for each model error_rates = {} def logistic_regression_predictor(X, y): """ Return the predictor after fitting a model on embeddings and sentiment class :param X: Embeddings :param y: Sentiment class :return: """ clf = LogisticRegression(random_state=0, verbose=True).fit(X, y) return clf def model_error_rate(doc2vec_model, train, test): """ Test error rate of regression model that uses the doc2vec embeddings to predict sentiment class :param doc2vec_model: :param train: :param test: :return: """ train_y = [doc.sentiment for doc in train_docs] train_x = [doc2vec_model.docvecs[doc.tags[0]] for doc in train_docs] test_x = [doc2vec_model.docvecs[doc.tags[0]] for doc in test_docs] test_y = [doc.sentiment for doc in test] print("Sample Data", train_x[:1], train_y[:1]) print(f"""Train / test data breakdown: Train positive sentiment samples {train_y.count(1.0)} out of {len(train_y)} Test positive sentiment samples {test_y.count(1.0)} out of {len(test_y)}""") predictor = logistic_regression_predictor(train_x, train_y) test_predictions = predictor.predict(test_x) corrects = sum(np.rint(test_predictions) == test_y) errors = len(test_predictions) - corrects error_rate = float(errors) / len(test_predictions) return error_rate, errors, len(test_predictions), predictor # Common Doc2vec configuration common_kwargs = dict( vector_size=100, epochs=20, min_count=2, sample=0, workers=multiprocessing.cpu_count(), negative=5, hs=0, ) # Doc2vec models for each type of preproccesing models_by_corpora = {} ###Output _____no_output_____ ###Markdown Train and evalute a PV-DBOW (paragraph vector distributed bag of words) model for each of the types of preprocessing. ###Code for model in corpora: # TODO: Concatenated doc2vec model perform slightly better, however it is missing # tags property in it's implementation. # 2nd best model used models_by_corpora[model] = Doc2Vec(dm=0, **common_kwargs) print("Training Doc2Vec models ...") # Evaluate Doc2vec for each type of preproccesing for corpus in corpora: model = models_by_corpora[corpus] docs = processed_texts[corpus] print("-"*20) print(f"Training model on: {corpus} documents ...") # Split into train / test sets train_docs = [doc for doc in docs if doc.split == 'train'] test_docs = [doc for doc in docs if doc.split == 'test'] model.build_vocab(docs) model.train(docs, total_examples=len(docs), epochs=model.epochs) err_rate, err_count, test_count, predictor = model_error_rate(model, train_docs, test_docs) error_rates[str(model)] = err_rate print(f"Error rate: {err_rate} for model {str(model)} trained on {corpus}") ###Output Training Doc2Vec models ... 
-------------------- Model trained on: lem+stop+con Sample Data [array([ 0.19048014, 0.18574472, 0.19675452, 0.34363356, -0.05429327, -0.34967002, -0.9629098 , 0.5046224 , 0.3553929 , -0.02000841, 0.42241308, 0.31711754, -0.1987363 , -0.11315008, -0.06220058, -0.0768768 , 0.22785848, 0.2949146 , -0.39035028, 0.26064464, -0.11486331, 0.13627096, 0.38031614, 0.03657763, 0.4365639 , -0.5859031 , -0.7461953 , 0.551983 , -0.7311374 , 0.46559998, -0.38593325, -0.44615978, -0.30371758, -0.05886084, 0.11023851, -0.15133354, 0.37857378, -0.35254526, -0.05427878, -0.0111264 , -0.5489169 , 0.41485846, 0.50775194, 0.36012176, -0.171678 , 0.24442673, -0.64688766, -0.28453165, -0.38347286, 0.4489134 , -0.25286022, 0.00631496, 0.04119195, -0.15534724, 0.68830603, -0.23483275, 0.14449677, -0.4099465 , 0.12440899, 0.09973439, -0.0247636 , 0.02926677, -0.54103404, 0.4465653 , -0.11672283, 0.64713603, -0.64009285, -0.18964896, 0.1096048 , -0.38112378, 0.26582056, 0.0032023 , -0.12880178, 0.65239453, 0.16594951, -0.16200961, -0.60751504, -0.16466144, 0.23449329, 0.22097771, 0.4549512 , -0.19687998, -0.09566788, -0.24440518, 0.73698527, 0.81102914, -0.0922519 , 0.53137237, 0.67719966, -0.64504015, 0.19332719, 0.03491812, -0.28868482, 0.43951693, -0.5848366 , 0.28153417, 0.11109039, 0.36863866, 0.01903613, -0.09364004], dtype=float32)] [1.0] Train / test data breakdown: Train positive sentiment samples 0 out of 25000 Test positive sentiment samples 0 out of 25000 ###Markdown For the PV-DBOW model no performing any preproccesing produces the highest accuracy.Train and evalute a PV-DM (paragraph vector distributed memory) model for each of the types of preprocessing. ###Code for model in corpora: # TODO: Concatenated doc2vec model perform slightly better, however it is missing # tags property in it's implementation. # 2nd best model used models_by_corpora[model + "+dm"] = Doc2Vec(dm=1, **common_kwargs) print("Training Doc2Vec models ...") # Evaluate Doc2vec for each type of preproccesing for corpus in corpora: model = models_by_corpora[corpus + "+dm"] docs = processed_texts[corpus] print("-"*20) print(f"Training model on: {corpus} documents ...") # Split into train / test sets train_docs = [doc for doc in docs if doc.split == 'train'] test_docs = [doc for doc in docs if doc.split == 'test'] model.build_vocab(docs) model.train(docs, total_examples=len(docs), epochs=model.epochs) err_rate, err_count, test_count, predictor = model_error_rate(model, train_docs, test_docs) error_rates[str(model)] = err_rate print(f"Error rate: {err_rate} for model {str(model)} trained on {corpus}") ###Output Training Doc2Vec models ... -------------------- Training model on: lem+stop+con documents ... 
Sample Data [array([ 4.8964587e-01, -5.8797348e-02, -2.6077956e-01, 4.5133162e-01, -1.5007243e-01, 3.4983036e-01, -1.4829528e-01, 1.1516626e+00, -1.4036475e-01, -4.4391423e-01, 2.6714194e-01, 2.4020796e-01, 1.8720028e-01, -3.7481549e-01, 1.1214490e+00, 4.3689597e-01, -6.3363835e-02, -9.4921246e-02, -2.2934680e-01, -3.0742434e-01, 1.7611554e-01, -1.1198944e-03, -1.1979154e-01, -1.4307345e-01, 3.8967717e-01, 7.8051817e-01, 4.0083644e-01, 3.1800258e-01, 7.9142708e-01, -1.3788615e-01, -6.3872203e-02, -1.8768595e-01, -3.5029519e-01, -3.0670562e-01, -4.6106091e-01, 5.6179827e-01, 2.3946853e-01, 4.8041072e-01, -4.1785136e-01, 2.2026943e-01, 9.6610361e-01, -4.8661163e-01, -2.3579098e-01, -7.3455429e-01, 1.0253838e+00, 5.8691138e-01, 6.9736248e-01, -9.4024286e-02, -6.7753464e-01, 8.4717132e-02, -3.2288906e-01, -7.3024648e-01, 3.4105018e-01, 1.7702815e-01, 3.6370891e-01, -3.6651492e-01, 6.7028689e-01, -6.2619513e-01, 6.6040194e-01, -2.7155226e-01, -4.5966882e-01, -2.7968913e-01, 7.3509771e-01, 1.1075566e+00, -1.0244410e+00, -8.6179145e-02, 3.3973897e-01, -4.9158314e-01, -1.1459063e+00, -8.8120483e-02, 3.0649221e-01, 2.3029993e-01, 6.6662967e-01, -1.8320113e-02, -5.7732028e-01, 8.4852487e-01, -2.4082957e-01, 1.8521760e-01, -1.3982904e-01, 4.1279918e-01, 8.7412250e-01, -1.5827331e-01, -4.5995748e-01, -1.0220734e+00, -5.9550828e-01, -5.1611763e-01, -1.5298973e-01, -1.1561557e+00, -2.3052068e-01, 3.2421118e-01, 8.4572953e-01, 8.6495228e-02, 1.7998239e-01, -5.6005919e-01, 3.4209199e-02, -3.1384557e-01, 6.2168914e-01, -3.9444017e-01, 1.3564380e-01, 6.4791024e-01], dtype=float32)] [1.0] Train / test data breakdown: Train positive sentiment samples 12500 out of 25000 Test positive sentiment samples 12500 out of 25000
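###Markdown Once a Doc2Vec model and its logistic regression classifier have been trained, scoring an unseen review follows the same embed-then-classify pattern. The cell below is a minimal sketch: it assumes `models_by_corpora` and the last `predictor` returned by `model_error_rate` (which corresponds to the raw-text PV-DM model) are still available, and the review text itself is made up. ###Code
from nltk.tokenize import word_tokenize

# Hypothetical unseen review; tokenize it the same way the training corpus was tokenized
new_review = "A surprisingly moving film with terrific performances."
tokens = word_tokenize(new_review)

# Infer an embedding with the trained Doc2Vec model, then classify that embedding
doc2vec_model = models_by_corpora["none+dm"]
embedding = doc2vec_model.infer_vector(tokens)
sentiment_prob = predictor.predict_proba([embedding])[0, 1]
print(f"P(positive sentiment) = {sentiment_prob:.3f}")
###Output _____no_output_____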
SOSS/PSFs/soss_webbpsf_and_convolution_kernels.ipynb
###Markdown This notebook has 2 purposes: 1) Generates WebbPSF psfs 10x oversampled - At steps of 0.05 microns between 0.5 and 5.2 microns (or user defined wavelengths). - It is for the NIRISS SOSS mode. - Note that the output is in the native detector coordinate system (not DMS) 2) Generate the spectral convolution kernels - At the same wavelengths as the PSFs - At various oversampling of pixels (1 to 10 oversampling) 1) Generate WebbPSF PSFsRun this Jupyter notebook - or call the function in another script ###Code import soss_generate_webbpsf as sosspsf # Call the PSF generation function to generate a monochromatic PSF at the desired wavelengths. # If no wavelengths are specified, PSFs will be generated for all wavelengths from 0.5-5.2µm. # The PSFs can either be returned directly (e.g. the example below), or written to disk #by toggling the save_to_disk keywork True/False. mono_psf = sosspsf.loicpsf(wavelist=[1e-6], save_to_disk=False) import matplotlib.pyplot as plt import numpy as np # Display the PSF we generated. plt.imshow(np.log10(mono_psf[0])) ###Output _____no_output_____ ###Markdown 2) Generate the spectral convolution kernel ###Code import soss_generate_convolution_kernel as sossker ###Output _____no_output_____ ###Markdown To generate the matrices of kernels that extend to a semi_width of 7 native pixels where the first argument is the output path. You need to create the output directory manually first. ###Code sossker.generate_kernel('/Users/albert/NIRISS/SOSSpipeline/convolution_kernels/', psf_path = '/Users/albert/NIRISS/SOSSpipeline/webbpsf_psfs/', verbose=False,kernel_semi_width=5) ###Output /Users/albert/NIRISS/SOSSpipeline/webbpsf_psfs/ 0 [0. 0.00489702 0.01195705 0.05821312 0.25878236 0.39727762 0.16759176 0.0565195 0.03055499 0.01081957 0.00338701] 1 [0. 0.00286759 0.01176933 0.05726486 0.2549394 0.4353044 0.14714649 0.04229379 0.03504945 0.01091439 0.0024503 ] 2 [0. 0.00735438 0.00959247 0.06321221 0.24406136 0.45299971 0.13494669 0.03666811 0.02953399 0.019807 0.00182407] 3 [0. 0.00946932 0.00809061 0.06446443 0.22412019 0.47876552 0.12197278 0.0423255 0.02308966 0.02246274 0.00523924] 4 [0. 0.00655181 0.0052943 0.06049408 0.21475311 0.50373213 0.11957097 0.04257051 0.02223096 0.01598665 0.00881548] 5 [2.50140514e-04 0.00000000e+00 5.93765092e-03 5.27160288e-02 2.05965614e-01 5.14815143e-01 1.37784168e-01 4.14340613e-02 2.24978021e-02 9.29789495e-03 9.30149691e-03] 6 [0.00234198 0. 0.01033506 0.04552525 0.21122931 0.4993198 0.15345349 0.04283099 0.01879918 0.00827535 0.00788959] 7 [0.00000000e+00 3.13742265e-04 1.30696389e-02 3.70634239e-02 1.86435619e-01 5.09026824e-01 1.83805775e-01 4.66707647e-02 1.27063274e-02 8.51887445e-03 2.38900967e-03] 8 [0.00098729 0.00350165 0.01771111 0.02958606 0.18453101 0.49422513 0.19401439 0.05069405 0.01126732 0.01348198 0. ] 9 [0.00372539 0.00590994 0.01998115 0.02122576 0.19726639 0.47901444 0.19370919 0.05326526 0.01117611 0.01472638 0. ] 10 [0.00201538 0.00557067 0.01929712 0.01187424 0.22236153 0.46526496 0.19782487 0.05265181 0.01257584 0.01056358 0. ] 11 [0. 0.00638105 0.01890212 0.00673037 0.23241511 0.46121005 0.19766817 0.05028628 0.01756016 0.0075543 0.00129239] 12 [0. 0.00744196 0.01891313 0.00561915 0.23207299 0.46388504 0.19707011 0.04642688 0.02077572 0.00582617 0.00196886] 13 [0. 0.00700394 0.01771327 0.0054043 0.23469673 0.47014039 0.19680242 0.0416652 0.01981032 0.00365048 0.00311296] 14 [0. 0.00592157 0.01524692 0.00550511 0.23494351 0.48443342 0.19212649 0.03712469 0.01814394 0.00168204 0.00487232] 15 [0. 
0.0058899 0.01233409 0.00630645 0.23197283 0.49863717 0.18563594 0.03375107 0.01757498 0.00230507 0.00559252] 16 [0. 0.00784782 0.00936906 0.00789869 0.2283222 0.50860599 0.17802111 0.03242383 0.0174921 0.00572131 0.0042979 ] 17 [0. 0.01130389 0.0064005 0.009516 0.22674618 0.51675915 0.1674723 0.03340957 0.01764007 0.00894473 0.00180759] 18 [0.00000000e+00 1.46894388e-02 3.63590897e-03 1.02908753e-02 2.29767202e-01 5.21921023e-01 1.55615799e-01 3.59884334e-02 1.79846194e-02 1.01041027e-02 2.59758616e-06] 19 [0.00057453 0.01690478 0.00183155 0.01054726 0.23528346 0.52117884 0.14466676 0.03988766 0.0186307 0.01049444 0. ] 20 [8.12356764e-04 1.64607964e-02 0.00000000e+00 9.96473548e-03 2.41994677e-01 5.22311604e-01 1.35008073e-01 4.45733379e-02 1.85372365e-02 1.01179492e-02 2.19234001e-04] 21 [0.00269603 0.01517329 0. 0.01095355 0.24407689 0.52052169 0.12364966 0.0512747 0.01920317 0.01040803 0.00204298] 22 [0.00432339 0.01239155 0. 0.01215263 0.24085249 0.52862063 0.11030315 0.05971068 0.01913194 0.00918964 0.0033239 ] 23 [0.00564237 0.00892448 0. 0.01336399 0.23397571 0.54341888 0.09679168 0.06950059 0.01802339 0.0066724 0.00368651] 24 [0.00671904 0.00496221 0. 0.01407397 0.23194157 0.55580874 0.08600714 0.07818495 0.01571813 0.00370216 0.00288208] 25 [0.00735257 0.00077883 0. 0.01396687 0.23461449 0.56451857 0.07819931 0.08576314 0.0126288 0.0009574 0.00122002] 26 [0.01009521 0. 0.00308036 0.01581392 0.23213431 0.55563005 0.07349487 0.0934212 0.01202012 0.00195189 0.00235808] 27 [0.01236207 0. 0.00667845 0.01730611 0.2343416 0.53975921 0.07044598 0.09912626 0.01212708 0.00398771 0.00386552] 28 [0.01320835 0. 0.00997792 0.0179282 0.24076919 0.52383789 0.06791361 0.10320256 0.0122842 0.00603885 0.00483924] 29 [0.01270014 0. 0.0129119 0.01798989 0.24777289 0.51103364 0.06586911 0.1061106 0.01244029 0.00804798 0.00512357] 30 [0.01122141 0. 0.01558032 0.01775746 0.25426529 0.5016541 0.06439632 0.10770263 0.01271966 0.00998878 0.00471404] 31 [0.00919996 0. 0.01808997 0.01740833 0.25979179 0.49516133 0.06361257 0.10793112 0.01331237 0.01173663 0.00375593] 32 [0.00696987 0. 0.02052062 0.01703416 0.26431658 0.49066647 0.06355347 0.10694138 0.01441564 0.01307739 0.00250442] 33 [0.00472993 0. 0.02291011 0.01666909 0.2679644 0.48735117 0.06415236 0.10501316 0.01619019 0.01378764 0.00123195] 34 [2.56585453e-03 0.00000000e+00 2.52546010e-02 1.63125191e-02 2.70832420e-01 4.84703222e-01 6.52980091e-02 1.02433397e-01 1.87489485e-02 1.37086748e-02 1.42355341e-04] 35 [0.00114311 0.00066431 0.02798812 0.01648518 0.27174898 0.47961722 0.06703041 0.09933641 0.0226439 0.01334236 0. ] 36 [0.00000000e+00 1.53145619e-03 3.07033057e-02 1.67698935e-02 2.71706283e-01 4.74125655e-01 6.91261650e-02 9.58922961e-02 2.74568512e-02 1.23426169e-02 3.45477632e-04] 37 [0. 0.00347018 0.03389776 0.01784432 0.26901373 0.46465569 0.0717681 0.09207962 0.03365842 0.01162219 0.00199 ] 38 [0. 0.00538125 0.0367586 0.01875643 0.26597639 0.45606573 0.07473283 0.08792048 0.04027391 0.01042095 0.00371342] 39 [0. 0.00729579 0.03926391 0.0195001 0.26265718 0.44828807 0.07811767 0.08334529 0.0471239 0.00898221 0.00542589] 40 [0. 0.00922233 0.0413852 0.02007106 0.25913939 0.44128036 0.08201916 0.07833835 0.05398011 0.00754524 0.00701879] 41 [0. 0.01115448 0.04309832 0.020475 0.25555585 0.43498188 0.08647473 0.07296609 0.06058879 0.00632974 0.00837512] 42 [0. 0.01308094 0.04439073 0.02073272 0.25208689 0.42929764 0.09142605 0.0673768 0.06670522 0.00551854 0.00938446] 43 [0. 
0.0149917 0.04526392 0.02087722 0.24891181 0.42411459 0.09673286 0.0617671 0.07213647 0.00524215 0.00996218] 44 [0. 0.01688407 0.04573179 0.02095518 0.24619968 0.4193083 0.10218245 0.05634186 0.07676344 0.00556904 0.01006419] 45 [0. 0.01875981 0.04581711 0.0210171 0.24406124 0.41477155 0.10754917 0.05126813 0.08055407 0.00650892 0.00969291] 46 [0. 0.02084757 0.0454266 0.02137315 0.24474844 0.40970421 0.11085121 0.04723494 0.0831397 0.00779003 0.00888416] 47 [0. 0.02270933 0.04479493 0.02157166 0.24390001 0.40553633 0.11541918 0.04310323 0.08547907 0.00975037 0.00773589] 48 [0. 0.02455854 0.04386011 0.02187553 0.24367021 0.40147087 0.11943023 0.03946323 0.08722045 0.01214485 0.00630599] 49 [0. 0.02639592 0.04263519 0.02231522 0.24408825 0.3974937 0.12276885 0.03628136 0.08846362 0.01488016 0.00467772] 50 [0. 0.02821161 0.04112686 0.022909 0.24514106 0.39362997 0.12539756 0.03348501 0.08930482 0.01786816 0.00292594] 51 [0. 0.03010538 0.03917735 0.02390784 0.24797646 0.38967461 0.12634549 0.03119621 0.08976038 0.02074963 0.00110666] 52 [0.00074142 0.03233039 0.03744823 0.02549716 0.24937681 0.3837353 0.12697001 0.02944652 0.09003261 0.02442157 0. ] 53 [0.00252451 0.03502659 0.03617745 0.02786457 0.24889801 0.37493493 0.12705518 0.02846979 0.09009781 0.02895116 0. ] 54 [0.00422226 0.03746546 0.03464536 0.03028511 0.24900125 0.36691621 0.12670766 0.02752731 0.08993411 0.03329526 0. ] 55 [0.00583571 0.03961367 0.03284703 0.03275049 0.24965583 0.35973489 0.12605842 0.02653608 0.0895437 0.03742418 0. ] 56 [0.00736985 0.04143798 0.03078205 0.03525496 0.25082437 0.35342819 0.12523591 0.02543175 0.08891348 0.04132145 0. ] 57 [0.00882879 0.04290449 0.02845402 0.03779269 0.25246956 0.34802531 0.12436234 0.02416547 0.08801933 0.044978 0. ] 58 [0.01047379 0.04416435 0.02559917 0.04125929 0.25613812 0.34258925 0.12185137 0.02301483 0.08703802 0.04787181 0. ] 59 [0.01185571 0.04482086 0.02276664 0.0440232 0.25873471 0.33885697 0.12100306 0.02136593 0.08558915 0.05098377 0. ] 60 [0.01313387 0.04500996 0.01972825 0.04675894 0.26163836 0.3361454 0.12049117 0.01945489 0.08376768 0.05387146 0. ] 61 [0.01428615 0.044712 0.01648437 0.04946968 0.26489524 0.33447292 0.12035242 0.01727721 0.08153796 0.05651205 0. ] 62 [0.01528839 0.04391128 0.01303409 0.05216583 0.26855599 0.33384856 0.12061057 0.01483531 0.07887022 0.05887977 0. ]
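###Markdown The printed rows above are the individual convolution kernels (2 × 5 + 1 = 11 taps for a semi-width of 5 native pixels), one per wavelength step. As a rough illustration of how such a kernel would be used, the sketch below convolves a toy spectrum with a single kernel; the tap values are copied from index 0 of the output above, and in practice the kernel matched to each wavelength would be applied rather than one fixed kernel. ###Code
import numpy as np

# Kernel taps copied from index 0 of the output above (semi-width 5 -> 11 taps)
kernel = np.array([0., 0.00489702, 0.01195705, 0.05821312, 0.25878236,
                   0.39727762, 0.16759176, 0.0565195, 0.03055499,
                   0.01081957, 0.00338701])

# Toy spectrum: a narrow emission line on a flat continuum
spectrum = np.ones(101)
spectrum[50] += 10.0

# Spectral convolution at fixed resolution (output has the same length as the input)
convolved = np.convolve(spectrum, kernel, mode='same')
print(convolved[45:56])
###Output _____no_output_____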
notebooks/2. Segmentation.ipynb
###Markdown Segmentation ###Code %reload_ext autoreload %autoreload 2 %matplotlib inline import os import yaml from usal_echo.d04_segmentation.create_seg_view import create_seg_view from usal_echo.d04_segmentation.segment_view import run_segment from usal_echo.d04_segmentation.generate_masks import generate_masks from usal_echo.d04_segmentation.evaluate_masks import evaluate_masks with open("./conf/local/path_parameters.yml") as f: paths = yaml.safe_load(f) dcm_dir = os.path.expanduser(paths["dcm_dir"]) img_dir = os.path.expanduser(paths["img_dir"]) segmentation_dir = os.path.expanduser(paths["segment_dir"]) model_dir = os.path.expanduser(paths["model_dir"]) classification_model = paths["classification_model"] ###Output _____no_output_____ ###Markdown Update the name of the image/dicom directory. ###Code dir_name = "specify name of directory with images" dcm_dir_path = os.path.join(dcm_dir, dir_name) img_dir_path = os.path.join(img_dir, dir_name) run_segment(dcm_dir_path, model_dir, img_dir_path, classification_model) create_seg_view() generate_masks(dcm_dir_path) evaluate_masks() ###Output _____no_output_____
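###Markdown Since `dir_name` above is a placeholder, a small guard like the sketch below, placed before `run_segment` on future runs, can catch a mistyped directory name before the slow segmentation step starts. It is only an illustrative convenience and assumes the path variables defined in the cells above. ###Code
# Fail early if any of the expected directories is missing
for path in (dcm_dir_path, img_dir_path, model_dir):
    if not os.path.isdir(path):
        raise FileNotFoundError(f"Expected directory not found: {path}")
print("All input directories found.")
###Output _____no_output_____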
07_DijkstrasAlgorithm.ipynb
###Markdown MSDS 432, Assignment 7 - Dijkstra's Algorithm Author: Brandon Moretz ###Code %matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt from time import time import matplotlib.style as style from collections import deque, defaultdict np.random.seed(323) # static seed so results are reproducible style.use('seaborn-poster') # sets the size of the charts style.use('ggplot') # set the theme for matplotlib %%javascript IPython.OutputArea.prototype._should_scroll = function(lines) { return false; } ###Output _____no_output_____ ###Markdown Helper Utilities ###Code infinity = float("inf") ###Output _____no_output_____ ###Markdown Sample Data ###Code %%html <img src="img/RoadTrip_NYC_to_L.A.jpg",width=60,height=60> ###Output _____no_output_____ ###Markdown Initialize each of our nodes in the graph ###Code graph = {} graph['NYC'] = {} graph['DC'] = {} graph['Atlanta'] = {} graph['New Orleans'] = {} graph['Dallas'] = {} graph['Indianapolis'] = {} graph['Kansas City'] = {} graph['Denver'] = {} graph['Pittsburgh'] = {} graph['Cincinatti'] = {} graph['St Louis'] = {} graph['OK City'] = {} graph['Alburqurque'] = {} graph['Salt Lake City'] = {} graph['Phoenix'] = {} graph['Las Vegas'] = {} graph['San Diego'] = {} graph['Los Angeles'] = {} ###Output _____no_output_____ ###Markdown Neighbor relationships ###Code graph['NYC']['DC'] = 2 graph['NYC']['Pittsburgh'] = 7 graph['NYC']['Indianapolis'] = 11 graph['DC']['Atlanta'] = 2 graph['Atlanta']['New Orleans'] = 2 graph['New Orleans']['Dallas'] = 2 graph['Dallas']['Alburqurque'] = 2 graph['Alburqurque']['Phoenix'] = 2 graph['Phoenix']['Las Vegas'] = 2 graph['Phoenix']['San Diego'] = 5 graph['San Diego']['Los Angeles'] = 2 graph['Las Vegas']['San Diego'] = 2 graph['Las Vegas']['Los Angeles'] = 5 graph['Indianapolis']['Kansas City'] = 8 graph['Kansas City']['Denver'] = 7 graph['Denver']['Salt Lake City'] = 6 graph['Salt Lake City']['Las Vegas'] = 9 graph['Pittsburgh']['Cincinatti'] = 6 graph['Cincinatti']['St Louis'] = 8 graph['St Louis']['OK City'] = 7 graph['OK City']['Alburqurque'] = 9 ###Output _____no_output_____ ###Markdown Cost table ###Code costs = {} costs['NYC'] = 0 costs['DC'] = 2 costs['Atlanta'] = 2 costs['New Orleans'] = 2 costs['Dallas'] = 2 costs['Indianapolis'] = 11 costs['Kansas City'] = 7 costs['Denver'] = infinity costs['Pittsburgh'] = 7 costs['Cincinatti'] = infinity costs['St Louis'] = infinity costs['OK City'] = infinity costs['Alburqurque'] = 2 costs['Salt Lake City'] = 9 costs['Phoenix'] = 2 costs['Las Vegas'] = 2 costs['San Diego'] = 5 costs['Los Angeles'] = infinity ###Output _____no_output_____ ###Markdown Initialize Parents ###Code parents = {} parents['DC'] = 'NYC' parents['Atlanta'] = 'DC' parents['New Orleans'] = 'Atlanta' parents['Dallas'] = 'New Orleans' parents['Indianapolis'] = 'NYC' parents['Kansas City'] = 'Indianapolis' parents['Denver'] = 'Kansas City' parents['Pittsburgh'] = 'NYC' parents['Cincinatti'] = 'Pittsburgh' parents['St Louis'] = 'Cincinatti' parents['OK City'] = 'St Louis' parents['Alburqurque'] = 'Dallas' parents['Salt Lake City'] = 'Denver' parents['Phoenix'] = 'Alburqurque' parents['Las Vegas'] = 'Phoenix' parents['San Diego'] = 'Phoenix' parents['Los Angeles'] = 'San Diego' processed = [] ###Output _____no_output_____ ###Markdown Search Functions ###Code def BFS(graph, start, finish): # keep track of processed nodes processed = [] # keep track of all the paths to be checked queue = [[start]] # return path if start is goal if start == finish: return "Arrived." 
# We need to check all possible paths while queue: # Take the first path path = queue.pop(0) # Find the last possible destination on this path node = path[-1] if node not in processed: neighbors = graph[node] # go through all neighbor nodes, make a new path and # push it into the queue for neighbor in neighbors: newPath = list(path) newPath.append(neighbor) queue.append(newPath) # return path if neighbour is goal if neighbor == finish: cost = getRouteCost(newPath) return (newPath, cost) # mark node as processed processed.append(node) # in case there's no path between the 2 nodes return "There is no route." # function to find the lowest cost node, check neighboring costs, and updating # the costs and parents to reflect the cheapest path def findLowestCostNode(costs): lowestCost = float("inf") lowestCostNode = None for node in costs: cost = costs[node] if cost < lowestCost and node not in processed: lowestCost = cost lowestCostNode = node return lowestCostNode # implementation of Dijkstra's Algorithm for weighted graph traversal def dijkstra(graph, costs, dest): totalCost = 0 path = [] node = findLowestCostNode(costs) # Find lowest cost, unprocessed node while node is not None and node != dest: # While loop ends when all nodes have been processed path.append(node) cost = costs[node] totalCost += cost neighbors = graph[node] for n in neighbors.keys(): # Go through all neighors of node in question newCost = cost + neighbors[n] # Find cost to get to node through neighbor if costs[n] > newCost: # If it is cheaper this way costs[n] = newCost # update the cost to reflect this parents[n] = node # This node is now the parent of the neighbor processed.append(node) # mark the node as processed node = findLowestCostNode(costs) # Process the next node and loop path.append(node) return (path, totalCost) def getRouteCost(path): cost = 0 for index in range(len(path)-1): cost += graph[path[index]][path[index+1]] return cost ###Output _____no_output_____ ###Markdown Benchmark ###Code bfsPath = BFS(graph, 'NYC', 'Los Angeles') djkPath = dijkstra(graph, costs, 'Los Angeles') ###Output _____no_output_____ ###Markdown Breadth-First Search ###Code print("The shortest route is: {0}, total cost: {1}".format(bfsPath[0], bfsPath[1])) ###Output The shortest route is: ['NYC', 'Indianapolis', 'Kansas City', 'Denver', 'Salt Lake City', 'Las Vegas', 'Los Angeles'], total cost: 46 ###Markdown Dijkstra's ###Code print("The cheapest route is: {0}, total cost: {1}".format(djkPath[0], djkPath[1])) ###Output The cheapest route is: ['NYC', 'DC', 'Atlanta', 'New Orleans', 'Dallas', 'Alburqurque', 'Phoenix', 'Las Vegas', 'San Diego', 'Los Angeles'], total cost: 18 ###Markdown Results ###Code # Create the output metrics DataFrame results = pd.DataFrame(columns = ['Method', 'Length', 'Cost']) results.loc[0] = ['BFS', len(bfsPath[0]), bfsPath[1]] results.loc[1] = ['Dijkstra', len(djkPath[0]), djkPath[1]] results ###Output _____no_output_____ ###Markdown Summary ###Code results.plot.bar(x='Method', y='Cost', align='center', alpha=0.5) plt.ylabel('Total Travel Time') plt.title('Search Time') plt.show() results.plot.bar(x='Method', y='Length', align='center', alpha=0.5) plt.ylabel('Number of Cities Visited') plt.title('Search Time') plt.show() ###Output _____no_output_____
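###Markdown One detail worth making explicit: after Dijkstra's algorithm runs, the cheapest route can also be read back out of the `parents` table by walking from the destination to the start, and its travel time recomputed with the `getRouteCost` helper defined above. The cell below is an illustrative sketch that assumes `parents` still reflects the state left by the `dijkstra` call. ###Code
def reconstruct_route(parents, start, dest):
    # Walk backwards from the destination to the start using the parents table
    route = [dest]
    node = dest
    while node != start:
        node = parents[node]
        route.append(node)
    route.reverse()
    return route

route = reconstruct_route(parents, 'NYC', 'Los Angeles')
print("Route via parents table: {0}, total cost: {1}".format(route, getRouteCost(route)))
###Output _____no_output_____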
Model backlog/Train XGBM/13-melanoma-5fold-xgbm-basic-features.ipynb
###Markdown Dependencies ###Code import warnings, json, re, math from melanoma_utility_scripts import * from kaggle_datasets import KaggleDatasets from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import KFold, RandomizedSearchCV, GridSearchCV from xgboost import XGBClassifier SEED = 42 seed_everything(SEED) warnings.filterwarnings("ignore") ###Output _____no_output_____ ###Markdown Model parameters ###Code config = { "N_FOLDS": 5, "N_USED_FOLDS": 5, "DATASET_PATH": 'melanoma-256x256' } with open('config.json', 'w') as json_file: json.dump(json.loads(json.dumps(config)), json_file) config ###Output _____no_output_____ ###Markdown Load data ###Code database_base_path = '/kaggle/input/siim-isic-melanoma-classification/' train = pd.read_csv(f"/kaggle/input/{config['DATASET_PATH']}/train.csv") test = pd.read_csv(database_base_path + 'test.csv') print('Train samples: %d' % len(train)) display(train.head()) print(f'Test samples: {len(test)}') display(test.head()) ###Output Train samples: 33126 ###Markdown Missing values ###Code # age_approx (mean) train['age_approx'].fillna(train['age_approx'].mean(), inplace=True) test['age_approx'].fillna(train['age_approx'].mean(), inplace=True) # anatom_site_general_challenge (NaN) train['anatom_site_general_challenge'].fillna('NaN', inplace=True) test['anatom_site_general_challenge'].fillna('NaN', inplace=True) # sex (mode) train['sex'].fillna(train['sex'].mode()[0], inplace=True) test['sex'].fillna(train['sex'].mode()[0], inplace=True) ###Output _____no_output_____ ###Markdown Feature engineering ###Code ### Label ecoding enc = LabelEncoder() train['sex_enc'] = enc.fit_transform(train['sex'].astype('str')) test['sex_enc'] = enc.transform(test['sex'].astype('str')) ### One-hot ecoding # train = pd.concat([train, pd.get_dummies(train['sex'], prefix='sex_enc', drop_first=True)], axis=1) # test = pd.concat([test, pd.get_dummies(test['sex'], prefix='sex_enc', drop_first=True)], axis=1) ### Mean ecoding # Sex train['sex_mean'] = train['sex'].map(train.groupby(['sex'])['target'].mean()) test['sex_mean'] = test['sex'].map(train.groupby(['sex'])['target'].mean()) # # External features # train_img_ft = pd.read_csv('../input/landscape/TrainSuperTab.csv') # test_img_ft = pd.read_csv('../input/landscape/TestSuperTab.csv') # ext_fts = ['V1', 'V2', 'V3', 'V4','V5', 'V6', 'V7', 'V8', 'V9', 'V10', 'V11', 'V12', # 'V13', 'V14', 'V15', 'V16', 'V17', 'V18', 'V19', 'V20', 'V21', 'V22', 'V23', 'V24', 'V25', # 'V26', 'V27', 'V28', 'V29', 'V30', 'V31', 'V32', 'V33', 'V34', 'V35', 'V36', 'V37'] # for ft in ext_fts: # train[ft] = train_img_ft[ft] # test[ft] = test_img_ft[ft] print('Train set') display(train.head()) print('Test set') display(test.head()) ###Output Train set ###Markdown Model ###Code features = ['age_approx', 'sex_mean'] ohe_features = [col for col in train.columns if 'enc' in col] features += ohe_features # External features # features += ext_fts print(features) # Hyperparameter grid param_grid = { 'max_depth': list(range(2, 12, 2)), 'learning_rate': list(np.logspace(np.log10(0.005), np.log10(0.5), base=10, num=1000)), 'reg_alpha': list(np.linspace(0, 1)), 'reg_lambda': list(np.linspace(0, 1)), 'colsample_bytree': list(np.linspace(0.3, 1, 10)), 'subsample': list(np.linspace(0.5, 1, 100)), 'scale_pos_weight': list(np.linspace(1, (len(train[train['target'] == 0]) / len(train[train['target'] == 1])), 10)), } skf = KFold(n_splits=config['N_USED_FOLDS'], shuffle=True, random_state=SEED) def get_idxs(): for fold,(idxT, idxV) in 
enumerate(skf.split(np.arange(15))): x_train = train[train['tfrecord'].isin(idxT)] x_valid = train[~train['tfrecord'].isin(idxT)] yield x_train.index, x_valid.index # Model model = XGBClassifier(n_estimators=300, random_state=SEED) grid_search = RandomizedSearchCV(param_distributions=param_grid, estimator=model, scoring='roc_auc', cv=iter(get_idxs()), n_jobs=-1, n_iter=100, verbose=1) result = grid_search.fit(train[features], train['target']) print("Best: %f using %s" % (result.best_score_, result.best_params_)) means = result.cv_results_['mean_test_score'] stds = result.cv_results_['std_test_score'] params = result.cv_results_['params'] for mean, stdev, param in zip(means, stds, params): print("%f (%f) with: %r" % (mean, stdev, param)) params = result.best_params_ ###Output Fitting 5 folds for each of 100 candidates, totalling 500 fits ###Markdown Training ###Code skf = KFold(n_splits=config['N_USED_FOLDS'], shuffle=True, random_state=SEED) test['target'] = 0 model_list = [] for fold,(idxT, idxV) in enumerate(skf.split(np.arange(15))): print(f'\nFOLD: {fold+1}') print(f'TRAIN: {idxT} VALID: {idxV}') train[f'fold_{fold+1}'] = train.apply(lambda x: 'train' if x['tfrecord'] in idxT else 'validation', axis=1) x_train = train[train['tfrecord'].isin(idxT)] y_train = x_train['target'] x_valid = train[~train['tfrecord'].isin(idxT)] y_valid = x_valid['target'] model = XGBClassifier(**params, random_state=SEED) model.fit(x_train[features], y_train, eval_set=[(x_valid[features], y_valid)], eval_metric='auc', verbose=0) model_list.append(model) # Evaludation preds = model.predict_proba(train[features])[:, 1] train[f'pred_fold_{fold+1}'] = preds # Inference preds = model.predict_proba(test[features])[:, 1] test[f'pred_fold_{fold+1}'] = preds test['target'] += preds / config['N_USED_FOLDS'] ###Output FOLD: 1 TRAIN: [ 1 2 3 4 5 6 7 8 10 12 13 14] VALID: [ 0 9 11] FOLD: 2 TRAIN: [ 0 1 2 3 4 6 7 9 10 11 12 14] VALID: [ 5 8 13] FOLD: 3 TRAIN: [ 0 3 4 5 6 7 8 9 10 11 12 13] VALID: [ 1 2 14] FOLD: 4 TRAIN: [ 0 1 2 3 5 6 8 9 11 12 13 14] VALID: [ 4 7 10] FOLD: 5 TRAIN: [ 0 1 2 4 5 7 8 9 10 11 13 14] VALID: [ 3 6 12] ###Markdown Model evaluation ###Code def func(x): if x['fold_1'] == 'validation': return x['pred_fold_1'] elif x['fold_2'] == 'validation': return x['pred_fold_2'] elif x['fold_3'] == 'validation': return x['pred_fold_3'] elif x['fold_4'] == 'validation': return x['pred_fold_4'] elif x['fold_5'] == 'validation': return x['pred_fold_5'] train['pred'] = train.apply(lambda x: func(x), axis=1) auc_oof = roc_auc_score(train['target'], train['pred']) print(f'Overall OOF AUC = {auc_oof:.3f}') df_oof = train[['image_name', 'target', 'pred']] df_oof.to_csv('oof.csv', index=False) display(df_oof.head()) display(df_oof.describe().T) ###Output Overall OOF AUC = 0.660 ###Markdown Feature importance ###Code for n_fold, model in enumerate(model_list): print(f'Fold: {n_fold + 1}') feature_importance = model.get_booster().get_score(importance_type='weight') keys = list(feature_importance.keys()) values = list(feature_importance.values()) importance = pd.DataFrame(data=values, index=keys, columns=['score']).sort_values(by='score', ascending=False) plt.figure(figsize=(16, 8)) sns.barplot(x=importance.score.iloc[:20], y=importance.index[:20], orient='h', palette='Reds_r') plt.show() ###Output Fold: 1 ###Markdown Model evaluation ###Code display(evaluate_model(train, config['N_USED_FOLDS']).style.applymap(color_map)) display(evaluate_model_Subset(train, config['N_USED_FOLDS']).style.applymap(color_map)) ###Output 
_____no_output_____ ###Markdown Adversarial Validation ###Code ### Adversarial set adv_train = train.copy() adv_test = test.copy() adv_train['dataset'] = 1 adv_test['dataset'] = 0 x_adv = pd.concat([adv_train, adv_test], axis=0) y_adv = x_adv['dataset'] ### Adversarial model model_adv = XGBClassifier(**params, random_state=SEED) model_adv.fit(x_adv[features], y_adv, eval_metric='auc', verbose=0) ### Preds preds = model_adv.predict_proba(x_adv[features])[:, 1] ### Plot feature importance and ROC AUC curve fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10)) # Feature importance feature_importance = model_adv.get_booster().get_score(importance_type='weight') keys = list(feature_importance.keys()) values = list(feature_importance.values()) importance = pd.DataFrame(data=values, index=keys, columns=['score']).sort_values(by='score', ascending=False) ax1.set_title('Feature Importances') sns.barplot(x=importance.score.iloc[:20], y=importance.index[:20], orient='h', palette='Reds_r', ax=ax1) # Plot ROC AUC curve fpr_train, tpr_train, _ = roc_curve(y_adv, preds) roc_auc_train = auc(fpr_train, tpr_train) ax2.set_title('ROC AUC curve') ax2.plot(fpr_train, tpr_train, color='blue', label='Adversarial AUC = %0.2f' % roc_auc_train) ax2.legend(loc = 'lower right') ax2.plot([0, 1], [0, 1],'r--') ax2.set_xlim([0, 1]) ax2.set_ylim([0, 1]) plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.show() ###Output _____no_output_____ ###Markdown Visualize predictions ###Code train['pred'] = 0 for n_fold in range(config['N_USED_FOLDS']): train['pred'] += train[f'pred_fold_{n_fold+1}'] / config['N_FOLDS'] print('Label/prediction distribution') print(f"Train positive labels: {len(train[train['target'] > .5])}") print(f"Train positive predictions: {len(train[train['pred'] > .5])}") print(f"Train positive correct predictions: {len(train[(train['target'] > .5) & (train['pred'] > .5)])}") print('Top 10 samples') display(train[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'diagnosis', 'target', 'pred'] + [c for c in train.columns if (c.startswith('pred_fold'))]].head(10)) print('Top 10 positive samples') display(train[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'diagnosis', 'target', 'pred'] + [c for c in train.columns if (c.startswith('pred_fold'))]].query('target == 1').head(10)) print('Top 10 predicted positive samples') display(train[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'diagnosis', 'target', 'pred'] + [c for c in train.columns if (c.startswith('pred_fold'))]].query('pred > .5').head(10)) ###Output Label/prediction distribution Train positive labels: 584 Train positive predictions: 0 Train positive correct predictions: 0 Top 10 samples ###Markdown Visualize test predictions ###Code print(f"Test predictions {len(test[test['target'] > .5])}|{len(test[test['target'] <= .5])}") print('Top 10 samples') display(test[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'target'] + [c for c in test.columns if (c.startswith('pred_fold'))]].head(10)) print('Top 10 positive samples') display(test[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'target'] + [c for c in test.columns if (c.startswith('pred_fold'))]].query('target > .5').head(10)) ###Output Test predictions 0|10982 Top 10 samples ###Markdown Test set predictions ###Code submission = pd.read_csv(database_base_path + 'sample_submission.csv') submission['target'] = test['target'] fig = plt.subplots(figsize=(20, 6)) 
plt.hist(submission['target'], bins=100) plt.title('Preds', size=18) plt.show() display(submission.head(10)) display(submission.describe()) submission[['image_name', 'target']].to_csv('submission.csv', index=False) ###Output _____no_output_____
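###Markdown As a final sanity check before submitting, the per-fold validation AUC can be recomputed directly from the `fold_*` and `pred_fold_*` columns created during training. This is only a sketch and assumes the `train` dataframe still holds those columns. ###Code
from sklearn.metrics import roc_auc_score

# Validation AUC per fold, computed straight from the stored columns
for n_fold in range(config['N_USED_FOLDS']):
    fold_mask = train[f'fold_{n_fold+1}'] == 'validation'
    fold_auc = roc_auc_score(train.loc[fold_mask, 'target'],
                             train.loc[fold_mask, f'pred_fold_{n_fold+1}'])
    print(f'Fold {n_fold+1} validation AUC: {fold_auc:.3f}')
###Output _____no_output_____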
mlflow/databricks_mlflow_boston_dataset.ipynb
###Markdown MLflow ###Code # Import libraries import mlflow import mlflow.sklearn import pandas as pd import numpy as np from numpy import savetxt import matplotlib.pyplot as plt from sklearn.datasets import load_boston from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_squared_error, r2_score ###Output _____no_output_____ ###Markdown Load the data ###Code boston = load_boston() X = pd.DataFrame(boston.data) X.columns = [_.lower() for _ in boston.feature_names] y = pd.DataFrame(boston.target) y.columns=['target'] df = pd.concat([X,y], axis=1) df.head(5) ###Output _____no_output_____ ###Markdown Split the dataset into training and test sets ###Code X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, train_size=0.7, random_state=42, shuffle=True) ###Output _____no_output_____ ###Markdown "Attach" the notebook to the created experiment ###Code mlflow.set_experiment('/Experiment/boston') ###Output _____no_output_____ ###Markdown MLflow ###Code with mlflow.start_run(): n_estimators = 100 max_depth = 7 max_features = 5 rf = RandomForestRegressor(n_estimators = n_estimators, max_depth = max_depth, max_features = max_features) rf.fit(X_train, y_train) predictions = rf.predict(X_test) mlflow.log_param("num_trees", n_estimators) mlflow.log_param("max_depth", max_depth) mlflow.log_param("max_feat", max_features) mse = mean_squared_error(y_test, predictions) mlflow.log_metric("mse", mse) mlflow.sklearn.log_model(rf, "random-forest-model") savetxt('predictions.csv', predictions, delimiter=',') mlflow.log_artifact("predictions.csv") ###Output _____no_output_____
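###Markdown Once the run has been logged, the stored model can be loaded back from the tracking server and reused for scoring. The cell below is a sketch: it looks up the most recent run of the active experiment and reloads the `random-forest-model` artifact logged above, assuming the same tracking backend is still in use. ###Code
# Look up the most recent run of this experiment and reload its model artifact
runs = mlflow.search_runs(order_by=["start_time DESC"], max_results=1)
run_id = runs.loc[0, "run_id"]

reloaded_model = mlflow.sklearn.load_model(f"runs:/{run_id}/random-forest-model")
print(reloaded_model.predict(X_test.head()))
###Output _____no_output_____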
AAMY/image_classification_with_cnns.ipynb
###Markdown ###Code ###Output _____no_output_____ ###Markdown Image Classification with Convolutional Neural NetworksIn this tutorial, we'll build and train a neural network to classify images of clothing, like sneakers and shirts.It's okay if you don't understand everything. This is a fast-paced overview of a complete TensorFlow program, with explanations along the way. The goal is to get the general sense of a TensorFlow project, not to catch every detail.This guide uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow. Install and import dependenciesWe'll need [TensorFlow Datasets](https://www.tensorflow.org/datasets/), an API that simplifies downloading and accessing datasets, and provides several sample datasets to work with. We're also using a few helper libraries. ###Code !pip install -U tensorflow_datasets from __future__ import absolute_import, division, print_function, unicode_literals # Import TensorFlow and TensorFlow Datasets import tensorflow as tf import tensorflow_datasets as tfds tf.logging.set_verbosity(tf.logging.ERROR) # Helper libraries import math import numpy as np import matplotlib.pyplot as plt # Improve progress bar display import tqdm import tqdm.auto tqdm.tqdm = tqdm.auto.tqdm print(tf.__version__) # This will go away in the future. # If this gives an error, you might be running TensorFlow 2 or above # If so, then just comment out this line and run this cell again tf.enable_eager_execution() ###Output WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0. For more information, please see: * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md * https://github.com/tensorflow/addons If you depend on functionality not listed there, please file an issue. 1.13.1 ###Markdown Import the Fashion MNIST datasetThis guide uses the [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset, which contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 $\times$ 28 pixels), as seen here: Figure 1. Fashion-MNIST samples (by Zalando, MIT License). Fashion MNIST is intended as a drop-in replacement for the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset -- often used as the "Hello, World" of machine learning programs for computer vision. The MNIST dataset contains images of handwritten digits (0, 1, 2, etc.) in an identical format to the articles of clothing we'll use here.This guide uses Fashion MNIST for variety, and because it's a slightly more challenging problem than regular MNIST. Both datasets are relatively small and are used to verify that an algorithm works as expected. They're good starting points to test and debug code.We will use 60,000 images to train the network and 10,000 images to evaluate how accurately the network learned to classify images. You can access the Fashion MNIST directly from TensorFlow, using the [Datasets](https://www.tensorflow.org/datasets) API: ###Code dataset, metadata = tfds.load('fashion_mnist', as_supervised=True, with_info=True) train_dataset, test_dataset = dataset['train'], dataset['test'] class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] ###Output _____no_output_____ ###Markdown Explore the dataLet's explore the format of the dataset before training the model. 
The following shows there are 60,000 images in the training set, and 10,000 images in the test set: ###Code num_train_examples = metadata.splits['train'].num_examples num_test_examples = metadata.splits['test'].num_examples print("Number of training examples: {}".format(num_train_examples)) print("Number of test examples: {}".format(num_test_examples)) ###Output Number of training examples: 60000 Number of test examples: 10000 ###Markdown Preprocess the dataThe value of each pixel in the image data is an integer in the range [0, 255]. For the model to work properly, these values need to be normalized to the range [0, 1]. So here we create a normalization function, and then apply it to each image in the test and train datasets. ###Code def normalize(images, labels): images = tf.cast(images, tf.float32) images /= 255 return images, labels # The map function applies the normalize function to each element in the train # and test datasets train_dataset = train_dataset.map(normalize) test_dataset = test_dataset.map(normalize) ###Output _____no_output_____ ###Markdown Explore the processed dataLet's plot an image to see what it looks like. ###Code # Take a single image, and remove the color dimension by reshaping for image, label in test_dataset.take(1): break image = image.numpy().reshape((28,28)) # Plot the image - voila a piece of fashion clothing plt.figure() plt.imshow(image, cmap=plt.cm.binary) plt.colorbar() plt.grid(False) plt.show() plt.figure(figsize=(10,10)) i = 0 for (image, label) in test_dataset.take(25): image = image.numpy().reshape((28,28)) plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(image, cmap=plt.cm.binary) plt.xlabel(class_names[label]) i += 1 plt.show() ###Output _____no_output_____ ###Markdown Build the modelBuilding the neural network requires configuring the layers of the model, then compiling the model. Setup the layersThe basic building block of a neural network is the layer. A layer extracts a representation from the data fed into it. Hopefully, a series of connected layers results in a representation that is meaningful for the problem at hand.Much of deep learning consists of chaining together simple layers. Most layers, like `tf.keras.layers.Dense`, have internal parameters which are adjusted ("learned") during training. ###Code model = tf.keras.Sequential([ tf.keras.layers.Conv2D(32, (3,3), padding='same', activation=tf.nn.relu, input_shape=(28,28,1)), tf.keras.layers.MaxPooling2D((2,2), strides=2), tf.keras.layers.Conv2D(64, (3,3), padding='same', activation=tf.nn.relu), tf.keras.layers.MaxPooling2D((2,2), strides=2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax) ]) ###Output _____no_output_____ ###Markdown This network's layers are:* "convolutions" `tf.keras.layers.Conv2D` and `MaxPooling2D`-- The network starts with pairs of Conv/MaxPool. The first layer is a Conv2D with (3,3) filters applied to the input image, retaining the original image size by using padding, and creating 32 output (convolved) images (so this layer creates 32 convolved images of the same size as the input). After that, the 32 outputs are reduced in size using a MaxPooling2D (2,2) with a stride of 2. The next Conv2D also has a (3,3) kernel, takes the 32 images as input and creates 64 outputs which are again reduced in size by a MaxPooling2D layer. So far in the course, we have described what a Convolution does, but we haven't yet covered how you chain multiples of these together. 
We will get back to this in lesson 4 when we use color images. At this point, it's enough if you understand the kind of operation a convolutional filter performs.* **output** `tf.keras.layers.Dense` -- A 128-neuron layer, followed by a 10-node *softmax* layer. Each node represents a class of clothing. As in the previous layer, the final layer takes input from the 128 nodes in the layer before it, and outputs a value in the range `[0,1]`, representing the probability that the image belongs to that class. The sum of all 10 node values is 1. Compile the modelBefore the model is ready for training, it needs a few more settings. These are added during the model's *compile* step:* *Loss function* -- An algorithm for measuring how far the model's outputs are from the desired output. The goal of training is to minimize this measure of loss.* *Optimizer* -- An algorithm for adjusting the inner parameters of the model in order to minimize loss.* *Metrics* -- Used to monitor the training and testing steps. The following example uses *accuracy*, the fraction of the images that are correctly classified. ###Code model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) ###Output _____no_output_____ ###Markdown Train the modelFirst, we define the iteration behavior for the train dataset:1. Repeat forever by specifying `dataset.repeat()` (the `epochs` parameter described below limits how long we perform training)2. The `dataset.shuffle(60000)` randomizes the order so our model cannot learn anything from the order of the examples.3. And `dataset.batch(32)` tells `model.fit` to use batches of 32 images and labels when updating the model variables.Training is performed by calling the `model.fit` method: 1. Feed the training data to the model using `train_dataset`.2. The model learns to associate images and labels.3. The `epochs=10` parameter limits training to 10 full iterations of the training dataset, so a total of 10 * 60000 = 600000 examples.(Don't worry about `steps_per_epoch`, the requirement to have this flag will soon be removed) ###Code BATCH_SIZE = 32 train_dataset = train_dataset.repeat().shuffle(num_train_examples).batch(BATCH_SIZE) test_dataset = test_dataset.batch(BATCH_SIZE) model.fit(train_dataset, epochs=10, steps_per_epoch=math.ceil(num_train_examples/BATCH_SIZE)) ###Output Epoch 1/10
1875/1875 [==============================] - 29s 16ms/step - loss: 0.4016 - acc: 0.8546
Epoch 2/10
1875/1875 [==============================] - 17s 9ms/step - loss: 0.2586 - acc: 0.9061
Epoch 3/10
1875/1875 [==============================] - 18s 10ms/step - loss: 0.2105 - acc: 0.9227
Epoch 4/10
1875/1875 [==============================] - 17s 9ms/step - loss: 0.1828 - acc: 0.9324
Epoch 5/10
1875/1875 [==============================] - 17s 9ms/step - loss: 0.1561 - acc: 0.9416
Epoch 6/10
1875/1875 [==============================] - 17s 9ms/step - loss: 0.1337 - acc: 0.9510
Epoch 7/10
1875/1875 [==============================] - 17s 9ms/step - loss: 0.1098 - acc: 0.9594
Epoch 8/10
1875/1875 [==============================] - 18s 10ms/step - loss: 0.0913 - acc: 0.9671
Epoch 9/10
1875/1875 [==============================] - 17s 9ms/step - loss: 0.0800 - acc: 0.9707
Epoch 10/10
1875/1875 [==============================] - 17s 9ms/step - loss: 0.0652 - acc: 0.9757 ###Markdown As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.97 (or 97%) on the training data. Evaluate accuracyNext, compare how the model performs on the test dataset. Use all examples we have in the test dataset to assess accuracy. 
###Code
test_loss, test_accuracy = model.evaluate(test_dataset, steps=math.ceil(num_test_examples/32))
print('Accuracy on test dataset: ', test_accuracy)

num_test_examples/32

###Output
_____no_output_____

###Markdown
As it turns out, the accuracy on the test dataset is smaller than the accuracy on the training dataset. This is completely normal, since the model was trained on the `train_dataset`. When the model sees images it has never seen during training (that is, from the `test_dataset`), we can expect performance to go down.

###Code

###Output
_____no_output_____

###Markdown
Make predictions and explore
With the model trained, we can use it to make predictions about some images.

###Code
for test_images, test_labels in test_dataset.take(1):
    test_images = test_images.numpy()
    print(test_images.shape)
    test_labels = test_labels.numpy()
    predictions = model.predict(test_images)

predictions.shape

predictions

print(np.argmax(predictions[0]))

test_labels[0]

###Output
6

###Markdown
We can graph this to look at the full set of 10 class predictions.

###Code
def plot_image(i, predictions_array, true_labels, images):
    prediction_array, true_label, img = predictions_array[i], true_labels[i], images[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])

    plt.imshow(img[...,0], cmap=plt.cm.binary)

    predicted_label = np.argmax(prediction_array)
    if predicted_label == true_label:
        color = 'blue'
    else:
        color = 'red'

    plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
                                         100*np.max(prediction_array),
                                         class_names[true_label]),
               color=color)

def plot_value_array(i, predictions_array, true_label):
    predictions_array, true_label = predictions_array[i], true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    thisplot = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)

    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')

i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)

i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)

num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2*num_cols, 2*i+1)
    plot_image(i, predictions, test_labels, test_images)
    plt.subplot(num_rows, 2*num_cols, 2*i+2)
    plot_value_array(i, predictions, test_labels)

# Grab an image from the test dataset
img = test_images[0]
print(img.shape)

img = np.array([img])
print(img.shape)

predictions_single = model.predict(img)
print(predictions_single)

np.argmax(predictions_single[0])

###Output
_____no_output_____
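###Markdown
As a small follow-up sketch (not part of the original notebook, and using only variables already defined above), we can map the predicted index back to its human-readable class name and compare it with the true label of the same image:

###Code
# Illustrative sketch: convert the winning softmax index into a class name.
predicted_index = np.argmax(predictions_single[0])   # index of the highest predicted probability
print("Predicted:", class_names[predicted_index])    # human-readable name of the predicted class
print("Actual:   ", class_names[test_labels[0]])     # true class of the same test image

###Output
_____no_output_____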
Model backlog/Deep Learning/DenseNet121/[140th] - Fine-tune - DenseNet121 - Original size.ipynb
###Markdown Model parameters ###Code # Model parameters BATCH_SIZE = 128 EPOCHS = 30 LEARNING_RATE = 0.0001 HEIGHT = 224 WIDTH = 224 CANAL = 3 N_CLASSES = labels.shape[0] ES_PATIENCE = 3 DECAY_DROP = 0.5 DECAY_EPOCHS = 10 def f2_score_thr(threshold=0.5): def f2_score(y_true, y_pred): beta = 2 y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold), K.floatx()) true_positives = K.sum(K.clip(y_true * y_pred, 0, 1), axis=1) predicted_positives = K.sum(K.clip(y_pred, 0, 1), axis=1) possible_positives = K.sum(K.clip(y_true, 0, 1), axis=1) precision = true_positives / (predicted_positives + K.epsilon()) recall = true_positives / (possible_positives + K.epsilon()) return K.mean(((1+beta**2)*precision*recall) / ((beta**2)*precision+recall+K.epsilon())) return f2_score def custom_f2(y_true, y_pred): beta = 2 tp = np.sum((y_true == 1) & (y_pred == 1)) tn = np.sum((y_true == 0) & (y_pred == 0)) fp = np.sum((y_true == 0) & (y_pred == 1)) fn = np.sum((y_true == 1) & (y_pred == 0)) p = tp / (tp + fp + K.epsilon()) r = tp / (tp + fn + K.epsilon()) f2 = (1+beta**2)*p*r / (p*beta**2 + r + 1e-15) return f2 def step_decay(epoch): initial_lrate = LEARNING_RATE drop = DECAY_DROP epochs_drop = DECAY_EPOCHS lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop)) return lrate train_datagen=ImageDataGenerator(rescale=1./255, validation_split=0.25) train_generator=train_datagen.flow_from_dataframe( dataframe=train, directory="../input/imet-2019-fgvc6/train", x_col="id", y_col="attribute_ids", batch_size=BATCH_SIZE, shuffle=True, class_mode="categorical", target_size=(HEIGHT, WIDTH), subset='training') valid_generator=train_datagen.flow_from_dataframe( dataframe=train, directory="../input/imet-2019-fgvc6/train", x_col="id", y_col="attribute_ids", batch_size=BATCH_SIZE, shuffle=True, class_mode="categorical", target_size=(HEIGHT, WIDTH), subset='validation') test_datagen = ImageDataGenerator(rescale=1./255) test_generator = test_datagen.flow_from_dataframe( dataframe=test, directory = "../input/imet-2019-fgvc6/test", x_col="id", target_size=(HEIGHT, WIDTH), batch_size=1, shuffle=False, class_mode=None) ###Output Found 81928 images belonging to 1103 classes. Found 27309 images belonging to 1103 classes. Found 7443 images. 
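###Markdown
The helper functions above (the thresholded F2 metric and the step-decay learning-rate schedule) are defined but not yet attached to anything. Below is a minimal sketch, not code from the original notebook, of how they could be wired into Keras training: `step_decay` via a `LearningRateScheduler` callback and `f2_score_thr(0.5)` as an extra metric passed to `model.compile`. It assumes the standard `keras.callbacks` import is available.

###Code
# Sketch only (assumed wiring, not executed in this notebook).
from keras.callbacks import LearningRateScheduler

# step_decay multiplies the learning rate by DECAY_DROP every DECAY_EPOCHS epochs.
lr_schedule = LearningRateScheduler(step_decay, verbose=1)

# Example of how these helpers could be used later when compiling/fitting a model:
# model.compile(optimizer=optimizers.Adam(lr=LEARNING_RATE),
#               loss='binary_crossentropy',
#               metrics=['accuracy', f2_score_thr(0.5)])
# model.fit_generator(train_generator, ..., callbacks=[lr_schedule])

###Output
_____no_output_____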
###Markdown Model ###Code def create_model(input_shape, n_out): input_tensor = Input(shape=input_shape) base_model = applications.DenseNet121(weights=None, include_top=False, input_tensor=input_tensor) base_model.load_weights('../input/densenet-keras/DenseNet-BC-121-32-no-top.h5') x = GlobalAveragePooling2D()(base_model.output) x = Dropout(0.5)(x) x = Dense(1024, activation='relu')(x) x = Dropout(0.5)(x) final_output = Dense(n_out, activation='sigmoid', name='final_output')(x) model = Model(input_tensor, final_output) return model # warm up model # first: train only the top layers (which were randomly initialized) model = create_model(input_shape=(HEIGHT, WIDTH, CANAL), n_out=N_CLASSES) for layer in model.layers: layer.trainable = False for i in range(-5,0): model.layers[i].trainable = True optimizer = optimizers.Adam(lr=LEARNING_RATE) metrics = ["accuracy", "categorical_accuracy"] es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=ES_PATIENCE) callbacks = [es] model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=metrics) model.summary() ###Output WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version. Instructions for updating: Colocations handled automatically by placer. WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version. Instructions for updating: Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`. __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input_1 (InputLayer) (None, 224, 224, 3) 0 __________________________________________________________________________________________________ zero_padding2d_1 (ZeroPadding2D (None, 230, 230, 3) 0 input_1[0][0] __________________________________________________________________________________________________ conv1/conv (Conv2D) (None, 112, 112, 64) 9408 zero_padding2d_1[0][0] __________________________________________________________________________________________________ conv1/bn (BatchNormalization) (None, 112, 112, 64) 256 conv1/conv[0][0] __________________________________________________________________________________________________ conv1/relu (Activation) (None, 112, 112, 64) 0 conv1/bn[0][0] __________________________________________________________________________________________________ zero_padding2d_2 (ZeroPadding2D (None, 114, 114, 64) 0 conv1/relu[0][0] __________________________________________________________________________________________________ pool1 (MaxPooling2D) (None, 56, 56, 64) 0 zero_padding2d_2[0][0] __________________________________________________________________________________________________ conv2_block1_0_bn (BatchNormali (None, 56, 56, 64) 256 pool1[0][0] __________________________________________________________________________________________________ conv2_block1_0_relu (Activation (None, 56, 56, 64) 0 conv2_block1_0_bn[0][0] __________________________________________________________________________________________________ conv2_block1_1_conv (Conv2D) (None, 56, 56, 128) 8192 conv2_block1_0_relu[0][0] 
__________________________________________________________________________________________________ conv2_block1_1_bn (BatchNormali (None, 56, 56, 128) 512 conv2_block1_1_conv[0][0] __________________________________________________________________________________________________ conv2_block1_1_relu (Activation (None, 56, 56, 128) 0 conv2_block1_1_bn[0][0] __________________________________________________________________________________________________ conv2_block1_2_conv (Conv2D) (None, 56, 56, 32) 36864 conv2_block1_1_relu[0][0] __________________________________________________________________________________________________ conv2_block1_concat (Concatenat (None, 56, 56, 96) 0 pool1[0][0] conv2_block1_2_conv[0][0] __________________________________________________________________________________________________ conv2_block2_0_bn (BatchNormali (None, 56, 56, 96) 384 conv2_block1_concat[0][0] __________________________________________________________________________________________________ conv2_block2_0_relu (Activation (None, 56, 56, 96) 0 conv2_block2_0_bn[0][0] __________________________________________________________________________________________________ conv2_block2_1_conv (Conv2D) (None, 56, 56, 128) 12288 conv2_block2_0_relu[0][0] __________________________________________________________________________________________________ conv2_block2_1_bn (BatchNormali (None, 56, 56, 128) 512 conv2_block2_1_conv[0][0] __________________________________________________________________________________________________ conv2_block2_1_relu (Activation (None, 56, 56, 128) 0 conv2_block2_1_bn[0][0] __________________________________________________________________________________________________ conv2_block2_2_conv (Conv2D) (None, 56, 56, 32) 36864 conv2_block2_1_relu[0][0] __________________________________________________________________________________________________ conv2_block2_concat (Concatenat (None, 56, 56, 128) 0 conv2_block1_concat[0][0] conv2_block2_2_conv[0][0] __________________________________________________________________________________________________ conv2_block3_0_bn (BatchNormali (None, 56, 56, 128) 512 conv2_block2_concat[0][0] __________________________________________________________________________________________________ conv2_block3_0_relu (Activation (None, 56, 56, 128) 0 conv2_block3_0_bn[0][0] __________________________________________________________________________________________________ conv2_block3_1_conv (Conv2D) (None, 56, 56, 128) 16384 conv2_block3_0_relu[0][0] __________________________________________________________________________________________________ conv2_block3_1_bn (BatchNormali (None, 56, 56, 128) 512 conv2_block3_1_conv[0][0] __________________________________________________________________________________________________ conv2_block3_1_relu (Activation (None, 56, 56, 128) 0 conv2_block3_1_bn[0][0] __________________________________________________________________________________________________ conv2_block3_2_conv (Conv2D) (None, 56, 56, 32) 36864 conv2_block3_1_relu[0][0] __________________________________________________________________________________________________ conv2_block3_concat (Concatenat (None, 56, 56, 160) 0 conv2_block2_concat[0][0] conv2_block3_2_conv[0][0] __________________________________________________________________________________________________ conv2_block4_0_bn (BatchNormali (None, 56, 56, 160) 640 conv2_block3_concat[0][0] 
__________________________________________________________________________________________________ conv2_block4_0_relu (Activation (None, 56, 56, 160) 0 conv2_block4_0_bn[0][0] __________________________________________________________________________________________________ conv2_block4_1_conv (Conv2D) (None, 56, 56, 128) 20480 conv2_block4_0_relu[0][0] __________________________________________________________________________________________________ conv2_block4_1_bn (BatchNormali (None, 56, 56, 128) 512 conv2_block4_1_conv[0][0] __________________________________________________________________________________________________ conv2_block4_1_relu (Activation (None, 56, 56, 128) 0 conv2_block4_1_bn[0][0] __________________________________________________________________________________________________ conv2_block4_2_conv (Conv2D) (None, 56, 56, 32) 36864 conv2_block4_1_relu[0][0] __________________________________________________________________________________________________ conv2_block4_concat (Concatenat (None, 56, 56, 192) 0 conv2_block3_concat[0][0] conv2_block4_2_conv[0][0] __________________________________________________________________________________________________ conv2_block5_0_bn (BatchNormali (None, 56, 56, 192) 768 conv2_block4_concat[0][0] __________________________________________________________________________________________________ conv2_block5_0_relu (Activation (None, 56, 56, 192) 0 conv2_block5_0_bn[0][0] __________________________________________________________________________________________________ conv2_block5_1_conv (Conv2D) (None, 56, 56, 128) 24576 conv2_block5_0_relu[0][0] __________________________________________________________________________________________________ conv2_block5_1_bn (BatchNormali (None, 56, 56, 128) 512 conv2_block5_1_conv[0][0] __________________________________________________________________________________________________ conv2_block5_1_relu (Activation (None, 56, 56, 128) 0 conv2_block5_1_bn[0][0] __________________________________________________________________________________________________ conv2_block5_2_conv (Conv2D) (None, 56, 56, 32) 36864 conv2_block5_1_relu[0][0] __________________________________________________________________________________________________ conv2_block5_concat (Concatenat (None, 56, 56, 224) 0 conv2_block4_concat[0][0] conv2_block5_2_conv[0][0] __________________________________________________________________________________________________ conv2_block6_0_bn (BatchNormali (None, 56, 56, 224) 896 conv2_block5_concat[0][0] __________________________________________________________________________________________________ conv2_block6_0_relu (Activation (None, 56, 56, 224) 0 conv2_block6_0_bn[0][0] __________________________________________________________________________________________________ conv2_block6_1_conv (Conv2D) (None, 56, 56, 128) 28672 conv2_block6_0_relu[0][0] __________________________________________________________________________________________________ conv2_block6_1_bn (BatchNormali (None, 56, 56, 128) 512 conv2_block6_1_conv[0][0] __________________________________________________________________________________________________ conv2_block6_1_relu (Activation (None, 56, 56, 128) 0 conv2_block6_1_bn[0][0] __________________________________________________________________________________________________ conv2_block6_2_conv (Conv2D) (None, 56, 56, 32) 36864 conv2_block6_1_relu[0][0] __________________________________________________________________________________________________ 
conv2_block6_concat (Concatenat (None, 56, 56, 256) 0 conv2_block5_concat[0][0] conv2_block6_2_conv[0][0] __________________________________________________________________________________________________ pool2_bn (BatchNormalization) (None, 56, 56, 256) 1024 conv2_block6_concat[0][0] __________________________________________________________________________________________________ pool2_relu (Activation) (None, 56, 56, 256) 0 pool2_bn[0][0] __________________________________________________________________________________________________ pool2_conv (Conv2D) (None, 56, 56, 128) 32768 pool2_relu[0][0] __________________________________________________________________________________________________ pool2_pool (AveragePooling2D) (None, 28, 28, 128) 0 pool2_conv[0][0] __________________________________________________________________________________________________ conv3_block1_0_bn (BatchNormali (None, 28, 28, 128) 512 pool2_pool[0][0] __________________________________________________________________________________________________ conv3_block1_0_relu (Activation (None, 28, 28, 128) 0 conv3_block1_0_bn[0][0] __________________________________________________________________________________________________ conv3_block1_1_conv (Conv2D) (None, 28, 28, 128) 16384 conv3_block1_0_relu[0][0] __________________________________________________________________________________________________ conv3_block1_1_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block1_1_conv[0][0] __________________________________________________________________________________________________ conv3_block1_1_relu (Activation (None, 28, 28, 128) 0 conv3_block1_1_bn[0][0] __________________________________________________________________________________________________ conv3_block1_2_conv (Conv2D) (None, 28, 28, 32) 36864 conv3_block1_1_relu[0][0] __________________________________________________________________________________________________ conv3_block1_concat (Concatenat (None, 28, 28, 160) 0 pool2_pool[0][0] conv3_block1_2_conv[0][0] __________________________________________________________________________________________________ conv3_block2_0_bn (BatchNormali (None, 28, 28, 160) 640 conv3_block1_concat[0][0] __________________________________________________________________________________________________ conv3_block2_0_relu (Activation (None, 28, 28, 160) 0 conv3_block2_0_bn[0][0] __________________________________________________________________________________________________ conv3_block2_1_conv (Conv2D) (None, 28, 28, 128) 20480 conv3_block2_0_relu[0][0] __________________________________________________________________________________________________ conv3_block2_1_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block2_1_conv[0][0] __________________________________________________________________________________________________ conv3_block2_1_relu (Activation (None, 28, 28, 128) 0 conv3_block2_1_bn[0][0] __________________________________________________________________________________________________ conv3_block2_2_conv (Conv2D) (None, 28, 28, 32) 36864 conv3_block2_1_relu[0][0] __________________________________________________________________________________________________ conv3_block2_concat (Concatenat (None, 28, 28, 192) 0 conv3_block1_concat[0][0] conv3_block2_2_conv[0][0] __________________________________________________________________________________________________ conv3_block3_0_bn (BatchNormali (None, 28, 28, 192) 768 conv3_block2_concat[0][0] 
__________________________________________________________________________________________________ conv3_block3_0_relu (Activation (None, 28, 28, 192) 0 conv3_block3_0_bn[0][0] __________________________________________________________________________________________________ conv3_block3_1_conv (Conv2D) (None, 28, 28, 128) 24576 conv3_block3_0_relu[0][0] __________________________________________________________________________________________________ conv3_block3_1_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block3_1_conv[0][0] __________________________________________________________________________________________________ conv3_block3_1_relu (Activation (None, 28, 28, 128) 0 conv3_block3_1_bn[0][0] __________________________________________________________________________________________________ conv3_block3_2_conv (Conv2D) (None, 28, 28, 32) 36864 conv3_block3_1_relu[0][0] __________________________________________________________________________________________________ conv3_block3_concat (Concatenat (None, 28, 28, 224) 0 conv3_block2_concat[0][0] conv3_block3_2_conv[0][0] __________________________________________________________________________________________________ conv3_block4_0_bn (BatchNormali (None, 28, 28, 224) 896 conv3_block3_concat[0][0] __________________________________________________________________________________________________ conv3_block4_0_relu (Activation (None, 28, 28, 224) 0 conv3_block4_0_bn[0][0] __________________________________________________________________________________________________ conv3_block4_1_conv (Conv2D) (None, 28, 28, 128) 28672 conv3_block4_0_relu[0][0] __________________________________________________________________________________________________ conv3_block4_1_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block4_1_conv[0][0] __________________________________________________________________________________________________ conv3_block4_1_relu (Activation (None, 28, 28, 128) 0 conv3_block4_1_bn[0][0] __________________________________________________________________________________________________ conv3_block4_2_conv (Conv2D) (None, 28, 28, 32) 36864 conv3_block4_1_relu[0][0] __________________________________________________________________________________________________ conv3_block4_concat (Concatenat (None, 28, 28, 256) 0 conv3_block3_concat[0][0] conv3_block4_2_conv[0][0] __________________________________________________________________________________________________ conv3_block5_0_bn (BatchNormali (None, 28, 28, 256) 1024 conv3_block4_concat[0][0] __________________________________________________________________________________________________ conv3_block5_0_relu (Activation (None, 28, 28, 256) 0 conv3_block5_0_bn[0][0] __________________________________________________________________________________________________ conv3_block5_1_conv (Conv2D) (None, 28, 28, 128) 32768 conv3_block5_0_relu[0][0] __________________________________________________________________________________________________ conv3_block5_1_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block5_1_conv[0][0] __________________________________________________________________________________________________ conv3_block5_1_relu (Activation (None, 28, 28, 128) 0 conv3_block5_1_bn[0][0] __________________________________________________________________________________________________ conv3_block5_2_conv (Conv2D) (None, 28, 28, 32) 36864 conv3_block5_1_relu[0][0] __________________________________________________________________________________________________ 
conv3_block5_concat (Concatenat (None, 28, 28, 288) 0 conv3_block4_concat[0][0] conv3_block5_2_conv[0][0] __________________________________________________________________________________________________ conv3_block6_0_bn (BatchNormali (None, 28, 28, 288) 1152 conv3_block5_concat[0][0] __________________________________________________________________________________________________ conv3_block6_0_relu (Activation (None, 28, 28, 288) 0 conv3_block6_0_bn[0][0] __________________________________________________________________________________________________ conv3_block6_1_conv (Conv2D) (None, 28, 28, 128) 36864 conv3_block6_0_relu[0][0] __________________________________________________________________________________________________ conv3_block6_1_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block6_1_conv[0][0] __________________________________________________________________________________________________ conv3_block6_1_relu (Activation (None, 28, 28, 128) 0 conv3_block6_1_bn[0][0] __________________________________________________________________________________________________ conv3_block6_2_conv (Conv2D) (None, 28, 28, 32) 36864 conv3_block6_1_relu[0][0] __________________________________________________________________________________________________ conv3_block6_concat (Concatenat (None, 28, 28, 320) 0 conv3_block5_concat[0][0] conv3_block6_2_conv[0][0] __________________________________________________________________________________________________ conv3_block7_0_bn (BatchNormali (None, 28, 28, 320) 1280 conv3_block6_concat[0][0] __________________________________________________________________________________________________ conv3_block7_0_relu (Activation (None, 28, 28, 320) 0 conv3_block7_0_bn[0][0] __________________________________________________________________________________________________ conv3_block7_1_conv (Conv2D) (None, 28, 28, 128) 40960 conv3_block7_0_relu[0][0] __________________________________________________________________________________________________ conv3_block7_1_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block7_1_conv[0][0] __________________________________________________________________________________________________ conv3_block7_1_relu (Activation (None, 28, 28, 128) 0 conv3_block7_1_bn[0][0] __________________________________________________________________________________________________ conv3_block7_2_conv (Conv2D) (None, 28, 28, 32) 36864 conv3_block7_1_relu[0][0] __________________________________________________________________________________________________ conv3_block7_concat (Concatenat (None, 28, 28, 352) 0 conv3_block6_concat[0][0] conv3_block7_2_conv[0][0] __________________________________________________________________________________________________ conv3_block8_0_bn (BatchNormali (None, 28, 28, 352) 1408 conv3_block7_concat[0][0] __________________________________________________________________________________________________ conv3_block8_0_relu (Activation (None, 28, 28, 352) 0 conv3_block8_0_bn[0][0] __________________________________________________________________________________________________ conv3_block8_1_conv (Conv2D) (None, 28, 28, 128) 45056 conv3_block8_0_relu[0][0] __________________________________________________________________________________________________ conv3_block8_1_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block8_1_conv[0][0] __________________________________________________________________________________________________ conv3_block8_1_relu (Activation (None, 28, 28, 128) 0 
conv3_block8_1_bn[0][0] __________________________________________________________________________________________________ conv3_block8_2_conv (Conv2D) (None, 28, 28, 32) 36864 conv3_block8_1_relu[0][0] __________________________________________________________________________________________________ conv3_block8_concat (Concatenat (None, 28, 28, 384) 0 conv3_block7_concat[0][0] conv3_block8_2_conv[0][0] __________________________________________________________________________________________________ conv3_block9_0_bn (BatchNormali (None, 28, 28, 384) 1536 conv3_block8_concat[0][0] __________________________________________________________________________________________________ conv3_block9_0_relu (Activation (None, 28, 28, 384) 0 conv3_block9_0_bn[0][0] __________________________________________________________________________________________________ conv3_block9_1_conv (Conv2D) (None, 28, 28, 128) 49152 conv3_block9_0_relu[0][0] __________________________________________________________________________________________________ conv3_block9_1_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block9_1_conv[0][0] __________________________________________________________________________________________________ conv3_block9_1_relu (Activation (None, 28, 28, 128) 0 conv3_block9_1_bn[0][0] __________________________________________________________________________________________________ conv3_block9_2_conv (Conv2D) (None, 28, 28, 32) 36864 conv3_block9_1_relu[0][0] __________________________________________________________________________________________________ conv3_block9_concat (Concatenat (None, 28, 28, 416) 0 conv3_block8_concat[0][0] conv3_block9_2_conv[0][0] __________________________________________________________________________________________________ conv3_block10_0_bn (BatchNormal (None, 28, 28, 416) 1664 conv3_block9_concat[0][0] __________________________________________________________________________________________________ conv3_block10_0_relu (Activatio (None, 28, 28, 416) 0 conv3_block10_0_bn[0][0] __________________________________________________________________________________________________ conv3_block10_1_conv (Conv2D) (None, 28, 28, 128) 53248 conv3_block10_0_relu[0][0] __________________________________________________________________________________________________ conv3_block10_1_bn (BatchNormal (None, 28, 28, 128) 512 conv3_block10_1_conv[0][0] __________________________________________________________________________________________________ conv3_block10_1_relu (Activatio (None, 28, 28, 128) 0 conv3_block10_1_bn[0][0] __________________________________________________________________________________________________ conv3_block10_2_conv (Conv2D) (None, 28, 28, 32) 36864 conv3_block10_1_relu[0][0] __________________________________________________________________________________________________ conv3_block10_concat (Concatena (None, 28, 28, 448) 0 conv3_block9_concat[0][0] conv3_block10_2_conv[0][0] __________________________________________________________________________________________________ conv3_block11_0_bn (BatchNormal (None, 28, 28, 448) 1792 conv3_block10_concat[0][0] __________________________________________________________________________________________________ conv3_block11_0_relu (Activatio (None, 28, 28, 448) 0 conv3_block11_0_bn[0][0] __________________________________________________________________________________________________ conv3_block11_1_conv (Conv2D) (None, 28, 28, 128) 57344 conv3_block11_0_relu[0][0] 
__________________________________________________________________________________________________ conv3_block11_1_bn (BatchNormal (None, 28, 28, 128) 512 conv3_block11_1_conv[0][0] __________________________________________________________________________________________________ conv3_block11_1_relu (Activatio (None, 28, 28, 128) 0 conv3_block11_1_bn[0][0] __________________________________________________________________________________________________ conv3_block11_2_conv (Conv2D) (None, 28, 28, 32) 36864 conv3_block11_1_relu[0][0] __________________________________________________________________________________________________ conv3_block11_concat (Concatena (None, 28, 28, 480) 0 conv3_block10_concat[0][0] conv3_block11_2_conv[0][0] __________________________________________________________________________________________________ conv3_block12_0_bn (BatchNormal (None, 28, 28, 480) 1920 conv3_block11_concat[0][0] __________________________________________________________________________________________________ conv3_block12_0_relu (Activatio (None, 28, 28, 480) 0 conv3_block12_0_bn[0][0] __________________________________________________________________________________________________ conv3_block12_1_conv (Conv2D) (None, 28, 28, 128) 61440 conv3_block12_0_relu[0][0] __________________________________________________________________________________________________ conv3_block12_1_bn (BatchNormal (None, 28, 28, 128) 512 conv3_block12_1_conv[0][0] __________________________________________________________________________________________________ conv3_block12_1_relu (Activatio (None, 28, 28, 128) 0 conv3_block12_1_bn[0][0] __________________________________________________________________________________________________ conv3_block12_2_conv (Conv2D) (None, 28, 28, 32) 36864 conv3_block12_1_relu[0][0] __________________________________________________________________________________________________ conv3_block12_concat (Concatena (None, 28, 28, 512) 0 conv3_block11_concat[0][0] conv3_block12_2_conv[0][0] __________________________________________________________________________________________________ pool3_bn (BatchNormalization) (None, 28, 28, 512) 2048 conv3_block12_concat[0][0] __________________________________________________________________________________________________ pool3_relu (Activation) (None, 28, 28, 512) 0 pool3_bn[0][0] __________________________________________________________________________________________________ pool3_conv (Conv2D) (None, 28, 28, 256) 131072 pool3_relu[0][0] __________________________________________________________________________________________________ pool3_pool (AveragePooling2D) (None, 14, 14, 256) 0 pool3_conv[0][0] __________________________________________________________________________________________________ conv4_block1_0_bn (BatchNormali (None, 14, 14, 256) 1024 pool3_pool[0][0] __________________________________________________________________________________________________ conv4_block1_0_relu (Activation (None, 14, 14, 256) 0 conv4_block1_0_bn[0][0] __________________________________________________________________________________________________ conv4_block1_1_conv (Conv2D) (None, 14, 14, 128) 32768 conv4_block1_0_relu[0][0] __________________________________________________________________________________________________ conv4_block1_1_bn (BatchNormali (None, 14, 14, 128) 512 conv4_block1_1_conv[0][0] __________________________________________________________________________________________________ conv4_block1_1_relu (Activation 
(None, 14, 14, 128) 0 conv4_block1_1_bn[0][0] __________________________________________________________________________________________________ conv4_block1_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block1_1_relu[0][0] __________________________________________________________________________________________________ conv4_block1_concat (Concatenat (None, 14, 14, 288) 0 pool3_pool[0][0] conv4_block1_2_conv[0][0] __________________________________________________________________________________________________ conv4_block2_0_bn (BatchNormali (None, 14, 14, 288) 1152 conv4_block1_concat[0][0] __________________________________________________________________________________________________ conv4_block2_0_relu (Activation (None, 14, 14, 288) 0 conv4_block2_0_bn[0][0] __________________________________________________________________________________________________ conv4_block2_1_conv (Conv2D) (None, 14, 14, 128) 36864 conv4_block2_0_relu[0][0] __________________________________________________________________________________________________ conv4_block2_1_bn (BatchNormali (None, 14, 14, 128) 512 conv4_block2_1_conv[0][0] __________________________________________________________________________________________________ conv4_block2_1_relu (Activation (None, 14, 14, 128) 0 conv4_block2_1_bn[0][0] __________________________________________________________________________________________________ conv4_block2_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block2_1_relu[0][0] __________________________________________________________________________________________________ conv4_block2_concat (Concatenat (None, 14, 14, 320) 0 conv4_block1_concat[0][0] conv4_block2_2_conv[0][0] __________________________________________________________________________________________________ conv4_block3_0_bn (BatchNormali (None, 14, 14, 320) 1280 conv4_block2_concat[0][0] __________________________________________________________________________________________________ conv4_block3_0_relu (Activation (None, 14, 14, 320) 0 conv4_block3_0_bn[0][0] __________________________________________________________________________________________________ conv4_block3_1_conv (Conv2D) (None, 14, 14, 128) 40960 conv4_block3_0_relu[0][0] __________________________________________________________________________________________________ conv4_block3_1_bn (BatchNormali (None, 14, 14, 128) 512 conv4_block3_1_conv[0][0] __________________________________________________________________________________________________ conv4_block3_1_relu (Activation (None, 14, 14, 128) 0 conv4_block3_1_bn[0][0] __________________________________________________________________________________________________ conv4_block3_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block3_1_relu[0][0] __________________________________________________________________________________________________ conv4_block3_concat (Concatenat (None, 14, 14, 352) 0 conv4_block2_concat[0][0] conv4_block3_2_conv[0][0] __________________________________________________________________________________________________ conv4_block4_0_bn (BatchNormali (None, 14, 14, 352) 1408 conv4_block3_concat[0][0] __________________________________________________________________________________________________ conv4_block4_0_relu (Activation (None, 14, 14, 352) 0 conv4_block4_0_bn[0][0] __________________________________________________________________________________________________ conv4_block4_1_conv (Conv2D) (None, 14, 14, 128) 45056 conv4_block4_0_relu[0][0] 
__________________________________________________________________________________________________ conv4_block4_1_bn (BatchNormali (None, 14, 14, 128) 512 conv4_block4_1_conv[0][0] __________________________________________________________________________________________________ conv4_block4_1_relu (Activation (None, 14, 14, 128) 0 conv4_block4_1_bn[0][0] __________________________________________________________________________________________________ conv4_block4_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block4_1_relu[0][0] __________________________________________________________________________________________________ conv4_block4_concat (Concatenat (None, 14, 14, 384) 0 conv4_block3_concat[0][0] conv4_block4_2_conv[0][0] __________________________________________________________________________________________________ conv4_block5_0_bn (BatchNormali (None, 14, 14, 384) 1536 conv4_block4_concat[0][0] __________________________________________________________________________________________________ conv4_block5_0_relu (Activation (None, 14, 14, 384) 0 conv4_block5_0_bn[0][0] __________________________________________________________________________________________________ conv4_block5_1_conv (Conv2D) (None, 14, 14, 128) 49152 conv4_block5_0_relu[0][0] __________________________________________________________________________________________________ conv4_block5_1_bn (BatchNormali (None, 14, 14, 128) 512 conv4_block5_1_conv[0][0] __________________________________________________________________________________________________ conv4_block5_1_relu (Activation (None, 14, 14, 128) 0 conv4_block5_1_bn[0][0] __________________________________________________________________________________________________ conv4_block5_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block5_1_relu[0][0] __________________________________________________________________________________________________ conv4_block5_concat (Concatenat (None, 14, 14, 416) 0 conv4_block4_concat[0][0] conv4_block5_2_conv[0][0] __________________________________________________________________________________________________ conv4_block6_0_bn (BatchNormali (None, 14, 14, 416) 1664 conv4_block5_concat[0][0] __________________________________________________________________________________________________ conv4_block6_0_relu (Activation (None, 14, 14, 416) 0 conv4_block6_0_bn[0][0] __________________________________________________________________________________________________ conv4_block6_1_conv (Conv2D) (None, 14, 14, 128) 53248 conv4_block6_0_relu[0][0] __________________________________________________________________________________________________ conv4_block6_1_bn (BatchNormali (None, 14, 14, 128) 512 conv4_block6_1_conv[0][0] __________________________________________________________________________________________________ conv4_block6_1_relu (Activation (None, 14, 14, 128) 0 conv4_block6_1_bn[0][0] __________________________________________________________________________________________________ conv4_block6_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block6_1_relu[0][0] __________________________________________________________________________________________________ conv4_block6_concat (Concatenat (None, 14, 14, 448) 0 conv4_block5_concat[0][0] conv4_block6_2_conv[0][0] __________________________________________________________________________________________________ conv4_block7_0_bn (BatchNormali (None, 14, 14, 448) 1792 conv4_block6_concat[0][0] 
__________________________________________________________________________________________________ conv4_block7_0_relu (Activation (None, 14, 14, 448) 0 conv4_block7_0_bn[0][0] __________________________________________________________________________________________________ conv4_block7_1_conv (Conv2D) (None, 14, 14, 128) 57344 conv4_block7_0_relu[0][0] __________________________________________________________________________________________________ conv4_block7_1_bn (BatchNormali (None, 14, 14, 128) 512 conv4_block7_1_conv[0][0] __________________________________________________________________________________________________ conv4_block7_1_relu (Activation (None, 14, 14, 128) 0 conv4_block7_1_bn[0][0] __________________________________________________________________________________________________ conv4_block7_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block7_1_relu[0][0] __________________________________________________________________________________________________ conv4_block7_concat (Concatenat (None, 14, 14, 480) 0 conv4_block6_concat[0][0] conv4_block7_2_conv[0][0] __________________________________________________________________________________________________ conv4_block8_0_bn (BatchNormali (None, 14, 14, 480) 1920 conv4_block7_concat[0][0] __________________________________________________________________________________________________ conv4_block8_0_relu (Activation (None, 14, 14, 480) 0 conv4_block8_0_bn[0][0] __________________________________________________________________________________________________ conv4_block8_1_conv (Conv2D) (None, 14, 14, 128) 61440 conv4_block8_0_relu[0][0] __________________________________________________________________________________________________ conv4_block8_1_bn (BatchNormali (None, 14, 14, 128) 512 conv4_block8_1_conv[0][0] __________________________________________________________________________________________________ conv4_block8_1_relu (Activation (None, 14, 14, 128) 0 conv4_block8_1_bn[0][0] __________________________________________________________________________________________________ conv4_block8_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block8_1_relu[0][0] __________________________________________________________________________________________________ conv4_block8_concat (Concatenat (None, 14, 14, 512) 0 conv4_block7_concat[0][0] conv4_block8_2_conv[0][0] __________________________________________________________________________________________________ conv4_block9_0_bn (BatchNormali (None, 14, 14, 512) 2048 conv4_block8_concat[0][0] __________________________________________________________________________________________________ conv4_block9_0_relu (Activation (None, 14, 14, 512) 0 conv4_block9_0_bn[0][0] __________________________________________________________________________________________________ conv4_block9_1_conv (Conv2D) (None, 14, 14, 128) 65536 conv4_block9_0_relu[0][0] __________________________________________________________________________________________________ conv4_block9_1_bn (BatchNormali (None, 14, 14, 128) 512 conv4_block9_1_conv[0][0] __________________________________________________________________________________________________ conv4_block9_1_relu (Activation (None, 14, 14, 128) 0 conv4_block9_1_bn[0][0] __________________________________________________________________________________________________ conv4_block9_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block9_1_relu[0][0] __________________________________________________________________________________________________ 
conv4_block9_concat (Concatenat (None, 14, 14, 544) 0 conv4_block8_concat[0][0] conv4_block9_2_conv[0][0] __________________________________________________________________________________________________ conv4_block10_0_bn (BatchNormal (None, 14, 14, 544) 2176 conv4_block9_concat[0][0] __________________________________________________________________________________________________ conv4_block10_0_relu (Activatio (None, 14, 14, 544) 0 conv4_block10_0_bn[0][0] __________________________________________________________________________________________________ conv4_block10_1_conv (Conv2D) (None, 14, 14, 128) 69632 conv4_block10_0_relu[0][0] __________________________________________________________________________________________________ conv4_block10_1_bn (BatchNormal (None, 14, 14, 128) 512 conv4_block10_1_conv[0][0] __________________________________________________________________________________________________ conv4_block10_1_relu (Activatio (None, 14, 14, 128) 0 conv4_block10_1_bn[0][0] __________________________________________________________________________________________________ conv4_block10_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block10_1_relu[0][0] __________________________________________________________________________________________________ conv4_block10_concat (Concatena (None, 14, 14, 576) 0 conv4_block9_concat[0][0] conv4_block10_2_conv[0][0] __________________________________________________________________________________________________ conv4_block11_0_bn (BatchNormal (None, 14, 14, 576) 2304 conv4_block10_concat[0][0] __________________________________________________________________________________________________ conv4_block11_0_relu (Activatio (None, 14, 14, 576) 0 conv4_block11_0_bn[0][0] __________________________________________________________________________________________________ conv4_block11_1_conv (Conv2D) (None, 14, 14, 128) 73728 conv4_block11_0_relu[0][0] __________________________________________________________________________________________________ conv4_block11_1_bn (BatchNormal (None, 14, 14, 128) 512 conv4_block11_1_conv[0][0] __________________________________________________________________________________________________ conv4_block11_1_relu (Activatio (None, 14, 14, 128) 0 conv4_block11_1_bn[0][0] __________________________________________________________________________________________________ conv4_block11_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block11_1_relu[0][0] __________________________________________________________________________________________________ conv4_block11_concat (Concatena (None, 14, 14, 608) 0 conv4_block10_concat[0][0] conv4_block11_2_conv[0][0] __________________________________________________________________________________________________ conv4_block12_0_bn (BatchNormal (None, 14, 14, 608) 2432 conv4_block11_concat[0][0] __________________________________________________________________________________________________ conv4_block12_0_relu (Activatio (None, 14, 14, 608) 0 conv4_block12_0_bn[0][0] __________________________________________________________________________________________________ conv4_block12_1_conv (Conv2D) (None, 14, 14, 128) 77824 conv4_block12_0_relu[0][0] __________________________________________________________________________________________________ conv4_block12_1_bn (BatchNormal (None, 14, 14, 128) 512 conv4_block12_1_conv[0][0] __________________________________________________________________________________________________ conv4_block12_1_relu (Activatio (None, 14, 
14, 128) 0 conv4_block12_1_bn[0][0] __________________________________________________________________________________________________ conv4_block12_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block12_1_relu[0][0] __________________________________________________________________________________________________ conv4_block12_concat (Concatena (None, 14, 14, 640) 0 conv4_block11_concat[0][0] conv4_block12_2_conv[0][0] __________________________________________________________________________________________________ conv4_block13_0_bn (BatchNormal (None, 14, 14, 640) 2560 conv4_block12_concat[0][0] __________________________________________________________________________________________________ conv4_block13_0_relu (Activatio (None, 14, 14, 640) 0 conv4_block13_0_bn[0][0] __________________________________________________________________________________________________ conv4_block13_1_conv (Conv2D) (None, 14, 14, 128) 81920 conv4_block13_0_relu[0][0] __________________________________________________________________________________________________ conv4_block13_1_bn (BatchNormal (None, 14, 14, 128) 512 conv4_block13_1_conv[0][0] __________________________________________________________________________________________________ conv4_block13_1_relu (Activatio (None, 14, 14, 128) 0 conv4_block13_1_bn[0][0] __________________________________________________________________________________________________ conv4_block13_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block13_1_relu[0][0] __________________________________________________________________________________________________ conv4_block13_concat (Concatena (None, 14, 14, 672) 0 conv4_block12_concat[0][0] conv4_block13_2_conv[0][0] __________________________________________________________________________________________________ conv4_block14_0_bn (BatchNormal (None, 14, 14, 672) 2688 conv4_block13_concat[0][0] __________________________________________________________________________________________________ conv4_block14_0_relu (Activatio (None, 14, 14, 672) 0 conv4_block14_0_bn[0][0] __________________________________________________________________________________________________ conv4_block14_1_conv (Conv2D) (None, 14, 14, 128) 86016 conv4_block14_0_relu[0][0] __________________________________________________________________________________________________ conv4_block14_1_bn (BatchNormal (None, 14, 14, 128) 512 conv4_block14_1_conv[0][0] __________________________________________________________________________________________________ conv4_block14_1_relu (Activatio (None, 14, 14, 128) 0 conv4_block14_1_bn[0][0] __________________________________________________________________________________________________ conv4_block14_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block14_1_relu[0][0] __________________________________________________________________________________________________ conv4_block14_concat (Concatena (None, 14, 14, 704) 0 conv4_block13_concat[0][0] conv4_block14_2_conv[0][0] __________________________________________________________________________________________________ conv4_block15_0_bn (BatchNormal (None, 14, 14, 704) 2816 conv4_block14_concat[0][0] __________________________________________________________________________________________________ conv4_block15_0_relu (Activatio (None, 14, 14, 704) 0 conv4_block15_0_bn[0][0] __________________________________________________________________________________________________ conv4_block15_1_conv (Conv2D) (None, 14, 14, 128) 90112 conv4_block15_0_relu[0][0] 
__________________________________________________________________________________________________ conv4_block15_1_bn (BatchNormal (None, 14, 14, 128) 512 conv4_block15_1_conv[0][0] __________________________________________________________________________________________________ conv4_block15_1_relu (Activatio (None, 14, 14, 128) 0 conv4_block15_1_bn[0][0] __________________________________________________________________________________________________ conv4_block15_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block15_1_relu[0][0] __________________________________________________________________________________________________ conv4_block15_concat (Concatena (None, 14, 14, 736) 0 conv4_block14_concat[0][0] conv4_block15_2_conv[0][0] __________________________________________________________________________________________________ conv4_block16_0_bn (BatchNormal (None, 14, 14, 736) 2944 conv4_block15_concat[0][0] __________________________________________________________________________________________________ conv4_block16_0_relu (Activatio (None, 14, 14, 736) 0 conv4_block16_0_bn[0][0] __________________________________________________________________________________________________ conv4_block16_1_conv (Conv2D) (None, 14, 14, 128) 94208 conv4_block16_0_relu[0][0] __________________________________________________________________________________________________ conv4_block16_1_bn (BatchNormal (None, 14, 14, 128) 512 conv4_block16_1_conv[0][0] __________________________________________________________________________________________________ conv4_block16_1_relu (Activatio (None, 14, 14, 128) 0 conv4_block16_1_bn[0][0] __________________________________________________________________________________________________ conv4_block16_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block16_1_relu[0][0] __________________________________________________________________________________________________ conv4_block16_concat (Concatena (None, 14, 14, 768) 0 conv4_block15_concat[0][0] conv4_block16_2_conv[0][0] __________________________________________________________________________________________________ conv4_block17_0_bn (BatchNormal (None, 14, 14, 768) 3072 conv4_block16_concat[0][0] __________________________________________________________________________________________________ conv4_block17_0_relu (Activatio (None, 14, 14, 768) 0 conv4_block17_0_bn[0][0] __________________________________________________________________________________________________ conv4_block17_1_conv (Conv2D) (None, 14, 14, 128) 98304 conv4_block17_0_relu[0][0] __________________________________________________________________________________________________ conv4_block17_1_bn (BatchNormal (None, 14, 14, 128) 512 conv4_block17_1_conv[0][0] __________________________________________________________________________________________________ conv4_block17_1_relu (Activatio (None, 14, 14, 128) 0 conv4_block17_1_bn[0][0] __________________________________________________________________________________________________ conv4_block17_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block17_1_relu[0][0] __________________________________________________________________________________________________ conv4_block17_concat (Concatena (None, 14, 14, 800) 0 conv4_block16_concat[0][0] conv4_block17_2_conv[0][0] __________________________________________________________________________________________________ conv4_block18_0_bn (BatchNormal (None, 14, 14, 800) 3200 conv4_block17_concat[0][0] 
__________________________________________________________________________________________________ conv4_block18_0_relu (Activatio (None, 14, 14, 800) 0 conv4_block18_0_bn[0][0] __________________________________________________________________________________________________ conv4_block18_1_conv (Conv2D) (None, 14, 14, 128) 102400 conv4_block18_0_relu[0][0] __________________________________________________________________________________________________ conv4_block18_1_bn (BatchNormal (None, 14, 14, 128) 512 conv4_block18_1_conv[0][0] __________________________________________________________________________________________________ conv4_block18_1_relu (Activatio (None, 14, 14, 128) 0 conv4_block18_1_bn[0][0] __________________________________________________________________________________________________ conv4_block18_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block18_1_relu[0][0] __________________________________________________________________________________________________ conv4_block18_concat (Concatena (None, 14, 14, 832) 0 conv4_block17_concat[0][0] conv4_block18_2_conv[0][0] __________________________________________________________________________________________________ conv4_block19_0_bn (BatchNormal (None, 14, 14, 832) 3328 conv4_block18_concat[0][0] __________________________________________________________________________________________________ conv4_block19_0_relu (Activatio (None, 14, 14, 832) 0 conv4_block19_0_bn[0][0] __________________________________________________________________________________________________ conv4_block19_1_conv (Conv2D) (None, 14, 14, 128) 106496 conv4_block19_0_relu[0][0] __________________________________________________________________________________________________ conv4_block19_1_bn (BatchNormal (None, 14, 14, 128) 512 conv4_block19_1_conv[0][0] __________________________________________________________________________________________________ conv4_block19_1_relu (Activatio (None, 14, 14, 128) 0 conv4_block19_1_bn[0][0] __________________________________________________________________________________________________ conv4_block19_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block19_1_relu[0][0] __________________________________________________________________________________________________ conv4_block19_concat (Concatena (None, 14, 14, 864) 0 conv4_block18_concat[0][0] conv4_block19_2_conv[0][0] __________________________________________________________________________________________________ conv4_block20_0_bn (BatchNormal (None, 14, 14, 864) 3456 conv4_block19_concat[0][0] __________________________________________________________________________________________________ conv4_block20_0_relu (Activatio (None, 14, 14, 864) 0 conv4_block20_0_bn[0][0] __________________________________________________________________________________________________ conv4_block20_1_conv (Conv2D) (None, 14, 14, 128) 110592 conv4_block20_0_relu[0][0] __________________________________________________________________________________________________ conv4_block20_1_bn (BatchNormal (None, 14, 14, 128) 512 conv4_block20_1_conv[0][0] __________________________________________________________________________________________________ conv4_block20_1_relu (Activatio (None, 14, 14, 128) 0 conv4_block20_1_bn[0][0] __________________________________________________________________________________________________ conv4_block20_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block20_1_relu[0][0] 
__________________________________________________________________________________________________ conv4_block20_concat (Concatena (None, 14, 14, 896) 0 conv4_block19_concat[0][0] conv4_block20_2_conv[0][0] __________________________________________________________________________________________________ conv4_block21_0_bn (BatchNormal (None, 14, 14, 896) 3584 conv4_block20_concat[0][0] __________________________________________________________________________________________________ conv4_block21_0_relu (Activatio (None, 14, 14, 896) 0 conv4_block21_0_bn[0][0] __________________________________________________________________________________________________ conv4_block21_1_conv (Conv2D) (None, 14, 14, 128) 114688 conv4_block21_0_relu[0][0] __________________________________________________________________________________________________ conv4_block21_1_bn (BatchNormal (None, 14, 14, 128) 512 conv4_block21_1_conv[0][0] __________________________________________________________________________________________________ conv4_block21_1_relu (Activatio (None, 14, 14, 128) 0 conv4_block21_1_bn[0][0] __________________________________________________________________________________________________ conv4_block21_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block21_1_relu[0][0] __________________________________________________________________________________________________ conv4_block21_concat (Concatena (None, 14, 14, 928) 0 conv4_block20_concat[0][0] conv4_block21_2_conv[0][0] __________________________________________________________________________________________________ conv4_block22_0_bn (BatchNormal (None, 14, 14, 928) 3712 conv4_block21_concat[0][0] __________________________________________________________________________________________________ conv4_block22_0_relu (Activatio (None, 14, 14, 928) 0 conv4_block22_0_bn[0][0] __________________________________________________________________________________________________ conv4_block22_1_conv (Conv2D) (None, 14, 14, 128) 118784 conv4_block22_0_relu[0][0] __________________________________________________________________________________________________ conv4_block22_1_bn (BatchNormal (None, 14, 14, 128) 512 conv4_block22_1_conv[0][0] __________________________________________________________________________________________________ conv4_block22_1_relu (Activatio (None, 14, 14, 128) 0 conv4_block22_1_bn[0][0] __________________________________________________________________________________________________ conv4_block22_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block22_1_relu[0][0] __________________________________________________________________________________________________ conv4_block22_concat (Concatena (None, 14, 14, 960) 0 conv4_block21_concat[0][0] conv4_block22_2_conv[0][0] __________________________________________________________________________________________________ conv4_block23_0_bn (BatchNormal (None, 14, 14, 960) 3840 conv4_block22_concat[0][0] __________________________________________________________________________________________________ conv4_block23_0_relu (Activatio (None, 14, 14, 960) 0 conv4_block23_0_bn[0][0] __________________________________________________________________________________________________ conv4_block23_1_conv (Conv2D) (None, 14, 14, 128) 122880 conv4_block23_0_relu[0][0] __________________________________________________________________________________________________ conv4_block23_1_bn (BatchNormal (None, 14, 14, 128) 512 conv4_block23_1_conv[0][0] 
__________________________________________________________________________________________________ conv4_block23_1_relu (Activatio (None, 14, 14, 128) 0 conv4_block23_1_bn[0][0] __________________________________________________________________________________________________ conv4_block23_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block23_1_relu[0][0] __________________________________________________________________________________________________ conv4_block23_concat (Concatena (None, 14, 14, 992) 0 conv4_block22_concat[0][0] conv4_block23_2_conv[0][0] __________________________________________________________________________________________________ conv4_block24_0_bn (BatchNormal (None, 14, 14, 992) 3968 conv4_block23_concat[0][0] __________________________________________________________________________________________________ conv4_block24_0_relu (Activatio (None, 14, 14, 992) 0 conv4_block24_0_bn[0][0] __________________________________________________________________________________________________ conv4_block24_1_conv (Conv2D) (None, 14, 14, 128) 126976 conv4_block24_0_relu[0][0] __________________________________________________________________________________________________ conv4_block24_1_bn (BatchNormal (None, 14, 14, 128) 512 conv4_block24_1_conv[0][0] __________________________________________________________________________________________________ conv4_block24_1_relu (Activatio (None, 14, 14, 128) 0 conv4_block24_1_bn[0][0] __________________________________________________________________________________________________ conv4_block24_2_conv (Conv2D) (None, 14, 14, 32) 36864 conv4_block24_1_relu[0][0] __________________________________________________________________________________________________ conv4_block24_concat (Concatena (None, 14, 14, 1024) 0 conv4_block23_concat[0][0] conv4_block24_2_conv[0][0] __________________________________________________________________________________________________ pool4_bn (BatchNormalization) (None, 14, 14, 1024) 4096 conv4_block24_concat[0][0] __________________________________________________________________________________________________ pool4_relu (Activation) (None, 14, 14, 1024) 0 pool4_bn[0][0] __________________________________________________________________________________________________ pool4_conv (Conv2D) (None, 14, 14, 512) 524288 pool4_relu[0][0] __________________________________________________________________________________________________ pool4_pool (AveragePooling2D) (None, 7, 7, 512) 0 pool4_conv[0][0] __________________________________________________________________________________________________ conv5_block1_0_bn (BatchNormali (None, 7, 7, 512) 2048 pool4_pool[0][0] __________________________________________________________________________________________________ conv5_block1_0_relu (Activation (None, 7, 7, 512) 0 conv5_block1_0_bn[0][0] __________________________________________________________________________________________________ conv5_block1_1_conv (Conv2D) (None, 7, 7, 128) 65536 conv5_block1_0_relu[0][0] __________________________________________________________________________________________________ conv5_block1_1_bn (BatchNormali (None, 7, 7, 128) 512 conv5_block1_1_conv[0][0] __________________________________________________________________________________________________ conv5_block1_1_relu (Activation (None, 7, 7, 128) 0 conv5_block1_1_bn[0][0] __________________________________________________________________________________________________ conv5_block1_2_conv (Conv2D) (None, 7, 7, 32) 
36864 conv5_block1_1_relu[0][0] __________________________________________________________________________________________________ conv5_block1_concat (Concatenat (None, 7, 7, 544) 0 pool4_pool[0][0] conv5_block1_2_conv[0][0] __________________________________________________________________________________________________ conv5_block2_0_bn (BatchNormali (None, 7, 7, 544) 2176 conv5_block1_concat[0][0] __________________________________________________________________________________________________ conv5_block2_0_relu (Activation (None, 7, 7, 544) 0 conv5_block2_0_bn[0][0] __________________________________________________________________________________________________ conv5_block2_1_conv (Conv2D) (None, 7, 7, 128) 69632 conv5_block2_0_relu[0][0] __________________________________________________________________________________________________ conv5_block2_1_bn (BatchNormali (None, 7, 7, 128) 512 conv5_block2_1_conv[0][0] __________________________________________________________________________________________________ conv5_block2_1_relu (Activation (None, 7, 7, 128) 0 conv5_block2_1_bn[0][0] __________________________________________________________________________________________________ conv5_block2_2_conv (Conv2D) (None, 7, 7, 32) 36864 conv5_block2_1_relu[0][0] __________________________________________________________________________________________________ conv5_block2_concat (Concatenat (None, 7, 7, 576) 0 conv5_block1_concat[0][0] conv5_block2_2_conv[0][0] __________________________________________________________________________________________________ conv5_block3_0_bn (BatchNormali (None, 7, 7, 576) 2304 conv5_block2_concat[0][0] __________________________________________________________________________________________________ conv5_block3_0_relu (Activation (None, 7, 7, 576) 0 conv5_block3_0_bn[0][0] __________________________________________________________________________________________________ conv5_block3_1_conv (Conv2D) (None, 7, 7, 128) 73728 conv5_block3_0_relu[0][0] __________________________________________________________________________________________________ conv5_block3_1_bn (BatchNormali (None, 7, 7, 128) 512 conv5_block3_1_conv[0][0] __________________________________________________________________________________________________ conv5_block3_1_relu (Activation (None, 7, 7, 128) 0 conv5_block3_1_bn[0][0] __________________________________________________________________________________________________ conv5_block3_2_conv (Conv2D) (None, 7, 7, 32) 36864 conv5_block3_1_relu[0][0] __________________________________________________________________________________________________ conv5_block3_concat (Concatenat (None, 7, 7, 608) 0 conv5_block2_concat[0][0] conv5_block3_2_conv[0][0] __________________________________________________________________________________________________ conv5_block4_0_bn (BatchNormali (None, 7, 7, 608) 2432 conv5_block3_concat[0][0] __________________________________________________________________________________________________ conv5_block4_0_relu (Activation (None, 7, 7, 608) 0 conv5_block4_0_bn[0][0] __________________________________________________________________________________________________ conv5_block4_1_conv (Conv2D) (None, 7, 7, 128) 77824 conv5_block4_0_relu[0][0] __________________________________________________________________________________________________ conv5_block4_1_bn (BatchNormali (None, 7, 7, 128) 512 conv5_block4_1_conv[0][0] 
__________________________________________________________________________________________________ conv5_block4_1_relu (Activation (None, 7, 7, 128) 0 conv5_block4_1_bn[0][0] __________________________________________________________________________________________________ conv5_block4_2_conv (Conv2D) (None, 7, 7, 32) 36864 conv5_block4_1_relu[0][0] __________________________________________________________________________________________________ conv5_block4_concat (Concatenat (None, 7, 7, 640) 0 conv5_block3_concat[0][0] conv5_block4_2_conv[0][0] __________________________________________________________________________________________________ conv5_block5_0_bn (BatchNormali (None, 7, 7, 640) 2560 conv5_block4_concat[0][0] __________________________________________________________________________________________________ conv5_block5_0_relu (Activation (None, 7, 7, 640) 0 conv5_block5_0_bn[0][0] __________________________________________________________________________________________________ conv5_block5_1_conv (Conv2D) (None, 7, 7, 128) 81920 conv5_block5_0_relu[0][0] __________________________________________________________________________________________________ conv5_block5_1_bn (BatchNormali (None, 7, 7, 128) 512 conv5_block5_1_conv[0][0] __________________________________________________________________________________________________ conv5_block5_1_relu (Activation (None, 7, 7, 128) 0 conv5_block5_1_bn[0][0] __________________________________________________________________________________________________ conv5_block5_2_conv (Conv2D) (None, 7, 7, 32) 36864 conv5_block5_1_relu[0][0] __________________________________________________________________________________________________ conv5_block5_concat (Concatenat (None, 7, 7, 672) 0 conv5_block4_concat[0][0] conv5_block5_2_conv[0][0] __________________________________________________________________________________________________ conv5_block6_0_bn (BatchNormali (None, 7, 7, 672) 2688 conv5_block5_concat[0][0] __________________________________________________________________________________________________ conv5_block6_0_relu (Activation (None, 7, 7, 672) 0 conv5_block6_0_bn[0][0] __________________________________________________________________________________________________ conv5_block6_1_conv (Conv2D) (None, 7, 7, 128) 86016 conv5_block6_0_relu[0][0] __________________________________________________________________________________________________ conv5_block6_1_bn (BatchNormali (None, 7, 7, 128) 512 conv5_block6_1_conv[0][0] __________________________________________________________________________________________________ conv5_block6_1_relu (Activation (None, 7, 7, 128) 0 conv5_block6_1_bn[0][0] __________________________________________________________________________________________________ conv5_block6_2_conv (Conv2D) (None, 7, 7, 32) 36864 conv5_block6_1_relu[0][0] __________________________________________________________________________________________________ conv5_block6_concat (Concatenat (None, 7, 7, 704) 0 conv5_block5_concat[0][0] conv5_block6_2_conv[0][0] __________________________________________________________________________________________________ conv5_block7_0_bn (BatchNormali (None, 7, 7, 704) 2816 conv5_block6_concat[0][0] __________________________________________________________________________________________________ conv5_block7_0_relu (Activation (None, 7, 7, 704) 0 conv5_block7_0_bn[0][0] __________________________________________________________________________________________________ 
conv5_block7_1_conv (Conv2D) (None, 7, 7, 128) 90112 conv5_block7_0_relu[0][0] __________________________________________________________________________________________________ conv5_block7_1_bn (BatchNormali (None, 7, 7, 128) 512 conv5_block7_1_conv[0][0] __________________________________________________________________________________________________ conv5_block7_1_relu (Activation (None, 7, 7, 128) 0 conv5_block7_1_bn[0][0] __________________________________________________________________________________________________ conv5_block7_2_conv (Conv2D) (None, 7, 7, 32) 36864 conv5_block7_1_relu[0][0] __________________________________________________________________________________________________ conv5_block7_concat (Concatenat (None, 7, 7, 736) 0 conv5_block6_concat[0][0] conv5_block7_2_conv[0][0] __________________________________________________________________________________________________ conv5_block8_0_bn (BatchNormali (None, 7, 7, 736) 2944 conv5_block7_concat[0][0] __________________________________________________________________________________________________ conv5_block8_0_relu (Activation (None, 7, 7, 736) 0 conv5_block8_0_bn[0][0] __________________________________________________________________________________________________ conv5_block8_1_conv (Conv2D) (None, 7, 7, 128) 94208 conv5_block8_0_relu[0][0] __________________________________________________________________________________________________ conv5_block8_1_bn (BatchNormali (None, 7, 7, 128) 512 conv5_block8_1_conv[0][0] __________________________________________________________________________________________________ conv5_block8_1_relu (Activation (None, 7, 7, 128) 0 conv5_block8_1_bn[0][0] __________________________________________________________________________________________________ conv5_block8_2_conv (Conv2D) (None, 7, 7, 32) 36864 conv5_block8_1_relu[0][0] __________________________________________________________________________________________________ conv5_block8_concat (Concatenat (None, 7, 7, 768) 0 conv5_block7_concat[0][0] conv5_block8_2_conv[0][0] __________________________________________________________________________________________________ conv5_block9_0_bn (BatchNormali (None, 7, 7, 768) 3072 conv5_block8_concat[0][0] __________________________________________________________________________________________________ conv5_block9_0_relu (Activation (None, 7, 7, 768) 0 conv5_block9_0_bn[0][0] __________________________________________________________________________________________________ conv5_block9_1_conv (Conv2D) (None, 7, 7, 128) 98304 conv5_block9_0_relu[0][0] __________________________________________________________________________________________________ conv5_block9_1_bn (BatchNormali (None, 7, 7, 128) 512 conv5_block9_1_conv[0][0] __________________________________________________________________________________________________ conv5_block9_1_relu (Activation (None, 7, 7, 128) 0 conv5_block9_1_bn[0][0] __________________________________________________________________________________________________ conv5_block9_2_conv (Conv2D) (None, 7, 7, 32) 36864 conv5_block9_1_relu[0][0] __________________________________________________________________________________________________ conv5_block9_concat (Concatenat (None, 7, 7, 800) 0 conv5_block8_concat[0][0] conv5_block9_2_conv[0][0] __________________________________________________________________________________________________ conv5_block10_0_bn (BatchNormal (None, 7, 7, 800) 3200 conv5_block9_concat[0][0] 
__________________________________________________________________________________________________ conv5_block10_0_relu (Activatio (None, 7, 7, 800) 0 conv5_block10_0_bn[0][0] __________________________________________________________________________________________________ conv5_block10_1_conv (Conv2D) (None, 7, 7, 128) 102400 conv5_block10_0_relu[0][0] __________________________________________________________________________________________________ conv5_block10_1_bn (BatchNormal (None, 7, 7, 128) 512 conv5_block10_1_conv[0][0] __________________________________________________________________________________________________ conv5_block10_1_relu (Activatio (None, 7, 7, 128) 0 conv5_block10_1_bn[0][0] __________________________________________________________________________________________________ conv5_block10_2_conv (Conv2D) (None, 7, 7, 32) 36864 conv5_block10_1_relu[0][0] __________________________________________________________________________________________________ conv5_block10_concat (Concatena (None, 7, 7, 832) 0 conv5_block9_concat[0][0] conv5_block10_2_conv[0][0] __________________________________________________________________________________________________ conv5_block11_0_bn (BatchNormal (None, 7, 7, 832) 3328 conv5_block10_concat[0][0] __________________________________________________________________________________________________ conv5_block11_0_relu (Activatio (None, 7, 7, 832) 0 conv5_block11_0_bn[0][0] __________________________________________________________________________________________________ conv5_block11_1_conv (Conv2D) (None, 7, 7, 128) 106496 conv5_block11_0_relu[0][0] __________________________________________________________________________________________________ conv5_block11_1_bn (BatchNormal (None, 7, 7, 128) 512 conv5_block11_1_conv[0][0] __________________________________________________________________________________________________ conv5_block11_1_relu (Activatio (None, 7, 7, 128) 0 conv5_block11_1_bn[0][0] __________________________________________________________________________________________________ conv5_block11_2_conv (Conv2D) (None, 7, 7, 32) 36864 conv5_block11_1_relu[0][0] __________________________________________________________________________________________________ conv5_block11_concat (Concatena (None, 7, 7, 864) 0 conv5_block10_concat[0][0] conv5_block11_2_conv[0][0] __________________________________________________________________________________________________ conv5_block12_0_bn (BatchNormal (None, 7, 7, 864) 3456 conv5_block11_concat[0][0] __________________________________________________________________________________________________ conv5_block12_0_relu (Activatio (None, 7, 7, 864) 0 conv5_block12_0_bn[0][0] __________________________________________________________________________________________________ conv5_block12_1_conv (Conv2D) (None, 7, 7, 128) 110592 conv5_block12_0_relu[0][0] __________________________________________________________________________________________________ conv5_block12_1_bn (BatchNormal (None, 7, 7, 128) 512 conv5_block12_1_conv[0][0] __________________________________________________________________________________________________ conv5_block12_1_relu (Activatio (None, 7, 7, 128) 0 conv5_block12_1_bn[0][0] __________________________________________________________________________________________________ conv5_block12_2_conv (Conv2D) (None, 7, 7, 32) 36864 conv5_block12_1_relu[0][0] __________________________________________________________________________________________________ 
conv5_block12_concat (Concatena (None, 7, 7, 896) 0 conv5_block11_concat[0][0] conv5_block12_2_conv[0][0] __________________________________________________________________________________________________ conv5_block13_0_bn (BatchNormal (None, 7, 7, 896) 3584 conv5_block12_concat[0][0] __________________________________________________________________________________________________ conv5_block13_0_relu (Activatio (None, 7, 7, 896) 0 conv5_block13_0_bn[0][0] __________________________________________________________________________________________________ conv5_block13_1_conv (Conv2D) (None, 7, 7, 128) 114688 conv5_block13_0_relu[0][0] __________________________________________________________________________________________________ conv5_block13_1_bn (BatchNormal (None, 7, 7, 128) 512 conv5_block13_1_conv[0][0] __________________________________________________________________________________________________ conv5_block13_1_relu (Activatio (None, 7, 7, 128) 0 conv5_block13_1_bn[0][0] __________________________________________________________________________________________________ conv5_block13_2_conv (Conv2D) (None, 7, 7, 32) 36864 conv5_block13_1_relu[0][0] __________________________________________________________________________________________________ conv5_block13_concat (Concatena (None, 7, 7, 928) 0 conv5_block12_concat[0][0] conv5_block13_2_conv[0][0] __________________________________________________________________________________________________ conv5_block14_0_bn (BatchNormal (None, 7, 7, 928) 3712 conv5_block13_concat[0][0] __________________________________________________________________________________________________ conv5_block14_0_relu (Activatio (None, 7, 7, 928) 0 conv5_block14_0_bn[0][0] __________________________________________________________________________________________________ conv5_block14_1_conv (Conv2D) (None, 7, 7, 128) 118784 conv5_block14_0_relu[0][0] __________________________________________________________________________________________________ conv5_block14_1_bn (BatchNormal (None, 7, 7, 128) 512 conv5_block14_1_conv[0][0] __________________________________________________________________________________________________ conv5_block14_1_relu (Activatio (None, 7, 7, 128) 0 conv5_block14_1_bn[0][0] __________________________________________________________________________________________________ conv5_block14_2_conv (Conv2D) (None, 7, 7, 32) 36864 conv5_block14_1_relu[0][0] __________________________________________________________________________________________________ conv5_block14_concat (Concatena (None, 7, 7, 960) 0 conv5_block13_concat[0][0] conv5_block14_2_conv[0][0] __________________________________________________________________________________________________ conv5_block15_0_bn (BatchNormal (None, 7, 7, 960) 3840 conv5_block14_concat[0][0] __________________________________________________________________________________________________ conv5_block15_0_relu (Activatio (None, 7, 7, 960) 0 conv5_block15_0_bn[0][0] __________________________________________________________________________________________________ conv5_block15_1_conv (Conv2D) (None, 7, 7, 128) 122880 conv5_block15_0_relu[0][0] __________________________________________________________________________________________________ conv5_block15_1_bn (BatchNormal (None, 7, 7, 128) 512 conv5_block15_1_conv[0][0] __________________________________________________________________________________________________ conv5_block15_1_relu (Activatio (None, 7, 7, 128) 0 
conv5_block15_1_bn[0][0] __________________________________________________________________________________________________ conv5_block15_2_conv (Conv2D) (None, 7, 7, 32) 36864 conv5_block15_1_relu[0][0] __________________________________________________________________________________________________ conv5_block15_concat (Concatena (None, 7, 7, 992) 0 conv5_block14_concat[0][0] conv5_block15_2_conv[0][0] __________________________________________________________________________________________________ conv5_block16_0_bn (BatchNormal (None, 7, 7, 992) 3968 conv5_block15_concat[0][0] __________________________________________________________________________________________________ conv5_block16_0_relu (Activatio (None, 7, 7, 992) 0 conv5_block16_0_bn[0][0] __________________________________________________________________________________________________ conv5_block16_1_conv (Conv2D) (None, 7, 7, 128) 126976 conv5_block16_0_relu[0][0] __________________________________________________________________________________________________ conv5_block16_1_bn (BatchNormal (None, 7, 7, 128) 512 conv5_block16_1_conv[0][0] __________________________________________________________________________________________________ conv5_block16_1_relu (Activatio (None, 7, 7, 128) 0 conv5_block16_1_bn[0][0] __________________________________________________________________________________________________ conv5_block16_2_conv (Conv2D) (None, 7, 7, 32) 36864 conv5_block16_1_relu[0][0] __________________________________________________________________________________________________ conv5_block16_concat (Concatena (None, 7, 7, 1024) 0 conv5_block15_concat[0][0] conv5_block16_2_conv[0][0] __________________________________________________________________________________________________ bn (BatchNormalization) (None, 7, 7, 1024) 4096 conv5_block16_concat[0][0] __________________________________________________________________________________________________ relu (Activation) (None, 7, 7, 1024) 0 bn[0][0] __________________________________________________________________________________________________ global_average_pooling2d_1 (Glo (None, 1024) 0 relu[0][0] __________________________________________________________________________________________________ dropout_1 (Dropout) (None, 1024) 0 global_average_pooling2d_1[0][0] __________________________________________________________________________________________________ dense_1 (Dense) (None, 1024) 1049600 dropout_1[0][0] __________________________________________________________________________________________________ dropout_2 (Dropout) (None, 1024) 0 dense_1[0][0] __________________________________________________________________________________________________ final_output (Dense) (None, 1103) 1130575 dropout_2[0][0] ================================================================================================== Total params: 9,217,679 Trainable params: 2,180,175 Non-trainable params: 7,037,504 __________________________________________________________________________________________________ ###Markdown Train top layers ###Code STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size history = model.fit_generator(generator=train_generator, steps_per_epoch=STEP_SIZE_TRAIN, validation_data=valid_generator, validation_steps=STEP_SIZE_VALID, epochs=EPOCHS, callbacks=callbacks, verbose=2, max_queue_size=16, workers=3, use_multiprocessing=True) ###Output WARNING:tensorflow:From 
/opt/conda/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.cast instead. Epoch 1/30 - 705s - loss: 0.0447 - acc: 0.9850 - categorical_accuracy: 0.0385 - val_loss: 0.0149 - val_acc: 0.9971 - val_categorical_accuracy: 0.0783 Epoch 2/30 - 684s - loss: 0.0166 - acc: 0.9970 - categorical_accuracy: 0.0737 - val_loss: 0.0138 - val_acc: 0.9971 - val_categorical_accuracy: 0.0911 Epoch 3/30 - 682s - loss: 0.0149 - acc: 0.9971 - categorical_accuracy: 0.0938 - val_loss: 0.0132 - val_acc: 0.9972 - val_categorical_accuracy: 0.1161 Epoch 4/30 - 677s - loss: 0.0141 - acc: 0.9971 - categorical_accuracy: 0.1080 - val_loss: 0.0130 - val_acc: 0.9972 - val_categorical_accuracy: 0.1420 Epoch 5/30 - 683s - loss: 0.0136 - acc: 0.9971 - categorical_accuracy: 0.1157 - val_loss: 0.0127 - val_acc: 0.9972 - val_categorical_accuracy: 0.1053 Epoch 6/30 - 684s - loss: 0.0133 - acc: 0.9971 - categorical_accuracy: 0.1196 - val_loss: 0.0127 - val_acc: 0.9972 - val_categorical_accuracy: 0.1186 Epoch 7/30 - 678s - loss: 0.0129 - acc: 0.9972 - categorical_accuracy: 0.1247 - val_loss: 0.0126 - val_acc: 0.9972 - val_categorical_accuracy: 0.1008 Epoch 8/30 - 689s - loss: 0.0127 - acc: 0.9972 - categorical_accuracy: 0.1276 - val_loss: 0.0124 - val_acc: 0.9972 - val_categorical_accuracy: 0.1136 Epoch 9/30 - 684s - loss: 0.0125 - acc: 0.9972 - categorical_accuracy: 0.1292 - val_loss: 0.0124 - val_acc: 0.9972 - val_categorical_accuracy: 0.1326 Epoch 10/30 - 679s - loss: 0.0123 - acc: 0.9972 - categorical_accuracy: 0.1335 - val_loss: 0.0123 - val_acc: 0.9972 - val_categorical_accuracy: 0.1199 Epoch 11/30 - 686s - loss: 0.0122 - acc: 0.9972 - categorical_accuracy: 0.1321 - val_loss: 0.0122 - val_acc: 0.9972 - val_categorical_accuracy: 0.1143 Epoch 12/30 - 686s - loss: 0.0120 - acc: 0.9972 - categorical_accuracy: 0.1384 - val_loss: 0.0123 - val_acc: 0.9972 - val_categorical_accuracy: 0.1285 Epoch 13/30 - 678s - loss: 0.0120 - acc: 0.9972 - categorical_accuracy: 0.1375 - val_loss: 0.0122 - val_acc: 0.9972 - val_categorical_accuracy: 0.1204 Epoch 14/30 - 688s - loss: 0.0119 - acc: 0.9972 - categorical_accuracy: 0.1384 - val_loss: 0.0124 - val_acc: 0.9972 - val_categorical_accuracy: 0.1152 Epoch 15/30 - 686s - loss: 0.0118 - acc: 0.9972 - categorical_accuracy: 0.1438 - val_loss: 0.0123 - val_acc: 0.9972 - val_categorical_accuracy: 0.1155 Epoch 16/30 - 671s - loss: 0.0117 - acc: 0.9972 - categorical_accuracy: 0.1383 - val_loss: 0.0123 - val_acc: 0.9972 - val_categorical_accuracy: 0.1296 Epoch 00016: early stopping ###Markdown Fine-tune the complete model ###Code for layer in model.layers: layer.trainable = True metrics = ["accuracy", "categorical_accuracy"] lrate = LearningRateScheduler(step_decay) es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=(ES_PATIENCE)) callbacks = [es] optimizer = optimizers.Adam(lr=0.0001) model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=metrics) model.summary() STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size history = model.fit_generator(generator=train_generator, steps_per_epoch=STEP_SIZE_TRAIN, validation_data=valid_generator, validation_steps=STEP_SIZE_VALID, epochs=EPOCHS, callbacks=callbacks, verbose=2, max_queue_size=16, workers=3, use_multiprocessing=True) ###Output Epoch 1/30 ###Markdown Complete model graph loss ###Code 
sns.set_style("whitegrid") fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex='col', figsize=(20,7)) ax1.plot(history.history['loss'], label='Train loss') ax1.plot(history.history['val_loss'], label='Validation loss') ax1.legend(loc='best') ax1.set_title('Loss') ax2.plot(history.history['acc'], label='Train Accuracy') ax2.plot(history.history['val_acc'], label='Validation accuracy') ax2.legend(loc='best') ax2.set_title('Accuracy') ax3.plot(history.history['categorical_accuracy'], label='Train Cat Accuracy') ax3.plot(history.history['val_categorical_accuracy'], label='Validation Cat Accuracy') ax3.legend(loc='best') ax3.set_title('Cat Accuracy') plt.xlabel('Epochs') sns.despine() plt.show() ###Output _____no_output_____ ###Markdown Find best threshold value ###Code lastFullValPred = np.empty((0, N_CLASSES)) lastFullValLabels = np.empty((0, N_CLASSES)) for i in range(STEP_SIZE_VALID+1): im, lbl = next(valid_generator) scores = model.predict(im, batch_size=valid_generator.batch_size) lastFullValPred = np.append(lastFullValPred, scores, axis=0) lastFullValLabels = np.append(lastFullValLabels, lbl, axis=0) print(lastFullValPred.shape, lastFullValLabels.shape) def find_best_fixed_threshold(preds, targs, do_plot=True): score = [] thrs = np.arange(0, 0.5, 0.01) for thr in thrs: score.append(custom_f2(targs, (preds > thr).astype(int))) score = np.array(score) pm = score.argmax() best_thr, best_score = thrs[pm], score[pm].item() print(f'thr={best_thr:.3f}', f'F2={best_score:.3f}') if do_plot: plt.plot(thrs, score) plt.vlines(x=best_thr, ymin=score.min(), ymax=score.max()) plt.text(best_thr+0.03, best_score-0.01, f'$F_{2}=${best_score:.3f}', fontsize=14); plt.show() return best_thr, best_score threshold, best_score = find_best_fixed_threshold(lastFullValPred, lastFullValLabels, do_plot=True) ###Output thr=0.140 F2=0.393 ###Markdown Apply model to test set and output predictions ###Code test_generator.reset() STEP_SIZE_TEST = test_generator.n//test_generator.batch_size preds = model.predict_generator(test_generator, steps=STEP_SIZE_TEST) predictions = [] for pred_ar in preds: valid = [] for idx, pred in enumerate(pred_ar): if pred > threshold: valid.append(idx) if len(valid) == 0: valid.append(np.argmax(pred_ar)) predictions.append(valid) filenames = test_generator.filenames label_map = {valid_generator.class_indices[k] : k for k in valid_generator.class_indices} results = pd.DataFrame({'id':filenames, 'attribute_ids':predictions}) results['id'] = results['id'].map(lambda x: str(x)[:-4]) results['attribute_ids'] = results['attribute_ids'].apply(lambda x: list(map(label_map.get, x))) results["attribute_ids"] = results["attribute_ids"].apply(lambda x: ' '.join(x)) results.to_csv('submission.csv',index=False) results.head(10) ###Output _____no_output_____
spamIt.ipynb
###Markdown Spam or Ham?Implement a spam filter in Python using the Naive Bayes algorithm to classify the emails as spam or not-spam (a.k.a. ham). Check ModulesCheck your system for the required dependencies. ###Code import sys dependencies = ["nltk", "numpy", "pandas", "scipy", "sklearn", "pickle", "re"] for module in dependencies: print("\nChecking for " + module + "...") try: # Import module from string variable: # https://stackoverflow.com/questions/8718885/import-module-from-string-variable # To import using a variable, call __import__(name) module_obj = __import__(module) # To contain the module, create a global object using globals() globals()[module] = module_obj except ImportError: print("Install " + module + " before continuing") print("In a terminal type the following commands:") print("python get-pip.py") print("pip install " + module + "\n") sys.exit(1) print("\nSystem is ready!") ###Output Checking for nltk... Checking for numpy... Checking for pandas... Checking for scipy... Checking for sklearn... Checking for pickle... Checking for re... System is ready! ###Markdown Download DatasetDownload a set of spam and ham actual emails. Each email is a separate plain text file. Unzip the compressed tar files, read the text and load it into a Pandas Dataframe. Convert the dataframe to a Pickle object. ###Code import urllib.request import os import tarfile import pickle import pandas as pd print("Downloading Enron emails in the Downloads folder...") # Get the user's Downloads folder path downloads = os.path.join(os.environ['HOME'] + "/Downloads") url = "http://www.aueb.gr/users/ion/data/enron-spam/preprocessed/" enron_dir = os.path.join(downloads, 'Enron emails') enron_files = ['enron1.tar.gz', 'enron2.tar.gz', 'enron3.tar.gz', 'enron4.tar.gz', 'enron5.tar.gz', 'enron6.tar.gz'] def download(): """ Download Enron emails if missing. """ # Create the directories. if not os.path.exists(enron_dir): os.makedirs(enron_dir) # Download the files that not exist. for file in enron_files: path = os.path.join(enron_dir, file) if not os.path.exists(path): urllib.request.urlretrieve(url + file, path) def extract_emails(fname): """ Extract the zipped emails and load them into a pandas df. Args: fname (str): the files with tar.gz extension Returns: pandas df: a pandas dataframe of emails """ rows = [] tfile = tarfile.open(fname, 'r:gz') for member in tfile.getmembers(): if 'ham' in member.name: f = tfile.extractfile(member) if f is not None: row = f.read() rows.append({'message': row, 'class': 'ham'}) if 'spam' in member.name: f = tfile.extractfile(member) if f is not None: row = f.read() rows.append({'message': row, 'class': 'spam'}) tfile.close() return pd.DataFrame(rows) def populate_df_and_pickle(): """ Populate the df with all the emails and save it to a pickle object. """ if not os.path.exists(downloads + "/emails.pickle"): emails_df = pd.DataFrame({'message': [], 'class': []}) for file in enron_files: unzipped_file = extract_emails(os.path.join(enron_dir, file)) emails_df = emails_df.append(unzipped_file) emails_df.to_pickle(downloads + "/emails.pickle") if __name__ == '__main__': download() populate_df_and_pickle() print("Download, unzip, and save to pickle done!") with open(downloads + '/emails.pickle', 'rb') as f: emails_df = pickle.load(f) # Translate bytes objects into strings. emails_df['message'] = emails_df['message'].apply(lambda x: x.decode('latin-1')) # Reset pandas df index. emails_df = emails_df.reset_index(drop=True) # Map 'spam' to 1 and 'ham' to 0. 
emails_df['class'] = emails_df['class'].map({'spam':1, 'ham':0})
print(emails_df.index)
emails_df.shape
emails_df.iloc[25000].values
###Output
_____no_output_____
###Markdown Clean the DataRemove the punctuation, any urls and numbers. Finally, convert every word to lower case.
###Code
from string import punctuation
import re

def clean_email(email):
    """ Remove all punctuation, urls, numbers, and newlines.
    Convert to lower case.
    Args:
        email (unicode): the email
    Returns:
        email (unicode): only the text of the email
    """
    email = re.sub(r'http\S+', ' ', email)
    email = re.sub(r"\d+", " ", email)
    email = email.replace('\n', ' ')
    email = email.translate(str.maketrans("", "", punctuation))
    email = email.lower()
    return email

emails_df['message'] = emails_df['message'].apply(clean_email)
emails_df.iloc[25000].values
###Output
_____no_output_____
###Markdown Prepare the DataSplit the text string into individual words and stem each word. Remove English stop words. Split and StemSplit the text by white spaces and link the different forms of the same word to each other, using stemming. For example "responsiveness" and "response" have the same stem/root - "respons". Remove Stop WordsSome words such as “the” or “is” appear in all emails and don’t have much content to them. These words are not going to help the algorithm distinguish spam from ham. Such words are called stopwords and they can be disregarded during classification.
###Code
from nltk.stem.snowball import SnowballStemmer
# nltk.download('wordnet') # uncomment to download 'wordnet'
from nltk.corpus import wordnet as wn

def preprocess_text(email):
    """ Split the text string into individual words, stem each word,
    and append the stemmed word to words. Make sure there's a single
    space between each stemmed word.
    Args:
        email (unicode): the email
    Returns:
        words (unicode): the text of the email
    """
    words = ""
    # Create the stemmer.
    stemmer = SnowballStemmer("english")
    # Split text into words.
    email = email.split()
    for word in email:
        # Optional: remove unknown words.
        # if wn.synsets(word):
        words = words + stemmer.stem(word) + " "
    return words

emails_df['message'] = emails_df['message'].apply(preprocess_text)
emails_df.iloc[25000].values
###Output
_____no_output_____
###Markdown Machine Learning Vectorize Words and Split Data to Train/Test SetsTransform the words into a tf-idf matrix using the sklearn TfIdf transformation. Then, create train/test sets with the `train_test_split` function, using the `stratify` parameter. The dataset is unbalanced, and the `stratify` parameter makes a split so that the proportion of values in the sample produced is the same as the proportion of values provided to the parameter `stratify`. For example, if variable y takes values 0 and 1 and there are 30% of 0's and 70% of 1's, `stratify=y` will make sure that the random split also has 30% of 0's and 70% of 1's.
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split

# Define the independent variables as Xs.
Xs = emails_df['message'].values
# Define the target (dependent) variable as Ys.
Ys = emails_df['class'].values

# Vectorize words - turn the text into numerical feature vectors,
# using the strategy of tokenization, counting and normalization.
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
Xs = vectorizer.fit_transform(Xs)

# Create a train/test split using 20% test size.
X_train, X_test, y_train, y_test = train_test_split(Xs, Ys, test_size=0.2, shuffle=True, random_state=0, stratify=Ys) feature_names = vectorizer.get_feature_names() print("Number of different words: {0}".format(len(feature_names))) print("Word example: {0}".format(feature_names[5369])) # Check the split printing the shape of each set. print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) ###Output Number of different words: 119405 Word example: arcadian (26972, 119405) (26972,) (6744, 119405) (6744,) ###Markdown Train a ClassifierTrain a Naive Bayes classifier and evaluate the performance with the accuracy score. ###Code from sklearn.naive_bayes import MultinomialNB # Create classifier. clf = MultinomialNB() # Fit the classifier on the training features and labels. clf.fit(X_train, y_train) # Make prediction - Store predictions in a list named pred. pred = clf.predict(X_test) # Calculate the accuracy on the test data. print("Accuracy: {}".format(clf.score(X_test, y_test))) ###Output Accuracy: 0.9847271648873073 ###Markdown Identify the Most Powerful FeaturesPrint the 10 most important features. ###Code def get_most_important_features(vectorizer, classifier, n=None): feature_names = vectorizer.get_feature_names() top_features = sorted(zip(classifier.coef_[0], feature_names))[-n:] for coef, feat in top_features: print(coef, feat) get_most_important_features(vectorizer, clf, 10) ###Output -7.10193040638 money -7.08106062291 price -7.07724882029 onlin -7.07696063312 offer -7.06439782381 www -7.04630242466 softwar -6.97568091654 email -6.94140085524 click -6.65836580587 com -6.59068342497 http ###Markdown ExamplesLet's try out our classifier. ###Code email = ["Hello George, how about a game of tennis tomorrow?", "Hello, click here if you want to satisfy your wife tonight", "We offer free viagra!!! Click here now!!!", "Dear Sara, I prepared the annual report. Please check the attachment.", "Hi David, will we go for cinema tonight?", "Best holidays offers only here!!!"] examples = vectorizer.transform(email) predictions = clf.predict(examples) predictions ###Output _____no_output_____
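###Markdown Because the ham/spam split is not perfectly balanced, accuracy alone can be flattering. An optional sanity check is sketched below; it simply reuses the `clf`, `X_test` and `y_test` objects from the cells above and is not part of the original notebook.
###Code
# Optional sketch: per-class precision/recall and the confusion matrix on the test split.
from sklearn.metrics import classification_report, confusion_matrix

y_pred = clf.predict(X_test)
print(confusion_matrix(y_test, y_pred))  # rows: true class, columns: predicted class
print(classification_report(y_test, y_pred, target_names=['ham', 'spam']))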
ATR/JupyterNotbook/LOG2ADL.ipynb
###Markdown Log2Csv Converter Convert the data recorded by the ATR sensors into a format that ADLtagger can handle. Only a single day's worth of ATR sensor data can be processed (the date cannot be determined from the timestamp alone). [Fix this next!!!] Zeros after the decimal point of the timestamp get dropped ==> use str.zfill(3) + .astype(str)!!!
###Code
import os
import shutil  # shutil.rmtree(path) deletes the whole directory tree (even if it is not empty)
import pandas as pd
import datetime as dt
import csv

class Log2ADL(object):
    """ Convert log files generated by the ATR acceleration sensor into the ADL tagger format """
    def __init__(self, date_str, t_shift, path_to_log_dir='./log/', path_to_output_dir='./ADL/'):
        """
        Argument
        --------
        date_str: str, the date on which the logs were recorded. (ex. '2017-09-21')
        t_shift: int, offset to adjust the timestamp between the PC and the ATR sensors. [ms]
        path_log_dir/path_output_dir: Path to the log/output file directory
        """
        self.path_to_log_dir = path_to_log_dir
        self.path_to_output_dir = path_to_output_dir
        # Generate the base time used to align timestamps
        self.base_timestamp = dt.datetime.strptime(date_str, '%Y-%m-%d') + dt.timedelta(milliseconds=t_shift)
        print("Class was successfully generated [base_timestump=",self.base_timestamp,"]")

    """Read Log Files"""
    def read_logs(self):
        # Step.1: Get the list of log files to process
        print("Step1: Get log files.")
        log_list = os.listdir(self.path_to_log_dir)
        print(">> Success: ",log_list, "\n")
        # Step.2: Convert the log files into a pd.DataFrame
        print("Step2: Convert log files to pd.DataFrame.")
        df = []
        for file_name in log_list:
            if file_name.find('.log') > 0:
                path_to_file = self.path_to_log_dir + file_name
                with open(path_to_file, 'r') as f:
                    reader, x = csv.reader(f), []
                    for row in reader:
                        if "ags" in row:
                            x.append(row)
                print(">> Read CSV ["+ path_to_file + '] ==> Sucess(', len(x), "rows)")
                df = df + x
        self.df = pd.DataFrame(df, columns=["sensor", "time_ATR", "accX", "accY", "accZ", "gyroX", "gyroY", "gyroZ"])
        print(">> Success: df.shape=", self.df.shape, "\n")
        return self.df

    """Add Timestamps"""
    def generate_timestamp(self,time):
        """Convert an ATR timestamp into a datetime object"""
        # Params >> time(integer or str)
        # Return >> datetime.datetime
        # Convert the elapsed milliseconds into base-60 units (ms, s, min, h)
        milliseconds, time = time%1000, int(time/1000)
        seconds, time = time%60, int(time/60)
        minutes, time = time%60, int(time/60)
        hours, time = time%60, int(time/60)
        # Error Check
        if time > 1:
            print(">> Error: timestamp of ATR sensor is in an invalid format.")
        # Align with the base time
        new_time = self.base_timestamp + dt.timedelta(milliseconds=milliseconds, seconds=seconds, minutes=minutes, hours=hours)
        return new_time

    def add_timestamps(self):
        print("Step3: Add Timestamps.")
        df = self.df.sort_values(by=["time_ATR"], ascending=True).reset_index(drop=True)
        df["timestamp"] = df["time_ATR"].apply(self.generate_timestamp)
        df["time"], df["time_milli"] = df["timestamp"].dt.strftime('%Y%m%d_%H:%M:%S.'), df["timestamp"].dt.microsecond // 1000
        df["time"] = df["time"].astype(str) + df["time_milli"].astype(str).str.zfill(3)
        #df["time"] = df["timestamp"].apply(lambda x: x.strftime('%Y%m%d_%H:%M:%S.') + "%03d" % (x.microsecond // 1000))
        df["group"] = df["timestamp"].dt.strftime('%Y%m%d_%H%M')
        self.df = df
        print(">> Success: df.shape=", self.df.shape, "\n")
        return df

    """Write output"""
    def activate_dir(self, target_path, dir_name):
        if not os.path.isdir(target_path+dir_name):
            # Create the directory if it does not exist
            os.mkdir(target_path+dir_name)
            if os.path.isdir(target_path+dir_name):
                print(">> Directory was created ["+ target_path+dir_name +"]")
        return target_path + dir_name + '/'

    def to_csvs(self):
        print("Step4: Write CSVs.")
        groups = self.df["group"].drop_duplicates().reset_index(drop=True)
        df = self.df
        # Clear the output directory
        if os.path.isdir(self.path_to_output_dir):
            shutil.rmtree(self.path_to_output_dir)
        os.mkdir(self.path_to_output_dir)
        print(">> Clean output directory.")
        for group in groups:
            # Select the rows to write
            df_selected = df[df["group"] == group].sort_values(by=["timestamp"])
            # Select the output directory: Acc
            ## Make sure the directories exist
            target_path = self.activate_dir(self.path_to_output_dir, "acc2")
            target_path = self.activate_dir(target_path, "acc2_R")
            ## Set the output file name
            target_file_name = group+"00_acc2.csv"
            # Write the CSV
            df_selected[["time", "accX", "accY", "accZ"]].to_csv(target_path+'/'+target_file_name, index=False, header=["time", "x", "y", "z"])
            print(">> write", target_path+target_file_name)
            # Select the output directory: Gyro
            ## Make sure the directories exist
            target_path = self.activate_dir(self.path_to_output_dir, "Gyro")
            target_path = self.activate_dir(target_path, "gyro")
            ## Set the output file name
            target_file_name = group+"00_gyro.csv"
            # Write the CSV (gyroscope columns, not the accelerometer ones)
            df_selected[["time", "gyroX", "gyroY", "gyroZ"]].to_csv(target_path+'/'+target_file_name, index=False, header=["time", "x", "y", "z"])
            print(">> write", target_path+target_file_name)
        print(">> Success:", len(groups), "files were created.\n")

# converter = Log2ADL('2017-09-21', 100)
# df_foo = converter.read_logs()
# df_foo = converter.add_timestamps()
# df_foo = converter.to_csvs()

""" Main function """
ts_minutes = 10
ts_seconds = ts_minutes*60 + 88 - 5 + 0.45
ts_ms = ts_seconds*1000
print("ts_ms:", ts_ms)
converter = Log2ADL('2018-01-31', ts_ms, path_to_log_dir='./data/log/', path_to_output_dir='./data/ADL/')
df_foo = converter.read_logs()
#converter.df = converter.df[:50000]
df_foo = converter.add_timestamps()
converter.to_csvs()
display(df_foo.head())

""" Main function: 2018.02.01 """
ts_minutes = 0
ts_seconds = ts_minutes*60
ts_ms = ts_seconds*1000
print("ts_ms:", ts_ms)
sub_name = "45_maekawa"
path_to_log_dir = "/root/upconversion/data/2018_01_16/{}/log/".format(sub_name)
path_to_output_dir = "/root/upconversion/data/2018_01_16/{}/data/".format(sub_name)
converter = Log2ADL('2018-02-04', ts_ms, path_to_log_dir=path_to_log_dir, path_to_output_dir=path_to_output_dir)
df_foo = converter.read_logs()
#converter.df = converter.df[:50000]
df_foo = converter.add_timestamps()
converter.to_csvs()
display(df_foo.head())
###Output
ts_ms: 0
Class was successfully generated [base_timestump= 2018-02-04 00:00:00 ]
Step1: Get log files.
>> Success: ['45_maekawa.log']
Step2: Convert log files to pd.DataFrame.
>> Read CSV [/root/upconversion/data/2018_01_16/45_maekawa/log/45_maekawa.log] ==> Sucess( 714080 rows)
>> Success: df.shape= (714080, 8)
Step3: Add Timestamps.
>> Success: df.shape= (714080, 12)
Step4: Write CSVs.
>> Clean output directory. 
>> Directory was created [/root/upconversion/data/2018_01_16/45_maekawa/data/acc2] >> Directory was created [/root/upconversion/data/2018_01_16/45_maekawa/data/acc2/acc2_R] >> write /root/upconversion/data/2018_01_16/45_maekawa/data/acc2/acc2_R/20180204_121300_acc2.csv >> Directory was created [/root/upconversion/data/2018_01_16/45_maekawa/data/Gyro] >> Directory was created [/root/upconversion/data/2018_01_16/45_maekawa/data/Gyro/gyro] >> write /root/upconversion/data/2018_01_16/45_maekawa/data/Gyro/gyro/20180204_121300_gyro.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/acc2/acc2_R/20180204_121400_acc2.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/Gyro/gyro/20180204_121400_gyro.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/acc2/acc2_R/20180204_121500_acc2.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/Gyro/gyro/20180204_121500_gyro.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/acc2/acc2_R/20180204_121600_acc2.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/Gyro/gyro/20180204_121600_gyro.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/acc2/acc2_R/20180204_121700_acc2.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/Gyro/gyro/20180204_121700_gyro.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/acc2/acc2_R/20180204_121800_acc2.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/Gyro/gyro/20180204_121800_gyro.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/acc2/acc2_R/20180204_121900_acc2.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/Gyro/gyro/20180204_121900_gyro.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/acc2/acc2_R/20180204_122000_acc2.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/Gyro/gyro/20180204_122000_gyro.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/acc2/acc2_R/20180204_122100_acc2.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/Gyro/gyro/20180204_122100_gyro.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/acc2/acc2_R/20180204_122200_acc2.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/Gyro/gyro/20180204_122200_gyro.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/acc2/acc2_R/20180204_122300_acc2.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/Gyro/gyro/20180204_122300_gyro.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/acc2/acc2_R/20180204_122400_acc2.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/Gyro/gyro/20180204_122400_gyro.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/acc2/acc2_R/20180204_122500_acc2.csv >> write /root/upconversion/data/2018_01_16/45_maekawa/data/Gyro/gyro/20180204_122500_gyro.csv >> Success: 13 files were created.
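###Markdown The zero-padding issue flagged in the note at the top of this notebook (leading zeros of the millisecond field get dropped) can be checked in isolation. A small self-contained sketch, independent of the sensor logs:
###Code
# Why the millisecond part must be zero-padded before concatenating it to the time string.
import pandas as pd

ms = pd.Series([5, 42, 305])
print(ms.astype(str).tolist())               # ['5', '42', '305'] -> '12:00:00.5' would be wrong
print(ms.astype(str).str.zfill(3).tolist())  # ['005', '042', '305'] -> '12:00:00.005' as intended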
Customer-Segmentation-using-Python-in-ML.ipynb
###Markdown Customer Segmentation using Python in Machine Learning __Author: Soma Dey__ 1. Importing the dataset and all required libraries
###Code
# 'Numpy' is used for mathematical operations on large, multi-dimensional arrays and matrices
import numpy as np
# 'Pandas' is used for data manipulation and analysis
import pandas as pd
# 'Matplotlib' is a data visualization library for 2D and 3D plots, built on numpy
import matplotlib.pyplot as plt
%matplotlib inline
# 'Seaborn' is based on matplotlib; used for plotting statistical graphics
import seaborn as sns
# suppress display of warnings
import warnings
warnings.filterwarnings("ignore")

# Load the data into a pandas DataFrame
df = pd.read_csv("C:/Users/SOMA/Documents/Python_Projects/Data Set/Mall_Customers.csv")

# Read the dataset
df.head()  # printing the first 5 records of the dataset
# Now printing the last 5 records of the dataset
df.tail()
df.info()
df.shape
###Output
_____no_output_____
###Markdown - **This Customer Segmentation dataset has 200 rows and 5 columns** 2. Exploratory Data Analysis
###Code
# Find the gender distribution between males and females.
genders = df.Gender.value_counts()
plt.figure(figsize=(10,5))
sns.barplot(x=genders.index, y=genders.values)
plt.show()

# Visualize the number of customers in each age group
age18_25 = df.Age[(df.Age<=25)&(df.Age>=18)]
age26_35 = df.Age[(df.Age<=35)&(df.Age>=26)]
age36_45 = df.Age[(df.Age<=45)&(df.Age>=36)]
age46_55 = df.Age[(df.Age<=55)&(df.Age>=46)]
age55above = df.Age[(df.Age>=56)]
x = ["18-25","26-35","36-45","46-55","Above55"]
y = [len(age18_25.values), len(age26_35.values), len(age36_45.values), len(age46_55.values), len(age55above.values)]
plt.figure(figsize=(15,6))
plt.title("Number of customers and ages")
plt.xlabel("Ages")
plt.ylabel("Number of customers")
sns.barplot(x=x, y=y)
plt.show()
###Output
_____no_output_____
###Markdown - **So we can say that customers in the 26-35 age group outnumber the other age groups** ###Code
# Visualize the spending scores among the customers
ss1_20 = df["Spending Score (1-100)"][(df["Spending Score (1-100)"]>=1) & (df["Spending Score (1-100)"]<=20)]
ss21_40 = df["Spending Score (1-100)"][(df["Spending Score (1-100)"]>=21) & (df["Spending Score (1-100)"]<=40)]
ss41_60 = df["Spending Score (1-100)"][(df["Spending Score (1-100)"]>=41) & (df["Spending Score (1-100)"]<=60)]
ss61_80 = df["Spending Score (1-100)"][(df["Spending Score (1-100)"]>=61) & (df["Spending Score (1-100)"]<=80)]
ss81_100 = df["Spending Score (1-100)"][(df["Spending Score (1-100)"]>=81) & (df["Spending Score (1-100)"]<=100)]
x = ["1-20","21-40","41-60","61-80","81-100"]
y = [len(ss1_20.values), len(ss21_40.values), len(ss41_60.values), len(ss61_80.values), len(ss81_100.values)]
plt.figure(figsize=(10,6))
plt.title("Spending scores of the customers")
plt.xlabel("Spending Scores")
plt.ylabel("Number of customers")
sns.barplot(x=x, y=y)
plt.show()
###Output
_____no_output_____
###Markdown - **So based on the bar graph we can see that the majority of spending scores among the customers is between 41-60** ###Code
# Visualize the annual income of the customers
ai0_30 = df["Annual Income (k$)"][(df["Annual Income (k$)"]>=0)&(df["Annual Income (k$)"]<=30)]
ai31_60 = df["Annual Income (k$)"][(df["Annual Income (k$)"]>=31)&(df["Annual Income (k$)"]<=60)]
ai61_90 = df["Annual Income (k$)"][(df["Annual Income (k$)"]>=61)&(df["Annual Income (k$)"]<=90)]
ai91_120 = df["Annual Income (k$)"][(df["Annual Income (k$)"]>=91)&(df["Annual Income (k$)"]<=120)]
ai121_150 = df["Annual Income (k$)"][(df["Annual Income (k$)"]>=121)&(df["Annual Income (k$)"]<=150)]
x = ["0-30","31-60","61-90","91-120","121-150"]
y = [len(ai0_30.values), len(ai31_60.values), len(ai61_90.values), len(ai91_120.values), len(ai121_150.values)]
plt.figure(figsize=(15,6))
plt.title("Annual Income of customers")
plt.xlabel("Annual Income in k$")
plt.ylabel("Number of customers")
sns.barplot(x=x, y=y)
plt.show()
###Output
_____no_output_____
###Markdown - __The graph obtained shows that the majority of customers have an annual income between 61-90 k$__ 3. Finding the optimum number of clusters
###Code
from sklearn.cluster import KMeans

x = df.iloc[:, [3,4]].values
wcss = []  # Within-cluster sum of squares
for i in range(1,11):
    kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
    kmeans.fit(x)
    wcss.append(kmeans.inertia_)

# Plotting the Elbow Method
plt.figure(figsize=(12,6))
plt.scatter(range(1, 11), wcss, marker="s", c="black")
plt.plot(range(1,11), wcss, c="red")
plt.xlabel('KValue')
plt.ylabel('WCSS')
# for values of k from 1 to 10 in the graph
plt.xticks(np.arange(1,11,1))
plt.title("Elbow Graph")
plt.show()
###Output
_____no_output_____
###Markdown + **In this method, the number of clusters is varied within a certain range. For each number, the within-cluster sum of squares (WCSS) is calculated and stored in a list. These values are then plotted against the range of cluster counts used before. The location of the bend in the 2D plot indicates the appropriate number of clusters**+ **From the obtained graph we can observe that at point 5 there is a maximum inflection in the curve. So we can use 5 clusters in the K-means algorithm** 4. Modeling and visualization with KMeans Algorithm
###Code
kmeansmodel = KMeans(n_clusters=5, init='k-means++', random_state=0)
y_kmeans = kmeansmodel.fit_predict(x)  # predicted cluster labels
y_kmeans

# Visualizing the K-means clusters
sns.set_style("whitegrid")
plt.figure(figsize=(12,7))
plt.scatter(x[y_kmeans==0,0], x[y_kmeans==0,1], s=80, c='red', label='customer1', marker="*")
plt.scatter(x[y_kmeans==1,0], x[y_kmeans==1,1], s=80, c='blue', label='customer2', marker="*")
plt.scatter(x[y_kmeans==2,0], x[y_kmeans==2,1], s=80, c='green', label='customer3', marker="*")
plt.scatter(x[y_kmeans==3,0], x[y_kmeans==3,1], s=80, c='orange', label='customer4', marker="*")
plt.scatter(x[y_kmeans==4,0], x[y_kmeans==4,1], s=80, c='purple', label='customer5', marker="*")
plt.scatter(kmeansmodel.cluster_centers_[:,0], kmeansmodel.cluster_centers_[:,1], s=100, c="black", label="centroids")
plt.title('cluster of customers')
plt.xlabel('Annual Income')
plt.ylabel('Spending Scores')
plt.legend()
plt.show()
###Output
_____no_output_____
ProcessingDigitalGlobeTifData.ipynb
###Markdown
1. Setting Up Colab on the Google VM

How to use the Colab interface on a remote Google virtual machine that is mounted with a Google Bucket.

Initial Set Up *(only has to be done once per account)*

(Locally)
1. `gcloud init` (if you haven't already)
2. `gcloud compute config-ssh` (if you haven't already, create your password)

(On VM)
0. Start the VM (if it isn't already running) on the Google VM page and, under the connect column, go to the SSH tab and select 'Open in browser window'
1. Follow the instructions for installing gcsfuse here https://github.com/GoogleCloudPlatform/gcsfuse/blob/master/docs/installing.md
2. Install conda
>`wget http://repo.continuum.io/archive/Anaconda3-4.0.0-Linux-x86_64.sh`
>`bash Anaconda3-4.0.0-Linux-x86_64.sh`
>Note that if this gives you an error involving unpacking the zip file, try:
>`sudo apt-get install bzip2`
>`rm -r /home//anaconda3`
3. `source ~/.bashrc`
4. `conda install jupyter`
5. `conda install tornado=4.5.3` (resolves a dependency issue)
6. `pip install jupyter_http_over_ws`
7. `pip install requests==2.4.3`
8. `jupyter serverextension enable --py jupyter_http_over_ws`
9. `conda upgrade ipykernel`

*In order to run colab (or any notebook) from a VM, we have to run the notebook from the VM, and then pipe the output of that VM onto our local machine (we'll be using port 8888). Once we do this, we can have a display run 'locally' (either through Jupyter notebook or colab).*

Running the VM on Colab

(Locally)
1. In your terminal, run:
>`gcloud compute ssh --zone us-west1-b dfd-cpu-instance -- -L 8888:localhost:8888`
>(replace us-west1-b and dfd-cpu-instance with your VM's region and instance name respectively)

(On VM)
1. Start the VM (if it isn't already running) on the Google VM page and, under the connect column, go to the SSH tab and select 'Open in browser window.'
2. If you are already in the VM, make sure you are in your home directory.
3. `mkdir satellite && cd satellite` (we called our data directory satellite but you can call it anything you want)
4. Check if there's anything in this directory. If not, run (in the satellite folder): `gcsfuse tent-bucket .`
5. In the pop-up terminal, type: `jupyter notebook --NotebookApp.allow_origin='https://colab.research.google.com' --port=8888 --no-browser`
6. Copy and paste the URL it prints out into your browser (it will direct to a jupyter notebook page)
7. From here you can directly create/run notebooks, or go to the next step to connect to the colab
8. In the colab, go to the runtime option (click the down arrow), connect to 'local' and make sure the port is 8888 (or whatever you set it to in step 5)
9. Now you can run the rest of this colab! :)

2. Processing Digital Globe Data into Image Files

Processing our Digital Globe Data, which exists as TIF files, into consumable chunks of images, and geo-mapping the corresponding structures in our labelling data to each output image. 
###Code # Install necessary imports for processing TIF files !pip install rasterio # Import necessary libraries import base64 import json import pandas as pd from PIL import Image from io import BytesIO from collections import defaultdict # import geopandas as gpd # from geopandas import GeoDataFrame import descartes from shapely.geometry import Point import numpy as np from rasterio.io import MemoryFile import rasterio import rasterio.features import rasterio.warp from rasterio.plot import show import random import glob # This is the directory of the mounted google bucket GOOGLE_BUCKET_DIR = 'satellite/' # NOTE: I added 'labels' directory before each file now that it is local # and we are storing the geojson rating data here REGION_MAPPINGS = { 'Africa1': { 'qc': GOOGLE_BUCKET_DIR + 'labels/africa1_qcexport_20180906.geojson', 'raw': GOOGLE_BUCKET_DIR + 'labels/africa1_rawtags_20180906.geojson'}, 'Africa2': { 'qc': GOOGLE_BUCKET_DIR + 'labels/africa2_qcexport_20180927.geojson', 'raw': GOOGLE_BUCKET_DIR + 'labels/africa2_rawtags_20180927.geojson'}, 'Bangladesh': { 'qc': GOOGLE_BUCKET_DIR + 'labels/bangladesh_qcexport_20180906.geojson', 'raw': GOOGLE_BUCKET_DIR + 'labels/bangladesh_rawtags_20180906.geojson'}, 'WesternAsia': { 'qc': GOOGLE_BUCKET_DIR + 'labels/westernAsia_qcexport_20180906.geojson', 'raw': GOOGLE_BUCKET_DIR + 'labels/westernAsia_rawtags_20180906.geojson'} } COLORS = {'UNHCR Tent': 'r', 'Administrative Structure': 'm', 'Round Earthen Structure': 'c', 'Other Tent': 'y'} CHIP_URL_PREFIX = 'https://s3.amazonaws.com/explorationlab/chips/' def get_features(filepath, region): """ Featurize datasets (QC and Raw) according to the following schema from the stored json object. Note that we: 1) Ignore the metadata tags and only use the 'feature tag' 2) Expand the geography tag within the feature tag to 'latitude', 'longitude', and 'coordinate_type' keys, in addition to the raw geojson 3) Convert date features to pandas Timestamp and confidence score to float QC datasets id: unique identifier label: name for the feature type score: the CrowdRank confidence score of the assigned attribute, relative to other tags in the dataset. 
(floating point 0-1 with 0=no confidence, 1= fully confident) agreement: number of other taggers who placed the same tag type on this point chip_url: link to a .jpg image chip hosted on aws with the identified feature in the center of the image timestamp: Date/time in GMT of the last QC batch acquisition_date: Date the image was collected sensor: the DigitalGlobe satellite that captured the image catalog_id: DigitalGlobe identifier for the image strips collected by our satellites map_id: unique identifier for each lattice cell across the imagery Raw tags id: unique identifier tagger_id: unique identifier of the user map_view_id: not too helpful for you, but I can use this to link back to another table in the database for the map/image seen by the crowd type_id: database tag for the feature type label: name for the feature type timestamp: time in GMT that tag was placed """ with open(filepath) as f: content = f.read() obj = json.loads(content) data = defaultdict(list) for x in obj['features']: data['latitude'].append(x['geometry']['coordinates'][1]) data['longitude'].append(x['geometry']['coordinates'][0]) data['coordinate_type'].append(x['geometry']['type']) for key in x['properties'].keys(): if key == 'acquisition_date' or key == 'timestamp': data[key].append(pd.Timestamp(x['properties'][key])) elif key == 'score': data[key].append(float(x['properties'][key])) elif key == 'chip_url': data[key].append(x['properties'][key].replace(CHIP_URL_PREFIX, '')) else: data[key].append(x['properties'][key]) df = pd.DataFrame.from_dict(dict(data)) df['region'] = region # Clean data a little - group all 'Other Tent ...' together in an 'Other' category df['label'] = df['label'].apply(lambda x: 'Other Tent' if x.startswith('Other Tent') else x) return df def lonlat2xy(lon_in, lat_in, lon1, lon2, lat1, lat2, w, h): """Simple function to calcualte x/y coordinates based on lon/lat coordinate""" x = w * (lon_in - lon1) / (lon2 - lon1) y = h * (lat_in - lat1) / (lat2 - lat1) return x, y def xy2lonlat(x_in, y_in, lon1, lon2, lat1, lat2, w, h): """Simple function to calcualte lat/lon coordinates based on x/y coordinate""" lon = lon1 + (lon2 - lon1) * x_in / w lat = lat1 + (lat2 - lat1) * y_in / h return lon, lat def get_cropped_images(tif_path, labels, xcrop, ycrop): """ Output image crops and corresponding labels for a given tif file and all labels. tif_path: path to the tif file we want to get images out of labels: pandas dataframe containing the tomnod data ratings file (processed in get_features method above) xcrop: width of desired crop (geospatial) ycrop: height of desired crop (geospatial) """ with rasterio.open(tif_path) as dataset: # Read the dataset's valid data mask as a ndarray. mask = dataset.dataset_mask() big_lon1, big_lat1 = 0, 0 big_lon2, big_lat2 = 0, 0 # Extract feature shapes and values from the array. for geom, val in rasterio.features.shapes( mask, transform=dataset.transform): # Transform shapes from the dataset's own coordinate # reference system to CRS84 (EPSG:4326). 
geom = rasterio.warp.transform_geom( dataset.crs, 'EPSG:4326', geom, precision=10) # Get coordinates from the dataset [big_lon1, big_lat1] = geom['coordinates'][0][0] [big_lon2, big_lat2] = geom['coordinates'][0][2] w, h = dataset.profile['width'], dataset.profile['height'] # Assign crop sizes according to the resolution lon_crop = xcrop/w * (big_lon2 - big_lon1) lat_crop = ycrop/h * (big_lat2 - big_lat1) # Flip dimension order to have channel last instead of first big_im = np.transpose(dataset.read(), (1,2,0)) ims, labels_crops = [], [] # Iterate through each crop grid and calculate its labels for x1 in range(0, w, xcrop): for y1 in range(0, h, ycrop): # Calculate lat/lon boundaries for crop lon1, lat1 = xy2lonlat( x1, y1, big_lon1, big_lon2, big_lat1, big_lat2, w, h) lon2, lat2 = lon1 + lon_crop, lat1 + lat_crop min_lon, max_lon = min(lon1, lon2), max(lon1, lon2) min_lat, max_lat = min(lat1, lat2), max(lat1, lat2) # Get all structures within this range and create the label labels_crop = labels[(labels.latitude > min_lat) & (labels.latitude < max_lat) & (labels.longitude > min_lon) & (labels.longitude < max_lon)] labels_crop['tile_min_lat'] = big_lat1 labels_crop['tile_max_lat'] = big_lat2 labels_crop['tile_min_lon'] = big_lon1 labels_crop['tile_max_lon'] = big_lon2 labels_crop['min_lat'], labels_crop['max_lat'] = min_lat, max_lat labels_crop['min_lon'], labels_crop['max_lon'] = min_lon, max_lon labels_crop['x'], labels_crop['y'] = lonlat2xy( labels_crop.longitude, labels_crop.latitude, lon1, lon2, lat1, lat2, xcrop, ycrop) # Create the cropped images im = big_im[y1:y1+ycrop, x1:x1+xcrop, :] ims.append(im) labels_crops.append(labels_crop) return ims, labels_crops def upload_crops(ims, labels_crops, tif_prefix, dest=''): """ Upload an array of image and labels for crops to a filepath ims: array of images labels_crops: array of pandas dataframes containing labels w.r.t each image tf_prefix: tif_id to store the images under """ indices = [i for i in range(len(labels_crops)) if not labels_crops[i].empty] if len(indices) == 0: return region = labels_crops[indices[0]].region.unique()[0] for i in indices: png_filename = '%s%s/%s_%d.png' % (dest, region, tif_prefix, i) print('Outputting: %s' % png_filename) img = ims[i] if img.shape[-1] == 1: img = np.squeeze(np.stack((img,) * 3, -1)) Image.fromarray(img.astype('uint8')).save(png_filename) csv_filename = '%s%s/%s_%d.csv' % (dest, region, tif_prefix, i) labels_crops[i].to_csv(csv_filename, encoding='utf-8') # For the project, we copied all the code above and then moved all the code # below under the main function (commented out below). This allowed us to run # a script processing on all examples remotely and detach the thread. 
# Note that this can also be adapted to run on the colab (although this may take # significantly slower as the .tif files will not be mounted on the file system) # if __name__ == "__main__": qc = pd.DataFrame() for region in REGION_MAPPINGS.keys(): if len(qc) == 0: qc = get_features(REGION_MAPPINGS[region]['qc'], region) else: qc = qc.append(get_features(REGION_MAPPINGS[region]['qc'], region)) all_tifs = glob.glob(GOOGLE_BUCKET_DIR + '*/*.tif') failed_indices = [] CROP_SIZE = 500 for i in range(10): tif = all_tifs[i] print("Processing index %d, tile %s" % (i,tif)) tif_prefix = (tif.split('/')[-1]).split('.')[0] # try: ims, labels_crops = get_cropped_images(tif, qc, CROP_SIZE, CROP_SIZE) upload_crops(ims, labels_crops, tif_prefix) # except: # failed_indices.append(i) print("Failed indices:") for idx in failed_indices: print(idx) ###Output Processing index 0, tile satellite/10200100058D0600/10200100058D0600_BROWSE.tif
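###Markdown
As a quick sanity check (a minimal sketch, not part of the processing script above), we can read one of the generated crop/label pairs back in and overlay the tagged structures on the image. The file names below are hypothetical; substitute any PNG/CSV pair written by `upload_crops`.
###Code
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image

# hypothetical example of an output pair written by upload_crops ('<region>/<tif_prefix>_<i>')
crop_png = 'Africa1/example_tile_0.png'
crop_csv = 'Africa1/example_tile_0.csv'

crop_img = Image.open(crop_png)
crop_labels = pd.read_csv(crop_csv)

plt.figure(figsize=(8, 8))
plt.imshow(crop_img)
# 'x'/'y' are the pixel coordinates computed by lonlat2xy in get_cropped_images
for label, group in crop_labels.groupby('label'):
    plt.scatter(group['x'], group['y'], c=COLORS.get(label, 'w'), label=label, s=20)
plt.legend()
plt.show()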
04RNN/04Character_Level_RNN_sol.ipynb
###Markdown Character-Level LSTM in PyTorch이번 실습을 통해 character-level LSTM 를 구현하자. 특정 text에서 문자를 하나씩 학습한다. 그리고 학습한 것을 바탕으로 새로운 문자를 생성하여 문장을 만든다. Anna Karenina라는 소설을 사용하여 모형을 구현해 보자. **소설을 학습하고 소설과 유사한 문장을 생성하는 실습을 해보자.**아래 그림은 일반적인 character-wise RNN의 구조를 나타낸다.![img](../assets/charseq.jpeg) ###Code import numpy as np import torch from torch import nn import torch.nn.functional as F ###Output _____no_output_____ ###Markdown Load in DataAnna Karenina text file을 다운로드하여 학습에 사용할 수 있도록 전처리를 수행한다. ###Code # open text file and read in data as `text` with open('./data/anna.txt', 'r') as f: text = f.read() ###Output _____no_output_____ ###Markdown 처음 100 개 문자를 확인해 보자. ###Code text[:100] ###Output _____no_output_____ ###Markdown Tokenization문자를 숫자로 전환하기 위한 **dictionary**를 생성한다. 문자를 숫자로 Encoding하여 모형의 input data로 사용한다. ###Code # encode the text and map each character to an integer and vice versa # we create two dictionaries: # 1. int2char, which maps integers to characters # 2. char2int, which maps characters to unique integers chars = tuple(set(text)) int2char = dict(enumerate(chars)) char2int = {ch: ii for ii, ch in int2char.items()} # encode the text encoded = np.array([char2int[ch] for ch in text], dtype=np.int64) ###Output _____no_output_____ ###Markdown 처음 100개 문자를 확인하여 문자가 숫자로 인코딩 되었음을 확인해 보자. ###Code encoded[:100] ###Output _____no_output_____ ###Markdown Pre-processing the dataLSTM에서는 input을 **one-hot encoded** 으로 사용한다. 따라서 우리가 인코딩한 문자도 one-hot encoding 방식으로 변환해야 한다. ###Code def one_hot_encode(arr, n_labels): # Initialize the the encoded array one_hot = np.zeros((np.multiply(*arr.shape), n_labels), dtype=np.float32) # Fill the appropriate elements with ones one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1. # Finally reshape it to get back to the original array one_hot = one_hot.reshape((*arr.shape, n_labels)) return one_hot # check that the function works as expected test_seq = np.array([[3, 5, 1]]) one_hot = one_hot_encode(test_seq, 8) print(one_hot) ###Output [[[0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0.]]] ###Markdown Making training mini-batches모형을 훈련하기 위해 mini-batches 를 생성한다. 생성되는 batches 는 아래와 비슷하게 될 것이다:![img](../assets/[email protected])이 예에서 encoded characters 를 `arr` 라고 하자. 이를 주어진 `batch_size`로 multiple sequences로 나눈다, 각각의 sequences는 `seq_length` 길이를 가진다. Creating Batches**1. 먼저 full mini-batches로 나누고 남은 문자는 모두 삭제한다.**각 batch는 $N \times M$ 의 문자를 가진다.(여기서 $N$ 은 batch size = 하나의 batch에 들어가는 sequence 개수) 그리고 $M$ 은 sequence_length 이다. 그 다음에 batches의 총 갯수인 $K$는 `arr` 의 길이를 하나의 batch에 들어가는 문자의 갯수(=$N \times M$)로 나누면 된다. `arr`에서 필요한 총 문자의 갯수는 $N * M * K$이 된다.**2. `arr`를 $N$ batches로 나눈다.** `arr.reshape(size)`를 사용한다. `size` tuple값으로 준다. 하나의 batch 당 $N$ sequences가 있다. 따라서 $N$이 첫번째 차원값이 된다. reshape를 하고 나면 $N \times (M * K)$이 된다.**3. 이 array를 사용하여 mini-batches를 생성한다.**$N \times (M * K)$ array에 대해서 각각의 batch $N \times M$ window를 가진다. 윈도우는`seq_length`만큼 이동한다. input array와 target arrays를 생성한다. targets은 단지 inputs 을 one character shift한 것이다. `range` 함수를 사용하는데 이때 interval option을 seq_length로 주면 된다. > **실습 :** 아래에 batches 를 생성하는 function를 작성하자. 쉽지 않은 실습이므로 해답 내용을 참조하여 작성해 보자. ###Code def get_batches(arr, batch_size, seq_length): '''Create a generator that returns batches of size batch_size x seq_length from arr. 
Arguments --------- arr: Array you want to make batches from batch_size: Batch size, the number of sequences per batch seq_length: Number of encoded chars in a sequence ''' ## TODO: Get the number of batches we can make n_batches = len(arr) // (batch_size*seq_length) #정수나누기해야한다. ## TODO: Keep only enough characters to make full batches arr = arr[:batch_size*seq_length*n_batches] ## TODO: Reshape into batch_size rows arr = arr.reshape((batch_size, -1)) ## TODO: Iterate over the batches using a window of size seq_length for n in range(0, arr.shape[1], seq_length): # The features x = arr[:, n:n+seq_length] # The targets, shifted by one ==> y는 target으로 x값을 1만큼 shift해서 생성한다 y = np.zeros_like(x) try : y[:, :-1], y[:, -1] = x[:, 1:], arr[:, n+seq_length] except IndexError: y[:, :-1], y[:, -1] = x[:, 1:], arr[:, 0] yield x, y # yield는 generator를 return한다. ###Output _____no_output_____ ###Markdown Test Your Implementationbatch가 제대로 생성되는지 테스트 해본다. 일단 batch size 는 8 그리고 50 sequence length로 설정해 보자. ###Code batches = get_batches(encoded, 8, 50) x, y = next(batches) # printing out the first 10 items in a sequence print('x\n', x[:10, :10]) print('\ny\n', y[:10, :10]) ###Output x [[54 28 58 59 46 56 22 47 70 45] [ 0 74 40 47 46 28 58 46 47 58] [56 40 41 47 74 22 47 58 47 38] [ 0 47 46 28 56 47 24 28 57 56] [47 0 58 4 47 28 56 22 47 46] [24 52 0 0 57 74 40 47 58 40] [47 48 40 40 58 47 28 58 41 47] [71 33 7 74 40 0 44 77 20 47]] y [[28 58 59 46 56 22 47 70 45 45] [74 40 47 46 28 58 46 47 58 46] [40 41 47 74 22 47 58 47 38 74] [47 46 28 56 47 24 28 57 56 38] [ 0 58 4 47 28 56 22 47 46 56] [52 0 0 57 74 40 47 58 40 41] [48 40 40 58 47 28 58 41 47 0] [33 7 74 40 0 44 77 20 47 67]] ###Markdown --- Defining the network with PyTorch아래와 같은 network를 구성해 보자.![img](../assets/charRNN.png) Model Structure`__init__` 는 다음과 같이 작성한다:* 필요한 dictionary를 생성한다.* LSTM layer는 input size (characters 갯수), hidden layer size `n_hidden`, layers 갯수 `n_layers`, dropout 확률로 `drop_prob`, 그리고 batch_first = True로 설정한다.* dropout layer 를 설정한다.* fully-connected layer는 input size `n_hidden`와 output size (characters의 갯수)로 생성한다.* 최종적으로 weight를 초기화한다. --- LSTM Inputs/Outputs기본적으로 [LSTM layer](https://pytorch.org/docs/stable/nn.htmllstm) 은 다음과 같이 작성한다.```pythonself.lstm = nn.LSTM(input_size, n_hidden, n_layers, dropout=drop_prob, batch_first=True)````input_size` 는 characters의 갯수이다. sequential input을 받고 `n_hidden`는 hidden layers 에서의 unit 개수이다. dropout을 설정할 수 있다. 마지막으로 `forward` function에서 LSTM cells을 쌓아 올린다.hidden state 의 초기 상태는 모두 0으로 초기화한다.```pythonself.init_hidden()``` ###Code # check if GPU is available train_on_gpu = torch.cuda.is_available() if(train_on_gpu): print('Training on GPU!') else: print('No GPU available, training on CPU; consider making n_epochs very small.') class CharRNN(nn.Module): def __init__(self, tokens, n_hidden=256, n_layers=2, drop_prob=0.5, lr=0.001): super().__init__() self.drop_prob = drop_prob self.n_layers = n_layers self.n_hidden = n_hidden self.lr = lr # creating character dictionaries self.chars = tokens self.int2char = dict(enumerate(self.chars)) self.char2int = {ch: ii for ii, ch in self.int2char.items()} ## TODO: define the layers of the model self.lstm = nn.LSTM(len(self.chars), n_hidden, n_layers, dropout=drop_prob, batch_first=True) self.dropout = nn.Dropout(p=drop_prob) self.fc = nn.Linear(n_hidden, len(self.chars)) def forward(self, x, hidden): ''' Forward pass through the network. These inputs are x, and the hidden/cell state `hidden`. 
''' ## TODO: Get the outputs and the new hidden state from the lstm r_output, hidden = self.lstm(x, hidden) # lstm 수행하면 수행 결과와 새로운 히든이 출력으로. out = self.dropout(r_output) # lstm출력을 dropout하여 out = out.contiguous().view(-1, self.n_hidden) # shape 바꾸어서 다음 layer로 stack out = self.fc(out) # return the final output and the hidden state return out, hidden def init_hidden(self, batch_size): ''' Initializes hidden state ''' # Create two new tensors with sizes n_layers x batch_size x n_hidden, # initialized to zero, for hidden state and cell state of LSTM weight = next(self.parameters()).data if (train_on_gpu): hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(), weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda()) else: hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(), weight.new(self.n_layers, batch_size, self.n_hidden).zero_()) return hidden ###Output _____no_output_____ ###Markdown Time to train훈련하기 위한 epochs수와 learning rate,그리고 기타 parameters를 적절히 설정한다.Adam optimizer 와 cross entropy loss 를 사용한다. > * gradient가 지수 함수로 증가하는 경우가 있을 수 있다. 이 문제는 exploding gradient 문제로 잘 알려져 있다. [`clip_grad_norm_`](https://pytorch.org/docs/stable/_modules/torch/nn/utils/clip_grad.html) 를 사용하여 gradients exploding을 방지한다. ###Code def train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10): ''' Training a network Arguments --------- net: CharRNN network data: text data to train the network epochs: Number of epochs to train batch_size: Number of mini-sequences per mini-batch, aka batch size seq_length: Number of character steps per mini-batch lr: learning rate clip: gradient clipping val_frac: Fraction of data to hold out for validation print_every: Number of steps for printing training and validation loss ''' net.train() opt = torch.optim.Adam(net.parameters(), lr=lr) criterion = nn.CrossEntropyLoss() # create training and validation data val_idx = int(len(data)*(1-val_frac)) data, val_data = data[:val_idx], data[val_idx:] if(train_on_gpu): net.cuda() counter = 0 n_chars = len(net.chars) for e in range(epochs): # initialize hidden state h = net.init_hidden(batch_size) for x, y in get_batches(data, batch_size, seq_length): counter += 1 # One-hot encode our data and make them Torch tensors x = one_hot_encode(x, n_chars) inputs, targets = torch.from_numpy(x), torch.from_numpy(y) if(train_on_gpu): inputs, targets = inputs.cuda(), targets.cuda() # Creating new variables for the hidden state, otherwise # we'd backprop through the entire training history h = tuple([each.data for each in h]) # zero accumulated gradients net.zero_grad() # get the output from the model output, h = net(inputs, h) # calculate the loss and perform backprop loss = criterion(output, targets.view(batch_size*seq_length)) loss.backward() # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs. 
nn.utils.clip_grad_norm_(net.parameters(), clip) opt.step() # loss stats if counter % print_every == 0: # Get validation loss val_h = net.init_hidden(batch_size) val_losses = [] net.eval() for x, y in get_batches(val_data, batch_size, seq_length): # One-hot encode our data and make them Torch tensors x = one_hot_encode(x, n_chars) x, y = torch.from_numpy(x), torch.from_numpy(y) # Creating new variables for the hidden state, otherwise # we'd backprop through the entire training history val_h = tuple([each.data for each in val_h]) inputs, targets = x, y if(train_on_gpu): inputs, targets = inputs.cuda(), targets.cuda() output, val_h = net(inputs, val_h) val_loss = criterion(output, targets.view(batch_size*seq_length)) val_losses.append(val_loss.item()) net.train() # reset to train mode after iterationg through validation data print("Epoch: {}/{}...".format(e+1, epochs), "Step: {}...".format(counter), "Loss: {:.4f}...".format(loss.item()), "Val Loss: {:.4f}".format(np.mean(val_losses))) ###Output _____no_output_____ ###Markdown Instantiating the modelNetwork instance를 생성하고 hyperparameters를 설정한다. 그리고 mini-batches sizes를 설정하고 training한다. ###Code ## TODO: set you model hyperparameters # define and print the net n_hidden= 512 n_layers= 2 net = CharRNN(chars, n_hidden, n_layers) print(net) ###Output CharRNN( (lstm): LSTM(83, 512, num_layers=2, batch_first=True, dropout=0.5) (dropout): Dropout(p=0.5, inplace=False) (fc): Linear(in_features=512, out_features=83, bias=True) ) ###Markdown Set your training hyperparameters! ###Code batch_size = 128 seq_length = 100 n_epochs = 1 # start small if you are just testing initial behavior print('Training the model...') # train the model train(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=10) ###Output Epoch: 1/1... Step: 10... Loss: 3.2768... Val Loss: 3.2368 Epoch: 1/1... Step: 20... Loss: 3.1438... Val Loss: 3.1355 Epoch: 1/1... Step: 30... Loss: 3.1416... Val Loss: 3.1234 Epoch: 1/1... Step: 40... Loss: 3.1145... Val Loss: 3.1191 Epoch: 1/1... Step: 50... Loss: 3.1443... Val Loss: 3.1169 Epoch: 1/1... Step: 60... Loss: 3.1202... Val Loss: 3.1156 Epoch: 1/1... Step: 70... Loss: 3.1051... Val Loss: 3.1135 Epoch: 1/1... Step: 80... Loss: 3.1223... Val Loss: 3.1092 Epoch: 1/1... Step: 90... Loss: 3.1138... Val Loss: 3.0970 Epoch: 1/1... Step: 100... Loss: 3.0803... Val Loss: 3.0663 Epoch: 1/1... Step: 110... Loss: 3.0323... Val Loss: 3.0133 Epoch: 1/1... Step: 120... Loss: 2.9059... Val Loss: 2.8930 Epoch: 1/1... Step: 130... Loss: 2.8215... Val Loss: 2.7923 ###Markdown Hyperparameters설정이 필요한 hyperparameters 는 다음과 같다..* `n_hidden` - The number of units in the hidden layers.* `n_layers` - Number of hidden LSTM layers to use.* `batch_size` - Number of sequences running through the network in one pass.* `seq_length` - Number of characters in the sequence. 보통 큰 값으로 설정하면 더 긴 내용을 학습할 수 있다.너무 크게 하면 학습이 오래 걸린다. * `lr` - Learning rate for training Tips and Tricks> Validation Loss vs. Training Loss을 확인한다.>- 보통 training loss와 validation loss 의 차이가 심하면 **overfitting**된다고 본다. 그런 경우에는 network size를 줄이거나 dropout을 설정한다.>- 만약, training/validation loss가 같으면 **underfitting**되었다고 본다. 그럴 경우에는 layer의 갯수, layer당 unit의 갯수를 증가한다.> 적절한 hyper parameter를 설정한다.> 중요한 parameters는 `n_hidden`,`n_layers` 2개 이다. 이 parameter는 데이터셋의 크기에 따라 달라진다. 보통의 경우에는 전체 train resource를 고려하여 좀 더 큰 네트워크를 만들어서 train을 진행한다. validation, train loss를 살펴보고 overfitting 되면 dropout 등을 추가적으로 주고 훈련을 진행한다. 이때 validation loss가 최소가 되는 모형을 최종적인 모형으로 저장한다. 
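###Markdown
The tip above suggests keeping the model whose validation loss is lowest. Below is a minimal sketch (not part of the original solution) of one way to do that. It assumes the helper is called inside `train` at the point where `val_losses` has just been computed, starting from `best_val_loss = np.inf`, and it reuses the checkpoint format shown in the next section.
###Code
import numpy as np
import torch

def save_if_best(net, val_losses, best_val_loss, path='rnn_best.net'):
    """Save a checkpoint only when the mean validation loss improves; return the new best value."""
    mean_val = np.mean(val_losses)
    if mean_val < best_val_loss:
        checkpoint = {'n_hidden': net.n_hidden,
                      'n_layers': net.n_layers,
                      'state_dict': net.state_dict(),
                      'tokens': net.chars}
        with open(path, 'wb') as f:
            torch.save(checkpoint, f)
        return mean_val
    return best_val_loss
###Markdown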
Checkpoint훈련이 종료되면 최종적인 모형을 저장한다. ###Code # change the name, for saving multiple files model_name = 'rnn_1_epoch.net' checkpoint = {'n_hidden': net.n_hidden, 'n_layers': net.n_layers, 'state_dict': net.state_dict(), 'tokens': net.chars} with open(model_name, 'wb') as f: torch.save(checkpoint, f) ###Output _____no_output_____ ###Markdown --- Making Predictions모형이 잘 작동하여 문장을 제대로 생성하는지 확인해 본다. A note on the `predict` functionRNN의 출력 값은 주어진 입력값 다음에 나올 문자를 예측하여 출력한다. 문자의 score값을 출력하므로 이를 확률로 전환해야 한다. 확률로 전환하기 위해 softmax함수를 사용한다.> softmax function를 적용하면 주어진 입력 다음에 나올 문자에 대한 확률값을 출력하게 된다. Top K sampling다음 나올 문자의 확률이 높은 k개의 문자만이 최종 출력되도록 해야한다. ###Code def predict(net, char, h=None, top_k=None): ''' Given a character, predict the next character. Returns the predicted character and the hidden state. ''' # tensor inputs x = np.array([[net.char2int[char]]]) x = one_hot_encode(x, len(net.chars)) inputs = torch.from_numpy(x) if(train_on_gpu): inputs = inputs.cuda() # detach hidden state from history h = tuple([each.data for each in h]) # get the output of the model out, h = net(inputs, h) # get the character probabilities p = F.softmax(out, dim=1).data if(train_on_gpu): p = p.cpu() # move to cpu # get top characters if top_k is None: top_ch = np.arange(len(net.chars)) else: p, top_ch = p.topk(top_k) top_ch = top_ch.numpy().squeeze() # select the likely next character with some element of randomness p = p.numpy().squeeze() char = np.random.choice(top_ch, p=p/p.sum()) # return the encoded value of the predicted char and the hidden state return net.int2char[char], h ###Output _____no_output_____ ###Markdown Priming and generating text 보통 문장의 시작 단어를 prime 값(=초기값)으로 준다. 그러면 이 문자열을 기반으로 하여 문장을 생성한다. ###Code def sample(net, size, prime='The', top_k=None): if(train_on_gpu): net.cuda() else: net.cpu() net.eval() # eval mode # First off, run through the prime characters chars = [ch for ch in prime] h = net.init_hidden(1) for ch in prime: char, h = predict(net, ch, h, top_k=top_k) chars.append(char) # Now pass in the previous character and get a new one for ii in range(size): char, h = predict(net, chars[-1], h, top_k=top_k) chars.append(char) return ''.join(chars) print(sample(net, 1000, prime='Anna', top_k=5)) ###Output Annas ootied he oe an oose he aas hed taree he tatin an to sine hadd aosse nas tetin tise ha hera had an oo hater natian the hh hetthes ond aetins hosinn ate ande atit oad, oan he tian tan ansee ahed aad, ans ans het hind tisdinsetisesiod toe sasind, her nnt an shi tone hod hertit attat thad sines hes ind ate ne hhr se hasdised hir hottes hhe an tote aa tan thes ote ter to sisro tare he ae sintes ho teet at aons herteed terereta toteestin aat ho ao ae ho an sin toee hor tetittere tond, thot ion the sori ae hede hantia shis hin ao has tine te aane hes oteire sat ios tins he she tontee hit tae heteran tae atrettat intiad tore toesitistit ind oad ate sititrin hored hotie ten oo the toed so se sha he toesse tho an ho hod the seese thes ot at ho had waterar tat tot an san she sar san aad tene thr to hh aes ae ood hod an tane ton he aodd an hira hhe his oas aer tat he hh he het oade ne toe hh atret tons has tare tot ao at hhise tie ton seee sa nhos ha he aos hir ned ho oe sot ane aond sote ###Markdown Loading a checkpoint ###Code # Here we have loaded in a model that trained over 20 epochs `rnn_20_epoch.net` with open('rnn_20_epoch.net', 'rb') as f: checkpoint = torch.load(f, map_location=lambda storage, loc: storage) loaded = CharRNN(checkpoint['tokens'], n_hidden=checkpoint['n_hidden'], 
n_layers=checkpoint['n_layers']) loaded.load_state_dict(checkpoint['state_dict']) # Sample using a loaded model print(sample(loaded, 2000, top_k=5, prime="And Levin said")) ###Output And Levin said to herself, to be answered, and to the club changes of his hat for the doctor's perfect woman when she could not be a pretty doctor's conversation. All the times on the churce of what it was the first time an old meant which, still seemed to her heart, but he could not be seen in speaking. She was chief that she went, and held his face all that he had nothing, and he felt that the money, she had been a stepped and a man who had been almost all the secretary, his eyes of the princess at the clear on the feeling that something were strange feeling for him, to ask an instant that he was this tone that he did not know them, but he saw at the table of his from her, as to her hands, he had not to decide. The strength of his shoted weight, to be told her face and he felt that it was a cried of his hands to his hands. "You're seeing there about it." "I have been at a little of taken," said Stepan Arkadyevitch, louking at her hands, and so struck his brother and talked with her hat, and as a carefully repont with his study with seciness with the steps of the statements at the memory, which had to be still to be the stop, he shaded him, the peasants, as it were, with a colored hearted looking approach and her sister's carriage was the station of socance. And so he said his frings and his stopped strange woman, said his brother. The convincions were so must be found herself, were a strange sound of the mistress and significantly only a cheeks, the cheer of the streams, she had taken up a sort, but were a partier, he was carrying himself out on the starts of all the country and his crowned attention. "There's nothing it all at out that some seemed a men. And when I'm such a side of his fach, though I do the same, and what he can be decented, and I am to go into the stairs and with him. And you've cried them to be done?" said Levin, liking his eyes off. The day to the sick coan had and the doctor was not the starl of his hands when she had been the forest and heally would ha
notebooks/PytorchNoteBook.ipynb
###Markdown Model Interpretation ###Code #prediction = predict_probability(real_model, validation_transforms, image_path) interpret_model(real_model,validation_transforms, image_path, label_idx, use_cpu=True, intrepret_type="integrated gradients") interpret_model(real_model,validation_transforms, image_path, label_idx, use_cpu=True, intrepret_type="gradient shap") interpret_model(real_model,validation_transforms, image_path, label_idx, use_cpu=True, intrepret_type="saliency") interpret_model(real_model,validation_transforms, image_path2, label_idx, use_cpu=True, intrepret_type="integrated gradients") interpret_model(real_model,validation_transforms, image_path2, label_idx, use_cpu=True, intrepret_type="gradient shap") interpret_model(real_model,validation_transforms, image_path2, label_idx, use_cpu=True, intrepret_type="saliency") #train data directory test_dir = 'C:/Peter Moss AML Leukemia Research/Dataset/all_test/' test_csv = 'C:/Peter Moss AML Leukemia Research/Dataset/test.csv' label_csv = pd.read_csv(test_csv) testset = LeukemiaDataset(df_data=label_csv, data_dir=test_dir, transform=validation_transforms) test_data_loader = DataLoader(testset, batch_size=batch_size, shuffle=True) show_predictions(model=real_model, class_names=class_name, test_data_loader=test_data_loader, n_images=6) ###Output _____no_output_____
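###Markdown
`interpret_model` and `predict_probability` are helper functions defined elsewhere in this project. As a rough, hypothetical illustration only, the sketch below shows how such a helper could be built on top of Captum; the function body, argument handling, and preprocessing are assumptions rather than the project's actual implementation. The "gradient shap" option used above would follow the same pattern with a set of baseline images.
###Code
import torch
from PIL import Image
from captum.attr import IntegratedGradients, Saliency

def interpret_model_sketch(model, transforms, image_path, label_idx,
                           use_cpu=True, intrepret_type="integrated gradients"):
    """Hypothetical sketch of an attribution helper; not the project's interpret_model."""
    device = torch.device('cpu' if use_cpu else 'cuda')
    model = model.to(device).eval()

    # load and preprocess the image the same way the validation transforms would
    image = Image.open(image_path).convert('RGB')
    input_tensor = transforms(image).unsqueeze(0).to(device)
    input_tensor.requires_grad_()

    if intrepret_type == "integrated gradients":
        attributions = IntegratedGradients(model).attribute(input_tensor, target=label_idx)
    elif intrepret_type == "saliency":
        attributions = Saliency(model).attribute(input_tensor, target=label_idx)
    else:
        raise ValueError("unsupported interpretation type: " + intrepret_type)

    # return the attributions as an (H, W, C) numpy array for plotting
    return attributions.squeeze(0).permute(1, 2, 0).detach().cpu().numpy()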
C4 Machine Learning II/LABS_PROJECT/Tech_Fun_C4_P7_Game_AI_Reinforcement_Learning.ipynb
###Markdown Technology Fundamentals Course 4, Project Part 7: Reinforcement Learning**Instructor**: Wesley Beckner**Contact**: [email protected]**Teaching Assitants**: Varsha Bang, Wesley Beckner**Contact**: [email protected], [email protected] this lesson we'll abandon the world of heuristical agents and embrace the wilds of reinforcement learning--- 6.0 Preparing Environment and Importing Data[back to top](top) 6.0.1 Import Packages[back to top](top) baselines requires an older version of TF ###Code pip install tensorflow==1.15.0 ###Output Collecting tensorflow==1.15.0 [?25l Downloading https://files.pythonhosted.org/packages/92/2b/e3af15221da9ff323521565fa3324b0d7c7c5b1d7a8ca66984c8d59cb0ce/tensorflow-1.15.0-cp37-cp37m-manylinux2010_x86_64.whl (412.3MB)  |████████████████████████████████| 412.3MB 41kB/s [?25hCollecting keras-applications>=1.0.8 [?25l Downloading https://files.pythonhosted.org/packages/71/e3/19762fdfc62877ae9102edf6342d71b28fbfd9dea3d2f96a882ce099b03f/Keras_Applications-1.0.8-py3-none-any.whl (50kB)  |████████████████████████████████| 51kB 6.1MB/s [?25hRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15.0) (1.1.0) Requirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15.0) (0.12.0) Requirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15.0) (1.12.1) Collecting gast==0.2.2 Downloading https://files.pythonhosted.org/packages/4e/35/11749bf99b2d4e3cceb4d55ca22590b0d7c2c62b9de38ac4a4a7f4687421/gast-0.2.2.tar.gz Collecting tensorboard<1.16.0,>=1.15.0 [?25l Downloading https://files.pythonhosted.org/packages/1e/e9/d3d747a97f7188f48aa5eda486907f3b345cd409f0a0850468ba867db246/tensorboard-1.15.0-py3-none-any.whl (3.8MB)  |████████████████████████████████| 3.8MB 28.8MB/s [?25hRequirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15.0) (0.36.2) Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15.0) (3.3.0) Collecting tensorflow-estimator==1.15.1 [?25l Downloading https://files.pythonhosted.org/packages/de/62/2ee9cd74c9fa2fa450877847ba560b260f5d0fb70ee0595203082dafcc9d/tensorflow_estimator-1.15.1-py2.py3-none-any.whl (503kB)  |████████████████████████████████| 512kB 46.4MB/s [?25hRequirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15.0) (1.1.2) Requirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15.0) (3.17.3) Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15.0) (1.34.1) Requirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15.0) (0.8.1) Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15.0) (1.15.0) Requirement already satisfied: google-pasta>=0.1.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15.0) (0.2.0) Requirement already satisfied: numpy<2.0,>=1.16.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15.0) (1.19.5) Requirement already satisfied: h5py in /usr/local/lib/python3.7/dist-packages (from keras-applications>=1.0.8->tensorflow==1.15.0) (3.1.0) Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from 
tensorboard<1.16.0,>=1.15.0->tensorflow==1.15.0) (3.3.4) Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow==1.15.0) (57.0.0) Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow==1.15.0) (1.0.1) Requirement already satisfied: cached-property; python_version < "3.8" in /usr/local/lib/python3.7/dist-packages (from h5py->keras-applications>=1.0.8->tensorflow==1.15.0) (1.5.2) Requirement already satisfied: importlib-metadata; python_version < "3.8" in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow==1.15.0) (4.6.0) Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < "3.8"->markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow==1.15.0) (3.4.1) Requirement already satisfied: typing-extensions>=3.6.4; python_version < "3.8" in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < "3.8"->markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow==1.15.0) (3.7.4.3) Building wheels for collected packages: gast Building wheel for gast (setup.py) ... [?25l[?25hdone Created wheel for gast: filename=gast-0.2.2-cp37-none-any.whl size=7557 sha256=5f1c2a767c53a986bb5923c892c6890ec5f3f2e14774d7f2cf76d0c88ae6beed Stored in directory: /root/.cache/pip/wheels/5c/2e/7e/a1d4d4fcebe6c381f378ce7743a3ced3699feb89bcfbdadadd Successfully built gast ERROR: tensorflow-probability 0.13.0 has requirement gast>=0.3.2, but you'll have gast 0.2.2 which is incompatible. ERROR: kapre 0.3.5 has requirement tensorflow>=2.0.0, but you'll have tensorflow 1.15.0 which is incompatible. 
Installing collected packages: keras-applications, gast, tensorboard, tensorflow-estimator, tensorflow Found existing installation: gast 0.4.0 Uninstalling gast-0.4.0: Successfully uninstalled gast-0.4.0 Found existing installation: tensorboard 2.5.0 Uninstalling tensorboard-2.5.0: Successfully uninstalled tensorboard-2.5.0 Found existing installation: tensorflow-estimator 2.5.0 Uninstalling tensorflow-estimator-2.5.0: Successfully uninstalled tensorflow-estimator-2.5.0 Found existing installation: tensorflow 2.5.0 Uninstalling tensorflow-2.5.0: Successfully uninstalled tensorflow-2.5.0 Successfully installed gast-0.2.2 keras-applications-1.0.8 tensorboard-1.15.0 tensorflow-1.15.0 tensorflow-estimator-1.15.1 ###Markdown install baselines from openAI ###Code !apt-get update !apt-get install -y cmake libopenmpi-dev python3-dev zlib1g-dev !pip install "stable-baselines[mpi]==2.9.0" # Check version of tensorflow import tensorflow as tf tf.__version__ from gym import spaces import gym from stable_baselines.common.env_checker import check_env import random import pandas as pd import numpy as np import matplotlib.pyplot as plt def n_step_ai(board, win_patterns, player_label, n_steps=3): opponent = ['X', 'O'] opponent.remove(player_label) opponent = opponent[0] avail_moves = {i: 1 for i in board.keys() if board[i] == ' '} for move in avail_moves.keys(): temp_board = board.copy() temp_board[move] = player_label score = get_minimax(n_steps, temp_board, player_label) avail_moves[move] = score ########################################## ### The rest of our ai agent harness is the same ########################################## # first grab max score max_score = max(avail_moves.values()) # then select all moves that have this max score valid = [] for key, value in avail_moves.items(): if value == max_score: valid.append(key) # return a random selection of the moves with the max score move = random.choice(valid) return move def minimax(depth, board, maximizing_player, player_label, verbiose=False): # infer the opponent opponent = ['X', 'O'] opponent.remove(player_label) opponent = opponent[0] # set the available moves avail_moves = [i for i in board.keys() if board[i] == ' '] # check if the depth is 0, or stalemate/winner has been reached # if so this is the basecase and we want to return get_score() terminal_move = is_terminal_node(board, avail_moves) if terminal_move or depth == 0: score = get_score(board, player_label, win_patterns) if verbiose: print('{} score: {}. depth: {}'.format(board, score, depth)) return score ### in the following we want to search through every possible board at the ### current level (the possible moves for the current player, given that the ### player is either the one whose turn it is or the imagined opponent) # call minimax where it is the current players turn and so we want to # maximize the score if maximizing_player: score = -np.Inf for move in avail_moves: new_board = board.copy() new_board[move] = player_label score = max(score, minimax(depth-1, new_board, False, player_label, verbiose)) if verbiose: print('{} max. score: {}. depth: {}'.format(board, score, depth)) return score # call minimax where it is the opponent players turn and so we want to # minimize the score elif not maximizing_player: score = np.Inf for move in avail_moves: new_board = board.copy() new_board[move] = opponent score = min(score, minimax(depth-1, new_board, True, player_label, verbiose)) if verbiose: print('{} min. score: {}. 
depth: {}'.format(board, score, depth)) return score def is_terminal_node(board, avail_moves): if check_winning(board, win_patterns): return True elif check_stalemate(board, win_patterns): return True else: return False def get_score(board, player_label, win_patterns): # this will look somewhat similar to our 1-step lookahead algorithm opponent = ['X', 'O'] opponent.remove(player_label) opponent = opponent[0] score = 0 for pattern in win_patterns: values = [board[i] for i in pattern] # if the opponent wins, the score is -100 if values == [opponent, opponent, opponent]: score = -100 elif values == [player_label, player_label, player_label]: score = 100 return score # we're going to pull out and reformat some of our helper functions in the # TicTacToe class win_patterns = [[1,2,3], [4,5,6], [7,8,9], [1,4,7], [2,5,8], [3,6,9], [1,5,9], [7,5,3]] def check_winning(board, win_patterns): for pattern in win_patterns: values = [board[i] for i in pattern] if values == ['X', 'X', 'X'] or values == ['O', 'O', 'O']: return True return False def check_stalemate(board, win_patterns): if (' ' not in board.values()) and (check_winning(board, win_patterns) == ''): return True return False def get_minimax(depth, board, player_label, verbiose=False): score = minimax(depth-1, board, False, player_label, verbiose=verbiose) return score def n_step_ai_temp(board, win_patterns, player_label, n_steps, verbiose=False): opponent = ['X', 'O'] opponent.remove(player_label) opponent = opponent[0] avail_moves = {i: 1 for i in board.keys() if board[i] == ' '} for move in avail_moves.keys(): temp_board = board.copy() temp_board[move] = player_label score = get_minimax(n_steps, temp_board, player_label, verbiose=verbiose) avail_moves[move] = score return avail_moves def one_step_ai(board, win_patterns, player_label): opponent = ['X', 'O'] opponent.remove(player_label) opponent = opponent[0] avail_moves = {i: 1 for i in board.keys() if board[i] == ' '} temp_board = board.copy() ######################################## # we're going to change the following lines, instead of caring # whether we've found the best move, we want to update the move # with a score ######################################## # check if the opponent has a winning move first, we will overwrite # the score for this move if it is also a winning move for the current # player for move in avail_moves.keys(): temp_board[move] = opponent for pattern in win_patterns: values = [temp_board[i] for i in pattern] if values == [opponent, opponent, opponent]: avail_moves[move] = 10 temp_board[move] = ' ' for move in avail_moves.keys(): temp_board[move] = player_label for pattern in win_patterns: values = [temp_board[i] for i in pattern] if values == [player_label, player_label, player_label]: avail_moves[move] = 100 temp_board[move] = ' ' # first grab max score max_score = max(avail_moves.values()) # then select all moves that have this max score valid = [] for key, value in avail_moves.items(): if value == max_score: valid.append(key) # return a random selection of the moves with the max score move = random.choice(valid) return move class TicTacToe: # can preset winner and starting player def __init__(self, winner='', start_player=''): self.winner = winner self.start_player = start_player self.board = {1: ' ', 2: ' ', 3: ' ', 4: ' ', 5: ' ', 6: ' ', 7: ' ', 8: ' ', 9: ' ',} self.win_patterns = [[1,2,3], [4,5,6], [7,8,9], [1,4,7], [2,5,8], [3,6,9], [1,5,9], [7,5,3]] # the other functions are now passed self def visualize_board(self): print( 
"|{}|{}|{}|\n|{}|{}|{}|\n|{}|{}|{}|\n".format(*self.board.values()) ) def check_winning(self): for pattern in self.win_patterns: values = [self.board[i] for i in pattern] if values == ['X', 'X', 'X']: self.winner = 'X' # we update the winner status return "'X' Won!" elif values == ['O', 'O', 'O']: self.winner = 'O' return "'O' Won!" return '' def check_stalemate(self): if (' ' not in self.board.values()) and (self.check_winning() == ''): self.winner = 'Stalemate' return "It's a stalemate!" class GameEngine(TicTacToe): def __init__(self, setup='auto', user_ai=None): super().__init__() self.setup = setup self.user_ai = user_ai def heuristic_ai(self, player_label): opponent = ['X', 'O'] opponent.remove(player_label) opponent = opponent[0] avail_moves = [i for i in self.board.keys() if self.board[i] == ' '] temp_board = self.board.copy() middle = 5 corner = [1,3,7,9] side = [2,4,6,8] # first check for a winning move move_found = False for move in avail_moves: temp_board[move] = player_label for pattern in self.win_patterns: values = [temp_board[i] for i in pattern] if values == [player_label, player_label, player_label]: move_found = True break if move_found: break else: temp_board[move] = ' ' # check if the opponent has a winning move if move_found == False: for move in avail_moves: temp_board[move] = opponent for pattern in self.win_patterns: values = [temp_board[i] for i in pattern] if values == [opponent, opponent, opponent]: move_found = True break if move_found: break else: temp_board[move] = ' ' # check if middle avail if move_found == False: if middle in avail_moves: move_found = True move = middle # check corners if move_found == False: move_corner = [val for val in avail_moves if val in corner] if len(move_corner) > 0: move = random.choice(move_corner) move_found = True # check side if move_found == False: move_side = [val for val in avail_moves if val in side] if len(move_side) > 0: move = random.choice(move_side) move_found = True return move def random_ai(self): while True: move = random.randint(1,9) if self.board[move] != ' ': continue else: break return move def setup_game(self): if self.setup == 'user': players = int(input("How many Players? (type 0, 1, or 2)")) self.player_meta = {'first': {'label': 'X', 'type': 'ai'}, 'second': {'label': 'O', 'type': 'human'}} if players != 2: ########## # Allow the user to set the ai level ########## ### if they have not provided an ai_agent if self.user_ai == None: level = int(input("select AI level (1, 2)")) if level == 1: self.ai_level = 1 elif level == 2: self.ai_level = 2 else: print("Unknown AI level entered, this will cause problems") else: self.ai_level = 3 if players == 1: first = input("who will go first? 
(X, (AI), or O (Player))") if first == 'O': self.player_meta = {'second': {'label': 'X', 'type': 'ai'}, 'first': {'label': 'O', 'type': 'human'}} elif players == 0: first = random.choice(['X', 'O']) if first == 'O': self.player_meta = {'second': {'label': 'X', 'type': 'ai'}, 'first': {'label': 'O', 'type': 'ai'}} else: self.player_meta = {'first': {'label': 'X', 'type': 'ai'}, 'second': {'label': 'O', 'type': 'ai'}} elif self.setup == 'auto': first = random.choice(['X', 'O']) if first == 'O': self.start_player = 'O' self.player_meta = {'second': {'label': 'X', 'type': 'ai'}, 'first': {'label': 'O', 'type': 'ai'}} else: self.start_player = 'X' self.player_meta = {'first': {'label': 'X', 'type': 'ai'}, 'second': {'label': 'O', 'type': 'ai'}} ########## # and automatically set the ai level otherwise ########## if self.user_ai == None: self.ai_level = 2 else: self.ai_level = 3 def play_game(self): while True: for player in ['first', 'second']: self.visualize_board() player_label = self.player_meta[player]['label'] player_type = self.player_meta[player]['type'] if player_type == 'human': move = input("{}, what's your move?".format(player_label)) # we're going to allow the user to quit the game from the input line if move in ['q', 'quit']: self.winner = 'F' print('quiting the game') break move = int(move) if self.board[move] != ' ': while True: move = input("{}, that position is already taken! "\ "What's your move?".format(player_label)) move = int(move) if self.board[move] != ' ': continue else: break else: ########## # Our level 1 ai agent (random) ########## if self.ai_level == 1: move = self.random_ai() ########## # Our level 2 ai agent (heuristic) ########## elif self.ai_level == 2: move = self.heuristic_ai(player_label) ########## # Our user-defined AI agent ########## elif self.ai_level == 3: move = self.user_ai(self.board, self.win_patterns, player_label) self.board[move] = player_label # the winner varaible will now be check within the board object self.check_winning() self.check_stalemate() if self.winner == '': continue elif self.winner == 'Stalemate': print(self.check_stalemate()) self.visualize_board() break else: print(self.check_winning()) self.visualize_board() break if self.winner != '': return self ###Output _____no_output_____ ###Markdown 6.0.2 Run Tests ###Code def test_n_step_ai(): random.seed(42) game = GameEngine(setup='auto', user_ai=n_step_ai) game.setup_game() game.play_game() # check that the winner is X assert game.winner == 'X', "Winner should be X!" # check that the ai level is set to 3 which means our engine is properly # accessing the user defined ai assert game.ai_level == 3, "The engine is not using the user defined AI!" test_n_step_ai() ###Output | | | | | | | | | | | | |X| | | | | | | | | | | |X| | | | | |O| | | | | |X| |X| | | |O| | | | | |X|O|X| | | |O| | | | | |X|O|X| | |X|O| | | | | |X|O|X| |O|X|O| | | | | 'X' Won! |X|O|X| |O|X|O| | | |X| ###Markdown 6.1 Reinforcement Learning: Reset, Step, and RewardFirstly, to interact with OpenAI Gym, we need to include a method of reseting the current game. 
6.1.2 Reset ###Code class GameEngine(TicTacToe): def __init__(self, setup='auto', user_ai=None): super().__init__() self.setup = setup self.user_ai = user_ai def heuristic_ai(self, player_label): opponent = ['X', 'O'] opponent.remove(player_label) opponent = opponent[0] avail_moves = [i for i in self.board.keys() if self.board[i] == ' '] temp_board = self.board.copy() middle = 5 corner = [1,3,7,9] side = [2,4,6,8] # first check for a winning move move_found = False for move in avail_moves: temp_board[move] = player_label for pattern in self.win_patterns: values = [temp_board[i] for i in pattern] if values == [player_label, player_label, player_label]: move_found = True break if move_found: break else: temp_board[move] = ' ' # check if the opponent has a winning move if move_found == False: for move in avail_moves: temp_board[move] = opponent for pattern in self.win_patterns: values = [temp_board[i] for i in pattern] if values == [opponent, opponent, opponent]: move_found = True break if move_found: break else: temp_board[move] = ' ' # check if middle avail if move_found == False: if middle in avail_moves: move_found = True move = middle # check corners if move_found == False: move_corner = [val for val in avail_moves if val in corner] if len(move_corner) > 0: move = random.choice(move_corner) move_found = True # check side if move_found == False: move_side = [val for val in avail_moves if val in side] if len(move_side) > 0: move = random.choice(move_side) move_found = True return move def random_ai(self): while True: move = random.randint(1,9) if self.board[move] != ' ': continue else: break return move def setup_game(self): if self.setup == 'user': players = int(input("How many Players? (type 0, 1, or 2)")) self.player_meta = {'first': {'label': 'X', 'type': 'ai'}, 'second': {'label': 'O', 'type': 'human'}} if players != 2: ########## # Allow the user to set the ai level ########## ### if they have not provided an ai_agent if self.user_ai == None: level = int(input("select AI level (1, 2)")) if level == 1: self.ai_level = 1 elif level == 2: self.ai_level = 2 else: print("Unknown AI level entered, this will cause problems") else: self.ai_level = 3 if players == 1: first = input("who will go first? 
(X, (AI), or O (Player))") if first == 'O': self.player_meta = {'second': {'label': 'X', 'type': 'ai'}, 'first': {'label': 'O', 'type': 'human'}} elif players == 0: first = random.choice(['X', 'O']) if first == 'O': self.player_meta = {'second': {'label': 'X', 'type': 'ai'}, 'first': {'label': 'O', 'type': 'ai'}} else: self.player_meta = {'first': {'label': 'X', 'type': 'ai'}, 'second': {'label': 'O', 'type': 'ai'}} elif self.setup == 'auto': first = random.choice(['X', 'O']) if first == 'O': self.start_player = 'O' self.player_meta = {'second': {'label': 'X', 'type': 'ai'}, 'first': {'label': 'O', 'type': 'ai'}} else: self.start_player = 'X' self.player_meta = {'first': {'label': 'X', 'type': 'ai'}, 'second': {'label': 'O', 'type': 'ai'}} ########## # and automatically set the ai level otherwise ########## if self.user_ai == None: self.ai_level = 2 else: self.ai_level = 3 def play_game(self): while True: for player in ['first', 'second']: self.visualize_board() player_label = self.player_meta[player]['label'] player_type = self.player_meta[player]['type'] if player_type == 'human': move = input("{}, what's your move?".format(player_label)) # we're going to allow the user to quit the game from the input line if move in ['q', 'quit']: self.winner = 'F' print('quiting the game') break move = int(move) if self.board[move] != ' ': while True: move = input("{}, that position is already taken! "\ "What's your move?".format(player_label)) move = int(move) if self.board[move] != ' ': continue else: break else: ########## # Our level 1 ai agent (random) ########## if self.ai_level == 1: move = self.random_ai() ########## # Our level 2 ai agent (heuristic) ########## elif self.ai_level == 2: move = self.heuristic_ai(player_label) ########## # Our user-defined AI agent ########## elif self.ai_level == 3: move = self.user_ai(self.board, self.win_patterns, player_label) self.board[move] = player_label # the winner varaible will now be check within the board object self.check_winning() self.check_stalemate() if self.winner == '': continue elif self.winner == 'Stalemate': print(self.check_stalemate()) self.visualize_board() break else: print(self.check_winning()) self.visualize_board() break if self.winner != '': return self #################################### # Adding our ability to reset the game #################################### def reset_game(self): self.board = {1: ' ', 2: ' ', 3: ' ', 4: ' ', 5: ' ', 6: ' ', 7: ' ', 8: ' ', 9: ' ',} self.winner = '' self.setup_game() ###Output _____no_output_____ ###Markdown Let's test our reset: ###Code game = GameEngine('auto') game.setup_game() game.play_game() game.reset_game() game.play_game() ###Output | | | | | | | | | | | | | | | | | |X| | | | | | | | |O| | |X| | | | | | | | |O| | |X| | | | |X| |O| |O| | |X| | | | |X| |O|X|O| | |X| | | | |X| |O|X|O| | |X| | | |O|X| |O|X|O| | |X| | |X|O|X| |O|X|O| |O|X| | |X|O|X| It's a stalemate! |O|X|O| |O|X|X| |X|O|X| ###Markdown This `reset_game` function works the way we intend. However, the big step we will have to make from our current tic tac toe module to one usable by OpenAI is to work with integers rather than strings in our board representation. 
6.1.3 Observation and Action SpacesThe following are the important changes we will have to make to our game class in order to work with OpenAI's built-in reinforcement learning algorithms:``` the board now has integers as values instead of stringsself.board = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0,} the available token spaces, note that in order to access our board dictionary these actions will need to be re-indexed to 1self.action_space = spaces.Discrete(9) the observation space requires int rep for player tokensself.observation_space = spaces.Box(low=0, high=2, shape=(9,), dtype=np.int)self.reward_range = (-10, 1) we will redefine our player labels as intsself.player_label = 1self.opponent_label = 2``` Let's take a look at our redefined action space: ###Code board = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0,} state = np.array(list(board.values())).reshape(9,) state ###Output _____no_output_____ ###Markdown Does this align with a random sample of the observation space? It should if it is going to work! ###Code box = spaces.Box(low=0, high=2, shape=(9,), dtype=np.int) box.sample() ###Output _____no_output_____ ###Markdown Let's break this down. For 1 of 9 spaces (defined by shape in `spaces.Box`), the game board can take on the value of 0, 1, or 2 (defined by low and high in `spaces.Box`). When we sample from box we get a random snapshot of how the bored could possibly look. The way we've defined `state` is such that it too, represents how the board could possibly look. `state` will be returned by both `reset` and `step` when we go to wrap all of this in our game environment. 6.1.4 StepOur Reinforcement Learning (RL) agent will have much less information available to them than our prior algorithms. For this we need to define our reward system a little differently. Given a current board the agent receives:* +10 for playing a winning move* -100 for playing an invalid move * -10 if the opponent wins the next move* 1/9 for playing a valid move ###Code class TicTacToeGym(GameEngine, gym.Env): def __init__(self, user_ai=None, ai_level=1): super().__init__() self.setup = 'auto' # the default behavior will be no user_ai and ai_level set to 1 (random) self.user_ai = user_ai self.ai_level = ai_level # the board now has integers as values instead of strings self.board = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0,} # the available token spaces, note that in order to access our board # dictionary these actions will need to be re-indexed to 1 self.action_space = spaces.Discrete(9) # the observation space requires int rep for player tokens self.observation_space = spaces.Box(low=0, high=2, shape=(9,), dtype=np.int) self.reward_range = (-10, 1) # we will redefine our player labels as ints self.player_label = 1 self.opponent_label = 2 # for StableBaselines self.spec = None self.metadata = None ############################################################################## # we will have to redefine any function in our previous module that makes use # of the string entries, X and O on the board. We need to replace the logic # with 1's and 2's ############################################################################## def check_winning(self): for pattern in self.win_patterns: values = [self.board[i] for i in pattern] if values == [1, 1, 1]: self.winner = 'X' # we update the winner status return "'X' Won!" elif values == [2, 2, 2]: self.winner = 'O' return "'O' Won!" 
return '' def check_stalemate(self): if (0 not in self.board.values()) and (self.check_winning() == ''): self.winner = 'Stalemate' return "It's a stalemate!" def reset_game(self): overwrite_ai = self.ai_level self.board = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0,} self.winner = '' self.setup_game() self.ai_level = overwrite_ai # depending now on if X or O is first will need to take the AI's first step if self.start_player == 'O': move = self.random_ai() self.board[move] = 2 def reset(self): self.reset_game() state = np.array(list(self.board.values())).reshape(9,) return state def random_ai(self): while True: move = random.randint(1,9) if self.board[move] != 0: continue else: break return move ############################################################################## # we will have to recycle a lot of what was previously wrapped up in # play_game() since gym needs access to every point after the Reinf AI # makes a move ############################################################################## def step(self, action): # gym discrete indexes at 0, our board indexes at 1 move = action + 1 # Check if agent's move is valid avail_moves = [i for i in self.board.keys() if self.board[i] == 0] is_valid = move in avail_moves # if valid, then play the move, and let the other opponent make a move # as well if is_valid: # Play the move # update board self.board[move] = self.player_label self.check_winning() self.check_stalemate() if self.winner == '': ################################################################## # instead of continuing as we did in our play_game loop we will # take one additional step for the AI and then let openAI gym # handle incrementing between steps. ################################################################## ########## # Our level 1 ai agent (random) ########## # if self.ai_level == 1: move = self.random_ai() # ########## # # Our level 2 ai agent (heuristic) # ########## # elif self.ai_level == 2: # move = self.heuristic_ai('O') # ########## # # Our user-defined AI agent # ########## # elif self.ai_level == 3: # move = self.user_ai(self.board, self.win_patterns, 'O') self.board[move] = self.opponent_label self.check_winning() self.check_stalemate() if self.winner == '': reward, done, info = 1/9, False, {} if self.winner == 'Stalemate': reward, done, info = -1, True, {} elif self.winner == 'X': reward, done, info = 100, True, {} elif self.winner == 'O': reward, done, info = -10, True, {} else: # End the game and penalize agent reward, done, info = -100, True, {} state = np.array(list(self.board.values())).reshape(9,) return state, reward, done, info ###Output _____no_output_____ ###Markdown 6.1.5 Testing the EnvironmentWe can check that the environment is compatible with gym using `check_env`. Notice the below doesn't return any error messages. This means everything is working ok! 
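In addition to `check_env`, the reward logic in `step()` can be sanity-checked by hand before handing the environment to a learning algorithm. A minimal sketch (it only uses the `TicTacToeGym` class defined above; the expected values follow from the reward branches in `step()`):

```python
import numpy as np

env = TicTacToeGym()
obs = env.reset()

# index of the first empty cell (value 0) in the flattened board
valid_action = int(np.argmin(obs))

# a legal, non-terminal move should earn the small 1/9 reward
obs, reward, done, info = env.step(valid_action)
print(reward, done)   # roughly 0.111, False

# replaying the same cell is now an invalid move: -100 and the episode ends
obs, reward, done, info = env.step(valid_action)
print(reward, done)   # -100, True
```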
###Code env = TicTacToeGym() check_env(env) ###Output _____no_output_____ ###Markdown We can also define a model from OpenAI and see how our game board updates in a single step with the new wrapper ###Code from stable_baselines.common.policies import MlpPolicy from stable_baselines.common.vec_env import DummyVecEnv from stable_baselines import PPO2 env = TicTacToeGym() model = PPO2(MlpPolicy, env, verbose=1) # test our reset function obs = env.reset() # the start player should randomly select between X and O print('the start player: {}'.format(env.start_player)) # we should return an action from model.predict action, _states = model.predict(obs) print("the taken action: {}".format(action)) # we divert default behavior of setup_game by saving and reestablishing our # user input ai_level print("AI level: {}".format(env.ai_level)) # check the board update from env.step() obs, rewards, dones, info = env.step(action) print(obs) print("Should be blank if no winner: [{}]".format(env.check_winning())) ###Output the start player: O the taken action: 7 AI level: 1 [0 2 0 0 0 0 0 1 2] Should be blank if no winner: [] ###Markdown And we can still visualize the board: ###Code env.visualize_board() ###Output |0|2|0| |0|0|0| |0|1|2| ###Markdown And check that our untrained model will win approx half the time: ###Code winners = [] for j in range(1000): obs = env.reset() for i in range(10): action, _states = model.predict(obs) # print(action) obs, rewards, dones, info = env.step(action) # env.visualize_board() if env.winner != '': winners.append(env.winner) break pd.DataFrame(winners).value_counts() ###Output _____no_output_____ ###Markdown 6.1.6 Training the ModelNow we will train the PPO2 model on our environment! ###Code from stable_baselines.common.policies import MlpPolicy from stable_baselines.common.vec_env import DummyVecEnv from stable_baselines import PPO2 env = TicTacToeGym() model = PPO2(MlpPolicy, env, verbose=1) model.learn(total_timesteps=100000) obs = env.reset() for i in range(10): action, _states = model.predict(obs) print(action) obs, rewards, dones, info = env.step(action) env.visualize_board() if env.winner != '': print(env.winner) break winners = [] for j in range(1000): obs = env.reset() for i in range(10): action, _states = model.predict(obs) # print(action) obs, rewards, dones, info = env.step(action) # env.visualize_board() if env.winner != '': winners.append(env.winner) break ###Output _____no_output_____ ###Markdown Let's see how many times our trained model won: ###Code pd.DataFrame(winners).value_counts() ###Output _____no_output_____ ###Markdown Not terrible! Could be better! Let's play against our model 6.1.7 Play Against the Model To make our model compatible with the old `play_game` method, we will need a way to convert to and rom int vs string representations on our board. Let's test this: ###Code value_map = {' ': 0, 'X': 1, 'O': 2} board = {1: 'X', 2: ' ', 3: ' ', 4: ' ', 5: ' ', 6: ' ', 7: ' ', 8: ' ', 9: ' ',} for key in board.keys(): board[key] = value_map[board[key]] board ###Output _____no_output_____ ###Markdown And now we can wrap it up into a new ai function: ###Code def rl_ai(board, win_patterns, player_label, model=model): # note that we are simply leaving win_patterns and player_label # here so that we can use the game engine as defined in prior # sessions, these inputs are ignored. 
ai_board = board.copy() value_map = {' ': 0, 'X': 1, 'O': 2} for key in ai_board.keys(): ai_board[key] = value_map[ai_board[key]] obs = np.array(list(ai_board.values())).reshape(9,) action, _states = model.predict(obs) move = action + 1 return move game = GameEngine('user', user_ai=rl_ai) game.setup_game() game.play_game() ###Output How many Players? (type 0, 1, or 2)1 who will go first? (X, (AI), or O (Player))X | | | | | | | | | | | | | | | | | |X| | | | | | O, what's your move?1 |O| | | | |X| | | | | | |O| | | | |X| | |X| | | O, what's your move?2 |O|O| | | |X| | |X| | | 'X' Won! |O|O|X| | |X| | |X| | | ###Markdown Notice any interesting behaviors about the model? 6.2 Improve the ModelHow can we improve this puppy? What about training the model against a smarter opponent? changing the reward values? training for longer? OR trying a different reinforcement learning model? Try any or all of these and see what works! ###Code class TicTacToeGym(GameEngine, gym.Env): def __init__(self, user_ai=None, ai_level=1): super().__init__() self.setup = 'auto' # the default behavior will be no user_ai and ai_level set to 1 (random) self.user_ai = user_ai self.ai_level = ai_level # the board now has integers as values instead of strings self.board = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0,} # the available token spaces, note that in order to access our board # dictionary these actions will need to be re-indexed to 1 self.action_space = spaces.Discrete(9) # the observation space requires int rep for player tokens self.observation_space = spaces.Box(low=0, high=2, shape=(9,), dtype=np.int) self.reward_range = (-10, 1) # we will redefine our player labels as ints self.player_label = 1 self.opponent_label = 2 # for StableBaselines self.spec = None self.metadata = None ############################################################################## # we will have to redefine any function in our previous module that makes use # of the string entries, X and O on the board. We need to replace the logic # with 1's and 2's ############################################################################## def check_winning(self): for pattern in self.win_patterns: values = [self.board[i] for i in pattern] if values == [1, 1, 1]: self.winner = 'X' # we update the winner status return "'X' Won!" elif values == [2, 2, 2]: self.winner = 'O' return "'O' Won!" return '' def check_stalemate(self): if (0 not in self.board.values()) and (self.check_winning() == ''): self.winner = 'Stalemate' return "It's a stalemate!" 
def reset_game(self): overwrite_ai = self.ai_level self.board = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0,} self.winner = '' self.setup_game() self.ai_level = overwrite_ai # depending now on if X or O is first will need to take the AI's first step if self.start_player == 'O': move = self.random_ai() self.board[move] = 2 def reset(self): self.reset_game() state = np.array(list(self.board.values())).reshape(9,) return state def random_ai(self): while True: move = random.randint(1,9) if self.board[move] != 0: continue else: break return move def heuristic_ai(self, player_label): opponent = [1, 2] opponent.remove(player_label) opponent = opponent[0] avail_moves = [i for i in self.board.keys() if self.board[i] == 0] temp_board = self.board.copy() middle = 5 corner = [1,3,7,9] side = [2,4,6,8] # first check for a winning move move_found = False for move in avail_moves: temp_board[move] = player_label for pattern in self.win_patterns: values = [temp_board[i] for i in pattern] if values == [player_label, player_label, player_label]: move_found = True break if move_found: break else: temp_board[move] = 0 # check if the opponent has a winning move if move_found == False: for move in avail_moves: temp_board[move] = opponent for pattern in self.win_patterns: values = [temp_board[i] for i in pattern] if values == [opponent, opponent, opponent]: move_found = True break if move_found: break else: temp_board[move] = 0 # check if middle avail if move_found == False: if middle in avail_moves: move_found = True move = middle # check corners if move_found == False: move_corner = [val for val in avail_moves if val in corner] if len(move_corner) > 0: move = random.choice(move_corner) move_found = True # check side if move_found == False: move_side = [val for val in avail_moves if val in side] if len(move_side) > 0: move = random.choice(move_side) move_found = True return move ############################################################################## # we will have to recycle a lot of what was previously wrapped up in # play_game() since gym needs access to every point after the Reinf AI # makes a move ############################################################################## def step(self, action): # gym discrete indexes at 0, our board indexes at 1 move = action + 1 # Check if agent's move is valid avail_moves = [i for i in self.board.keys() if self.board[i] == 0] is_valid = move in avail_moves # if valid, then play the move, and let the other opponent make a move # as well if is_valid: # Play the move # update board self.board[move] = self.player_label self.check_winning() self.check_stalemate() if self.winner == '': ################################################################## # instead of continuing as we did in our play_game loop we will # take one additional step for the AI and then let openAI gym # handle incrementing between steps. 
################################################################## ########## # Our level 1 ai agent (random) ########## if self.ai_level == 1: move = self.random_ai() # ########## # # Our level 2 ai agent (heuristic) # ########## elif self.ai_level == 2: move = self.heuristic_ai(self.player_label) # ########## # # Our user-defined AI agent # ########## # elif self.ai_level == 3: # move = self.user_ai(self.board, self.win_patterns, 'O') self.board[move] = self.opponent_label self.check_winning() self.check_stalemate() if self.winner == '': reward, done, info = 1/9, False, {} if self.winner == 'Stalemate': reward, done, info = -10, True, {} elif self.winner == 'X': reward, done, info = 50, True, {} elif self.winner == 'O': reward, done, info = -50, True, {} else: # End the game and penalize agent reward, done, info = -100, True, {} state = np.array(list(self.board.values())).reshape(9,) return state, reward, done, info from stable_baselines.common.policies import MlpPolicy from stable_baselines.common.vec_env import DummyVecEnv from stable_baselines import PPO2 env = TicTacToeGym(ai_level=1) model = PPO2(MlpPolicy, env, verbose=1) model.learn(total_timesteps=100000) winners = [] for j in range(1000): obs = env.reset() for i in range(10): action, _states = model.predict(obs) # print(action) obs, rewards, dones, info = env.step(action) # env.visualize_board() if env.winner != '': winners.append(env.winner) break pd.DataFrame(winners).value_counts() game = GameEngine('user', user_ai=rl_ai) game.setup_game() game.play_game() ###Output _____no_output_____
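###Markdown Once a training run looks reasonable it is worth persisting the agent, so the evaluation can be repeated (or the agent reused in `rl_ai`) without retraining. A small sketch using Stable Baselines' built-in serialization (the file name `ppo2_tictactoe` is arbitrary):

```python
# save the trained policy to disk, then restore it into a fresh object
model.save("ppo2_tictactoe")
loaded_model = PPO2.load("ppo2_tictactoe")

# quick re-evaluation of the restored policy on the same environment
winners = []
for _ in range(100):
    obs = env.reset()
    for _ in range(10):
        action, _states = loaded_model.predict(obs)
        obs, reward, done, info = env.step(action)
        if env.winner != '':
            winners.append(env.winner)
            break
pd.DataFrame(winners).value_counts()
```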
Docker_containers/1. Basic manual/1. Basics.ipynb
###Markdown Docker containers* Course link: https://carpentries-incubator.github.io/docker-introduction/* Video: https://www.youtube.com/watch?v=HelrQnm3v4g Containers are independent OS-level virtualization to deliver software in packages.They ensure:* Standardisation* Portability* Reliability* ReproducibilityIt avoids package/ software conflicts. Can use multiple versions of same package simultaneously. The main program you want to use likely depends on many, many, different other programs (including the operating system!), creating a very complex, and often fragile system. One change or missing piece may stop the whole thing from working or break something that was already running. It’s no surprise that this situation is sometimes informally termed “dependency hell”.For example, in conda the base environment often becomes a dependency hell. What is a Container? What is Docker?Docker is a tool that allows you to build what are called “containers”. A situation* Software A works on Ubuntu* Software B works on Arch LinuxWe want another filesystem that we could use to chain together both the software in a "pipeline". Containers can make it possible with something called a "**container host**". VirtualisationContainer are virtual computers on host computer. A container can be considered as a lightweight virtual machine. Underneath the container is usually the Linux kernel with some filesystem."**container image**" is a recipe or a template for a container.Container is – a self-contained, complete, separate computer filesystem. * documentation – there is a clear record of what software and software dependencies were used, from bottom to top.* portability – the container can be used on any computer that has Docker installed – it doesn’t matter whether the computer is Mac, Windows or Linux-based.* reproducibility – you can use the exact same software and environment on your computer and on other resources (like a large-scale computing cluster).* configurability – containers can be sized to take advantage of more resources (memory, CPU, etc.) on large systems (clusters) or less, depending on the circumstances. Potential applications* Using containers solely on your own computer to use a specific software tool or to test out a tool (possibly to avoid a difficult and complex installation process, to save your time or to avoid dependency hell).* Creating a Dockerfile that generates a container image with software that you specify installed, then sharing a container image generated using this Dockerfile with your collaborators for use on their computers or a remote computing resource (e.g. cloud-based or HPC system).* Archiving the container images so you can repeat analysis/modelling using the same software and configuration in the future – capturing your workflow. 
Docker command line* Install Docker using these instructions: https://carpentries-incubator.github.io/docker-introduction/setup.html* Create an account here: https://hub.docker.com/* Download this intro file: https://carpentries-incubator.github.io/docker-introduction/files/docker-intro.zip* For windows: https://docs.docker.com/docker-for-windows/install/* For Linux: https://docs.docker.com/install/linux/docker-ce/ubuntu/ Install using the repository* sudo apt-get update* sudo apt-get install \ ca-certificates \ curl \ gnupg \ lsb-release* curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg* echo \ "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \ $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null* sudo docker run hello-world If you are getting error for permission `Got permission denied while trying to connect to the Docker daemon socket at unix:///var/run/docker.sock: Get http://%2Fvar%2Frun%2Fdocker.sock/v1.40/containers/json: dial unix /var/run/docker.sock: connect: permission denied` Then grant a permission using `sudo chmod 666 /var/run/docker.sock` You should be able to run this to see `Docker Client` and `Docker Server`.```console(base) hell@Dell-Precision-T1600:~$ docker versionClient: Docker Engine - Community Version: 20.10.12 API version: 1.41 Go version: go1.16.12 Git commit: e91ed57 Built: Mon Dec 13 11:45:33 2021 OS/Arch: linux/amd64 Context: default Experimental: trueServer: Docker Engine - Community Engine: Version: 20.10.12 API version: 1.41 (minimum version 1.12) Go version: go1.16.12 Git commit: 459d0df Built: Mon Dec 13 11:43:42 2021 OS/Arch: linux/amd64 Experimental: false containerd: Version: 1.4.12 GitCommit: 7b11cfaabd73bb80907dd23182b9347b4245eb5d runc: Version: 1.0.2 GitCommit: v1.0.2-0-g52b36a2 docker-init: Version: 0.19.0 GitCommit: de40ad0``` Copy pasteYou can save the version name in a text file.```console (base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker --versionDocker version 20.10.12, build e91ed57(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker --version>>version.txt``` Docker is working correctly ?You will see contents in the directory.```console(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker container lsCONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES```if you get `Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?` then something is wrong. 
Getting help* General help: `docker --help`* Command specific: `docker COMMAND --help````console(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker container --helpUsage: docker container COMMANDManage containersCommands: attach Attach local standard input, output, and error streams to a running container commit Create a new image from a container's changes cp Copy files/folders between a container and the local filesystem create Create a new container diff Inspect changes to files or directories on a container's filesystem exec Run a command in a running container export Export a container's filesystem as a tar archive inspect Display detailed information on one or more containers kill Kill one or more running containers logs Fetch the logs of a container ls List containers pause Pause all processes within one or more containers port List port mappings or a specific mapping for the container prune Remove all stopped containers rename Rename a container restart Restart one or more containers rm Remove one or more containers run Run a command in a new container start Start one or more stopped containers stats Display a live stream of container(s) resource usage statistics stop Stop one or more running containers top Display the running processes of a container unpause Unpause all processes within one or more containers update Update configuration of one or more containers wait Block until one or more containers stop, then print their exit codesRun 'docker container COMMAND --help' for more information on a command.``` Docker Command Line Interface (CLI) syntax`docker [command] [subcommand] [additional options]`Let us start with prebuilt images, then we will create our own images. Downloading docker image The `docker image` command is used to interact with Docker container images. ```console(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker image lsREPOSITORY TAG IMAGE ID CREATED SIZEhello-world latest feb5d9fea6a5 4 months ago 13.3kB``` Download image using pull`docker image pull image_name````console(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker image pull hello-worldUsing default tag: latestlatest: Pulling from library/hello-worldDigest: sha256:97a379f4f88575512824f3b352bc03cd75e239179eea0fecc38e597b2209f49aStatus: Image is up to date for hello-world:latestdocker.io/library/hello-world:latest(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker image lsREPOSITORY TAG IMAGE ID CREATED SIZEhello-world latest feb5d9fea6a5 4 months ago 13.3kB``` The `Hello World` image is available here https://hub.docker.com/_/hello-world. We can download the image using `docker image pull image_name`. Running a containerUse command `docker container run container_name`.```console(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker image lsREPOSITORY TAG IMAGE ID CREATED SIZEhello-world latest feb5d9fea6a5 4 months ago 13.3kB(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker container run hello-worldHello from Docker!This message shows that your installation appears to be working correctly.To generate this message, Docker took the following steps: 1. The Docker client contacted the Docker daemon. 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. (amd64) 3. The Docker daemon created a new container from that image which runs the executable that produces the output you are currently reading. 4. 
The Docker daemon streamed that output to the Docker client, which sent it to your terminal.To try something more ambitious, you can run an Ubuntu container with: $ docker run -it ubuntu bashShare images, automate workflows, and more with a free Docker ID: https://hub.docker.com/For more examples and ideas, visit: https://docs.docker.com/get-started/``` Downloading alpine image```console(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker image pull alpineUsing default tag: latestlatest: Pulling from library/alpine59bf1c3509f3: Pull complete Digest: sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300Status: Downloaded newer image for alpine:latestdocker.io/library/alpine:latest```The container needs extra command.```console(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker container run alpine cat /etc/os-releaseNAME="Alpine Linux"ID=alpineVERSION_ID=3.15.0PRETTY_NAME="Alpine Linux v3.15"HOME_URL="https://alpinelinux.org/"BUG_REPORT_URL="https://bugs.alpinelinux.org/"``` Print Hello world using alpine image```console(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker container run alpine echo 'hello world'hello world``` Running containers interactivelyWe wanted to keep the container running so we could log into it and test drive more commands? The way to do this is by adding the interactive flags `-i` and `-t` (usually combined as `-it`) to the docker container run command and provide a shell (`bash`,`sh`, etc.) as our command. The alpine Docker container image doesn’t include bash so we need to use `sh`. Technically, the interactive flag is just `-i` – the extra `-t` (combined as -it above) is the “pseudo-TTY” option, a fancy term that means a text interface. This allows you to connect to a shell, like `sh`, using a command line. 
Since you usually want to have a command line when running interactively, it makes sense to use the two together.The prompt would change to this.```console(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker container run -it alpine sh/ ``` ```console(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker container run -it alpine sh/ pwd// lsbin etc lib mnt proc run srv tmp vardev home media opt root sbin sys usr/ whoamiroot/ echo $PATH/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin/ cat /etc/os-releaseNAME="Alpine Linux"ID=alpineVERSION_ID=3.15.0PRETTY_NAME="Alpine Linux v3.15"HOME_URL="https://alpinelinux.org/"BUG_REPORT_URL="https://bugs.alpinelinux.org/"/ exit(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ ``` Installing UbuntuFind different versions of `Ubuntu` here https://hub.docker.com/search?q=ubnutu%20&type=image&sort=updated_at&order=desc.Installing one of them:```console(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker pull xufuhe/ubnutuUsing default tag: latestlatest: Pulling from xufuhe/ubnutud5c6f90da05d: Pull complete 1300883d87d5: Pull complete c220aa3cfc1b: Pull complete 2e9398f099dc: Pull complete dc27a084064f: Pull complete Digest: sha256:34471448724419596ca4e890496d375801de21b0e67b81a77fd6155ce001edadStatus: Downloaded newer image for xufuhe/ubnutu:latestdocker.io/xufuhe/ubnutu:latest(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker image lsREPOSITORY TAG IMAGE ID CREATED SIZEalpine latest c059bfaa849c 2 months ago 5.59MBhello-world latest feb5d9fea6a5 4 months ago 13.3kBxufuhe/ubnutu latest ccc7a11d65b1 4 years ago 120MB(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ docker container run -it xufuhe/ubnutu sh cat /etc/os-releaseNAME="Ubuntu"VERSION="16.04.3 LTS (Xenial Xerus)"ID=ubuntuID_LIKE=debianPRETTY_NAME="Ubuntu 16.04.3 LTS"VERSION_ID="16.04"HOME_URL="http://www.ubuntu.com/"SUPPORT_URL="http://help.ubuntu.com/"BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"VERSION_CODENAME=xenialUBUNTU_CODENAME=xenial apt-get --helpapt 1.2.24 (amd64)Usage: apt-get [options] command apt-get [options] install|remove pkg1 [pkg2 ...] apt-get [options] source pkg1 [pkg2 ...]apt-get is a command line interface for retrieval of packagesand information about them from authenticated sources andfor installation, upgrade and removal of packages togetherwith their dependencies.Most used commands: update - Retrieve new lists of packages upgrade - Perform an upgrade install - Install new packages (pkg is libc6 not libc6.deb) remove - Remove packages purge - Remove packages and config files autoremove - Remove automatically all unused packages dist-upgrade - Distribution upgrade, see apt-get(8) dselect-upgrade - Follow dselect selections build-dep - Configure build-dependencies for source packages clean - Erase downloaded archive files autoclean - Erase old downloaded archive files check - Verify that there are no broken dependencies source - Download source archives download - Download the binary package into the current directory changelog - Download and display the changelog for the given packageSee apt-get(8) for more information about the available commands.Configuration options and syntax is detailed in apt.conf(5).Information about how to configure sources can be found in sources.list(5).Package and version choices can be expressed via apt_preferences(5).Security details are available in apt-secure(8). This APT has Super Cow Powers. 
exit(base) hell@Dell-Precision-T1600:~/Desktop/Docker$ ``` Shorthands`docker container run xufuhe/ubuntu cat /etc/os-release``docker container run xufuhe/ubuntu apt-get --help` More options* `--rm`: this option guarantees that any running container is completely removed from your computer after the container is stopped. * `--name=`: By default, Docker assigns a random name and ID number to each container instance that you run on your computer. If you want to be able to more easily refer to a specific running container, you can assign it a name using this option. Summary* The `docker image pull` command downloads Docker container images from the internet.* The `docker image ls` command lists Docker container images that are (now) on your computer.* The `docker container run` command creates running containers from container images and can run commands inside them.* When using the `docker container run` command, a container can run a default action (if it has one), a user specified action, or a shell to be used interactively. Cleaning up containers Removing containerThe command `docker ps` serves the same purpose as `docker container ls`.Use `docker ps --all` to see all running or tracked containers.```console(base) hell@Dell-Precision-T1600:~$ docker ps --allCONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES1b5e9aa58feb xufuhe/ubnutu "sh" 5 hours ago Exited (0) 5 hours ago nervous_shaw21333093a18b alpine "sh" 6 hours ago Exited (0) 6 hours ago epic_jang644d45eaf5f6 alpine "sh" 6 hours ago Exited (130) 6 hours ago vigorous_hypatia77194213259d alpine "echo 'hello world'" 6 hours ago Exited (0) 6 hours ago vigorous_cray184fbf917972 alpine "cat /etc/os-release" 6 hours ago Exited (0) 6 hours ago dreamy_sinoussi8045747df6d4 alpine "/bin/sh" 6 hours ago Exited (0) 6 hours ago amazing_hawking009e47dc71a8 hello-world "/hello" 6 hours ago Exited (0) 6 hours ago serene_murdockddd7faafe140 hello-world "/hello" 7 hours ago Exited (0) 7 hours ago peaceful_liskov``` Remove a container`docker container rm CONTAINER_ID````console(base) hell@Dell-Precision-T1600:~$ docker container rm ddd7faafe140ddd7faafe140``` Remove all containers`docker container prune````console(base) hell@Dell-Precision-T1600:~$ docker container pruneWARNING! This will remove all stopped containers.Are you sure you want to continue? 
[y/N] yDeleted Containers:1b5e9aa58feb4fc84d1330f2447acd2d109b316d8cc74121bb1fe65811754ba921333093a18b17e6f9f3b8b3c715444188200aaae10e885e74495e81451554fa644d45eaf5f650044bc70082a62502b22158757a5d9308a4296053e6013111cb77194213259dd34ed51fefa048ab8b861e4138c897cce43c98da097154ae1bdd184fbf917972bafcf475d486bff80bd1c44d893bc32075f4931a04b87492f8968045747df6d48e23ed3baf682f24967abd08df9f5c99d8a7341695cae7ab8ac0009e47dc71a89f981cda1a23cdf7fcb7e42e56c2db134db60847424029bd5874Total reclaimed space: 55B(base) hell@Dell-Precision-T1600:~$ docker ps --allCONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES``` Removing images`docker image rm image_name````console(base) hell@Dell-Precision-T1600:~$ docker image lsREPOSITORY TAG IMAGE ID CREATED SIZEalpine latest c059bfaa849c 2 months ago 5.59MBhello-world latest feb5d9fea6a5 4 months ago 13.3kBxufuhe/ubnutu latest ccc7a11d65b1 4 years ago 120MB(base) hell@Dell-Precision-T1600:~$ docker image rm xufuhe/ubnutu:latest Untagged: xufuhe/ubnutu:latestUntagged: xufuhe/ubnutu@sha256:34471448724419596ca4e890496d375801de21b0e67b81a77fd6155ce001edadDeleted: sha256:ccc7a11d65b1b5874b65adb4b2387034582d08d65ac1817ebc5fb9be1baa5f88Deleted: sha256:cb5450c7bb149c39829e9ae4a83540c701196754746e547d9439d9cc59afe798Deleted: sha256:364dc483ed8e64e16064dc1ecf3c4a8de82fe7f8ed757978f8b0f9df125d67b3Deleted: sha256:4f10a8fd56139304ad81be75a6ac056b526236496f8c06b494566010942d8d32Deleted: sha256:508ceb742ac26b43bdda819674a5f1d33f7b64c1708e123a33e066cb147e2841Deleted: sha256:8aa4fcad5eeb286fe9696898d988dc85503c6392d1a2bd9023911fb0d6d27081(base) hell@Dell-Precision-T1600:~$ docker image lsREPOSITORY TAG IMAGE ID CREATED SIZEalpine latest c059bfaa849c 2 months ago 5.59MBhello-world latest feb5d9fea6a5 4 months ago 13.3kB``` The reason that there are a few lines of output, is that a given container image may have been formed by merging multiple underlying layers. Any layers that are used by multiple Docker container images will only be stored once. Finding Containers on Docker HubThe Docker Hub is an online repository of container images, a vast number of which are publicly available.There are official images endorsed by Docker itself. They are reliable (no malware) and stable.![image.png](attachment:c651bc58-37c1-4e66-b03a-6c53392a8834.png) Official Python image![image.png](attachment:5068600e-515a-437b-91c8-43b55083b8f2.png) Exploring Container image versionsA single Docker Hub page can have many different versions of container images, based on the version of the software inside. hese versions are indicated by “tags”. When referring to the specific version of a container image by its tag, you use a colon `:` like this: CONTAINER_IMAGE_NAME:TAG* For Python 3.8: `docker image pull python:3.8`* For Python 3.6: `docker image pull python:3.6` Other image providers* ContinuumIO (developer of Anaconda): https://hub.docker.com/u/continuumio* rocker for R langauge: https://hub.docker.com/u/rockerTo download Individually managed containers:OWNER/CONTAINER_IMAGE_NAME:TAG Docker Hub is a repositorySo, to acquire an image from a repository with specific version.OWNER/REPOSITORY:TAG ###Code # Creating your own Container images * If you want custom images for your task then create your own docker. * If you want to run a code written in older version of let's say Tensorflow. * Share you workflow with someone else ###Output _____no_output_____
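###Markdown As a concrete example of the OWNER/REPOSITORY:TAG form, one of the ContinuumIO images mentioned above could be pulled and run like this (a sketch rather than part of the recorded session, so no output is shown and the available tags may change over time):

```console
# pull a community-maintained image: OWNER/REPOSITORY:TAG
docker image pull continuumio/miniconda3:latest

# run it, check which Python it ships, and remove the container afterwards
docker container run --rm -it continuumio/miniconda3:latest python --version
```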
course/chapter2/section5_pt.ipynb
###Markdown Handling multiple sequences (PyTorch) Install the Transformers and Datasets libraries to run this notebook. ###Code ! pip install datasets transformers[sentencepiece] import torch from transformers import AutoTokenizer, AutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForSequenceClassification.from_pretrained(checkpoint) sequence = "I've been waiting for a HuggingFace course my whole life." tokens = tokenizer.tokenize(sequence) ids = tokenizer.convert_tokens_to_ids(tokens) input_ids = torch.tensor(ids) model(input_ids) tokenized_inputs = tokenizer(sequence, return_tensors="pt") print(tokenized_inputs["input_ids"]) import torch from transformers import AutoTokenizer, AutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForSequenceClassification.from_pretrained(checkpoint) sequence = "I've been waiting for a HuggingFace course my whole life." tokens = tokenizer.tokenize(sequence) ids = tokenizer.convert_tokens_to_ids(tokens) input_ids = torch.tensor([ids]) print("Input IDs:", input_ids) output = model(input_ids) print("Logits:", output.logits) batched_ids = [ [200, 200, 200], [200, 200] ] padding_id = 100 batched_ids = [ [200, 200, 200], [200, 200, padding_id] ] model = AutoModelForSequenceClassification.from_pretrained(checkpoint) sequence1_ids = [[200, 200, 200]] sequence2_ids = [[200, 200]] batched_ids = [[200, 200, 200], [200, 200, tokenizer.pad_token_id]] print(model(torch.tensor(sequence1_ids)).logits) print(model(torch.tensor(sequence2_ids)).logits) print(model(torch.tensor(batched_ids)).logits) batched_ids = [ [200, 200, 200], [200, 200, tokenizer.pad_token_id] ] attention_mask = [ [1, 1, 1], [1, 1, 0] ] outputs = model(torch.tensor(batched_ids), attention_mask=torch.tensor(attention_mask)) print(outputs.logits) sequence = sequence[:max_sequence_length] ###Output _____no_output_____
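###Markdown The manual padding and attention-mask bookkeeping above is exactly what the tokenizer can do in one call. A short sketch using the same checkpoint (the second sentence is just an arbitrary example):

```python
sequences = [
    "I've been waiting for a HuggingFace course my whole life.",
    "So have I!",
]

# pad to the longest sequence in the batch, truncate to the model maximum,
# and return PyTorch tensors together with the matching attention mask
batch = tokenizer(sequences, padding=True, truncation=True, return_tensors="pt")
print(batch["input_ids"].shape)
print(batch["attention_mask"])

output = model(**batch)
print(output.logits)
```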
ar_fps_post_statistics.ipynb
###Markdown Sheet 1 - all runs ###Code def add_all_runs_sheet(c_df, writer): c_df.to_excel(writer, sheet_name='All runs', index=False) ###Output _____no_output_____ ###Markdown Sheet 2 - only non-outliers from all runs ###Code def add_runs_without_outliers(c_df, writer, outlier_threshold=120): no_outliers_df = c_df[c_df['AR_fps'] >= outlier_threshold] no_outliers_df.to_excel(writer, sheet_name='Runs without outliers', index=False) return no_outliers_df ###Output _____no_output_____ ###Markdown Sheet 3 - only outliers from all runs ###Code def add_outlier_runs(c_df, writer, outlier_threshold=120): with_outliers_df = c_df[c_df['AR_fps'] < outlier_threshold] with_outliers_df.to_excel(writer, sheet_name='Outlier runs', index=False) return with_outliers_df ###Output _____no_output_____ ###Markdown Sheet 4 - add statistics (min, max, stddev, etc.) for FPS (runs without outliers) ###Code def add_statistics(c_df, writer): sliced_df = c_df[['run', 'AR_fps']].copy() desc_df = sliced_df.describe() median_series = sliced_df.median() fps_stat_df = pd.DataFrame(columns=['Min', 'Max', 'Average', 'Median', 'Std Deviation']) fps_stat_df['Min'] = [desc_df["AR_fps"]["min"]] fps_stat_df['Max'] = [desc_df["AR_fps"]["max"]] fps_stat_df['Average'] = [desc_df["AR_fps"]["mean"]] fps_stat_df['Median'] = [median_series["AR_fps"]] fps_stat_df['Std Deviation'] = [desc_df["AR_fps"]["std"]] # Round to 4 decimals fps_stat_df = fps_stat_df.round(4) # Write the sliced_df to excel sliced_df.to_excel(writer, sheet_name='Statistics', index=False) #get a pointer to the same sheet to write other dfs and text to the same sheet curr_sheet = writer.sheets['Statistics'] # Write text and fps_stat_df #curr_sheet.write(1, 4, "Statistics, # of Frames Delay") curr_sheet['E2'] = "Statistics of FPS values" fps_stat_df.to_excel(writer, startrow=2, startcol=4, sheet_name='Statistics', index=False) return fps_stat_df ###Output _____no_output_____ ###Markdown Sheet 5 - Analyze AR_fps column from all runs ###Code def fps_all_analysis(c_df, writer): curr_row = 0 fps_col_series = c_df['AR_fps'].copy() # Convert the column to dataframe with unique values and their count fps_unique_count_df = fps_col_series.value_counts().sort_index().to_frame() fps_unique_count_df.rename_axis('FPS unique values', inplace=True) fps_unique_count_df.rename(columns={'AR_fps':'count'}) fps_unique_count_df['% of FPS value Distribution'] = round(fps_col_series.value_counts(normalize=True)*100, 2) fps_unique_count_df.sort_index() fps_unique_count_df.to_excel(writer, sheet_name='FPS_Distribution', index=True) # Get current sheet pointer for future writing curr_sheet = writer.sheets['FPS_Distribution'] # Add grand total of runs curr_row = len(fps_unique_count_df) + 2 # update current row val curr_sheet.cell(row=curr_row, column=1).value = 'Grand Total' curr_sheet.cell(row=curr_row, column=2).value = fps_unique_count_df.sum()[0] curr_sheet.cell(row=curr_row, column=3).value = round(fps_unique_count_df['% of FPS value Distribution'].sum()) # Add 3D pie chart image on the excel sheet data = fps_unique_count_df['% of FPS value Distribution'].values.tolist() labels = fps_unique_count_df.index.values.tolist() plt.title("Distribution of Frame Delay values, in %'") patches = plt.pie(data, labels=labels, autopct='%1.1f%%', startangle=120) plt.legend(labels, loc=5) piefile = f"{final_excel_file}_FPS_Distribution.png" plt.savefig(piefile, dpi = 100) img = openpyxl.drawing.image.Image(piefile) img.anchor = 'G4' curr_sheet.add_image(img) plt.close('all') print(f"Saved pie chart: 
{piefile}") ###Output _____no_output_____ ###Markdown MAIN ###Code #main if is_interactive(): input_excel = 'input/consolidation_result_ARGlass_TypeA.xlsx' else: input_excel = sys.argv[1] # get the name of input excel file, discard the extension input_excel_name, _ = os.path.splitext(os.path.basename(input_excel)) # Create output prerequisites. #1. check if output dir exists, if not create output_dir = 'output' if not os.path.isdir(output_dir): os.mkdir(output_dir) # Create output file name output_file_name = f'{input_excel_name}_post_analysis.xlsx' # Create output file path final_excel_file = os.path.join(output_dir,output_file_name) # Create ExcelWriter object to populate output excel file writer = pd.ExcelWriter(final_excel_file, engine='openpyxl') print(f"*** Working on folder: {input_excel} ***") # Get the input excel sheet into a dataframe c_df = pd.read_excel(input_excel, 0, index_col=None) ##### Add required sheets ####### # Sheet 1 - all runs print("Working on Sheet 1 - All runs") add_all_runs_sheet(c_df, writer) print(f"Total runs: {len(c_df)} ") print("DONE!\n") # Sheet 2 - only non-outliers from all runs print("Working on Sheet 2 - only non-outliers from all runs") no_outliers_df = add_runs_without_outliers(c_df, writer) print(f"Total non-outlier runs: {len(no_outliers_df)} ") print("DONE!\n") # Sheet 3 - only outliers from all runs print("Working on Sheet 3 - only ourtliers from all runs") with_outliers_df = add_outlier_runs(c_df, writer) print(f"Total outlier runs: {len(with_outliers_df)} ") print("DONE!\n") # Sheet 4 - add statistics (min, max, stddev, etc.) for frame delay (runs without outliers) print("Working on Sheet 4 - add statistics (min, max, stddev, etc.) for 'AR_fps' values for non-outlier runs") fps_stat_df = add_statistics(no_outliers_df, writer) print("DONE!\n") # Sheet 5 - Analyze frame delay column from all runs print("Working on Sheet 5 - Analyze frame delay column from all runs") fps_all_analysis(c_df, writer) print("DONE!\n") # Final step. Save the Excel writer object and close it print(f"Consolidating all sheets in final Excel: {final_excel_file}") writer.save() writer.close() print("DONE!") ###Output _____no_output_____
seminars/06/06_naive_bayes_cs_template.ipynb
###Markdown Naive Bayes In this notebook we will look at classification using Naive Bayes. We will focus specifically on text classification. This document is based on the scikit-learn tutorial on text analytics [here](https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html). ###Code import pandas as pd import numpy as np from scipy.special import logsumexp from sklearn.naive_bayes import BernoulliNB, MultinomialNB from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import CountVectorizer from sklearn.pipeline import Pipeline import matplotlib.pyplot as plt import matplotlib %matplotlib inline np.set_printoptions(precision=5, suppress=True) # suppress scientific float notation (so 0.000 is printed as 0.) ###Output _____no_output_____
###Markdown Loading the dataWe will use data from the [20 Newsgroups](http://qwone.com/~jason/20Newsgroups/) collection, which contains texts from internet discussion groups sorted into various categories.For simplicity we will restrict ourselves to just two categories - hockey and cars. ###Code categories = ['rec.sport.hockey', 'rec.autos'] train_data = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=1) test_data = fetch_20newsgroups(subset='test', categories=categories, shuffle=True, random_state=1) # Explore the loaded data print('Kategorie:', test_data.target_names) print('Train data length:', len(train_data.data)) # print(train_data.data[0]) print('Kategorie prvního dokumentu:',train_data.target_names[train_data.target[0]]) ###Output Kategorie: ['rec.autos', 'rec.sport.hockey'] Train data length: 1194 Kategorie prvního dokumentu: rec.autos
###Markdown Transformation into the bag-of-words representationWe will use CountVectorizer from scikit-learn for this. ###Code # First we load the vocabulary with open('vocabulary.txt','r') as f: vocab=f.read().splitlines() print(len(vocab)) count_vect = CountVectorizer(vocabulary = vocab) X_train_counts = count_vect.fit_transform(train_data.data) print('Bag of words shape', X_train_counts.shape) print('Bag of words type', type(X_train_counts)) ###Output Bag of words shape (1194, 61188) Bag of words type <class 'scipy.sparse.csr.csr_matrix'>
###Markdown The output is a scipy.sparse matrix. ###Code # display the first row - i.e. the features of the first document X_train_counts[0,:20].toarray() ###Output _____no_output_____ ###Markdown We can also extract the vocabulary...
###Code # several words from the dictionary together with their indices in the dictionary print(type(count_vect.vocabulary_)) print(len(count_vect.vocabulary_)) print(len(vocab)) print({vocab[i]:X_train_counts[0,i] for i in range(20)}) ###Output <class 'dict'> 61188 61188 {'archive': 0, 'name': 0, 'atheism': 0, 'resources': 0, 'alt': 0, 'last': 0, 'modified': 0, 'december': 0, 'version': 0, 'atheist': 0, 'addresses': 0, 'of': 6, 'organizations': 0, 'usa': 0, 'freedom': 0, 'from': 2, 'religion': 0, 'foundation': 0, 'darwin': 0, 'fish': 0}
###Markdown TASK 1 - first apply a simple model - Bernoulli Naive Bayes* Represent each document by indicators of the occurrence of words from the vocab vocabulary* Train a Naive Bayes classifier with Bernoulli-distributed features* Test the quality of the predictions on the manually specified documents* Estimate the accuracy of the predictions using the test set test_data ###Code # Custom documents for testing docs_new = ["Lets play hockey.", "I don't like their seats"] # alternatives to play with # docs_new = ["Lets play hockey.", "I don't like their game"] # Your code here ###Output _____no_output_____
###Markdown TASK 2 - apply a more complex model - Multinomial Naive Bayes* Represent each document by the counts of occurrences of words from the vocab vocabulary - i.e. the bag-of-words representation* Train a multinomial Naive Bayes classifier* Test the quality of the predictions on the manually specified documents* Estimate the accuracy of the predictions using the test set test_data ###Code # Your code here ###Output _____no_output_____
###Markdown Task 3 - Implement a Naive Bayes classifier for the situation where some of the features are categorical and some have a Bernoulli distribution* The first feature is categorical (with three categories)* The remaining 10 features have a Bernoulli distribution**Hint** - convert the categorical feature into 3 indicator features and apply MultinomialNB to them separately.Then estimate the remaining Bernoulli features separately and finally multiply the resulting probabilities together. Careful - in one of the models the distribution of $Y$ has to be fixed as uniform - so that the probabilities $P(Y = y)$ are not multiplied in twice. ###Code # Create the dataset class_count = 10 X00 = np.random.choice(3, size = (class_count,1), p = [0.4,0.4,0.2]) X01 = np.random.choice(3, size = (class_count,1), p = [0.2,0.5,0.3]) X0 = np.concatenate([X00,X01]) print(X0.shape) X10 = np.random.choice(2, size = (class_count,5), p = [0.4,0.6]) X11 = np.random.choice(2, size = (class_count,5), p = [0.6,0.4]) X1 = np.concatenate([X10,X11]) print(X1.shape) X20 = np.random.choice(2, size = (class_count,5), p = [0.4,0.6]) X21 = np.random.choice(2, size = (class_count,5), p = [0.2,0.8]) X2 = np.concatenate([X20,X21]) print(X2.shape) X = np.concatenate([X0,X1,X2],axis = 1) print(X.shape) Y = np.concatenate([np.ones(class_count-3), np.zeros(class_count+3)]) print(Y) # Your code here ###Output _____no_output_____
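###Markdown One possible way to approach Task 3, following the hint above (this is only a sketch of a solution under the stated assumptions, not a reference implementation):

```python
# one-hot encode the categorical first column so MultinomialNB can model it
X_cat = np.eye(3)[X[:, 0].astype(int)]   # shape (20, 3)
X_bern = X[:, 1:]                        # the 10 Bernoulli features

# model A: class prior P(Y) together with the categorical likelihood P(x_cat | Y)
cat_model = MultinomialNB()
cat_model.fit(X_cat, Y)

# model B: Bernoulli likelihoods only, with the class prior fixed to uniform
# so that P(Y) does not enter the product twice
bern_model = BernoulliNB(class_prior=[0.5, 0.5])
bern_model.fit(X_bern, Y)

# multiplying probabilities = adding log-probabilities; renormalisation does not
# change the argmax, so the combined prediction is simply
log_proba = cat_model.predict_log_proba(X_cat) + bern_model.predict_log_proba(X_bern)
y_pred = cat_model.classes_[np.argmax(log_proba, axis=1)]
print('training accuracy of the combined model:', np.mean(y_pred == Y))
```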
ml_fundamentals/.ipynb_checkpoints/04_ml_feature_selection-checkpoint.ipynb
###Markdown Feature Selection TechniquesWhat is? ###Code # import data gdmobile = 'https://drive.google.com/uc?export=download&id='+'https://drive.google.com/file/d/175nU7CtUXmKQ7T75pZwcsOQZcKLa959v/view?usp=drivesdk'.split('/')[-2] import pandas as pd df = pd.read_csv(gdmobile) df.head() ###Output _____no_output_____ ###Markdown Univariate Selection ###Code x = df.iloc[:,:-1] y = df['price_range'] from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2 # Apply KBest Algorithm ordered_rank_features = SelectKBest(score_func=chi2,k=20) ordered_feature=ordered_rank_features.fit(x,y) ordered_feature dfscores=pd.DataFrame(ordered_feature.scores_,columns=['Score']) dfcolumns=pd.DataFrame(x.columns) features_rank=pd.concat([dfcolumns,dfscores],axis=1) features_rank.columns=['Features','Score'] features_rank.nlargest(10,'Score') ###Output _____no_output_____ ###Markdown Feature ImportanceThis technique provides a score for each feature in the dataset. Higher the score, more relevant is the data. ###Code from sklearn.ensemble import ExtraTreesClassifier import matplotlib.pyplot as plt %matplotlib inline model = ExtraTreesClassifier() model.fit(x,y) print(model.feature_importances_) ranked_features=pd.Series(model.feature_importances_,index=x.columns) ranked_features.nlargest(10).plot(kind='barh') plt.show() ###Output _____no_output_____ ###Markdown CorrelationCorrelation being negative or positiveIf two 'features' are highly correlated, we can drop one of the features ###Code df.corr() # Can also be done using Seaborn import seaborn as sns corr=df.iloc[:,:-1].corr() top_features=corr.index plt.figure(figsize=(20,20)) sns.heatmap(df[top_features].corr(),annot=True) threshold=0.5 # find and remove the highly correlated features def correlation(dataset,threshold): col_corr=set() # Set of all the names of correlated columns corr_matrix = dataset.corr() for i in range(len(corr_matrix.columns)): for j in range(i): if abs(corr_matrix.iloc[i,j]) > threshold: # we are interested in absolute coeff value colname = corr_matrix.columns[i] # getting the name of column col_corr.add(colname) return col_corr correlation(df.iloc[:,:-1],threshold) ###Output _____no_output_____
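###Markdown Having identified the highly correlated columns, one of each correlated pair can simply be dropped before modelling. A short sketch reusing the `correlation` function and `threshold` defined above:

```python
# columns flagged as highly correlated with an earlier column
corr_features = correlation(df.iloc[:, :-1], threshold)
print(corr_features)

# drop them and keep the rest (including the target) for modelling
df_reduced = df.drop(columns=list(corr_features))
print(df_reduced.shape)
```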
NYC Taxi 3 - Simple Featuretools.ipynb
###Markdown A Featuretools Baseline The following tutorial illustrates a featuretools baseline model for the NYC Taxi Trip Duration competition on Kaggle. This notebook follows the structure of the previous worksheet, but uses deep feature synthesis to create the model.Step 1: Download raw data As always, if you have not yet downloaded the data it can be found at the Kaggle website. After installing featuretools following the instructions in the documentation you can run the following. ###Code import pandas as pd import numpy as np import featuretools as ft import taxi_utils ft.__version__ TRAIN_DIR = "data/train.csv" TEST_DIR = "data/test.csv" data_train, data_test = taxi_utils.read_data(TRAIN_DIR, TEST_DIR) data_train.head(5) ###Output _____no_output_____ ###Markdown Step 2: Prepare the Data Let's create another column to define test and train datasets. ###Code data_train['test_data'] = False data_test['test_data'] = True ###Output _____no_output_____ ###Markdown We can now combine the data. ###Code data = pd.concat([data_train, data_test], sort=True) ###Output _____no_output_____ ###Markdown Step 3: Create baseline features using DFS Instead of manually creating features, such as month of pickup_datetime, we can let featuretools come up with them. Within featuretools there is a standard format for representing data that is used to set up predictions and build features. A EntitySet stores information about entities (database table), variables (columns in database tables), relationships, and the data itself. First, we create the EntitySet. ###Code es = ft.EntitySet("taxi") ###Output _____no_output_____ ###Markdown We can then use the `entity_from_dataframe` method to add an Entity called trips. We want to track the `id`, the `time_index` and specify other types of variables we care about in this entity. As a note: Featuretools will try to interpret the types of variables. We can override this interpretation by specifying the types. In this case, I wanted passenger_count to be a type of Ordinal, and vendor_id to be of type Categorical. ###Code from featuretools import variable_types as vtypes trip_variable_types = { 'passenger_count': vtypes.Ordinal, 'vendor_id': vtypes.Categorical, } es.entity_from_dataframe(entity_id="trips", dataframe=data, index="id", time_index='pickup_datetime', variable_types=trip_variable_types) es['trips'].df ###Output _____no_output_____ ###Markdown We can also normalize some of the columns to create new entities. So a vendors entity is created based on the unique values in the vendor_id column in trips. ###Code es.normalize_entity(base_entity_id="trips", new_entity_id="vendors", index="vendor_id") es.normalize_entity(base_entity_id="trips", new_entity_id="passenger_cnt", index="passenger_count") ###Output _____no_output_____ ###Markdown We can specify the time for each instance of the target_entity to calculate features. The timestamp represents the last time data can be used for calculating features by DFS. This is specified using a dataframe of cutoff times. Below we can see that the cutoff time for each trip is the pickup time. ###Code cutoff_time = es['trips'].df[['id', 'pickup_datetime']] es.add_interesting_values() ###Output _____no_output_____ ###Markdown  Visualize EntitySet ###Code es.plot() ###Output _____no_output_____ ###Markdown Given this dataset, we would have about 2 million unique cutoff times. This is a good use case to use the approximate features parameter of DFS. 
In a large dataset, direct features that are aggregations on the prediction entity may not change much from cutoff time to cutoff time. Calculating the aggregation features at specific times every hour and using it for all cutoff times within the hour would save time and perhaps not lose much information. The approximate parameter in DFS lets you specify a window size to use when approximating these direct aggregation features.We now create features using DFS.Note, we can use an already calculated feature_matrix by doing the following:You must copy and run the code.```pythonfeature_matrix = pd.read_csv('https://s3.amazonaws.com/featuretools-static/nyc_taxi/fm_simple.csv', index_col='id')features = feature_matrix.columns.values``` ###Code trans_primitives = ['Minute', 'Hour', 'Day', 'Week', 'Month', 'Weekday', 'Is_weekend'] feature_matrix, features = ft.dfs(entityset=es, target_entity="trips", trans_primitives=trans_primitives, drop_contains=['trips.test_data'], verbose=True, cutoff_time=cutoff_time, approximate='36d') ###Output Built 96 features Elapsed: 06:20 | Remaining: 00:00 | Progress: 100%|██████████| Calculated: 11/11 chunks ###Markdown Here are the features created. Notice how some of the features match the manually created features in the previous notebook. ###Code print(len(features)) features[:25] ###Output _____no_output_____ ###Markdown Step 3: Build the Model We need to retrieve our labels for the train dataset, so we should merge our current feature matrix with the original dataset. We also get the log of the trip duration so that a more linear relationship can be found. ###Code # separates the whole feature matrix into train data feature matrix, train data labels, and test data feature matrix X_train, labels, X_test = taxi_utils.get_train_test_fm(feature_matrix) labels = np.log(labels.values + 1) model = taxi_utils.train_xgb(X_train, labels) ###Output [0] train-rmse:5.00535 valid-rmse:5.00507 Multiple eval metrics have been passed: 'valid-rmse' will be used for early stopping. Will train until valid-rmse hasn't improved in 60 rounds. 
[10] train-rmse:1.00739 valid-rmse:1.00793 [20] train-rmse:0.575585 valid-rmse:0.577753 [30] train-rmse:0.5238 valid-rmse:0.527231 [40] train-rmse:0.49523 valid-rmse:0.500017 [50] train-rmse:0.475479 valid-rmse:0.481395 [60] train-rmse:0.463175 valid-rmse:0.470005 [70] train-rmse:0.453555 valid-rmse:0.461199 [80] train-rmse:0.448871 valid-rmse:0.457243 [90] train-rmse:0.445029 valid-rmse:0.453938 [100] train-rmse:0.436242 valid-rmse:0.445635 [110] train-rmse:0.434033 valid-rmse:0.443912 [120] train-rmse:0.425273 valid-rmse:0.435776 [130] train-rmse:0.422681 valid-rmse:0.433628 [140] train-rmse:0.417246 valid-rmse:0.428812 [150] train-rmse:0.415018 valid-rmse:0.427003 [160] train-rmse:0.412853 valid-rmse:0.425193 [170] train-rmse:0.407265 valid-rmse:0.420134 [180] train-rmse:0.401503 valid-rmse:0.414888 [190] train-rmse:0.400381 valid-rmse:0.414263 [200] train-rmse:0.398285 valid-rmse:0.412464 [210] train-rmse:0.39587 valid-rmse:0.410498 [220] train-rmse:0.394325 valid-rmse:0.409322 [226] train-rmse:0.390858 valid-rmse:0.40626 Modeling RMSLE 0.40626 ###Markdown Step 4: Make a Submission ###Code submission = taxi_utils.predict_xgb(model, X_test) submission.head(5) submission.to_csv('trip_duration_ft_simple.csv', index=True, index_label='id') ###Output _____no_output_____ ###Markdown This solution:&nbsp; &nbsp; Received a score of 0.45288 on the Kaggle competition.&nbsp; &nbsp; Placed 685 out of 1257.&nbsp; &nbsp; Beat 45% of competitors on the Kaggle competition.&nbsp; &nbsp; Scored 4% better than the baseline solution&nbsp; &nbsp; Had a modeling RMSLE of 0.40196December 27, 2017.Additional AnalysisLet's look at how important each feature was for the model. ###Code feature_names = X_train.columns.values ft_importances = taxi_utils.feature_importances(model, feature_names) ft_importances ###Output _____no_output_____
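###Markdown The feature definitions themselves can also be saved, so the exact same features can be recalculated later (for example on new data) without rerunning DFS. A small sketch using featuretools' serialization helpers; it assumes `features` holds the definitions returned by `ft.dfs` (not the column names loaded from the precomputed CSV), and the file name is arbitrary:

```python
# persist the list of feature definitions produced by ft.dfs
ft.save_features(features, "taxi_feature_defs.json")

# later (or in another script) the definitions can be reloaded and reused
saved_features = ft.load_features("taxi_feature_defs.json")
print(len(saved_features))
```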
Notebook/starter model.ipynb
###Markdown Starter Project Creating a Basic LSTM Classifier using Pytorch Section 1.1 Initialization ###Code #importing libraries import pandas as pd import nltk from nltk.corpus import stopwords import re from wordsegment import segment, load from nltk.tokenize import TweetTokenizer STOPWORDS = set(stopwords.words('english')) ###Output _____no_output_____ ###Markdown Section 1.1.1 Initializing Tokenizer, in case you don't know what a tokenizer is please visit: https://nlp.stanford.edu/IR-book/html/htmledition/tokenization-1.html ###Code # in our model we will be using tweet tokenizer # we have reduced the lenght of the text, and using preserve_case as False we turn all charecters to lower case tknzr = TweetTokenizer(reduce_len=True, preserve_case=False, strip_handles=False) ###Output _____no_output_____ ###Markdown Section 1.1.2 Creating the Pre-Processing Function here we remove unwanted symbols and numbers from our text and change known emoji symbols to their text value ###Code def text_preprocess(text): text = str(text) FLAGS = re.MULTILINE | re.DOTALL eyes = r"[8:=;]" nose = r"['`\-]?" def re_sub(pattern, repl): return re.sub(pattern, repl, text, flags=FLAGS) text = re_sub(r"https?:\/\/\S+\b|www\.(\w+\.)+\S*", "<url>") text = re_sub(r"/"," / ") text = re_sub(r"@\w+", "<user>") text = re_sub(r"{}{}[)dD]+|[)dD]+{}{}".format(eyes, nose, nose, eyes), "<smile>") text = re_sub(r"{}{}p+".format(eyes, nose), "<lolface>") text = re_sub(r"{}{}\(+|\)+{}{}".format(eyes, nose, nose, eyes), "<sadface>") text = re_sub(r"{}{}[\/|l*]".format(eyes, nose), "<neutralface>") text = re_sub(r"<3","<heart>") text = re_sub(r"[-+]?[.\d]*[\d]+[:,.\d]*", "<number>") text = re_sub(r"([!?.]){2,}", r"\1 <repeat>") text = re_sub(r"\b(\S*?)(.)\2{2,}\b", r"\1\2 <elong>") text = " ".join([word for word in str(text).split() if word not in STOPWORDS]) tokens = tknzr.tokenize(text.lower()) return " ".join(tokens) ###Output _____no_output_____ ###Markdown Section 1.1.3 Importing the dataset and pre-processing it We turn the sentiment labels to numerical values for the machine to understand You can download the dataset from [here](https://www.kaggle.com/lakshmi25npathi/imdb-dataset-of-50k-movie-reviews) ###Code # we import the dataset using read_csv function from the pandas library # just replace INPUTPATH with your input path df = pd.read_csv("INPUTPATH.csv") # visualizing the firt 5 rows of our dataset df.head() # encoding sentiment labels to numerical values encode_label = {'negative' : 0, 'positive' : 1} df['sentiment'] = df['sentiment'].map(encode_label) # visualizing the firt 5 rows of our dataset again df.head() # pre-processing the dataset df['review'] = df['review'].apply(text_preprocess) # visualizing the firt 5 rows of our dataset again after pre-processing df.head() ###Output _____no_output_____ ###Markdown Section 1.1.4 Saving the pre-processed dataset ###Code # we use the to_csv function from the pandas library to export our dataframe as a csv file #just replace destination_folder with your outputpath # Trim text and titletext to first_n_words train_test_ratio = 0.10 train_valid_ratio = 0.80 first_n_words = 500 df_raw['review'] = df_raw['review'].apply(trim_string) # Splits dataset according to label df_real = df_raw[df_raw['sentiment'] == 0] df_fake = df_raw[df_raw['sentiment'] == 1] # Train-test split df_real_full_train, df_real_test = train_test_split(df_real, train_size = train_test_ratio, random_state = 1) df_fake_full_train, df_fake_test = train_test_split(df_fake, train_size = train_test_ratio, 
random_state = 1) # Train-valid split df_real_train, df_real_valid = train_test_split(df_real_full_train, train_size = train_valid_ratio, random_state = 1) df_fake_train, df_fake_valid = train_test_split(df_fake_full_train, train_size = train_valid_ratio, random_state = 1) # Concatenate splits of different labels df_train = pd.concat([df_real_train, df_fake_train], ignore_index=True, sort=False) df_valid = pd.concat([df_real_valid, df_fake_valid], ignore_index=True, sort=False) df_test = pd.concat([df_real_test, df_fake_test], ignore_index=True, sort=False) # Write preprocessed data for train, test and validation df_train.to_csv(destination_folder + '/train.csv', index=False) df_valid.to_csv(destination_folder + '/valid.csv', index=False) df_test.to_csv(destination_folder + '/test.csv', index=False) ###Output _____no_output_____ ###Markdown Section 2.1 Training the Starter Model ###Code # importing important libraries needed for this section import matplotlib.pyplot as plt import pandas as pd import torch # Preliminaries from torchtext.data import Field, TabularDataset, BucketIterator # Models import torch.nn as nn from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence # Training import torch.optim as optim # Evaluation from sklearn.metrics import accuracy_score, classification_report, confusion_matrix import seaborn as sns #Sets device as GPU or CPU for training depending on your local machine device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') ###Output _____no_output_____ ###Markdown Section 2.1.1 Loading the pre-processed dataset and creating Training, Testing and Validation SetsWe import the csv file we have saved above and create the iterators for Train set, Validation set and Test set. ###Code # Define columns to read. #text_field will contain our text from the movie reviews text_field = Field(lower=True, include_lengths=True, batch_first=True) #label_field will contain the labels respective to each review label_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float) #final list containing the labels and the text fields = [('sentiment', label_field), ('review', text_field)] #we use TabularDataset to create a Dataset for training, validation and testing purpose, the path of each file is passed as the argument train, valid, test = TabularDataset.splits(path=destination_folder, train='train.csv', validation='valid.csv', test='test.csv', format='CSV', fields=fields, skip_header=True) #BucketIterator is used to create iterations of the complete sets in the defined batch sizes #Batch size is the number of examples after which the weights of the network are updated #A smaller batch size means the weights of the network are updated more freqeuntly and a large batch size would mean less updates train_iter = BucketIterator(train, batch_size=64, sort_key=lambda x: len(x.review), device=device, sort=True, sort_within_batch=True) valid_iter = BucketIterator(valid, batch_size=64, sort_key=lambda x: len(x.review), device=device, sort=True, sort_within_batch=True) test_iter = BucketIterator(test, batch_size=64, sort_key=lambda x: len(x.review), device=device, sort=True, sort_within_batch=True) # Vocabulary #this creates the vocabulary for our model, any word that occurs more than three times is added to our vocaublary file text_field.build_vocab(train, min_freq=3) ###Output _____no_output_____ ###Markdown Section 2.1.2 Creating the model architectureWe have defined a simple LSTM model having 128 units, with a bidirecational layer. 
This is followed by a Dropout layer, which randomly zeroes part of the pooled LSTM feature vector during training to reduce overfitting. At the end we have a linear layer whose number of input units is set to 2 * the hidden size of the LSTM layer, because the LSTM is bidirectional. ###Code class LSTM(nn.Module): def __init__(self, dimension=128): super(LSTM, self).__init__() self.embedding = nn.Embedding(len(text_field.vocab), 500) self.dimension = dimension self.lstm = nn.LSTM(input_size=500, hidden_size=dimension, num_layers=1, batch_first=True, bidirectional=True) self.drop = nn.Dropout(p=0.3) self.fc = nn.Linear(2*dimension, 1) def forward(self, text, text_len): text_emb = self.embedding(text) packed_input = pack_padded_sequence(text_emb, text_len, batch_first=True, enforce_sorted=False) packed_output, _ = self.lstm(packed_input) output, _ = pad_packed_sequence(packed_output, batch_first=True) out_forward = output[range(len(output)), text_len - 1, :self.dimension] out_reverse = output[:, 0, self.dimension:] out_reduced = torch.cat((out_forward, out_reverse), 1) text_fea = self.drop(out_reduced) text_fea = self.fc(text_fea) text_fea = torch.squeeze(text_fea, 1) text_out = torch.sigmoid(text_fea) return text_out ###Output _____no_output_____ ###Markdown Section 2.1.3 Saving the model We use the state-dictionary method to save a model: a dictionary is created in which the keys are the layer names and the values are the weight matrices of those layers. When using this method, each time we want to load a model we need to define a copy of the original model that will hold the loaded weights. ###Code def save_checkpoint(save_path, model, optimizer, valid_loss): if save_path == None: return state_dict = {'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'valid_loss': valid_loss} torch.save(state_dict, save_path) print(f'Model saved to ==> {save_path}') def load_checkpoint(load_path, model, optimizer): if load_path==None: return state_dict = torch.load(load_path, map_location=device) print(f'Model loaded from <== {load_path}') model.load_state_dict(state_dict['model_state_dict']) optimizer.load_state_dict(state_dict['optimizer_state_dict']) return state_dict['valid_loss'] def save_metrics(save_path, train_loss_list, valid_loss_list, global_steps_list): if save_path == None: return state_dict = {'train_loss_list': train_loss_list, 'valid_loss_list': valid_loss_list, 'global_steps_list': global_steps_list} torch.save(state_dict, save_path) print(f'Model saved to ==> {save_path}') def load_metrics(load_path): if load_path==None: return state_dict = torch.load(load_path, map_location=device) print(f'Model loaded from <== {load_path}') return state_dict['train_loss_list'], state_dict['valid_loss_list'], state_dict['global_steps_list'] ###Output _____no_output_____ ###Markdown Section 2.1.4 Training the Model In this section we add the training loop of our model, which trains it on the training set ###Code def train(model, optimizer, criterion = nn.BCELoss(), train_loader = train_iter, valid_loader = valid_iter, num_epochs = 5, eval_every = len(train_iter) // 2, file_path = destination_folder, best_valid_loss = float("Inf")): # initialize running values running_loss = 0.0 valid_running_loss = 0.0 global_step = 0 train_loss_list = [] valid_loss_list = [] global_steps_list = [] # training loop model.train() for epoch in range(num_epochs): for (labels, (title, title_len)), _ in train_loader: labels = labels.to(device) title = title.to(device) title_len = title_len.to(device) output = model(title, title_len) loss = criterion(output, labels)
optimizer.zero_grad() loss.backward() optimizer.step() # update running values running_loss += loss.item() global_step += 1 # evaluation step if global_step % eval_every == 0: model.eval() with torch.no_grad(): # validation loop for (labels, (title, title_len)), _ in valid_loader: labels = labels.to(device) title = title.to(device) title_len = title_len.to(device) output = model(title, title_len) loss = criterion(output, labels) valid_running_loss += loss.item() # evaluation average_train_loss = running_loss / eval_every average_valid_loss = valid_running_loss / len(valid_loader) train_loss_list.append(average_train_loss) valid_loss_list.append(average_valid_loss) global_steps_list.append(global_step) # resetting running values running_loss = 0.0 valid_running_loss = 0.0 model.train() # print progress print('Epoch [{}/{}], Step [{}/{}], Train Loss: {:.4f}, Valid Loss: {:.4f}' .format(epoch+1, num_epochs, global_step, num_epochs*len(train_loader), average_train_loss, average_valid_loss)) # checkpoint if best_valid_loss > average_valid_loss: best_valid_loss = average_valid_loss save_checkpoint(file_path + '\model.pt', model, optimizer, best_valid_loss) save_metrics(file_path + '\metrics.pt', train_loss_list, valid_loss_list, global_steps_list) save_metrics(file_path + '\metrics.pt', train_loss_list, valid_loss_list, global_steps_list) print('Finished Training!') model = LSTM().to(device) pretrainedPath = torch.load('modelFinal.pth') model.load_state_dict(pretrainedPath, strict=False) optimizer = optim.Adam(model.parameters(), lr=0.00001) train(model=model, optimizer=optimizer, num_epochs=12) ###Output _____no_output_____ ###Markdown Section 2.1.4 Testing the ModelIn this section we add the testing part of our model, that tests it on the testing set ###Code def evaluate(model, test_loader, version='title', threshold=0.5): y_pred = [] y_true = [] model.eval() with torch.no_grad(): for (labels, (title, title_len)), _ in test_loader: labels = labels.to(device) title = title.to(device) title_len = title_len.to(device) output = model(title, title_len) output = (output > threshold).int() y_pred.extend(output.tolist()) y_true.extend(labels.tolist()) print('Classification Report:') print(classification_report(y_true, y_pred, labels=[1,0], digits=4)) cm = confusion_matrix(y_true, y_pred, labels=[1,0]) ax= plt.subplot() sns.heatmap(cm, annot=True, ax = ax, cmap='Blues', fmt="d") ax.set_title('Confusion Matrix') ax.set_xlabel('Predicted Labels') ax.set_ylabel('True Labels') ax.xaxis.set_ticklabels(['FAKE', 'REAL']) ax.yaxis.set_ticklabels(['FAKE', 'REAL']) best_model = LSTM().to(device) optimizer = optim.Adam(best_model.parameters(), lr=0.001) load_checkpoint(destination_folder + '\model.pt', best_model, optimizer) evaluate(best_model, test_iter) #final save for the model torch.save(model.state_dict(), "modelFinalTransfer.pth") ###Output _____no_output_____
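###Markdown The cell below is an added illustration and is not part of the original notebook: a minimal inference sketch showing how the saved weights could be reused to score a single raw review. It assumes that the fitted `text_field` vocabulary, the `text_preprocess` helper from Section 1.1.2 and the file `modelFinalTransfer.pth` written above are still available; adjust the path if the weights were stored elsewhere. ###Code
# Added illustration (assumptions: text_field, text_preprocess and the saved weights are in memory/on disk)
def predict_sentiment(raw_review, trained_model, threshold=0.5):
    trained_model.eval()
    tokens = text_preprocess(raw_review).split()
    # map tokens to vocabulary indices; out-of-vocabulary words fall back to <unk>
    indices = [text_field.vocab.stoi[token] for token in tokens]
    text = torch.tensor(indices, dtype=torch.long, device=device).unsqueeze(0)  # shape (1, seq_len)
    length = torch.tensor([len(indices)], dtype=torch.long)                     # lengths stay on the CPU for packing
    with torch.no_grad():
        probability = trained_model(text, length).item()
    return ('positive' if probability > threshold else 'negative'), probability

inference_model = LSTM().to(device)
inference_model.load_state_dict(torch.load("modelFinalTransfer.pth", map_location=device))
print(predict_sentiment("The film was surprisingly touching and well acted.", inference_model))
###Output _____no_output_____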
Recommendation_system_assignment_final_.ipynb
###Markdown SGD Algorithm to predict movie ratings **There will be some functions that start with the word "grader" ex: grader_matrix(), grader_mean(), grader_dim() etc, you should not change those function definition.Every Grader function has to return True.** 1. Download the data from here 2. The data will be of this format, each data point is represented as a triplet of user_id, movie_id and rating user_idmovie_idrating772363471208564140143129845850452357275 Task 1 Predict the rating for a given (user_id, movie_id) pair Predicted rating $\hat{y}_{ij}$ for user i, movied j pair is calcuated as $\hat{y}_{ij} = \mu + b_i + c_j + u_i^T v_j$ , here we will be finding the best values of $b_{i}$ and $c_{j}$ using SGD algorithm with the optimization problem for N users and M movies is defined as $$L = \min_{ b, c, \{ u_i \}_{i=1}^N, \{ v_j \}_{j=1}^M}\quad\alpha \Big( \sum_{j} \sum_{k} v_{jk}^2 + \sum_{i} \sum_{k} u_{ik}^2 + \sum_{i} b_i^2 + \sum_{j} c_i^2 \Big)+ \sum_{i,j \in \mathcal{I}^{\text{train}}} (y_{ij} - \mu - b_i - c_j - u_i^T v_j)^2$$ \(\mu\) : scalar mean rating\(b_i\) : scalar bias term for user \(i\)\(c_j\) : scalar bias term for movie \(j\)\(u_i\) : K-dimensional vector for user \(i\)\(v_j\) : K-dimensional vector for movie \(j\) $ \ $ *. We will be giving you some functions, please write code in that functions only.*. After every function, we will be giving you expected output, please make sure that you get that output. 1. Construct adjacency matrix with the given data, assuming its graph and the weight of each edge is the rating given by user to the movie you can construct this matrix like $A[i][j]=r_{ij}$ here $i$ is user_id, $j$ is movie_id and $r_{ij}$ is rating given by user $i$ to the movie $j$ Hint : you can create adjacency matrix using csr_matrix2. We will Apply SVD decomposition on the Adjaceny matrix link1, link2 and get three matrices $U, \sum, V$ such that $U \times \sum \times V^T = A$, if $A$ is of dimensions $N \times M$ then U is of $N \times k$, $\sum$ is of $k \times k$ and $V$ is $M \times k$ dimensions. *. So the matrix $U$ can be represented as matrix representation of users, where each row $u_{i}$ represents a k-dimensional vector for a user *. So the matrix $V$ can be represented as matrix representation of movies, where each row $v_{j}$ represents a k-dimensional vector for a movie.3. Compute $\mu$ , $\mu$ represents the mean of all the rating given in the dataset.(write your code in def m_u())4. For each unique user initilize a bias value $B_{i}$ to zero, so if we have $N$ users $B$ will be a $N$ dimensional vector, the $i^{th}$ value of the $B$ will corresponds to the bias term for $i^{th}$ user (write your code in def initialize())5. For each unique movie initilize a bias value $C_{j}$ zero, so if we have $M$ movies $C$ will be a $M$ dimensional vector, the $j^{th}$ value of the $C$ will corresponds to the bias term for $j^{th}$ movie (write your code in def initialize())6. Compute dL/db_i (Write you code in def derivative_db())7. Compute dL/dc_j(write your code in def derivative_dc()8. Print the mean squared error with predicted ratings.for each epoch: for each pair of (user, movie): b_i = b_i - learning_rate * dL/db_i c_j = c_j - learning_rate * dL/dc_jpredict the ratings with formula$\hat{y}_{ij} = \mu + b_i + c_j + \text{dot_product}(u_i , v_j) $9. you can choose any learning rate and regularization term in the range $10^{-3} \text{ to } 10^2$ 10. 
__bonus__: instead of using SVD decomposition you can learn the vectors $u_i$, $v_j$ with the help of SGD algo similar to $b_i$ and $c_j$ ###Code import warnings warnings.filterwarnings("ignore") ###Output _____no_output_____ ###Markdown Task 2 As we know U is the learned matrix of user vectors, with its i-th row as the vector ui for user i. Each row of U can be seen as a "feature vector" for a particular user.The question we'd like to investigate is this: do our computed per-user features that are optimized for predicting movie ratings contain anything to do with gender?The provided data file user_info.csv contains an is_male column indicating which users in the dataset are male. Can you predict this signal given the features U?> __Note 1__ : there is no train test split in the data, the goal of this assignment is to give an intution about how to do matrix factorization with the help of SGD and application of truncated SVD. for better understanding of the collabarative fillerting please check netflix case study. > __Note 2__ : Check if scaling of $U$, $V$ matrices improve the metric Reading the csv file ###Code from sklearn.metrics import confusion_matrix from sklearn.metrics import plot_confusion_matrix from sklearn import tree from google.colab import files import io import pandas as pd uploaded = files.upload () data=pd.read_csv(io.BytesIO(uploaded['ratings_train.csv'])) data.head() data.shape ###Output _____no_output_____ ###Markdown Create your adjacency matrix ###Code users =data['user_id'].unique() len(users) items = data['item_id'].unique() len(items) ratings = data['rating'] ratings.shape #csr_matrix((data, (row_ind, col_ind)), [shape=(M, N)]) from scipy.sparse import csr_matrix adjacency_matrix = csr_matrix((data['rating'],(data['user_id'],data['item_id'])))# write your code of adjacency matrix here adjacency_matrix.shape adj = adjacency_matrix.toarray() adj[0][0] ###Output _____no_output_____ ###Markdown Grader function - 1 ###Code def grader_matrix(matrix): assert(matrix.shape==(943,1681)) return True grader_matrix(adjacency_matrix) ###Output _____no_output_____ ###Markdown **The unique items in the given csv file are 1662 only . But the id's vary from 0-1681 but they are not continuous and hence you'll get matrix of size 943x1681.** SVD decompostion Sample code for SVD decompostion ###Code from sklearn.utils.extmath import randomized_svd import numpy as np matrix = np.random.random((20, 10)) U, Sigma, VT = randomized_svd(matrix, n_components=5,n_iter=5, random_state=None) print(U.shape) print(Sigma.shape) print(VT.T.shape) ###Output (20, 5) (5,) (10, 5) ###Markdown Write your code for SVD decompostion ###Code # Please use adjacency_matrix as matrix for SVD decompostion # You can choose n_components as your choice from sklearn.utils.extmath import randomized_svd import numpy as np U, Sigma, VT = randomized_svd(adjacency_matrix, n_components=50,n_iter=5, random_state=None) print(U.shape) print(Sigma.shape) print(VT.T.shape) ###Output (943, 50) (50,) (1681, 50) ###Markdown Compute mean of ratings ###Code def m_u(ratings): '''In this function, we will compute mean for all the ratings''' # you can use mean() function to do this # check this (https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.mean.html) link for more details. 
Mean = np.round(np.mean(ratings),3) return Mean mu=m_u(data['rating']) print(mu) ###Output 3.529 ###Markdown Grader function -2 ###Code def grader_mean(mu): assert(np.round(mu,3)==3.529) return True mu=m_u(data['rating']) grader_mean(mu) ###Output _____no_output_____ ###Markdown Initialize $B_{i}$ and $C_{j}$ Hint : Number of rows of adjacent matrix corresponds to user dimensions($B_{i}$), number of columns of adjacent matrix corresponds to movie dimensions ($C_{j}$) ###Code def initialize(dim): '''In this function, we will initialize bias value 'B' and 'C'.''' # initalize the value to zeros # return output as a list of zeros Bi = np.zeros(dim) return Bi dim=adjacency_matrix.shape[0]# give the number of dimensions for b_i (Here b_i corresponds to users) b_i=initialize(dim) b_i.sum() dim= adjacency_matrix.shape[1]# give the number of dimensions for c_j (Here c_j corresponds to movies) c_j=initialize(dim) c_j.sum() ###Output _____no_output_____ ###Markdown Grader function -3 ###Code def grader_dim(b_i,c_j): assert(len(b_i)==943 and np.sum(b_i)==0) assert(len(c_j)==1681 and np.sum(c_j)==0) return True grader_dim(b_i,c_j) ###Output _____no_output_____ ###Markdown Compute dL/db_i ###Code data['rating'].shape def derivative_db(user_id,item_id,rating,U,V,mu,alpha,b,c): '''In this function, we will compute dL/db_i''' alpha = 0.01 p1 = 2*b-2*(rating-mu-b-c-np.dot(U[user_id],V[:,item_id])) #print(mu+np.sum(np.dot(U,V))) return p1 ###Output _____no_output_____ ###Markdown Grader function -4 ###Code def grader_db(value): assert(np.round(value,3)==-0.931) return True U1, Sigma, V1 = randomized_svd(adjacency_matrix, n_components=2,n_iter=5, random_state=24) # Please don't change random state # Here we are considering n_componets = 2 for our convinence alpha=0.01 b = 0 c = 0 value=derivative_db(312,98,4,U1,V1,mu,alpha,b,c) print(np.round(value,3)) grader_db(value) ###Output -0.932 ###Markdown Compute dL/dc_j ###Code def derivative_dc(user_id,item_id,rating,U,V,mu,alpha,b,c): '''In this function, we will compute dL/dc_j''' alpha = 0.01 p1 = 2*c-2*(rating-mu-b-c-np.dot(U[user_id],V[:,item_id])) return p1 ###Output _____no_output_____ ###Markdown Grader function - 5 ###Code def grader_dc(value): assert(np.round(value,3)==-2.929) return True U1, Sigma, V1 = randomized_svd(adjacency_matrix, n_components=2,n_iter=5, random_state=24) # Please don't change random state # Here we are considering n_componets = 2 for our convinence r=0.01 value=derivative_dc(58,504,5,U1,V1,mu,alpha,b,c) print(round(value,3)) grader_dc(value) #print(value) U1.shape ###Output -2.93 ###Markdown Compute MSE (mean squared error) for predicted ratings for each epoch, print the MSE value for each epoch: for each pair of (user, movie): b_i = b_i - learning_rate * dL/db_i c_j = c_j - learning_rate * dL/dc_jpredict the ratings with formula$\hat{y}_{ij} = \mu + b_i + c_j + \text{dot_product}(u_i , v_j) $ ###Code adj = adjacency_matrix.toarray() from tqdm import tqdm Y = [] alpha = 0.01 i = 50 MSE = [] learning_rate = 0.01 mu=m_u(data['rating']) y_ = np.zeros((943,1681)) b_i_new = b_i c_j_new = c_j mse = 0 count = 0 while(i>0): mse = 0 for j in range(len(users)): for k in range(len(items)): u = users[j] m = items[k] if adj[j][k] != 0: db = derivative_db(u,m,adj[j][k],U,VT,mu,alpha,b_i[j],c_j[k]) # finding db dc = derivative_dc(u,m,adj[j][k],U,VT,mu,alpha,b_i[j],c_j[k]) #finding dc b_i_new[j] = b_i[j] - learning_rate*db c_j_new[k] = c_j[k] - learning_rate*dc y_[i][j] = b_i[j]+c_j[k]+mu+np.dot(U[users[j]],VT[:,items[k]]) b_i[j] = b_i_new[j] 
c_j[k] = c_j_new[k] mse+=(adj[j][k]-(b_i_new[j]+c_j_new[k]+mu+np.dot(U[users[j]],VT[:,items[k]])))**2 #finding mean squared error MSE.append(mse/89992) i-=1 MSE ###Output _____no_output_____ ###Markdown Plot epoch number vs MSE * epoch number on X-axis* MSE on Y-axis ###Code import matplotlib.pyplot as plt plt.plot(np.arange(1,51),MSE) plt.xlabel('epoch') plt.ylabel('MSE') plt.title('MSE v/s epoches') ###Output _____no_output_____ ###Markdown Task 2 - For this task you have to consider the user_matrix U and the user_info.csv file.- You have to consider is_male columns as output features and rest as input features. Now you have to fit a model by posing this problem as binary classification task.- You can apply any model like Logistic regression or Decision tree and check the performance of the model. - Do plot confusion matrix after fitting your model and write your observations how your model is performing in this task.- Optional work- You can try scaling your U matrix.Scaling means changing the values of n_componenets while performing svd and then check your results. ###Code U data1 = pd.read_csv('user_info.csv.txt') data1.shape target = data1['is_male'] target data1 = data1.drop(['is_male'],axis = 1) data2 = pd.DataFrame(U) data1.shape data2.shape data = pd.concat([data1,data2],axis = 1) data data.isna().sum() data.duplicated().sum() target.value_counts() from sklearn.model_selection import train_test_split X_train , X_test ,y_train, y_test = train_test_split(data,target,test_size = 0.33,random_state = 10) print(X_train.shape,y_train.shape) print(X_test.shape,y_test.shape) from sklearn.model_selection import RandomizedSearchCV from sklearn.linear_model import LogisticRegression Logistic = LogisticRegression(random_state = 10) param = [{'C': [10**-4, 10**-2, 10**0, 10**2, 10**4]}] model = RandomizedSearchCV(Logistic,param,random_state = 10) model.fit(X_train,y_train) model.best_estimator_ model1 = LogisticRegression(C = 10000,random_state=10) model1.fit(X_train,y_train) y_pred = model1.predict(X_test) plot_confusion_matrix(model1,X_test,y_test) plt.show() roc_auc_score(y_test, model1.predict_proba(X_test)[:, 1]) ###Output _____no_output_____
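###Markdown As an added illustration (not required by the assignment), the sketch below combines the learned quantities into the rating formula $\hat{y}_{ij} = \mu + b_i + c_j + u_i^T v_j$ for a single (user_id, item_id) pair. It assumes that `mu`, `b_i`, `c_j`, `U` and `VT` from the SGD cells above are still in memory; the clipping range is an assumption based on the 1-5 rating scale of the data set. ###Code
# Added illustration (hypothetical helper): predict one rating as mu + b_i + c_j + u_i . v_j
def predict_rating(user_id, item_id):
    y_hat = mu + b_i[user_id] + c_j[item_id] + np.dot(U[user_id], VT[:, item_id])
    # ratings in the data set lie between 1 and 5, so clip the estimate to that range
    return float(np.clip(y_hat, 1.0, 5.0))

print(predict_rating(312, 98))
###Output _____no_output_____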
Maintenance/Prediction of IOT system failures based on sensor data/notebook.ipynb
###Markdown Prediction of fire protection system failures based on sensor data Business Understanding With the help of the Inveron hazard management system, plants that are already equipped with sensors can be monitored and controlled manually, for example from a control station. Inveron is a software product that bundles and visualizes all sensor data from the fire protection systems. In addition, test alarms can be triggered or alarms reset with the help of Inveron. The Inveron hazard management system already processes a lot of sensor data that can be used in the local system environment, for example in the security center of an industrial company. Fixed maintenance intervals tell Minimax service technicians when a system is due for maintenance. In addition to monitoring its own sensor data, Inveron can also monitor third-party data: connected video cameras can be used for fire detection, for example, as can burglar alarm systems, fence monitoring systems or gate control systems. Open interfaces (OPC, Modbus, Profibus, BACnet) allow long-term usability because individual components can be exchanged. 2.1. Import of Relevant Modules ###Code import pandas as pd import numpy as np import matplotlib.pyplot as plt # Setting seed for reproducibility np.random.seed(9876) PYTHONHASHSEED = 0 from sklearn import preprocessing from sklearn.metrics import confusion_matrix, recall_score, precision_score from keras.models import Sequential from keras.layers import Dense, Dropout, LSTM, Activation %matplotlib inline ###Output _____no_output_____ ###Markdown 2.2. Load Data ###Code #load training data train_df = pd.read_csv('https://storage.googleapis.com/ml-service-repository-datastorage/Prediction_of_IOT_system_failures_based_on_sensor_data_PM_train.txt', sep=" ", header=None) train_df.drop(train_df.columns[[26, 27]], axis=1, inplace=True) train_df.columns = ['id', 'zyklus', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14', 's15', 's16', 's17', 's18', 's19', 's20', 's21'] # load test data test_df = pd.read_csv('https://storage.googleapis.com/ml-service-repository-datastorage/Prediction_of_IOT_system_failures_based_on_sensor_data_PM_test.txt', sep=" ", header=None) test_df.drop(test_df.columns[[26, 27]], axis=1, inplace=True) test_df.columns = ['id', 'zyklus', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14', 's15', 's16', 's17', 's18', 's19', 's20', 's21'] # load truth data truth_df = pd.read_csv('https://storage.googleapis.com/ml-service-repository-datastorage/Prediction_of_IOT_system_failures_based_on_sensor_data_PM_truth.txt', sep=" ", header=None) truth_df.drop(truth_df.columns[[1]], axis=1, inplace=True) ###Output _____no_output_____ ###Markdown 2.4. 
Descriptive Analytics ###Code def attribute_description(data): longestColumnName = len(max(np.array(data.columns), key=len)) print("| Feature | Data Type|") print("|-----|------|") for col in data.columns: description = '' col_dropna = data[col].dropna() example = col_dropna.sample(1).values[0] if type(example) == str: description = 'str ' if len(col_dropna.unique()) < 10: description += '{' description += '; '.join([ f'"{name}"' for name in col_dropna.unique()]) description += '}' else: description += '[ example: "'+ example + '" ]' elif (type(example) == np.int32) and (len(col_dropna.unique()) < 10) : description += 'dummy int32 {' description += '; '.join([ f'{name}' for name in sorted(col_dropna.unique())]) description += '}' else: try: description = example.dtype except: description = type(example) print("|" + col.ljust(longestColumnName)+ f'| {description} |') attribute_description(train_df) train_df = train_df.sort_values(['id','zyklus']) train_df.head() ###Output _____no_output_____ ###Markdown 3. Data Preparation 3.1 Check for null values ###Code print(train_df.isna().sum()) print(test_df.isna().sum()) ###Output id 0
zyklus 0
setting1 0
setting2 0
setting3 0
s1 0
s2 0
s3 0
s4 0
s5 0
s6 0
s7 0
s8 0
s9 0
s10 0
s11 0
s12 0
s13 0
s14 0
s15 0
s16 0
s17 0
s18 0
s19 0
s20 0
s21 0
dtype: int64 ###Markdown There are no null values, so there is no missing data. 3.2 Calculate the remaining days until failure In the first step we have loaded the data; we now add a further column to the training data set, namely the remaining cycles until the fire detector or component has to be replaced. In the test data set an additional column is created for the binary distinction, i.e. the classification. The question is whether a specific fire detector (ID) fails within w1 cycles, which is why a fictitious threshold is chosen for w1 here (w1 = 20 cycles in the code below). ###Code remaining_days_until_F = pd.DataFrame(train_df.groupby('id')['zyklus'].max()).reset_index() remaining_days_until_F.columns = ['id', 'max'] train_df = train_df.merge(remaining_days_until_F, on=['id'], how='left') train_df['remaining_days_until_Failure'] = train_df['max'] - train_df['zyklus'] train_df.drop('max', axis=1, inplace=True) train_df.head() w1 = 20 w0 = 10 train_df['label1'] = np.where(train_df['remaining_days_until_Failure'] <= w1, 1, 0 ) train_df['label2'] = train_df['label1'] train_df.loc[train_df['remaining_days_until_Failure'] <= w0, 'label2'] = 2 train_df.head() ###Output _____no_output_____ ###Markdown The cycle column ('zyklus') is normalized by means of a min-max normalization, which is why an additional column with the normalized values is created. ###Code train_df['Zyklus normalisiert'] = train_df['zyklus'] cols_normalize = train_df.columns.difference(['id','zyklus','remaining_days_until_Failure','label1','label2']) minmax = preprocessing.MinMaxScaler() norm_train_df = pd.DataFrame(minmax.fit_transform(train_df[cols_normalize]), columns=cols_normalize, index=train_df.index) join_df = train_df[train_df.columns.difference(cols_normalize)].join(norm_train_df) train_df = join_df.reindex(columns = train_df.columns) train_df.head() ###Output _____no_output_____ ###Markdown Now that the training data have been normalized, the cycle values of the test data are normalized in the same way. 
###Code test_df['Zyklus normalisiert'] = test_df['zyklus'] norm_test_df = pd.DataFrame(minmax.transform(test_df[cols_normalize]), columns=cols_normalize, index=test_df.index) test_join_df = test_df[test_df.columns.difference(cols_normalize)].join(norm_test_df) test_df = test_join_df.reindex(columns = test_df.columns) test_df = test_df.reset_index(drop=True) test_df.head() ###Output _____no_output_____ ###Markdown Next, we use the truth data to generate the labels for the test data. ###Code verbleibende_zyklen = pd.DataFrame(test_df.groupby('id')['zyklus'].max()).reset_index() verbleibende_zyklen.columns = ['id', 'max'] truth_df.columns = ['more'] truth_df['id'] = truth_df.index + 1 truth_df['max'] = verbleibende_zyklen['max'] + truth_df['more'] truth_df.drop('more', axis=1, inplace=True) test_df = test_df.merge(truth_df, on=['id'], how='left') test_df['remaining_days_until_Failure'] = test_df['max'] - test_df['zyklus'] test_df.drop('max', axis=1, inplace=True) test_df.head() ###Output _____no_output_____ ###Markdown Next, we use the truth data to generate the labels for the test data. ###Code test_df['label1'] = np.where(test_df['remaining_days_until_Failure'] <= w1, 1, 0 ) test_df['label2'] = test_df['label1'] test_df.loc[test_df['remaining_days_until_Failure'] <= w0, 'label2'] = 2 test_df.head() ###Output _____no_output_____ ###Markdown Now we look at the data structure ###Code train_df.info() ###Output <class 'pandas.core.frame.DataFrame'> Int64Index: 20631 entries, 0 to 20630 Data columns (total 30 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 id 20631 non-null int64 1 zyklus 20631 non-null int64 2 setting1 20631 non-null float64 3 setting2 20631 non-null float64 4 setting3 20631 non-null float64 5 s1 20631 non-null float64 6 s2 20631 non-null float64 7 s3 20631 non-null float64 8 s4 20631 non-null float64 9 s5 20631 non-null float64 10 s6 20631 non-null float64 11 s7 20631 non-null float64 12 s8 20631 non-null float64 13 s9 20631 non-null float64 14 s10 20631 non-null float64 15 s11 20631 non-null float64 16 s12 20631 non-null float64 17 s13 20631 non-null float64 18 s14 20631 non-null float64 19 s15 20631 non-null float64 20 s16 20631 non-null float64 21 s17 20631 non-null float64 22 s18 20631 non-null float64 23 s19 20631 non-null float64 24 s20 20631 non-null float64 25 s21 20631 non-null float64 26 remaining_days_until_Failure 20631 non-null int64 27 label1 20631 non-null int32 28 label2 20631 non-null int32 29 Zyklus normalisiert 20631 non-null float64 dtypes: float64(25), int32(2), int64(3) memory usage: 5.2 MB ###Markdown It can be seen that all data have either the float or int data type, there are no categorical variables in the data set. Now we display the individual variables in a histogram in order to determine whether all variables can be used meaningfully for the model. The just created columns, as well as "id" are not considered, since these are used for the evaluation and it does not make sense to check here. 
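###Markdown The next cell is an added aside and is not part of the original notebook: before looking at the histograms in the following section, the same question - which columns are effectively constant - can be cross-checked numerically via the per-column standard deviation. It assumes `train_df` still contains all columns created above. ###Code
# Added aside: numeric cross-check of the histogram analysis in the next section.
# Columns whose standard deviation is (almost) zero carry no information about the machine state.
feature_std = train_df.drop(columns=['id', 'remaining_days_until_Failure', 'label1', 'label2']).std()
print(feature_std[feature_std < 1e-6])
###Output _____no_output_____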
###Markdown 3.3 Analyse the histograms of all features ###Code def plot_hist(variable): print("min {} : {} ".format(variable, min(train_df[variable]))) print("max {} : {}".format(variable, max(train_df[variable]))) plt.figure(figsize=(9,3)) plt.hist(train_df[variable], color="orange", ec="orange", edgecolor='green') plt.xlabel(variable) plt.ylabel("Frequency") plt.title("distribution of the {} variable ".format(variable)) plt.show() numericVar = ["zyklus", "setting1", "setting2","setting3", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21"] for n in numericVar: plot_hist(n) ###Output min zyklus : 1
max zyklus : 362 ###Markdown We see that some variables are just constants. These can be deleted, as they do not contain any useful information about the state of the plant. The following columns are deleted: 'setting3', 's1', 's5', 's10', 's16', 's18', 's19'. ###Code train_df.drop(columns=['setting3', 's1', 's5', 's10', 's16', 's18', 's19'],inplace=True) test_df.drop(columns=['setting3', 's1', 's5', 's10', 's16', 's18', 's19'],inplace=True) train_df2 = train_df test_df2 = test_df ###Output _____no_output_____ ###Markdown 4. Modelling and Evaluation After the data has been prepared, the modeling can begin. We will build a recurrent neural network with Long Short-Term Memory layers using the Keras library. Keras requires a three-dimensional numpy array (inputs: a 3D tensor with shape [batch, timesteps, feature], see https://keras.io/api/layers/recurrent_layers/lstm/). Therefore, in the next step, our features are put into this three-dimensional shape. At the same time the window size is defined, because LSTMs have the advantage of remembering information from long sequences without a direct abstraction. ###Code window_size = 25 def generate_sequenze(dataframe_id, laenge_sequenz, spalten_sequenz): array = dataframe_id[spalten_sequenz].values anzahl_elemente = array.shape[0] for start, stop in zip(range(0, anzahl_elemente-laenge_sequenz), range(laenge_sequenz, anzahl_elemente)): yield array[start:stop, :] spalten_sensoren = ['s2', 's3','s4', 's6', 's7', 's8', 's9', 's11', 's12', 's13', 's14', 's15', 's17', 's20', 's21',] spalten_sequenz = ['setting1', 'setting2', 'Zyklus normalisiert'] spalten_sequenz.extend(spalten_sensoren) generierung_sequenz = (list(generate_sequenze(train_df2[train_df2['id']==id], window_size, spalten_sequenz)) for id in train_df2['id'].unique()) array_sequenz = np.concatenate(list(generierung_sequenz)).astype(np.float32) array_sequenz.shape def ueberschrift_generieren(dataframe_id, laenge_sequenz, ueberschrift): array_daten = dataframe_id[ueberschrift].values elemente_numerisch = array_daten.shape[0] return array_daten[laenge_sequenz:elemente_numerisch, :] ueberschriften_generieren = [ueberschrift_generieren(train_df[train_df['id']==id], window_size, ['label1']) for id in train_df2['id'].unique()] ueb_array = np.concatenate(ueberschriften_generieren).astype(np.float32) ueb_array.shape ###Output _____no_output_____ ###Markdown In the next step an LSTM network is created, since the data is now available in three-dimensional form. The first LSTM layer is created with 200 units, followed by a second LSTM layer with 100 units (see the code below). Dropout is used to avoid overfitting. Finally, the output layer with a single unit and sigmoid activation is added, since this is a binary classification problem. 
###Code spalten = array_sequenz.shape[2] ausgabe = ueb_array.shape[1] model = Sequential() model.add(LSTM( input_shape=(window_size, spalten), units=200, return_sequences=True)) model.add(Dropout(0.2)) model.add(LSTM( units=100, return_sequences=False)) model.add(Dropout(0.2)) model.add(Dense(units=ausgabe, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) print(model.summary()) model.fit(array_sequenz, ueb_array, epochs=10, batch_size=200, validation_split=0.05, verbose=1) ###Output Epoch 1/10 87/87 [==============================] - 17s 151ms/step - loss: 0.2726 - accuracy: 0.8735 - val_loss: 0.1108 - val_accuracy: 0.9592 Epoch 2/10 87/87 [==============================] - 12s 136ms/step - loss: 0.0972 - accuracy: 0.9615 - val_loss: 0.0681 - val_accuracy: 0.9625 Epoch 3/10 87/87 [==============================] - 12s 144ms/step - loss: 0.0629 - accuracy: 0.9746 - val_loss: 0.1099 - val_accuracy: 0.9570 Epoch 4/10 87/87 [==============================] - 12s 132ms/step - loss: 0.0610 - accuracy: 0.9746 - val_loss: 0.0787 - val_accuracy: 0.9647 Epoch 5/10 87/87 [==============================] - 11s 130ms/step - loss: 0.0585 - accuracy: 0.9763 - val_loss: 0.0731 - val_accuracy: 0.9614 Epoch 6/10 87/87 [==============================] - 11s 130ms/step - loss: 0.0539 - accuracy: 0.9774 - val_loss: 0.0571 - val_accuracy: 0.9757 Epoch 7/10 87/87 [==============================] - 11s 126ms/step - loss: 0.0547 - accuracy: 0.9774 - val_loss: 0.0520 - val_accuracy: 0.9757 Epoch 8/10 87/87 [==============================] - 11s 122ms/step - loss: 0.0498 - accuracy: 0.9791 - val_loss: 0.0562 - val_accuracy: 0.9735 Epoch 9/10 87/87 [==============================] - 10s 118ms/step - loss: 0.0462 - accuracy: 0.9813 - val_loss: 0.0450 - val_accuracy: 0.9802 Epoch 10/10 87/87 [==============================] - 10s 114ms/step - loss: 0.0452 - accuracy: 0.9811 - val_loss: 0.0322 - val_accuracy: 0.9879 ###Markdown 4.2. Evaluation ###Code scores = model.evaluate(array_sequenz, ueb_array, verbose=1, batch_size=200) print('accuracy: {}'.format(scores[1])) y_pred = model.predict(array_sequenz,verbose=1, batch_size=200) y_pred = np.round(y_pred).astype(int) y_true = ueb_array print('confusion Matrix') konfusionsmatrix = confusion_matrix(y_true, y_pred) konfusionsmatrix accuracy = precision_score(y_true, y_pred) recall = recall_score(y_true, y_pred) print( 'accuracy = ', accuracy, '\n', 'recall = ', recall) ###Output accuracy = 0.859552931252636 recall = 0.9704761904761905 ###Markdown In the next step we will compare the test data, for this we have saved the last working state of the cycle for each fire detector in the test data. To be able to compare the results, we use the last sequence for each fire detector in the test data. 
###Code testdaten_vergleich = [test_df[test_df['id']==id][spalten_sequenz].values[ - window_size:] for id in test_df['id'].unique() if len(test_df[test_df['id']==id]) >= window_size] testdaten_vergleich = np.asarray(testdaten_vergleich).astype(np.float32) testdaten_vergleich.shape y2 = [len(test_df[test_df['id']==id]) >= window_size for id in test_df['id'].unique()] ueb_array_test_last = test_df.groupby('id')['label1'].nth(-1)[y2].values ueb_array_test_last = ueb_array_test_last.reshape(ueb_array_test_last.shape[0],1).astype(np.float32) ueb_array_test_last.shape print(testdaten_vergleich.shape) print(ueb_array_test_last.shape) werte_testdaten = model.evaluate(testdaten_vergleich, ueb_array_test_last, verbose=2) print('accuracy: {}'.format(werte_testdaten[1])) testdaten_y = model.predict(testdaten_vergleich) testdaten_y = np.round(testdaten_y).astype(int) wahrheit_y = ueb_array_test_last print('confusion Matrix') konfusionsmatrix2 = confusion_matrix(wahrheit_y, testdaten_y) konfusionsmatrix2 testdaten_genauigkeit = precision_score(wahrheit_y, testdaten_y) recall_test = recall_score(wahrheit_y, testdaten_y) f1_test = 2 * (testdaten_genauigkeit * recall_test) / (testdaten_genauigkeit + recall_test) print( 'Precision: ', testdaten_genauigkeit, '\n', 'recall: ', recall_test,'\n', 'F1-score:', f1_test ) results_df = pd.DataFrame([[werte_testdaten[1],testdaten_genauigkeit,recall_test,f1_test]], columns = ['Accuracy', 'Precision', 'Recall', 'F1-score'], index = ['LSTM']) results_df ###Output _____no_output_____
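###Markdown The following cell is an added illustration and is not part of the original notebook: it scores a single detector id from the test set, returning the predicted probability that it fails within the next w1 cycles based on its last `window_size` cycles. It assumes `model`, `test_df`, `spalten_sequenz` and `window_size` from the cells above are still in memory. ###Code
# Added illustration: failure probability for one detector id from the test set
def failure_probability(detector_id):
    history = test_df[test_df['id'] == detector_id][spalten_sequenz].values[-window_size:]
    if history.shape[0] < window_size:
        raise ValueError('fewer than window_size cycles recorded for this id')
    sequence = np.asarray(history, dtype=np.float32).reshape(1, window_size, len(spalten_sequenz))
    return float(model.predict(sequence)[0][0])

print(failure_probability(1))
###Output _____no_output_____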
src/model_selection/model_selection_all_combined_v2.ipynb
###Markdown Import libraries and read in trainval and test datasets ###Code #merge data from several states import numpy as np import pandas as pd import matplotlib.pyplot as plt trainval = pd.read_csv('./data/trainval_all_combined_v2.csv') trainval.head() test = pd.read_csv('./data/test_all_combined_v2.csv') test.head() ###Output _____no_output_____ ###Markdown Convert columns and check for any remaining missing values ###Code # Convert columns and check for any remaining missing values cat_col = ['gender','breeds.primary','breed_popularity','contact.address.state','state_region','cg_adpt_time'] trainval[cat_col]=trainval[cat_col].astype('category') test[cat_col]=test[cat_col].astype('category') # Check for any missing data print("Table size -", end=' ') print(trainval.shape) print("Checking for missing values..") # Number of missing values in each column of training data missing_val_count_by_column = (trainval.isnull().sum()) print(missing_val_count_by_column[missing_val_count_by_column > 0]) # Get names of columns with missing values #missing_cols = [col for col in extension_data.columns # if extension_train[col].isnull().any()] len(trainval) ###Output _____no_output_____ ###Markdown Divide into features and target ###Code # Divide into features and target#test with small number of features, take off name_pop for now X_features = ['age', 'gender', 'size', 'breeds.primary','breeds.mixed', 'breed_popularity', 'log_breed_pop', 'log_dog_pop', 'state_population', 'state_area', 'state_region','contact.address.state'] trainval_X = trainval[X_features] trainval_X.head() trainval_y = trainval['cg_adpt_time'] trainval_y.head() ###Output _____no_output_____ ###Markdown Split trainval into train and validation ###Code from sklearn.model_selection import train_test_split train_X, val_X, train_y, val_y = train_test_split(trainval_X, trainval_y, test_size=0.2) print('Training set size = {} and Validation set size = {}'.format(len(train_X),len(val_X))) train_X.head() from sklearn.preprocessing import OneHotEncoder oh = OneHotEncoder(handle_unknown='ignore', sparse=False) cat_col = ['gender','breeds.primary','breed_popularity','contact.address.state','state_region','cg_adpt_time'] train_X.dtypes import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) warnings.filterwarnings('ignore', category = FutureWarning) from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV import time from sklearn.metrics import precision_recall_fscore_support as score from sklearn.metrics import accuracy_score train_y.dtypes ###Output _____no_output_____ ###Markdown Make a pipeline of random forest ###Code from sklearn.pipeline import Pipeline def train_RF(n_est, depth): rf = RandomForestClassifier(n_estimators=n_est, max_depth=depth, n_jobs=-1) pipeline_temp = Pipeline(steps=[('preprocessor', oh),('model', rf)]) start = time.time() pipeline_temp.fit(train_X, train_y) end = time.time() fit_time = end - start #scores= -1 * cross_val_score(pipeline_temp, train_X, train_y,cv=5,scoring='neg_mean_absolute_error') start = time.time() y_pred = pipeline_temp.predict(val_X) end = time.time() predict_time = end - start #r2s= cross_val_score(pipeline_temp, train_X, train_y,cv=5,scoring='r2') #print('Est: {} / Depth: {} ---- MAE: {} R^2: {}'.format(n_est,depth,round(scores.mean(), 3),round(r2s.mean(), 3))) print('Est: {} / Depth: {} ---- Fit time: {} / Predict time: {} / Accuracy: {}'.format( n_est,depth,fit_time, predict_time, round((y_pred==val_y).sum()/len(y_pred), 3))) 
###Output _____no_output_____ ###Markdown Tune hyperparameters ###Code for n_est in [10, 50, 100, 200]: for depth in [10, 50, 100, None]: train_RF(n_est, depth) for n_est in [100, 150, 200, 250]: for depth in [30, 40, 50, 60]: train_RF(n_est, depth) rf = RandomForestClassifier(n_estimators=150, max_depth=30, n_jobs=-1) pipeline_temp2 = Pipeline(steps=[('preprocessor', oh),('model', rf)]) start = time.time() pipeline_temp2.fit(train_X, train_y) end = time.time() fit_time = end - start #scores= -1 * cross_val_score(pipeline_temp, train_X, train_y,cv=5,scoring='neg_mean_absolute_error') y_pred = pipeline_temp2.predict(val_X) round((y_pred==val_y).sum()/len(y_pred), 3) from sklearn.pipeline import Pipeline from sklearn.compose import ColumnTransformer rf = RandomForestClassifier(n_estimators=150, max_depth=30, n_jobs=-1) categorical_features = ['gender','breeds.primary','breed_popularity','contact.address.state','state_region'] categorical_transformer = Pipeline(steps=[ ('onehot', OneHotEncoder(handle_unknown='ignore', sparse=False))]) preprocessor = ColumnTransformer( transformers=[ ('cat', categorical_transformer, categorical_features)], remainder='passthrough') pipeline_temp = Pipeline(steps=[('preprocessor', preprocessor),('model', rf)]) start = time.time() pipeline_temp.fit(train_X, train_y) end = time.time() ###Output _____no_output_____ ###Markdown look for feature importances in one hot encoded ###Code pipeline_temp['model'].feature_importances_ y_pred = pipeline_temp.predict(val_X) round((y_pred==val_y).sum()/len(y_pred), 3) def train_RF_2(n_est, depth): categorical_features = ['gender', 'state_region'] categorical_transformer = Pipeline(steps=[ ('onehot', OneHotEncoder(handle_unknown='ignore', sparse=False))]) preprocessor = ColumnTransformer( transformers=[ ('cat', categorical_transformer, categorical_features)], remainder='passthrough') pipeline_temp = Pipeline(steps=[('preprocessor', preprocessor),('model', rf)]) start = time.time() pipeline_temp.fit(train_X, train_y) end = time.time() fit_time = end - start #scores= -1 * cross_val_score(pipeline_temp, train_X, train_y,cv=5,scoring='neg_mean_absolute_error') start = time.time() y_pred = pipeline_temp.predict(val_X) end = time.time() predict_time = end - start #r2s= cross_val_score(pipeline_temp, train_X, train_y,cv=5,scoring='r2') #print('Est: {} / Depth: {} ---- MAE: {} R^2: {}'.format(n_est,depth,round(scores.mean(), 3),round(r2s.mean(), 3))) print('Est: {} / Depth: {} ---- Fit time: {} / Predict time: {} / Accuracy: {}'.format( n_est,depth,fit_time, predict_time, round((y_pred==val_y).sum()/len(y_pred), 3))) for n_est in [100, 150, 200, 250]: for depth in [30, 40, 50, 60]: train_RF_2(n_est, depth) y_pred_train = pipeline_temp.predict(train_X) round((y_pred_train==train_y).sum()/len(y_pred_train), 3) from sklearn.metrics import confusion_matrix confusion_matrix(val_y, y_pred, labels=["< 1 week" , "1 - 2 weeks", "< 1 month", "< 3 months", "> 3 months"]) plt.bar(range(len(pipeline_temp['model'].feature_importances_)), pipeline_temp['model'].feature_importances_) plt.show() ###Output _____no_output_____ ###Markdown Gradient Boosting ###Code from sklearn.ensemble import GradientBoostingClassifier oh2 = OneHotEncoder(handle_unknown='ignore', sparse=False) gb = GradientBoostingClassifier(n_estimators=150, max_depth=30, learning_rate=0.3) pipeline_temp2 = Pipeline(steps=[('preprocessor', oh2),('model', gb)]) pipeline_temp2.fit(train_X, train_y) y_pred2 = pipeline_temp2.predict(val_X) round((y_pred2==val_y).sum()/len(y_pred), 3) import 
pickle filename = './models/rfpipe_cg.pkl' pickle.dump(pipeline_temp, open(filename, 'wb')) import pickle filename = './models/gbpipe_cg.pkl' pickle.dump(pipeline_temp2, open(filename, 'wb')) ###Output _____no_output_____
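###Markdown The final cell below is an added illustration and is not part of the original notebook: it reloads the pickled random-forest pipeline and scores it on the held-out test set that was read in at the top. It assumes the file path used above and that `test` contains the `X_features` columns plus the 'cg_adpt_time' label. ###Code
# Added illustration: reload the saved pipeline and evaluate it on the held-out test set
loaded_pipeline = pickle.load(open('./models/rfpipe_cg.pkl', 'rb'))
test_X = test[X_features]
test_y = test['cg_adpt_time']
test_pred = loaded_pipeline.predict(test_X)
print('Test accuracy:', round((test_pred == test_y).sum() / len(test_y), 3))
###Output _____no_output_____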