path: string (length 7 to 265)
concatenated_notebook: string (length 46 to 17M)
Traffic-Sign-Classifier-Writeup-Calvin-Giddens.ipynb
###Markdown Data Set Summary and Exploration 1. Provide a basic summary of the data set. In the code, the analysis should be done using python, numpy and/or pandas methods rather than hardcoding results manually.I used basic numpy functionality (img.shape) to determine the parameters of the dataset. I used pandas to read the .csv of sign names into an ordered list.* The size of training set is 34799.* The size of the validation set is 4410.* The size of test set is 12630.* The shape of a traffic sign image is [32, 32].* The number of unique classes/labels in the data set is 43. 2. Include an exploratory visualization of the dataset.Below is an exploratory visualization of the data set. It is a bar chart showing how the split between training, validation, and test data. Additionally, I have provided a random sampling of the training images. ###Code import matplotlib.pyplot as plt %matplotlib inline import numpy as np import pickle import pandas as pd # Load pickled data training_file = '../data/signs/train.p' validation_file= '../data/signs/valid.p' testing_file = '../data/signs/test.p' with open(training_file, mode='rb') as f: train = pickle.load(f) with open(validation_file, mode='rb') as f: valid = pickle.load(f) with open(testing_file, mode='rb') as f: test = pickle.load(f) X_train, y_train = train['features'], train['labels'] X_valid, y_valid = valid['features'], valid['labels'] X_test, y_test = test['features'], test['labels'] print("Data loaded.") # Number of training examples n_train = X_train.shape[0] # Number of validation examples n_valid = X_valid.shape[0] # Number of testing examples. n_test = X_test.shape[0] # What's the shape of an traffic sign image? image_shape = [X_train.shape[2], X_train.shape[1]] # How many unique classes/labels there are in the dataset. signnames = pd.read_csv('signnames.csv') n_classes = signnames.shape[0] # Pie chart, where the slices will be ordered and plotted counter-clockwise: labels = 'Training', 'Validation', 'Test' sizes = [34799, 4410, 12630] explode = (0, 0.1, 0) # only "explode" the 2nd slice (i.e. 'Hogs') fig1, ax1 = plt.subplots() ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90) ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. plt.show() ### Data exploration visualization code import matplotlib as mp import matplotlib.pyplot as plt %matplotlib inline # Creates just a figure and only one subplot num_rows = 4 num_cols = 2 subplot_idx = 0 fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, figsize=(20,40)) for row in np.arange(num_rows): for col in np.arange(num_cols): axes[row][col].imshow(X_valid[subplot_idx]) axes[row][col].set_title(y_valid[subplot_idx]) subplot_idx += 128 # Visualizations will be shown in the notebook. ###Output Data loaded.
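###Markdown The pie-chart cell above hardcodes `sizes = [34799, 4410, 12630]`. A small sketch of deriving the same split programmatically, in keeping with the question's requirement to use numpy/pandas rather than hardcoded results; it assumes the `X_train`, `X_valid` and `X_test` arrays already loaded in the cells above. ###Code
# Hedged sketch: compute the split sizes from the loaded arrays instead of hardcoding them
sizes = [X_train.shape[0], X_valid.shape[0], X_test.shape[0]]
labels = ['Training', 'Validation', 'Test']
for label, size in zip(labels, sizes):
    print(f"{label}: {size} examples ({size / sum(sizes):.1%})")
###Output _____no_output_____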
stat_mean_conf_int.ipynb
###Markdown **Корректность проверена на Python 3.7:**+ pandas 0.23.0+ numpy 1.14.5+ sklearn 0.19.1 Доверительные интервалы для оценки среднего ###Code from sklearn import model_selection, datasets, linear_model, metrics import numpy as np import sklearn print(np.__version__) print(sklearn.__version__) %pylab inline ###Output Populating the interactive namespace from numpy and matplotlib ###Markdown Генерация данных ###Code blobs = datasets.make_blobs(300, centers = 2, cluster_std = 6, random_state=1) pylab.figure(figsize(8, 8)) pylab.scatter(list(map(lambda x: x[0], blobs[0])), list(map(lambda x: x[1], blobs[0])), c = blobs[1], cmap = 'autumn', s=100) ###Output _____no_output_____ ###Markdown Сравнение линейных моделей Точечная оценка ###Code train_data, test_data, train_labels, test_labels = model_selection.train_test_split(blobs[0], blobs[1], test_size = 15, random_state = 1) ridge_model = linear_model.RidgeClassifier() ridge_model.fit(train_data, train_labels) metrics.roc_auc_score(test_labels, ridge_model.predict(test_data)) sgd_model = linear_model.SGDClassifier(random_state = 0, max_iter = 1000) sgd_model.fit(train_data, train_labels) metrics.roc_auc_score(test_labels, sgd_model.predict(test_data)) ###Output _____no_output_____ ###Markdown Оценка среднего ###Code sgd_auc_scores = model_selection.cross_val_score(linear_model.SGDClassifier(max_iter = 1000), blobs[0], blobs[1], scoring = 'roc_auc', cv = 20) ridge_auc_scores = model_selection.cross_val_score(linear_model.RidgeClassifier(), blobs[0], blobs[1], scoring = 'roc_auc', cv = 20) ###Output _____no_output_____ ###Markdown Точечная оценка среднего ###Code print("sgd model auc: mean %.3f, std %.3f" % (sgd_auc_scores.mean(), sgd_auc_scores.std(ddof=1))) print("ridge model auc: mean %.3f, std %.3f" % (ridge_auc_scores.mean(), ridge_auc_scores.std(ddof=1))) ###Output sgd model auc: mean 0.877, std 0.215 ridge model auc: mean 0.937, std 0.071 ###Markdown Интервальная оценка среднего ###Code from statsmodels.stats.weightstats import _zconfint_generic, _tconfint_generic sgd_mean = sgd_auc_scores.mean() ridge_mean = ridge_auc_scores.mean() ###Output _____no_output_____ ###Markdown z-интервал Допустим, нам откуда-то известно, что дисперсия auc_scores $\sigma^2=0.25$. 
Построим доверительные интервалы для средних вида $$\bar{X}_n \pm z_{1-\frac{\alpha}{2}} \frac{\sigma}{\sqrt{n}}$$ ###Code print("sgd model mean auc 95%% confidence interval", _zconfint_generic(sgd_mean, sqrt(0.25/len(sgd_auc_scores)), 0.05, 'two-sided')) print("ridge model mean auc 95%% confidence interval", _zconfint_generic(ridge_mean, sqrt(0.25/len(sgd_auc_scores)), 0.05, 'two-sided')) ###Output sgd model mean auc 95%% confidence interval (0.657655079141569, 1.0959163494298596) ridge model mean auc 95%% confidence interval (0.7174765077129974, 1.155737778001288) ###Markdown t-интервал Вместо гипотетической теоретической дисперсии $\sigma^2$, которую мы на самом деле в данном случае не знаем, используем выборочные дисперсии, и построим доверительные интервалы вида $$\bar{X}_n \pm t_{1-\frac{\alpha}{2}} \frac{S}{\sqrt{n}}$$ ###Code type(sgd_auc_scores) sgd_mean_std = sgd_auc_scores.std(ddof=1)/sqrt(len(sgd_auc_scores)) ridge_mean_std = ridge_auc_scores.std(ddof=1)/sqrt(len(ridge_auc_scores)) print("sgd model mean auc 95%% confidence interval", _tconfint_generic(sgd_mean, sgd_mean_std, len(sgd_auc_scores) - 1, 0.05, 'two-sided')) print("ridge model mean auc 95%% confidence interval", _tconfint_generic(ridge_mean, ridge_mean_std, len(sgd_auc_scores) - 1, 0.05, 'two-sided')) import pandas as pd with open ("water.txt") as file: df = pd.read_csv(file, '\t') df.head() data_n = df[df['location'] == "North"] data_s = df[df['location'] == "South"] data_n.head() data_s.head() data_n.std() data_s['mortality'].std() data_s['mortality'].mean() print("all", _tconfint_generic(df['mortality'].mean(), df['mortality'].std()/((len(df))**0.5), len(df) - 1, 0.05, 'two-sided')) print("south", _tconfint_generic(data_s['mortality'].mean(), data_s['mortality'].std()/((len(data_s))**0.5), len(data_s)- 1, 0.05, 'two-sided')) print("north", _tconfint_generic(data_n['mortality'].mean(), data_n['mortality'].std()/((len(data_n))**0.5), len(data_n)- 1, 0.05, 'two-sided')) ###Output north (1586.5605251961385, 1680.6394748038613)
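###Markdown The intervals above are built with the private statsmodels helpers `_zconfint_generic` and `_tconfint_generic`. As a cross-check, the same t-interval $\bar{X}_n \pm t_{1-\frac{\alpha}{2}} \frac{S}{\sqrt{n}}$ can be computed directly with scipy; the sketch below uses a small placeholder array of scores rather than the notebook's cross-validation results. ###Code
import numpy as np
from scipy import stats

scores = np.array([0.85, 0.91, 0.78, 0.88, 0.95, 0.90, 0.82, 0.87])  # placeholder AUC scores
n = len(scores)
mean = scores.mean()
sem = scores.std(ddof=1) / np.sqrt(n)          # S / sqrt(n)
t_crit = stats.t.ppf(1 - 0.05 / 2, df=n - 1)   # t_{1 - alpha/2} quantile
print((mean - t_crit * sem, mean + t_crit * sem))
# equivalently: stats.t.interval(0.95, df=n - 1, loc=mean, scale=sem)
###Output _____no_output_____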
SGDClassifier.ipynb
###Markdown SGDClassifier ###Code from __future__ import division from IPython.display import display from matplotlib import pyplot as plt %matplotlib inline import numpy as np import pandas as pd import random, sys, os, re from sklearn.linear_model import SGDClassifier from sklearn.cross_validation import StratifiedKFold from sklearn.grid_search import RandomizedSearchCV, GridSearchCV from sklearn.cross_validation import cross_val_predict, permutation_test_score SEED = 97 scale = False minmax = False norm = False nointercept = False N_CLASSES = 2 submission_filename = "../submissions/submission_SGDClassifier.csv" ###Output _____no_output_____ ###Markdown Load the training data ###Code from load_blood_data import load_blood_data y_train, X_train = load_blood_data(train=True, SEED = SEED, scale = scale, minmax = minmax, norm = norm, nointercept = nointercept) ###Output _____no_output_____ ###Markdown Fit the model ###Code StatifiedCV = StratifiedKFold(y = y_train, n_folds = 10, shuffle = True, random_state = SEED) %%time random.seed(SEED) # NOTE: For best results using the default learning rate schedule, # the data should have zero mean and unit variance. clf = SGDClassifier(loss = 'hinge', # 'hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron', # or a regression loss: 'squared_loss', 'huber', 'epsilon_insensitive', # 'squared_epsilon_insensitive' penalty = 'l2', # 'none', 'l2', 'l1', or 'elasticnet' alpha = 0.0001, # multiplies a single regularization term l1_ratio = 0.15, # Elastic Net mixing parameter n_iter = 5, # epochs fit_intercept = False, # If False, the data is assumed to be centered. epsilon = 0.1, # for 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive' shuffle = True, verbose = 0, n_jobs = -1, random_state = SEED, learning_rate = 'optimal', # The learning rate schedule eta0 = 0.0, # The initial learning rate for 'constant' or 'invscaling' power_t = 0.5, # The exponent for inverse scaling learning rate class_weight = None, warm_start = False) #average = False) # param_grid = dict(loss = ['hinge','log','modified_huber','squared_hinge','perceptron'], # penalty = ['none','elasticnet'], # l1_ratio = [0.0, 0.001, 0.01, 0.1, 0.5, 1.0], # n_iter = [5, 10, 20, 30, 40]) # grid_clf = GridSearchCV(estimator = clf, # param_grid = param_grid, # n_jobs = -1, # cv = StatifiedCV, # verbose = 0 # ) # grid_clf.fit(X = X_train, y = y_train) # print("clf_params = {}".format(grid_clf.best_params_)) # print("score: {}".format(round(grid_clf.best_score_, 4))) # print # clf = grid_clf.best_estimator_ clf_params = {'penalty': 'elasticnet', 'l1_ratio': 0.01, 'n_iter': 20, 'loss': 'squared_hinge'} clf.set_params(**clf_params) clf.fit(X_train, y_train) # from sklearn_utilities import GridSearchHeatmap # GridSearchHeatmap(grid_clf, y_key='learning_rate', x_key='n_estimators') # from sklearn_utilities import plot_validation_curves # plot_validation_curves(grid_clf, param_grid, X_train, y_train, ylim = (0.0, 1.05)) %%time try: from sklearn_utilities import plot_learning_curve except: import imp, os util = imp.load_source('sklearn_utilities', os.path.expanduser('~/Dropbox/Python/sklearn_utilities.py')) from sklearn_utilities import plot_learning_curve plot_learning_curve(estimator = clf, title = None, X = X_train, y = y_train, ylim = (0.0, 1.10), cv = StratifiedKFold(y = y_train, n_folds = 10, shuffle = True, random_state = SEED), train_sizes = np.linspace(.1, 1.0, 5), n_jobs = -1) plt.show() ###Output /home/george/.local/lib/python2.7/site-packages/matplotlib/collections.py:590: 
FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison if self._edgecolors == str('face'): ###Markdown Training set predictions ###Code %%time train_preds = cross_val_predict(estimator = clf, X = X_train, y = y_train, cv = StatifiedCV, n_jobs = -1, verbose = 0, fit_params = None, pre_dispatch = '2*n_jobs') y_true, y_pred = y_train, train_preds from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_true, y_pred, labels=None) print cm try: from sklearn_utilities import plot_confusion_matrix except: import imp, os util = imp.load_source('sklearn_utilities', os.path.expanduser('~/Dropbox/Python/sklearn_utilities.py')) from sklearn_utilities import plot_confusion_matrix plot_confusion_matrix(cm, ['Did not Donate','Donated']) accuracy = round(np.trace(cm)/float(np.sum(cm)),4) misclass = 1 - accuracy print("Accuracy {}, mis-class rate {}".format(accuracy,misclass)) from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score from sklearn.metrics import log_loss from sklearn.metrics import f1_score fpr, tpr, thresholds = roc_curve(y_true, y_pred, pos_label=None) plt.figure(figsize=(10,6)) plt.plot([0, 1], [0, 1], 'k--') plt.plot(fpr, tpr) AUC = roc_auc_score(y_true, y_pred, average='macro') plt.text(x=0.6,y=0.4,s="AUC {:.4f}"\ .format(AUC), fontsize=16) plt.text(x=0.6,y=0.3,s="accuracy {:.2f}%"\ .format(accuracy*100), fontsize=16) logloss = log_loss(y_true, y_pred) plt.text(x=0.6,y=0.2,s="LogLoss {:.4f}"\ .format(logloss), fontsize=16) f1 = f1_score(y_true, y_pred) plt.text(x=0.6,y=0.1,s="f1 {:.4f}"\ .format(f1), fontsize=16) plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.title('ROC curve') plt.show() %%time score, permutation_scores, pvalue = permutation_test_score(estimator = clf, X = X_train.values.astype(np.float32), y = y_train, cv = StatifiedCV, labels = None, random_state = SEED, verbose = 0, n_permutations = 100, scoring = None, n_jobs = -1) plt.figure(figsize=(20,8)) plt.hist(permutation_scores, 20, label='Permutation scores') ylim = plt.ylim() plt.plot(2 * [score], ylim, '--g', linewidth=3, label='Classification Score (pvalue {:.4f})'.format(pvalue)) plt.plot(2 * [1. 
/ N_CLASSES], ylim, 'r', linewidth=7, label='Luck') plt.ylim(ylim) plt.legend(loc='center',fontsize=16) plt.xlabel('Score') plt.show() # find mean and stdev of the scores from scipy.stats import norm mu, std = norm.fit(permutation_scores) # format for scores.csv file import re algo = re.search(r"submission_(.*?)\.csv", submission_filename).group(1) print("{: <26} , , {:.4f} , {:.4f} , {:.4f} , {:.4f} , {:.4f} , {:.4f}"\ .format(algo,accuracy,logloss,AUC,f1,mu,std)) ###Output SGDClassifier , , 0.7274 , 9.4143 , 0.5528 , 0.2765 , 0.6339 , 0.0690 ###Markdown -------------------------------------------------------------------------------------------- Test Set Predictions Re-fit with the full training set ###Code clf.set_params(**clf_params) clf.fit(X_train, y_train) ###Output _____no_output_____ ###Markdown Read the test data ###Code from load_blood_data import load_blood_data X_test, IDs = load_blood_data(train=False, SEED = SEED, scale = scale, minmax = minmax, norm = norm, nointercept = nointercept) ###Output _____no_output_____ ###Markdown Predict the test set with the fitted model ###Code y_pred = clf.predict(X_test) print(y_pred[:10]) try: y_pred_probs = clf.predict_proba(X_test) print(y_pred_probs[:10]) donate_probs = [prob[1] for prob in y_pred_probs] except Exception,e: print(e) donate_probs = [0.65 if x>0 else 1-0.65 for x in y_pred] print(donate_probs[:10]) ###Output [1 0 0 0 1 1 1 0 0 0] probability estimates are not available for loss='squared_hinge' [0.65, 0.35, 0.35, 0.35, 0.65, 0.65, 0.65, 0.35, 0.35, 0.35] ###Markdown Create the submission file ###Code assert len(IDs)==len(donate_probs) f = open(submission_filename, "w") f.write(",Made Donation in March 2007\n") for ID, prob in zip(IDs, donate_probs): f.write("{},{}\n".format(ID,prob)) f.close() ###Output _____no_output_____
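###Markdown The note in the model cell above says that SGD works best when the data has zero mean and unit variance, yet this run sets `scale = False`, so the features are not standardised. A minimal sketch of that preprocessing step using the modern scikit-learn module layout (the notebook itself uses the older `sklearn.cross_validation`/`grid_search` imports), on synthetic stand-in data rather than the blood-donation set. ###Code
from sklearn.datasets import make_classification
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_val_score

# synthetic stand-in data; the real notebook loads its features via load_blood_data
X, y = make_classification(n_samples=500, n_features=10, random_state=97)

# scaling happens inside the pipeline, so each CV fold is standardised on its own training part
clf = make_pipeline(StandardScaler(), SGDClassifier(loss='hinge', random_state=97))
print(cross_val_score(clf, X, y, cv=5).mean())
###Output _____no_output_____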
aceleradev_semana2.ipynb
###Markdown AceleraDev Codenation - Semana 2*Daniel Santos Pereira | Data & B.I Analyst | Machine Learning in Training | MCP* **Manipulando Dados** ###Code #Importação dos pacotes import pandas as pd import numpy as np #Acessando o help dos pacotes pd? ###Output _____no_output_____ ###Markdown **Dicionários** ###Code #Criando um dicionário com os dados dados = {'canal_vendas' : ['Facebook', 'twitter', 'intagram', 'linkedin', 'facebook'], 'acessos': [100, 200, 300, 400, 500], 'site': ['site1', 'site1', 'site2', 'site2', 'site3'], 'vendas': [1000.52, 1052.34, 2009, 5000, 300]} #Para printar o dicionário dados #verificando o tipo de dicionario type(dados) #Acessando a chaves do meu dicionário dados.keys() #Acessando uma chave especifica * Lembrar que ele é case-sensitive dados['canal_vendas'] #Acessando uma posição/valor especifíco de um dicionário dados['canal_vendas'][2] #Acessando uma posição especifica de um dicionário - slice print('Retorna todos os elementos do array: '+ str(dados['canal_vendas'][:])) #Acessando uma posição especifica de um dicionário - slice print('Retorna todos os elementos até a posição definida: '+ str(dados['canal_vendas'][:4])) #Acessando uma posição especifica de um dicionário - slice #dados['canal_vendas'][:4] # Retorna todos os elementos até a posição definida print('Retorna valor de acordo com o índice no array: '+ str(dados['canal_vendas'][2:4])) ###Output Retorna valor de acordo com o índice no array: ['intagram', 'linkedin'] ###Markdown **Lista** ###Code #Criando uma lista lista = [200, 200, 300, 800, 300] #Printando a lista lista #Vendo valores específicos - O array é iniciado no 0 lista[3] #Fatia da lista (slice) -- A partir do índice 1, retorne até o quarto valor lista[1:4] #Adicionando a lista ao dicionário dados['lista'] = lista dados ###Output _____no_output_____ ###Markdown **DataFrames** ###Code #Criar um data frame a partir de um dict dataframe = pd.DataFrame(dados) #Acessando o dataframe dataframe #Printando os primeiros casos do dataframe dataframe.head(2) #Verificando o formado do dataframe (total de linhas e colunas) dataframe.shape #Verificando o índice do dataframe dataframe.index #Verificando os tipos dos dados do dataframe dataframe.dtypes #Verificando se existem valores faltantes dataframe.isna() #Printando os nomes da colunas dataframe.columns #Acessando uma coluna especifica dataframe['canal_vendas'] #Criação de uma nova coluna dataframe['nova_coluna'] = [1, 2, 3, 4, 5] dataframe dataframe.columns #Removendo colunas -- sem a cláusula inplace = True, # -- as colunas não são removidas, somente não aparecem mais dataframe.drop(columns = ['nova_coluna'], inplace=True) #Mostrondo as colunas dataframe.columns #Acessando valores especificos dataframe['acessos'][1] #Acessando fatia de coluna especifica dataframe['canal_vendas'][:2] dataframe #Fatiando os dados usando o iloc (linhas / colunas) dataframe.iloc[3:,4:] #Fatiar os dados usando o loc (indice) dataframe.loc[:3] #Selecionando colunas especificas dataframe[['canal_vendas','acessos']] # Passando os valores atraves de listas filtro = ['canal_vendas', 'acessos'] dataframe[filtro] # Usando o metódo info() dataframe.info() # Completando os valores faltantes usando fillna # Pivotando os dados (coluna) aux = dataframe.pivot(index= 'canal_vendas', columns='site', values='acessos').fillna(0) dataframe.pivot(index= 'canal_vendas', columns='site', values='acessos').fillna(0) # Mudando as colunas usando o comando melt dataframe.melt(id_vars='site', value_vars=['canal_vendas']) # Resetando o índice do 
dataframe print(aux.columns) aux = aux.reset_index() print(aux.columns) # Exemplo do comando melt aux.melt(id_vars='canal_vendas', value_vars=['site1','site2','site3']) ###Output _____no_output_____
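###Markdown The pivot/melt cells above reshape the access data between wide and long form. A compact, self-contained round-trip example of the same two operations, using hypothetical rows that mirror the notebook's `canal_vendas`/`site`/`acessos` columns. ###Code
import pandas as pd

df = pd.DataFrame({'canal_vendas': ['facebook', 'facebook', 'twitter'],
                   'site':         ['site1',    'site2',    'site1'],
                   'acessos':      [100,        300,        200]})

# wide form: one column per site, then back to long form with melt
wide = df.pivot(index='canal_vendas', columns='site', values='acessos').fillna(0).reset_index()
long = wide.melt(id_vars='canal_vendas', value_vars=['site1', 'site2'], value_name='acessos')

print(wide)
print(long)
###Output _____no_output_____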
notebooks/cartpole-linear-model-nstep-bootstrap.ipynb
###Markdown Table Of Contents- [N-Step Q-learning](N-Step-Q-learning)- [N-Step SARSA](N-Step-SARSA)- [N-Step Expected-SARSA](N-Step-Expected-SARSA) N-Step Q-learning ###Code import gym from skgym.value_functions import GenericQ from skgym.policies import ValuePolicy from skgym.algorithms import NStepQLearning from sklearn.linear_model import SGDRegressor from sklearn.preprocessing import FunctionTransformer # the Gym environment env = gym.make('CartPole-v0') # define sklearn model for approximating Q-function regressor = SGDRegressor(eta0=0.5, learning_rate='constant') transformer = FunctionTransformer( lambda x: np.hstack((x, x ** 2)), validate=False) # define Q, its induced policy and update algorithm Q = GenericQ(env, regressor, transformer) policy = ValuePolicy(Q) algo = NStepQLearning(Q, n=20, gamma=0.8) # number of iterations num_episodes = 200 max_episode_steps = env._max_episode_steps # used for early stopping num_consecutive_successes = 0 for episode in range(1, num_episodes + 1): last_episode = episode == num_episodes or num_consecutive_successes == 9 # init s = env.reset() # amount of random exploration if last_episode: epsilon = 0 env.render() elif episode < 10: epsilon = 0.5 else: epsilon = 0.01 for t in range(1, max_episode_steps + 1): a = policy.epsilon_greedy(s, epsilon) s_next, r, done, info = env.step(a) # update or render if not last_episode: algo.update(s, a, r, s_next, done) else: env.render() # keep track of consecutive successes if done: if t == max_episode_steps: num_consecutive_successes += 1 print(f"num_consecutive_successes = {num_consecutive_successes}") else: num_consecutive_successes = 0 print(f"failed after {t} steps") break # prepare for next step s = s_next if last_episode: break env.close() ###Output failed after 23 steps failed after 14 steps failed after 15 steps failed after 15 steps failed after 14 steps failed after 10 steps failed after 25 steps failed after 10 steps failed after 9 steps ###Markdown N-Step SARSA ###Code import gym from skgym.value_functions import GenericQ from skgym.policies import ValuePolicy from skgym.algorithms import NStepSarsa from sklearn.linear_model import SGDRegressor from sklearn.preprocessing import FunctionTransformer # the Gym environment env = gym.make('CartPole-v0') # define sklearn model for approximating Q-function regressor = SGDRegressor(eta0=0.5, learning_rate='constant') transformer = FunctionTransformer( lambda x: np.hstack((x, x ** 2)), validate=False) # define Q, its induced policy and update algorithm Q = GenericQ(env, regressor, transformer) policy = ValuePolicy(Q) algo = NStepSarsa(Q, n=20, gamma=0.8) # number of iterations num_episodes = 200 max_episode_steps = env._max_episode_steps # used for early stopping num_consecutive_successes = 0 for episode in range(1, num_episodes + 1): last_episode = episode == num_episodes or num_consecutive_successes == 9 # init s = env.reset() a = policy.random() # amount of random exploration if last_episode: epsilon = 0 env.render() elif episode < 10: epsilon = 0.5 else: epsilon = 0.01 for t in range(1, max_episode_steps + 1): s_next, r, done, info = env.step(a) a_next = policy.epsilon_greedy(s_next, epsilon) # update or render if not last_episode: algo.update(s, a, r, s_next, a_next, done) else: env.render() # keep track of consecutive successes if done: if t == max_episode_steps: num_consecutive_successes += 1 print(f"num_consecutive_successes = {num_consecutive_successes}") else: num_consecutive_successes = 0 print(f"failed after {t} steps") break # prepare for next 
step s, a = s_next, a_next if last_episode: break env.close() ###Output /home/kris/.local/lib/python3.6/site-packages/gym/envs/registration.py:14: PkgResourcesDeprecationWarning: Parameters to load are deprecated. Call .resolve and .require separately. result = entry_point.load(False) ###Markdown N-Step Expected-SARSA ###Code import gym from skgym.value_functions import GenericQ from skgym.policies import ValuePolicy from skgym.algorithms import NStepExpectedSarsa from sklearn.linear_model import SGDRegressor from sklearn.preprocessing import FunctionTransformer # the Gym environment env = gym.make('CartPole-v0') # define sklearn model for approximating Q-function regressor = SGDRegressor(eta0=0.5, learning_rate='constant') transformer = FunctionTransformer( lambda x: np.hstack((x, x ** 2)), validate=False) # define Q, its induced policy and update algorithm Q = GenericQ(env, regressor, transformer) policy = ValuePolicy(Q) algo = NStepExpectedSarsa(Q, policy, n=20, gamma=0.8) # number of iterations num_episodes = 200 max_episode_steps = env._max_episode_steps # used for early stopping num_consecutive_successes = 0 for episode in range(1, num_episodes + 1): last_episode = episode == num_episodes or num_consecutive_successes == 9 # init s = env.reset() # amount of random exploration if last_episode: epsilon = 0 env.render() elif episode < 10: epsilon = 0.5 else: epsilon = 0.01 for t in range(1, max_episode_steps + 1): a = policy.epsilon_greedy(s, epsilon) s_next, r, done, info = env.step(a) # update or render if not last_episode: algo.update(s, a, r, s_next, done) else: env.render() # keep track of consecutive successes if done: if t == max_episode_steps: num_consecutive_successes += 1 print(f"num_consecutive_successes = {num_consecutive_successes}") else: num_consecutive_successes = 0 print(f"failed after {t} steps") break # prepare for next step s = s_next if last_episode: break env.close() ###Output failed after 12 steps failed after 9 steps failed after 22 steps failed after 10 steps failed after 37 steps failed after 16 steps failed after 22 steps failed after 15 steps failed after 22 steps
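###Markdown The three agents above all rely on an n-step bootstrapped target, but the notebook never spells it out. A small sketch of the quantity that n-step methods estimate, independent of the `skgym` internals (which may implement it differently): the discounted sum of the next n rewards plus a discounted bootstrap value from the value function. ###Code
def n_step_target(rewards, bootstrap_value, gamma=0.8):
    """G = r_0 + gamma*r_1 + ... + gamma**(n-1)*r_{n-1} + gamma**n * bootstrap_value."""
    g = bootstrap_value
    for r in reversed(rewards):
        g = r + gamma * g
    return g

# e.g. three rewards of 1.0 followed by a bootstrapped estimate of 10.0
print(n_step_target([1.0, 1.0, 1.0], bootstrap_value=10.0, gamma=0.8))
###Output _____no_output_____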
docs/_static/demos/datasets/InteractionAnalysisAdvanced.ipynb
###Markdown Configure Spark ###Code conf = SparkConf().setMaster("local[*]") \ .setAppName("advancedZincInteractionDemo") sc = SparkContext(conf = conf) ###Output _____no_output_____ ###Markdown Read PDB in MMTF format ###Code path = "../../resources/mmtf_full_sample/" pdb = mmtfReader.read_sequence_file(path, sc) ###Output _____no_output_____ ###Markdown Use only representative structures ###Code seqId = 40 resolution = 2.0 pdb = pdb.filter(Pisces(seqId, resolution)) ###Output _____no_output_____ ###Markdown Extract proteins with Zn interactions ###Code finder = groupInteractionExtractor("ZN",3) interactions = finder.get_dataset(pdb).cache() ###Output _____no_output_____ ###Markdown List the top 10 residue types that interact with Zn ###Code interactions.printSchema() interactions.show(20) n = interactions.count() print(f"Number of interactions: {n}") ###Output root |-- structureId: string (nullable = false) |-- residue1: string (nullable = false) |-- atom1: string (nullable = false) |-- element1: string (nullable = false) |-- index1: integer (nullable = false) |-- residue2: string (nullable = false) |-- atom2: string (nullable = false) |-- element2: string (nullable = false) |-- index2: integer (nullable = false) |-- distance: float (nullable = false) +-----------+--------+-----+--------+------+--------+-----+--------+------+---------+ |structureId|residue1|atom1|element1|index1|residue2|atom2|element2|index2| distance| +-----------+--------+-----+--------+------+--------+-----+--------+------+---------+ | 1FN9| ZN| ZN| Zn| 730| CYS| SG| S| 50|2.3709755| | 1FN9| ZN| ZN| Zn| 730| CYS| SG| S| 53|2.3940797| | 1FN9| ZN| ZN| Zn| 730| HIS| NE2| N| 70|2.2196307| | 1FN9| ZN| ZN| Zn| 730| CYS| SG| S| 72|2.3465357| | 1FN9| ZN| ZN| Zn| 731| CYS| SG| S| 415|2.3747551| | 1FN9| ZN| ZN| Zn| 731| CYS| SG| S| 418|2.3680198| | 1FN9| ZN| ZN| Zn| 731| HIS| NE2| N| 435|2.1647959| | 1FN9| ZN| ZN| Zn| 731| CYS| SG| S| 437|2.3763454| | 1E4M| ZN| ZN| Zn| 519| HIS| CE1| C| 53|2.9807622| | 1E4M| ZN| ZN| Zn| 519| HIS| NE2| N| 53| 2.040789| | 1E4M| ZN| ZN| Zn| 519| ASP| CG| C| 67| 2.754825| | 1E4M| ZN| ZN| Zn| 519| ASP| OD1| O| 67|2.8967845| | 1E4M| ZN| ZN| Zn| 519| ASP| OD2| O| 67|1.9672809| | 1BF6| ZN| ZN| Zn| 582| HIS| NE2| N| 10|2.2776458| | 1BF6| ZN| ZN| Zn| 582| HIS| NE2| N| 12|2.1644206| | 1BF6| ZN| ZN| Zn| 582| GLU| OE2| O| 123|2.3778422| | 1BF6| ZN| ZN| Zn| 582| ASP| OD1| O| 241| 2.41581| | 1BF6| ZN| ZN| Zn| 583| GLU| CD| C| 123|2.7811828| | 1BF6| ZN| ZN| Zn| 583| GLU| OE1| O| 123|2.1997967| | 1BF6| ZN| ZN| Zn| 583| HIS| ND1| N| 156|2.2733805| +-----------+--------+-----+--------+------+--------+-----+--------+------+---------+ only showing top 20 rows Number of interactions: 238 ###Markdown Show the top 10 interacting group/atom types Exclude Carbon Interactions ###Code topGroupsAndAtoms = interactions.filter("element2 != 'C'") \ .groupBy("residue2","atom2") \ .count() ###Output _____no_output_____ ###Markdown Add column with frequency of occurence Filter out occurrences < 1% Sort descending ###Code topGroupsAndAtoms.withColumn("frequency", topGroupsAndAtoms["count"] / n) \ .filter("frequency > 0.01") \ .sort("frequency", ascending = False) \ .show(20) ###Output +--------+-----+-----+--------------------+ |residue2|atom2|count| frequency| +--------+-----+-----+--------------------+ | CYS| SG| 43| 0.18067226890756302| | HOH| O| 37| 0.15546218487394958| | HIS| NE2| 30| 0.12605042016806722| | HIS| ND1| 24| 0.10084033613445378| | ASP| OD2| 11|0.046218487394957986| | GLU| OE1| 11|0.046218487394957986| | GLU| 
OE2| 11|0.046218487394957986| | ASP| OD1| 9|0.037815126050420166| | ACT| O| 4| 0.01680672268907563| | ACT| OXT| 4| 0.01680672268907563| +--------+-----+-----+--------------------+ ###Markdown Print the top interacting elements Exclude carbon interactions and group by element 2 ###Code topElements = interactions.filter("element2 != 'C'") \ .groupBy("element2") \ .count() ###Output _____no_output_____ ###Markdown Add column with frequencey of occurence Filter out occurence < 1% sort decending ###Code topElements.withColumn("frequency", topElements["count"] / n) \ .filter("frequency > 0.01") \ .sort("frequency", ascending = False) \ .show(10) interactions.groupBy("element2") \ .avg("distance") \ .sort("avg(distance)") \ .show(10) ###Output +--------+------------------+ |element2| avg(distance)| +--------+------------------+ | N| 2.247671846832548| | Cl|2.3399999141693115| | O| 2.340171109189044| | S|2.3423283100128174| | C| 2.727002328092402| | H|2.8938498497009277| +--------+------------------+ ###Markdown Aggregate multiple statistics NOTE: from pyspark.sql.functions import * required ###Code interactions.groupBy("element2") \ .agg(count("distance"), avg("distance"), min("distance"), max("distance"), kurtosis("distance")) \ .show(10) ###Output +--------+---------------+------------------+-------------+-------------+-------------------+ |element2|count(distance)| avg(distance)|min(distance)|max(distance)| kurtosis(distance)| +--------+---------------+------------------+-------------+-------------+-------------------+ | O| 91| 2.340171109189044| 1.8502038| 2.9841056|-0.5095228492389405| | C| 44| 2.727002328092402| 1.8144855| 2.9990435| 2.050274417960135| | N| 56| 2.247671846832548| 1.9923105| 2.9953997| 2.470076287060217| | Cl| 1|2.3399999141693115| 2.34| 2.34| NaN| | S| 43|2.3423283100128174| 2.2196188| 2.4604716| 0.3902514824014989| | H| 3|2.8938498497009277| 2.844304| 2.979628|-1.4999999999999993| +--------+---------------+------------------+-------------+-------------+-------------------+ ###Markdown Terminate Spark ###Code sc.stop() ###Output _____no_output_____ ###Markdown Advanced Zinc Interaction Analysis Example Imports ###Code from pyspark import SparkConf, SparkContext from pyspark.sql.functions import * from mmtfPyspark.datasets import groupInteractionExtractor from mmtfPyspark.io import mmtfReader from mmtfPyspark.webfilters import Pisces ###Output _____no_output_____
12_DataScience2021.ipynb
###Markdown Osteoarthritis-Analysis with Deep Learning Transfer-LearningNikolas Wilhelm fastai:https://www.fast.ai/Pytorch:https://pytorch.org/Knee Osteoarthritis Severity Grading Dataset:http://dx.doi.org/10.17632/56rmx5bjcr.1 Import relevant packagesto install: `conda install -c fastai -c pytorch -c anaconda fastai gh anaconda` ###Code from fastai.vision.all import * # The complete Fastai vision library from fastai.metrics import accuracy # For Determining the accuracy import fastai fastai.__version__ ###Output _____no_output_____ ###Markdown 1. Get the DatasetTherefore we first have to download and extract the dataset: (size 6.7GB)http://dx.doi.org/10.17632/56rmx5bjcr.1 ###Code # ToDo: Edit your personal path here! my_path = 'C:/Users/Niko/Documents/data/KneeXrayData/ClsKLData/kneeKL224' ###Output _____no_output_____ ###Markdown Further we need the docu open in order to look up handy functions:https://docs.fast.ai/ 2. Get the Dataset into a DataBlockA DataBlock is a structure which knows how to:* get the items -> get_items = ?* get the labels -> get_y = ?* divides between training and validation data -> splitter = ?* fites all items on the same scale -> item_tfms = ?* applies augmentation during training -> batch_tfms = ?```python The general structure we want to fit all our data in:dblock = DataBlock(blocks = (ImageBlock, CategoryBlock), get_items = ???, get_y = ???, splitter = ???, item_tfms = ???, batch_tfms = ???, ) Create the dataloader from the DataBlockdls = dblock.dataloaders(path=my_path)``` 2.1 How do we get the items?fastai provides us with a function for that: ```python get_image_files(path:str)``` ###Code # simply get a list of all filenames, which are images within this repository: fnames = get_image_files(my_path) fnames ###Output _____no_output_____ ###Markdown 2.2 How do we get a label from this list of items?```Path('C:/Users/Niko/Documents/data/KneeXrayData/ClsKLData/kneeKL224/auto_test/0/9003175_1.png')```We have to define a function, which assigns a label to each image-> The name of the label is already contained within the path string! ###Code def my_label_func(filename): """simply get the name of the parent directory""" return Path(filename).parent.name # immedeately test: my_label_func(fnames[0]) ###Output _____no_output_____ ###Markdown 2.3 Next step is to split the data into training and validation data```Path('C:/Users/Niko/Documents/data/KneeXrayData/ClsKLData/kneeKL224/auto_test/0/9003175_1.png')```https://docs.fast.ai/data.transforms.htmlGrandparentSplitter ###Code #?? GrandparentSplitter my_dataset_splitter = GrandparentSplitter(train_name='train', valid_name='val') ###Output _____no_output_____ ###Markdown 2.4 Will all our images have the same size? If not we can resize all data in order to be uniform ###Code my_item_tfms = Resize(size=224) # ?? Resize ###Output _____no_output_____ ###Markdown 2.5 Finally we want to apply proper data augmentationfastai provides us with the very handy function:```pythonaug_transforms(size=???)```` - Mirror horizontally (left/right knee)- rotate for +/- 10 degree- scale for 0.9 to 1.1 ###Code my_aug_transforms = aug_transforms(size=224) #?? 
aug_transforms ###Output _____no_output_____ ###Markdown Now let's get everything together: ###Code # Define the DataBlock dblock = DataBlock(blocks = (ImageBlock, CategoryBlock), get_items = get_image_files, # 2.1 get_y = my_label_func, # 2.2 splitter = my_dataset_splitter, # 2.3 item_tfms = my_item_tfms, # 2.4 batch_tfms = my_aug_transforms, # 2.5 ) # Define the dataloader dls = dblock.dataloaders(my_path, # define the source bs=64, # adjust if network is too large num_workers=0, # only for windows -> slow ) # Don't forget to always look at your result to verify! dls.show_batch() #?? dblock.dataloaders ###Output _____no_output_____ ###Markdown 3. Let's get our neural Network!: For image data: Convolutional Neural Network! ###Code learn = cnn_learner(dls, models.resnet50, metrics=accuracy) #?? cnn_learner ###Output _____no_output_____ ###Markdown Was enthält dieser "Learner" alles?? ###Code learn.loss_func # learn.summary() ###Output _____no_output_____ ###Markdown 4. Training!! -> We apply transfer learning, so our network is already trained on a different dataset and will now specialise on the new one. ###Code # ?? learn.fit_one_cycle learn.fit_one_cycle(4) # train for 4 cycles learn.save('./trial1') # save the result learn = learn.load('./trial1') learn.show_results(figsize=(7,7)) interp = ClassificationInterpretation.from_learner(learn) ###Output _____no_output_____ ###Markdown Show confusion matrix and classification report ###Code interp.plot_confusion_matrix(figsize=(7,7)) interp.print_classification_report() ###Output precision recall f1-score support 0 0.54 0.91 0.68 328 1 0.19 0.02 0.04 153 2 0.56 0.44 0.49 212 3 0.61 0.51 0.56 106 4 0.67 0.22 0.33 27 accuracy 0.55 826 macro avg 0.51 0.42 0.42 826 weighted avg 0.50 0.55 0.49 826 ###Markdown 5. Can we improve ?? ###Code learn.lr_find() learn.fine_tune(40, 1e-4) learn.save('./trial2') learn.load('./trial2'); interp = ClassificationInterpretation.from_learner(learn) interp.plot_confusion_matrix(figsize=(7,7)) interp.print_classification_report() ###Output _____no_output_____
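###Markdown `learn.fine_tune(40, 1e-4)` above is what actually performs the transfer learning described earlier: roughly, it first trains only the new head with the pretrained body frozen, then unfreezes everything and keeps training at lower, discriminative learning rates. A simplified sketch of that schedule written out with explicit fastai calls (fastai's real defaults differ slightly); it assumes the `learn` object built above. ###Code
learn.freeze()
learn.fit_one_cycle(1, 1e-3)                 # train only the new head on the frozen backbone
learn.unfreeze()
learn.fit_one_cycle(40, slice(1e-6, 1e-4))   # train the whole network with discriminative LRs
###Output _____no_output_____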
Implementations/knn/.ipynb_checkpoints/Kernel Regression - Redacted-checkpoint.ipynb
###Markdown Motivation Similar to K-nearest neighbours, kernel regression makes the assumption that if two examples have similar features, then they're likely to have similar targets/labels as well. Whilst K-nearest neighbours averages the targets of the K nearest examples, in kernel regression we account for the targets of all points in the training set, weighting the contribution of each example based on how close it is to the example we're trying to predict for.For example, let's say we were trying to make predictions based off a single feature and the example we were trying to make a prediction for had the feature $x^* = 10$. Suppose that for each example in our training set we computed the distance $d_i = |x_i - 10|, i = 1,..., n$ and we found that the two closest examples in the training set were $(x = 9, y = 20), (x = 21, y = 45)$.Under K-nearest neighbours, we would weight the contributions of these two examples equally when making our prediction (unless K=1). Conversely, using kernel regression we would say that since the point with $x = 9$ is much more relevant to the example we're trying to predict because the feature value is much closer to $x^* = 10$ and so it should be accorded more importance when making our prediction. How exactly do we weight each example?Writing what we discussed above slightly more formally, if we have a training dataset $D = (x_1, y_1), ..., (x_n, y_n)$, then for a new example with features $x^*$, we predict the associated label to be:$$y^* = \frac{\sum_{i=1}^n \omega_i \times y_i}{\sum_{i=1}^n \omega_i}$$Where $\omega_i$ is a function of $||x_i - x^*||$, the distance between the feature vector for the $i^{th}$ training example and the feature vector of our new example. We want $\omega_i$ to follow two rules:1. As $||x_i - x^*||$ increases, $\omega_i$ should monotonically decrease (the further away a point is, the less weight it should have)2. $\omega_i > 0, i = 1,...,n$ - we can't have zero or negative weightsThere are lots of different functions we could use to meet that fulfil these criteria but the most widely used one is known as the Gaussian kernel, where we set:$$\omega_i = k(x_i, x^*) = \text{exp}(\frac{-||x_i - x^*||^2}{2h^2})$$In the 1-dimensional case, this function has the same squared exponential term as a gaussian distribution and so we can think of the weights following that bell-shaped pattern, centered around $x^*$. Note that the weights associated with each training example change as we make predictions for different inputs.Finally, you might be wondering about the $h$ in denominator of the exponential term - this is a hyperparameter of our model called the 'bandwidth' which governs exactly how much we penalise terms from being far away from $x^*$. If $h$ is very large, then the model will prioritise points close to $x^*$ to a much greater degree than it would if $h$ was small. At the end of this notebook we take a look at what happens if we tweak the value of $h$ and discuss how we can pick the best value. 
Data Generation ###Code #Import modules import numpy as np import random import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split ###Output _____no_output_____ ###Markdown As with K-nearest neighbours, this kind of kernel regression works exactly the same for a single feature dataset as it does for a higher-dimensional one so we'll use a single feature dataset for easier visualisationWe're going to use the same dataset for this exercise as we did for the K-nearest neighbours implementation. If you aren't familiar with K-nearest neighbours yet, we'd recommend tackling those notebooks before moving onto this one! ###Code n = 200 #Number of observations X = np.random.uniform(0,5,n) Y = np.array([x*np.sin(x**2) for x in X]) + np.random.normal(0,1,n) data = pd.DataFrame({'X':X, 'Y':Y}) plt.figure(figsize = (10,6)) plt.scatter(X,Y) plt.show() class kernelRegression: def __init__(self, data, target, features, trainTestRatio = 0.9): self.target = target self.features = features #Split up data into a training and testing set self.train, self.test = train_test_split(data, test_size=1-trainTestRatio) #There's no fitting process for kernel regression, per se. To make predictions we simply examine the training set #and find the closest examples we can def kernelFunction(self, x, h): def predictSingleExample(self, x, h = 0.1): #Apply kernel smoother to get the weight of each training example weights = #... #Return weighted, normalised sum return #... def predict(self, X = None, h = 0.1): #Predict the values for a dataframe of values (in the same format as the training set) #Returns a list of the predicted values #if no X provided, predict values of the test set if X is None: X = self.test return [self.predictSingleExample(x, h) for idx, x in X.iterrows()] ###Output _____no_output_____ ###Markdown Now let's predict on the test set ###Code myKR = kernelRegression(data, 'Y', ['X']) myKR.test['Pred'] = myKR.predict(h = 0.05) ###Output _____no_output_____ ###Markdown Plot the results ###Code f = plt.figure(figsize=(15,6)) ax = f.add_subplot(121) ax2 = f.add_subplot(122) ax.scatter(myKR.test['X'], myKR.test['Y'] - myKR.test['Pred']) ax.set_xlabel('X') ax.set_ylabel('Y - Pred') ax.set_xlim([0, 5]) ax.set_ylim([-5,5]) ax2.scatter(myKR.test['Y'], myKR.test['Pred'], label = 'True values vs Predicted Values') ax2.plot(np.arange(-5,5,0.1), np.arange(-5,5,0.1), color = 'green', label = 'Line y = x') ax2.set_xlim([-5, 5]) ax2.set_ylim([-5,5]) ax2.set_xlabel('True Label') ax2.set_ylabel('Predicted Label') ax2.legend() plt.show() ###Output _____no_output_____ ###Markdown The left plot shows the residuals plotted against out input feature, X. We can see that overall, the residuals are scattered about 0, indicating that in general the model did a decent job of capturing the relationship between the feature and target. The residuals exhibit slighly more variance when $x > 3$, but this is not particularly surpising as the target oscillates for large $x$.If we examine the plot on the right, we can see that the scatter plot of the true vs predicted values generally adheres to the line line $y = x$, indicating the model is performing as we would want. Examining the effect of changing hh is a hyperparameter we can tune and should be thought of as a regulariser. 
The larger the bandwidth h, the more slowly the kernel weights decay with distance, so more of the training examples contribute meaningfully to each prediction and the fitted curve becomes smoother and more robust to slight perturbations. Let's plot the regression line for a variety of different values of h ###Code plt.figure(figsize = (10,6)) dataRegLine = pd.DataFrame({'X':np.linspace(0,5,100)}) for h in [0.01, 0.02, 0.05, 0.1, 0.3, 1, 2]: dataRegLine[f'Pred{h}'] = myKR.predict(X = dataRegLine[['X']], h = h) #Obtain predictions plt.plot(dataRegLine['X'], dataRegLine[f'Pred{h}'], label = f'Bandwidth: {h}') #Plot regression line plt.legend() plt.scatter(myKR.train['X'], myKR.train['Y']) plt.show() ###Output _____no_output_____
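###Markdown The `kernelFunction` and `predictSingleExample` bodies in the class above are redacted. For reference, one possible way to fill them in, following the Gaussian-kernel formula from the Motivation section; this standalone sketch is not the notebook's official solution. ###Code
import numpy as np

def kernel_function(distances, h):
    """Gaussian kernel weights: exp(-d**2 / (2 * h**2))."""
    return np.exp(-np.asarray(distances) ** 2 / (2 * h ** 2))

def predict_single_example(X_train, y_train, x_star, h=0.1):
    """Weighted, normalised sum of the training targets."""
    distances = np.linalg.norm(X_train - x_star, axis=1)
    weights = kernel_function(distances, h)
    return np.sum(weights * y_train) / np.sum(weights)

# tiny usage example with made-up data
X = np.array([[1.0], [2.0], [3.0]])
y = np.array([1.0, 4.0, 9.0])
print(predict_single_example(X, y, np.array([2.5]), h=0.5))
###Output _____no_output_____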
keras-cnn-dog-or-cat-classification.ipynb
###Markdown Import Library ###Code import numpy as np import pandas as pd from keras.preprocessing.image import load_img, image from keras.models import Sequential import os ###Output Using TensorFlow backend. ###Markdown Define Constants ###Code IMAGE_WIDTH=128 IMAGE_HEIGHT=128 IMAGE_SIZE=(IMAGE_WIDTH, IMAGE_HEIGHT) IMAGE_CHANNELS=3 ###Output _____no_output_____ ###Markdown Define Model Architecture ###Code from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Activation, BatchNormalization def createModel(): model = Sequential() model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS))) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(128, (3, 3), activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(512, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.5)) model.add(Dense(2, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) return model from keras.models import load_model model = createModel() model.load_weights('model.h5') def load(filename: str): img = image.load_img(filename, target_size = IMAGE_SIZE) img = image.img_to_array(img) img = np.expand_dims(img, axis = 0) return img img = load('dog.jpg') result = model.predict(img) if result[0][0] == 1: print('That is a cat') else: print('That is a dog') ###Output WARNING:tensorflow:From /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:422: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead. That is a dog ###Markdown Convert to .mlmodel ###Code import coremltools new_model = coremltools.converters.keras.convert(model, input_names="image", image_input_names="image", image_scale=1/255.0) ###Output WARNING:root:Keras version 2.3.1 detected. Last version known to be fully compatible of Keras is 2.2.4 . ###Markdown Save new ML model ###Code new_model.save('cat_dog.mlmodel') ###Output _____no_output_____
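###Markdown The prediction cell above checks `result[0][0] == 1`, which only works when the softmax output is exactly 1.0. A slightly more robust reading of the same output via `argmax`; it assumes the class ordering implied by the original cell (index 0 = cat, index 1 = dog) and the `model`/`load` objects defined above. ###Code
import numpy as np

probs = model.predict(load('dog.jpg'))[0]
label = ['cat', 'dog'][int(np.argmax(probs))]   # assumed ordering: [cat, dog]
print(f"That is a {label} (p = {probs.max():.2f})")
###Output _____no_output_____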
doc/rtd/nb_timing.ipynb
###Markdown Gammas with Timing Information ###Code ### initializations and import libraries import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec %matplotlib inline %pylab inline from CGMFtk import histories as fh ### rcParams are the default parameters for matplotlib import matplotlib as mpl print ("Matplotbib Version: ", mpl.__version__) mpl.rcParams['font.size'] = 18 mpl.rcParams['font.family'] = 'Helvetica', 'serif' #mpl.rcParams['font.color'] = 'darkred' mpl.rcParams['font.weight'] = 'normal' mpl.rcParams['axes.labelsize'] = 18. mpl.rcParams['xtick.labelsize'] = 18. mpl.rcParams['ytick.labelsize'] = 18. mpl.rcParams['lines.linewidth'] = 2. font = {'family' : 'serif', 'color' : 'darkred', 'weight' : 'normal', 'size' : 18, } mpl.rcParams['xtick.major.pad']='10' mpl.rcParams['ytick.major.pad']='10' mpl.rcParams['image.cmap'] = 'inferno' ###Output Matplotbib Version: 3.1.3 ###Markdown This time, **CGMF** was run with the option `-t -1` to record all of the $\gamma$-ray timing information. **CGMFtk** can read these files as well, based on the information that appears in the first line of the file. ###Code hist = fh.Histories('98252sf_timing.cgmf') ###Output 98252sf_timing.cgmf ###Markdown The timing information can easily be recovered. The prompt $\gamma$s have a time of 0. ###Code gammaAges = hist.getGammaAges() print (gammaAges[:10]) nug = hist.getNug() print (nug[:10]) ###Output [list([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.89589e-09]) list([0.0, 0.0, 1.42003e-09, 1.42041e-09]) list([0.0, 7.42295e-12, 7.34788e-11, 1.94532e-09]) list([0.0, 0.0]) list([0.0, 0.0, 0.0, 0.0]) list([0.0]) list([0.0]) list([0.0, 1.17539e-12, 1.54653e-07, 1.54653e-07]) list([0.0, 0.0, 0.0, 0.0, 0.0]) list([])] [7 4 4 2 4 1 1 4 5 0] ###Markdown The nubargtot function can also be used to construct the average gamma-ray multiplicity per fission event as a function of time. In the call to nubarg() or nubargtot(), timeWindow=True should be included which uses the default timings provided in the function (otherwise, passing a numpy array or list of times to timeWindow will use those times). Optionally, a minimum gamma-ray energy cut-off can also be included, Eth. ###Code times,nubargTime = hist.nubarg(timeWindow=True) # include timeWindow as a boolean or list of times (in seconds) to activate this feature fig = plt.figure(figsize=(8,6)) plt.plot(times,nubargTime,'o',label='Eth=0. MeV') times,nubargTime = hist.nubarg(timeWindow=True,Eth=0.1) plt.plot(times,nubargTime,'o',label='Eth=0.1 MeV') plt.xlabel('Time since fission (s)') plt.ylabel(r'Averge $\gamma$-ray multiplicity') plt.xscale('log') plt.legend() plt.show() ###Output _____no_output_____ ###Markdown The prompt fission gamma-ray spectrum function, pfgs(), can also be used to calculate this quantity within a certain time window since the fission event. The time window is defined using minTime and maxTime to set the lower and upper boundaries. ###Code fig = plt.figure(figsize=(8,6)) bE,pfgsTest = hist.pfgs(minTime=5e-8,maxTime=500e-8) plt.step(bE,pfgsTest,label='Time window') bE,pfgsTest = hist.pfgs() plt.step(bE,pfgsTest,label='All events') plt.yscale('log') plt.xlim(0,2) plt.ylim(0.1,100) plt.xlabel('Gamma-ray Energy (MeV)') plt.ylabel('Prompt Fission Gamma Spectrum') plt.legend() plt.show() ###Output _____no_output_____ ###Markdown The multiplicity as a function of time can be calculated using the `gammaMultiplicity()` function. The smoothness of the resulting curve will depend on the number of events included in the history file. 
Here, we only have 500,000 events, which is not quite enough for this type of information.The fission fragment of interest has to be specified (here A=134, Z=52), and a timing window, in seconds, is given from the minTime to the maxTime (here 10 ns to 1000 ns). ###Code # calculate the gamma-ray multiplicity as a function of time since fission for a specific fission fragment times,gMultiplicity = hist.gammaMultiplicity(minTime=1e-8,maxTime=1e-6,Afragment=134,Zfragment=52) # also compare to an exponential decay with the half life of the state f = np.exp(-times*np.log(2)/1.641e-7) # the half life of 134Te is 164.1 ns norm = gMultiplicity[0]/f[0] fig = plt.figure(figsize=(8,6)) plt.plot(times*1e9,gMultiplicity/norm,'k-',label='CGMF') plt.plot(times*1e9,f,'r--',label=r'exp($-t\cdot$log(2)/$\tau_{1/2}$)') plt.legend() plt.yscale('log') plt.xlabel('Time since fission (ns)') plt.ylabel(r'N$_\gamma$(t) (arb. units)') plt.show() ###Output _____no_output_____ ###Markdown Isomeric ratios can also be calculated with **CGMFtk** with the `isomericRatio()` function. Again, the fission fragment needs to be specified, along with the spin of the ground state (`Jgs`) and the spin of the isomeric state (`Jm`). In addition, a threshold time, in seconds, is defined. This time should be significantly shorter than the lifetime of the state, and one should make sure to check various thershold times to ensure the isomeric ratio is independent of the choice of threshold time. ###Code # calculate the isomeric ratios for specific states in nuclei # e.g. isomeric ratio for the 1/2- state in 99Nb, ground state is 9/2+, lifetime is 150 s r = hist.isomericRatio(thresholdTime=1,A=99,Z=41,Jm=0.5,Jgs=4.5) print ('99Nb:',round(r,2)) # e.g. isomeric ratio for the 11/2- state in 133Te, ground state is 3/2+, lifetime is 917.4 s r = hist.isomericRatio(thresholdTime=1,A=133,Z=52,Jm=5.5,Jgs=1.5) print ('133Te:',round(r,2)) ###Output 99Nb: 0.91 133Te: 0.32
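###Markdown As a quick sanity check of the decay comparison above: the reference curve uses exp(-t*ln(2)/t_half) with t_half = 164.1 ns for the 134Te isomer, so after exactly one half-life the surviving fraction should be 0.5. ###Code
import numpy as np

t_half = 164.1e-9                # seconds, as quoted above for 134Te
lam = np.log(2) / t_half         # decay constant in 1/s
print(np.exp(-lam * t_half))     # -> 0.5 after one half-life
###Output _____no_output_____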
2_4_overfitting_underfitting/Tutorial - Regularisierung.ipynb
###Markdown 1. VorbereitungUm das Risiko einer Überanpassung zu verhindern, kann die lineare/polynomiale Regression regularisiert werden. Dazu wird der Verlustfunktion ein zusätzlicher Regularisierungsterm hinzugefügt, der dafür sorgt, dass Koeffizienten kleiner Magnitude gegenüber Koeffizienten großer Magnitude bevorzugt werden.Scikit-Learn stellt die lineare Regression mit Regularisierung in den Klassen `Ridge`, `ElasticNet` und `Lasso` zur Verfügung. ###Code import numpy as np import matplotlib.pyplot as plt # Ridge = L2 # Lasso = L1 # ElasticNet = L1 + L2 # Diese Modelle sind LinearRegression mit Regularisierung from sklearn.linear_model import Ridge, Lasso, ElasticNet, LinearRegression from sklearn.pipeline import make_pipeline from sklearn.preprocessing import PolynomialFeatures, RobustScaler from utils_overfitting import generate_data # TODO: Hilfsfunktion zum Erstellen eines Modells mit Regularisierung und polynomischer Expansion def get_polynomial_regression(alpha=1.0, degree=2): model = make_pipeline( PolynomialFeatures(degree=degree, include_bias=False), RobustScaler(), Ridge(alpha=alpha) ) return model def get_polynomial_regression_l1(alpha=1.0, degree=2): model = make_pipeline( RobustScaler(), PolynomialFeatures(degree=degree, include_bias=False), RobustScaler(), Lasso(alpha=alpha) ) return model # TODO: Visualisierungsfunktion def visualize_fit(model, color=None): x_viz = np.linspace(0, 40, 100).reshape(-1, 1) plt.plot(x_viz, model.predict(x_viz), color=color) n_samples = 10 alpha = 100.0 degree = 10 X_train, y_train = generate_data(n_samples=n_samples, random_state=2) X_test, y_test = generate_data(n_samples=10, random_state=5) model = get_polynomial_regression_l1(alpha=alpha, degree=degree) model.fit(X_train, y_train) plt.figure(figsize=(10, 8)) plt.scatter(X_train, y_train) plt.scatter(X_test, y_test, color="orange") plt.ylim(-5, 100) visualize_fit(model, color="red") with np.printoptions(suppress=True): print(model[-1].coef_) # Wähle einen relativ hohen Grad der polynomischen Expansion degree = 10 # TODO: definiere `alpha`, je größer `alpha`, desto stärker die Regularisierung alphas = [0.01, 1.0, 10.0] # TODO: Ridge Regression ist Lineare Regression mit L2-Regularisierung (Weight Decay) ridge_models = None # TODO: Lasso Regression ist Lineare Regression mit L1-Regularisierung lasso_models = None ###Output _____no_output_____ ###Markdown 2. Modelle trainieren ###Code # TODO: Daten generieren x_train, y_train = None, None # TODO: Modelle fitten ###Output _____no_output_____ ###Markdown 3. Visualisierung 3.1 Visualisierung der Ridge Modelle ###Code # TODO: Scatterplot # TODO: Fits Visualisieren # TODO: Koeffizienten untersuchen ###Output _____no_output_____ ###Markdown 3.2 Visualisierung der Lasso Modelle ###Code # TODO: Scatterplot # TODO: Fits Visualisieren # TODO: Koeffizienten untersuchen ###Output _____no_output_____
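###Markdown One possible way to complete the TODO cells above (a sketch, not the course's official solution): build one pipeline per `alpha`, fit each on the generated training data, and inspect the coefficients. It reuses `generate_data`, `get_polynomial_regression` and `get_polynomial_regression_l1` exactly as defined in this notebook. ###Code
alphas = [0.01, 1.0, 10.0]
degree = 10

x_train, y_train = generate_data(n_samples=10, random_state=2)

ridge_models = [get_polynomial_regression(alpha=a, degree=degree).fit(x_train, y_train) for a in alphas]
lasso_models = [get_polynomial_regression_l1(alpha=a, degree=degree).fit(x_train, y_train) for a in alphas]

for a, m in zip(alphas, ridge_models):
    print(a, m[-1].coef_[:3])   # larger alpha should shrink the leading coefficients
###Output _____no_output_____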
S03 T02 Exercici 4.ipynb
###Markdown Nivell 2Treballem els conceptes de l'estructura d'una matriu, Broadcasting, indexació, Mask..- Exercici 4Mostreu-me amb exemples de diferents matrius, la regla fonamental de Broadcasting que diu : "les matrius es poden transmetre / broadcast si les seves dimensions coincideixen o si una de les matrius té una mida d'1". ###Code import numpy as np a = np.array(42) b = np.array([1, 2, 3, 4, 5]) c = np.array([[1, 2, 3], [4, 5, 6]]) d = np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]]) ###Output _____no_output_____ ###Markdown Para esto me he revisado la información que hay en la documentación oficial de numpy:https://numpy.org/doc/stable/user/theory.broadcasting.htmlarray-broadcasting-in-numpyBásicamente el broadcasting es el sistema de que cuando tú le haces hacer una operación de dos arrays entre sí, qué hace numpy con su vida. En especial cuando no tienen la misma forma.La forma más sencilla es que mira, que si le mandas a operar y el array tiene dimensión 0 pues todos por ese número y ya está. También que tengan la misma forma. Por ejemplo: ###Code j = b * a j ###Output _____no_output_____ ###Markdown o que tengan exactamente la misma forma: ###Code k = c * c k ###Output _____no_output_____ ###Markdown (que curiosa forma de poner números cuadrados) ahora bien, suponte que tenemos dos matrices de dimensión 1. Si las sumamos o multiplicamos ocurrirá lo siguiente: ###Code b = np.array([1, 2, 3, 4, 5]) e = np.array([1, 2, 3, 4, 5, 6]) b+e ###Output _____no_output_____ ###Markdown Nos da error porque la forma no coincide, no sabe con qué sumar el sexto número, y no se lo va a inventar numpy. Para hacer esa suma tendríamos que girar el primer array (o el segundo y entonces sumarlos. Y claro, al sumarlos lo que hará es estirar los valores de uno y otro lado, combinándolos y creando una matriz de 5x6 (o de 6x5)En el ejemplo de la documentación de numpy ###Code b = np.array([1, 2, 3, 4, 5]) e = np.array([1, 2, 3, 4, 5, 6]) k = b.reshape(5,1) k + e ###Output _____no_output_____
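###Markdown The examples above walk through the broadcasting rule by hand: shapes are compatible when the dimensions match or one of them is 1, which is why a (5, 1) array and a (6,) array combine into a (5, 6) result. Newer NumPy versions (1.20+) expose that check directly, as this short sketch shows. ###Code
import numpy as np

print(np.broadcast_shapes((5, 1), (6,)))   # -> (5, 6): the size-1 dimension is stretched
print(np.broadcast_shapes((2, 3), (3,)))   # -> (2, 3): trailing dimensions match

# incompatible shapes raise a ValueError, like the failed b + e sum above
try:
    np.broadcast_shapes((5,), (6,))
except ValueError as err:
    print(err)
###Output _____no_output_____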
L03-015-Notebook-Canny_edge_detection.ipynb
###Markdown Implement Canny edge detection ###Code # Try Canny using "wide" and "tight" thresholds wide = cv2.Canny(gray, 30, 100) tight = cv2.Canny(gray, 200, 240) # Display the images f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.set_title('wide') ax1.imshow(wide, cmap='gray') ax2.set_title('tight') ax2.imshow(tight, cmap='gray') ###Output _____no_output_____ ###Markdown TODO: Try to find the edges of this flowerSet a small enough threshold to isolate the boundary of the flower. ###Code # Read in the image image = cv2.imread('images/sunflower.jpg') # Change color to RGB (from BGR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) plt.imshow(image) # Convert the image to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) ## TODO: Define lower and upper thresholds for hysteresis # right now the threshold is so small and low that it will pick up a lot of noise lower = 100 upper = 240 edges_wide = cv2.Canny(gray, lower, upper) lower = 120 upper = 240 edges_intermediate = cv2.Canny(gray, lower, upper) lower = 200 upper = 240 edges_narrow = cv2.Canny(gray, lower, upper) #plt.figure(figsize=(20,10)) f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(30, 20)) ax1.set_title("Wide") ax1.imshow(edges_wide, cmap='gray') ax2.set_title("Intermediate") ax2.imshow(edges_intermediate, cmap='gray') ax3.set_title("Narrow") ax3.imshow(edges_narrow, cmap='gray') ###Output _____no_output_____
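###Markdown For the sunflower exercise above, the hysteresis thresholds were picked by hand. A common heuristic (a rule of thumb, not part of OpenCV itself) derives them from the median pixel intensity; the sketch below assumes a grayscale image like the `gray` array used above. ###Code
import numpy as np
import cv2

def auto_canny(gray_img, sigma=0.33):
    """Pick Canny thresholds around the median intensity of the image."""
    v = np.median(gray_img)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(gray_img, lower, upper)

# e.g. edges = auto_canny(gray)
###Output _____no_output_____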
docs/contents/tools/classes/mmtf_MMTFDecoder/is_mmtf_MMTFDecoder.ipynb
###Markdown Is mmtf.MMTFDecoder ###Code from molsysmt.tools import mmtf_MMTFDecoder #mmtf_MMTFDecoder.is_mmtf_MMTFDecoder() ###Output _____no_output_____
04_iteration/04_exercises.ipynb
###Markdown **Note**: Click on "*Kernel*" > "*Restart Kernel and Run All*" in [JupyterLab](https://jupyterlab.readthedocs.io/en/stable/) *after* finishing the exercises to ensure that your solution runs top to bottom *without* any errors. If you cannot run this file on your machine, you may want to open it [in the cloud ](https://mybinder.org/v2/gh/webartifex/intro-to-python/develop?urlpath=lab/tree/04_iteration/04_exercises.ipynb). Chapter 4: Recursion & Looping (Coding Exercises) The exercises below assume that you have read the [third part ](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/04_iteration/03_content.ipynb) of Chapter 4.The `...`'s in the code cells indicate where you need to fill in code snippets. The number of `...`'s within a code cell give you a rough idea of how many lines of code are needed to solve the task. You should not need to create any additional code cells for your final solution. However, you may want to use temporary code cells to try out some ideas. Throwing Dice In this exercise, you will model the throwing of dice within the context of a guessing game similar to the one shown in the "*Example: Guessing a Coin Toss*" section in [Chapter 4 ](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/04_iteration/03_content.ipynbExample:-Guessing-a-Coin-Toss).As the game involves randomness, we import the [random ](https://docs.python.org/3/library/random.html) module from the [standard library ](https://docs.python.org/3/library/index.html). To follow best practices, we set the random seed as well. ###Code import random random.seed(42) ###Output _____no_output_____ ###Markdown A die has six sides that we labeled with integers `1` to `6` in this exercise. For a fair die, the probability for each side is the same.**Q1**: Model a `fair_die` as a `list` object! ###Code fair_die = ... ###Output _____no_output_____ ###Markdown **Q2**: What function from the [random ](https://docs.python.org/3/library/random.html) module that we have seen already is useful for modeling a single throw of the `fair_die`? Write a simple expression (i.e., one function call) that draws one of the equally likely sides! Execute the cell a couple of times to "see" the probability distribution! ###Code ... ###Output _____no_output_____ ###Markdown Let's check if the `fair_die` is indeed fair. To do so, we create a little numerical experiment and throw the `fair_die` `100000` times. We track the six different outcomes in a `list` object called `throws` that we initialize with all `0`s for each outcome.**Q3**: Complete the `for`-loop below such that it runs `100000` times! In the body, use your answer to **Q2** to simulate a single throw of the `fair_die` and update the corresponding count in `throws`!Hints: You need to use the indexing operator `[]` and calculate an `index` in each iteration of the loop. Do do not actually need the target variable provided by the `for`-loop and may want to indicate that with an underscore `_`. ###Code throws = [0, 0, 0, 0, 0, 0] for ... in ...: ... ... ... throws ###Output _____no_output_____ ###Markdown `throws` contains the simulation results as absolute counts.**Q4**: Complete the `for`-loop below to convert the counts in `throws` to relative frequencies stored in a `list` called `frequencies`! Round the frequencies to three decimals with the built-in [round() ](https://docs.python.org/3/library/functions.htmlround) function!Hints: Initialize `frequencies` just as `throws` above. 
How many iterations does the `for`-loop have? `6` or `100000`? You may want to obtain an `index` variable with the [enumerate() ](https://docs.python.org/3/library/functions.htmlenumerate) built-in. ###Code frequencies = [0, 0, 0, 0, 0, 0] for ... in ...: ... frequencies ###Output _____no_output_____ ###Markdown **Q5**: How could we adapt the `list` object used above to model an `unfair_die` where `1` is as likely as `2`, `2` is twice as likely as `3`, and `3` is twice as likely as `4`, `5`, or `6`, who are all equally likely? ###Code unfair_die = ... ###Output _____no_output_____ ###Markdown **Q6**: Copy your solution to **Q2** for the `unfair_die`! Execute the cell a couple of times to "see" the probability distribution! ###Code ... ###Output _____no_output_____ ###Markdown **Q7**: Copy and adapt your solutions to **Q3** and **Q4** to calculate the `frequencies` for the `unfair_die`! ###Code throws = [0, 0, 0, 0, 0, 0] frequencies = [0, 0, 0, 0, 0, 0] for ... in ...: ... ... ... for ... in ...: ... frequencies ###Output _____no_output_____ ###Markdown **Q8**: The built-in [input() ](https://docs.python.org/3/library/functions.htmlinput) allows us to ask the user to enter a `guess`. What is the data type of the object returned by [input() ](https://docs.python.org/3/library/functions.htmlinput)? Assume the user enters the `guess` as a number (i.e., "1", "2", ...) and not as a text (e.g., "one"). ###Code guess = input("Guess the side of the die: ") guess ... ###Output _____no_output_____ ###Markdown **Q9**: Use a built-in constructor to cast `guess` as an `int` object!Hint: Simply wrap `guess` or `input("Guess the side of the die: ")` with the constructor you choose. ###Code ... ###Output _____no_output_____ ###Markdown **Q10**: What type of error is raised if `guess` cannot be cast as an `int` object? **Q11**: Write a `try` statement that catches the type of error (i.e., your answer to **Q10**) raised if the user's input cannot be cast as an `int` object! Print out some nice error message notifying the user of the bad input! ###Code try: ... except ...: ... ###Output _____no_output_____ ###Markdown **Q12**: Write a function `get_guess()` that takes a user's input and checks if it is a valid side of the die! The function should *return* either an `int` object between `1` and `6` or `None` if the user enters something invalid.Hints: You may want to re-use the `try` statement from **Q11**. Instead of printing out an error message, you can also `return` directly from the `except`-clause (i.e., early exit) with `None`. So, the user can make *two* kinds of input errors and maybe you want to model that with two *distinct* `return None` statements. Also, you may want to allow the user to enter leading and trailing whitespace that gets removed without an error message. ###Code def get_guess(): """Process the user's input. Returns: guess (int / NoneType): either 1, 2, 3, 4, 5 or 6 if the input can be parsed and None otherwise """ ... # Check if the user entered an integer. ... ... ... ... # Check if the user entered a valid side. ... ... ... ###Output _____no_output_____ ###Markdown **Q13** Test your function for all *three* cases! ###Code get_guess() ###Output _____no_output_____ ###Markdown **Q14**: Write an *indefinite* loop where in each iteration a `fair_die` is thrown and the user makes a guess! Print out an error message if the user does not enter something that can be understood as a number between `1` and `6`! The game should continue until the user makes a correct guess. ###Code ... ... 
... ... ... ... ... ... ... ... ###Output _____no_output_____
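###Markdown The counting loops in Q3 and Q4 are the technical core of this exercise. The cell below is one possible way to fill them in, shown purely as an illustration (it is not the official answer key, and your variable choices may differ). ###Code
# One possible solution sketch for the Q3/Q4 counting loops (illustration only).
import random

random.seed(42)
fair_die = [1, 2, 3, 4, 5, 6]

throws = [0, 0, 0, 0, 0, 0]
for _ in range(100_000):
    side = random.choice(fair_die)   # a single throw of the fair die
    throws[side - 1] += 1            # sides 1..6 map to indices 0..5

frequencies = [0, 0, 0, 0, 0, 0]
for index, count in enumerate(throws):
    frequencies[index] = round(count / 100_000, 3)

print(frequencies)   # each entry should be close to 1/6, i.e. about 0.167
###Output
 _____no_output_____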
docs/_build/doctrees/nbsphinx/examples/data.ipynb
###Markdown Simulating and Loading DataDamuta provides two classes for input data: `DataSet` and `SignatureSet`. `DataSet` ensures that a counts dataframe and sample annotation can be easily aligned via matching on sample ids. `SignatureSet` provides some simple methods for summarizing and understanding mutational signatures, as well for extracing damage and misrepair signatures from COSMIC-format signatures. Simulating Data We can simulate a dataset of mutation counts using the function `sim_parametric`. We will simulate `500` samples containing `10000` mutations each, with varying activities of `10` damage signatures, and `8` misrepair signatures. ###Code from damuta.sim import sim_parametric counts, params = sim_parametric(S=500, N=10000, n_damage_sigs=10, n_misrepair_sigs=8, seed=1992) print(counts.sum(1)) counts.head() ###Output simulated_sample_0 10000 simulated_sample_1 10000 simulated_sample_2 10000 simulated_sample_3 10000 simulated_sample_4 10000 ... simulated_sample_495 10000 simulated_sample_496 10000 simulated_sample_497 10000 simulated_sample_498 10000 simulated_sample_499 10000 Length: 500, dtype: int64 ###Markdown Next, lets make use of `DataSet` to organize our data for us. We will simulate some metadata annotate our 500 samples with. This is most applicable for pan-cancer data, and necessary when fitting Damuta's `HierarchicalTandemLda` model, but the annotation slot of the `DataSet` is also useful for holding clinical metadata about each sample. Note: Pan-cancer data is not required. All Damuta models can just as easily fit a dataset where all samples come from the same tissue type. The `DataSet` class at minimum acts as a container for a pandas DataFrame of mutation type counts. The metadata annotation is also be a pandas DataFrame, in [tidy format](https://en.wikipedia.org/wiki/Tidy_data) (ie. each row is a sample, each column is a feature). The DataFrame index of both the count data and annotation data is the sample id. In this example, we simulated the counts dataframe, but in principle any trinucleotide count data that can be loaded with `pd.read_csv` can be used. ###Code import numpy as np import pandas as pd from damuta import DataSet, SignatureSet # pick from 3 tissues tissues = np.array(["Breast-AdenoCA", "Kidney-RCC", "ColoRect-AdenoCA"]) # pick from primary or metastatic tumour types = np.array(['primary', 'metastatic']) # randomly assign tissue type to samples annotation = pd.DataFrame.from_dict({"tissue_type": tissues[np.random.choice(3,500)], "tumour_type": types[np.random.choice(2,500)] }) annotation = annotation.set_index(counts.index) annotation.head() ###Output _____no_output_____ ###Markdown Pair the counts and metadata with the `DataSet` class. ###Code simulated_data = DataSet(counts, annotation) print(f"simulated_data contains {simulated_data.n_samples} samples") print(simulated_data.ids[0:5]) ###Output simulated_data contains 500 samples ['simulated_sample_0', 'simulated_sample_1', 'simulated_sample_2', 'simulated_sample_3', 'simulated_sample_4'] ###Markdown Loading signature dataLastly, let's retrieve a set of mutational signatures from the [COSMIC database](https://cancer.sanger.ac.uk/signatures/). 
###Code signatures = pd.read_csv("https://cancer.sanger.ac.uk/signatures/documents/452/COSMIC_v3.2_SBS_GRCh37.txt",
                          sep='\t', index_col=0 , header=0)

COSMIC = SignatureSet(signatures.T)
print(f"COSMIC contains {COSMIC.n_sigs} signatures")
###Output
COSMIC contains 78 signatures
###Markdown Every COSMIC mutational signature can be re-written as a product of a damage signature and a misrepair signature. We'll visualize these with `matplotlib`. ###Code
from matplotlib import rcParams
rcParams["figure.figsize"]=8,6
from damuta.plotting import plot_signatures

cosmic=plot_signatures(COSMIC.signatures.loc[["SBS2", "SBS5", "SBS6"]]).set_figwidth(24)
damage=plot_signatures(COSMIC.damage_signatures.loc[["SBS2", "SBS5", "SBS6"]])
misrepair=plot_signatures(COSMIC.misrepair_signatures.loc[["SBS2", "SBS5", "SBS6"]])
###Output
 _____no_output_____
###Markdown The COSMIC signatures are a high quality reference set, as can be seen in their high degree of separation (low cosine similarity between different signatures). ###Code
COSMIC.summarize_separation()
###Output
 _____no_output_____
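###Markdown The separation claim above can also be checked by hand. The sketch below is not damuta's own implementation of `summarize_separation`; it simply computes pairwise cosine similarities directly from the signature matrix, assuming `COSMIC.signatures` is laid out with one signature per row and one mutation type per column (as the `.loc` indexing above suggests). ###Code
# Manual check of the "low cosine similarity between different signatures"
# claim; assumes rows = signatures, columns = mutation types.
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

sig_matrix = COSMIC.signatures.values
sim = cosine_similarity(sig_matrix)
off_diag = sim[~np.eye(sim.shape[0], dtype=bool)]
print(f"mean off-diagonal cosine similarity: {off_diag.mean():.3f}")
print(f"max  off-diagonal cosine similarity: {off_diag.max():.3f}")
###Output
 _____no_output_____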
notebooks/Feedback-Group-15.ipynb
###Markdown Proposal feedback Hello group 15, I think your introduction, initial data exploration, and method section are all well written, and I understood your group's intention well. However, I hold a slightly different view on the null hypothesis. Please see below my demonstration of a case which matches your null hypothesis but still shows a significant relation between testing positive for covid and group status. ###Code
suppressPackageStartupMessages(library(tidyverse))
suppressPackageStartupMessages(library(tidymodels))
suppressPackageStartupMessages(library(repr))
suppressPackageStartupMessages(library(infer))
# consider all the data frames are samples.
# Here I make a data frame of 100 people in total:
base_df_1 <- data.frame(group = rep(c(1), each = 80)) # 80 of them labelled as 1, so group 1 accounts for 80% of the population
base_df_0 <- data.frame(group = rep(c(0), each = 20)) # 20 of them labelled as 0, so group 0 accounts for 20% of the population.

# summarize the base population df
base_df <- rbind(base_df_1, base_df_0) %>%
    mutate(group = as.factor(group))
base_df_summary <- base_df %>%
    group_by(group) %>%
    summarize(n = n()) %>%
    mutate(p = n / nrow(base_df))
base_df_summary
# Here I make a data frame of 10 people who are positive on covid
covid_df_1 <- data.frame(covid = rep(c(1), each = 5)) # 5 of them are in group 1, so p_1 = 0.5,
covid_df_0 <- data.frame(covid = rep(c(0), each = 5)) # 5 of them are in group 0, so p_2 = 0.5

covid_df <- rbind(covid_df_1, covid_df_0) %>%
    mutate(covid = as.factor(covid))
covid_df_summary <- covid_df %>%
    group_by(covid) %>%
    summarize(n = n()) %>%
    mutate(p = n / nrow(covid_df))
covid_df_summary
options(repr.plot.width = 12, repr.plot.height = 8)
pie <- covid_df_summary %>%
    ggplot(aes("", y= p, fill = covid)) +
    geom_bar(width = 1, stat = "identity") +
    coord_polar("y", start=0)+
    theme(text = element_text(size = 20))
pie
###Output
 _____no_output_____
###Markdown $H_0$: The proportion of people tested positive is the same in each ethnicity: $p_1 = p_2$ ###Code
# In this case, p_1 = p_2, so we do not reject your null hypothesis.
# But what is the rate at which group 1 has tested positive on covid?
rate_of_covid_group_1 <- 5/80 * 100
rate_of_covid_group_1
# and group 0?
rate_of_covid_group_0 <- 5/20 * 100
rate_of_covid_group_0
###Output
 _____no_output_____
doc/source/Ipython_templates/draw_raw.ipynb
###Markdown PyGSLIB========Draw---------------The GSLIb equivalent parameter file is``` Parameters for DRAW *******************START OF PARAMETERS:data/cluster.dat \file with data3 \ number of variables1 2 3 \ columns for variables0 \ column for probabilities (0=equal)-1.0e21 1.0e21 \ trimming limits69069 100 \random number seed, number to drawdraw.out \file for realizations``` ###Code #general imports import matplotlib.pyplot as plt import pygslib import numpy as np import pandas as pd #make the plots inline %matplotlib inline ###Output _____no_output_____ ###Markdown Getting the data ready for work---------If the data is in GSLIB format you can use the function `pygslib.gslib.read_gslib_file(filename)` to import the data into a Pandas DataFrame. ###Code #get the data in gslib format into a pandas Dataframe cluster = pygslib.gslib.read_gslib_file('../datasets/cluster.dat') print ('\n\t\tCluster Data \n',cluster.tail()) ###Output Cluster Data Xlocation Ylocation Primary Secondary Declustering Weight 135 31.5 41.5 22.75 8.21 0.427 136 34.5 32.5 9.42 6.76 0.413 137 35.5 31.5 8.48 12.78 0.419 138 35.5 33.5 2.82 9.21 0.271 139 36.5 32.5 5.26 12.40 0.252 ###Markdown Testing Draw ###Code print (pygslib.gslib.__draw.draw.__doc__) cluster['NO-Weight']=1. parameters_draw = { 'vr' : cluster[['Xlocation','Ylocation','Primary']], # data 'wt' : cluster['NO-Weight'], # weight/prob (use wt[:]=1 for equal probability) 'rseed' : 69069, # random number seed (conditioning cat.) 'ndraw' : 100} # number to draw vo,sumwts,error = pygslib.gslib.__draw.draw(**parameters_draw) print ('error ? ', error != 0, error) print ('is 1./sumwts == nd?', 1./sumwts, len(cluster)) #making the output (which is numpy array) a pandas dataframe for nice printing dfvo=pd.DataFrame(vo,columns= ['Xlocation','Ylocation','Primary']) ###Output _____no_output_____ ###Markdown Comparing results with gslib ###Code print (dfvo.head(6)) print ('******') print (dfvo.tail(6)) ###Output Xlocation Ylocation Primary 0 39.5 18.5 0.06 1 39.5 18.5 0.06 2 39.5 18.5 0.06 3 39.5 18.5 0.06 4 39.5 18.5 0.06 5 39.5 18.5 0.06 ****** Xlocation Ylocation Primary 94 39.5 18.5 0.06 95 39.5 18.5 0.06 96 39.5 18.5 0.06 97 39.5 18.5 0.06 98 39.5 18.5 0.06 99 39.5 18.5 0.06
lessons/python_primer/1 - Introduction and Jupyter Use.ipynb
###Markdown Lesson 1: Introduction to Python and Using Jupyter notebooks The Jupyter notebook is a platform for using and writing code in a dynamic way that allows users to combine cells of code snippets that are executed with a persistent namespace and kernel alongside markdown text for facilitating readability and visualization.In this notebook, we cover some basics of Jupyter functionality along with a discussion of some details of how you can use python in this environment and elsewhere. InstallationFor this workshop, we've constructed an online environment for everyone to use in order to smooth out any platform-dependent installation issues, but you'll probably want to install the tools we use today locally on your own machine. To do this, we recommend [Anaconda](https://docs.continuum.io/anaconda/install), which is an effective tool for python package management that can create virtual environments, comes with a pre-installed IDE, and includes all of the Jupyter functionality that you'll see here. The Anaconda installer should be detailed on the page linked above, but here's [another resource](https://swcarpentry.github.io/python-novice-gapminder/setup/) for installation that might be helpful. Code vs. MarkdownJupyter notebooks are broken down into "cells" which might contain either code or markdown. If you select a cell with your mouse, it should be highlighted with a green border indicating that you are in "edit," mode and can edit the contents of the cell. If text reading `In [ ]:` is on the left hand side of the cell, it's a "code" cell. For example, type the following and press "Shift + Enter" ###Code print("Hello world!") ###Output Hello world! ###Markdown Pressing "Shift + Enter" executes the code in the cell, prints the output below the cell, and creates a new cell below that one. In addition to code cells, you can also write your own markdown cells by converting a cell using either the dropdown menu in the toolbar or pressing "Esc + m". In general, pressing escape enters "command mode" for which you can issue a number of commands, including* f - find and replace* m - convert to markdown* y - convert to code* h - open the help menuRight now, trying typing "Esc + h" to open the help/shortcut menu and peruse it. Spend a minute testing out some of the shortcuts. Note that markdown cells are quite flexible and can basically do anything wikipedia does, including adding $\LaTeX$-formatted equations.$\hat{H}\psi = E \psi$ Shell commands, magic, and where to learn moreJupyter notebooks can also issue commands to the shell, which can be achieved using the `!` symbol at the beginning of the cell: ###Code !ls . !date ###Output Mon Aug 7 17:52:57 PDT 2017 ###Markdown In addition, certain things can be achieved in Jupyter notebooks using what are called "magic" commands, which are demarcated using the `%` sign. The most common of these are the magic function to enable inline plotting: ###Code %matplotlib inline ###Output _____no_output_____ ###Markdown and to invoke the debugger in a particular cell on an error: ###Code %pdb ###Output Automatic pdb calling has been turned ON
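###Markdown Two more line magics that come up constantly in practice are `%timeit`, which times a statement by running it repeatedly, and `%lsmagic`, which lists every magic command available in the current kernel: ###Code
%timeit sum(range(1000))   # time a single statement over many runs
%lsmagic                   # list all available line and cell magics
###Output
 _____no_output_____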
Linear_Fit/Coefficients_Method.ipynb
###Markdown Coefficients This method is based on my linear algebra textbook. You essentially find the coefficients for either the linear or quadratic equation. For a linear equation. $\bar{y} = X\bar{\beta}$ where $\bar{\beta} = \begin{bmatrix}\beta_{0} \\\beta_{1} \\\end{bmatrix}$ or $y = \beta_{0} + \beta_{1}x$. For quadratic, $\bar{y} = X\bar{\beta} + \bar{\epsilon}$ or $y_{n} = \beta_{0} + \beta_{1}x_{n} + \beta_{2}x^{2}_{n} + \epsilon_{n}$ where $\bar{\epsilon}$ is residual vector which is the difference between the observed y and predicted y.To find $\bar{\beta}$, use the normal equation of $X\bar{\beta} = \bar{y}$ which is $X^{T}X\bar{\beta} = X^T\bar{y}$. I broke down the process as follows: $X^{T}X$ $X^T\bar{y}$ $\bar{\beta} = [X^{T}X]^{-1}X^T\bar{y}$ ###Code xtmatrix = np.dot(xmatrix.T,xmatrix) #Step 1 xtmatrix_quad = np.dot(xmatrix_quad.T,xmatrix_quad) #quadratic version of Step 1 ytmatrix = np.dot(xmatrix.T, ypoints) #Step 2 ytmatrix_quad = np.dot(xmatrix_quad.T, ypoints) #quadratic version of Step 2 coeff = np.dot(np.linalg.inv(xtmatrix), ytmatrix) #Step 3 coeff_quad = np.dot(np.linalg.inv(xtmatrix_quad), ytmatrix_quad) #quadratic version of Step 3 print(coeff_quad) ###Output _____no_output_____ ###Markdown Using the coefficients found earlier, substitute it into the respective equations: $\bar{y} = X\bar{\beta}$ or $\bar{y} = X\bar{\beta} + \bar{\epsilon}$. Then, plot the graphs. ###Code def linear(arr, x): b = arr[0] m = arr[1] y = m*x + b p1 = plt.plot(x, y, label = "linear (textbook)") print('Textbook linear equation: y =',m,'x +',b) return p1 # def residual(arr,x): #determining residual vector # res = [] # a = arr[0] # b = arr[1] # c = arr[2] # y = a*(x**2) + b*x + c # for n in np.arange(ypoints.size): # res.append(ypoints[n] - y[n]) # return res def quad(arr, x, y): res = [] a = arr[0] b = arr[1] c = arr[2] y = a*(x**2) + b*x + c for n in np.arange(ypoints.size): res.append(ypoints[n] - y[n]) #difference between observed y and predicted y y += res p2 = plt.plot(x,y, label = "quadratic (textbook)") print('Textbook quadratic equation: y =',a,'x^2 +',b,'x +', c) return p2 def quad2(arr, x, y): a = arr[0] b = arr[1] c = arr[2] y = a*(x**2) + (b*x) + c p2 = plt.plot(x,y) return p2 ###Output _____no_output_____
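###Markdown Explicitly inverting $X^{T}X$ works for small, well-conditioned problems like this one, but it is worth cross-checking the result against a library least-squares solver, which solves the same problem with a more numerically stable factorization. The sketch below reuses the `xmatrix`, `xmatrix_quad` and `ypoints` arrays built earlier in this notebook; `np.linalg.lstsq` returns the solution in the same column order as the design matrix. ###Code
# Cross-check of the normal-equation coefficients against np.linalg.lstsq.
import numpy as np

coeff_check, *_ = np.linalg.lstsq(xmatrix, ypoints, rcond=None)
coeff_quad_check, *_ = np.linalg.lstsq(xmatrix_quad, ypoints, rcond=None)

print("lstsq linear coefficients:   ", coeff_check)       # should match coeff
print("lstsq quadratic coefficients:", coeff_quad_check)  # should match coeff_quad
###Output
 _____no_output_____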
07-dnn-hmm/recognition_via_reference_dictionary.ipynb
###Markdown Практика №2На прошлой практике мы реализовали DTW алгоритм. Он очень хорош и вроде как даже где-то применяется. Но у него есть существенные недостатки:1. Для распознавания какого-либо слова необходимо иметь заранее подготовленные эталонные записи. Из этого вытекает проблема масштабирования системы с большим словарем.2. Подобная система имеет плохую устойчивость к шумам.3. По какому принципу вибирать нужные эталоны?4. Сколько должно быть таких эталонов, чтобы покрыть всю вариативность произнесений?5. Чем больше эталонов, тем алгоритм вычислительнозатратен.В этом уроке мы перейдем от DTW к распознаванию по эталонному словарю. Эталон в графе теперь будет задаваться как последовательность фонем из словаря. Словарь для YES_NO (data/lexicon_yes_no.txt) выглядит следующим образом:* **YES**: SIL Y EH1 S SIL* **NO**: SIL N OW1 SILСловарь для цифр (data/lexicon_digits.txt):* **ZERO**: SIL Z IH1 R OW0 SIL* **ZERO**: SIL Z IY1 R OW0 SIL* **ONE**: SIL W AH1 N SIL* **ONE**: SIL HH W AH1 N SIL* **TWO**: SIL T UW1 SIL* **THREE**: SIL TH R IY1 SIL* **FOUR**: SIL F AO1 R SIL* **FIVE**: SIL F AY1 V SIL* **SIX**: SIL S IH1 K S SIL* **SEVEN**: SIL S EH1 V AH0 N SIL* **EIGHT**: SIL EY1 T SIL* **NINE**: SIL N AY1 N SILSIL здесь – это фонема паузы.Как вы, возможно, заметили, для некторых слов имеется несколько вариантов фонемной транскрипции. Это позволяет лучше учесть фонетическое разнообразие в произнесении некоторых слов.Каждой фонеме будет сопоставлен наиболее типичный для неё вектор признаков. В данном случае мы будем использовать среднее значение вектора признаков фонем, взятых из обучающего набора. Будем называть это акустической моделью. Данные для обученияНаша задача — собрать статистику акустических признаков по каждой фонеме, находящейся в графе распознавания. В качестве обучающей выборки мы будем использовать подмножество данных `librispeech` (аудиозаписи чтения книг на английском). Для этого предварительно была получена покадровая разметка этого подмножества на фонемы. Схематично данный процесс представлен на картинке ниже:![](https://drive.google.com/uc?export=view&id=16PdwggV_Cl6aaD4cu1MZ-fKzWzAvcFVw)Файл разметки (ali_phones.txt) представлен в формате:filename1 phone_frame1 phone_frame2 phone_frame3 ...****filename2 phone_frame1 phone_frame2 phone_frame3 ...Пример такой покадровой фонемной разметки одного файла с текстом "MUCH COULD BE DONE IN THREE YEARS" представлен ниже:1183-124566-0003 SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL M M M M M M M M M AH1 AH1 AH1 AH1 AH1 AH1 AH1 AH1 AH1 CH CH CH CH CH CH CH CH CH CH CH K K K K K K K K K UH1 UH1 UH1 UH1 D D D D D B B B B B B IY0 IY0 IY0 IY0 IY0 D D D D D D D D D D D D AH1 AH1 AH1 AH1 AH1 AH1 AH1 AH1 AH1 AH1 AH1 AH1 AH1 AH1 AH1 AH1 AH1 AH1 AH1 AH1 N N N N N N N N N N N N N N N SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SIL SI L SIL SIL SIL SIL SIL SIL SIL SIL IH1 IH1 IH1 IH1 IH1 IH1 IH1 IH1 IH1 IH1 IH1 N N N N N N N N N N SIL SIL SIL SIL SIL TH TH TH TH TH TH TH TH TH TH TH TH TH R R R R R R IY1 IY1 IY1 IY1 IY1 IY1 IY1 Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y IH1 IH1 IH1 IH1 IH1 IH1 IH1 IH1 R R R R R R R R R R R R R R R Z Z Z Z Z Z Z Z Z Z Z Z Z Z Z Z Z Z Z Z SIL SIL SIL SILАудиофайлы этих данных лежат в папке "data/train_librispeech_10h/wav_dir". 
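###Markdown To make the frame-level alignment format described above concrete, the cell below parses a single (shortened) alignment line and collapses consecutive identical labels into (phone, number_of_frames) runs. This is only an illustration of the file format; the lab's own loading code follows in the next sections. ###Code
# Illustration only: parse one shortened line of the frame-level alignment
# format and collapse repeated labels into (phone, n_frames) runs.
from itertools import groupby

line = "1183-124566-0003 SIL SIL SIL M M M AH1 AH1 CH CH CH K K UH1 D D SIL"
utt_id, *frame_phones = line.split()

runs = [(phone, len(list(group))) for phone, group in groupby(frame_phones)]
print(utt_id)
print(runs)   # e.g. [('SIL', 3), ('M', 3), ('AH1', 2), ('CH', 3), ...]
###Output
 _____no_output_____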
Bootstrap ###Code !rm -rf lab2 !gdown --id "1meikF0XqBUnVZKoY3160MQ0R25lq-EMu" !unzip -nq lab2.zip !gdown --id "1f0LkW6I3Q5CnOThuNFAJ9umgnb2eU4Ox" !unzip -nq train_librispeech_10h.zip !mv train_librispeech_10h lab2/data !rm -rf lab2.zip sample_data train_librispeech_10h.zip %cd lab2 import os import time import librosa import librosa.display import matplotlib.pyplot as plt %matplotlib inline import numpy as np import IPython.display as ipd from sklearn.manifold import TSNE from sklearn.mixture import GaussianMixture from tqdm.notebook import tqdm plt.rcParams["figure.figsize"] = (15, 5) plt.rcParams["image.interpolation"] = "nearest" # Sample example: wav_example = "data/train_librispeech_10h/wav_dir/1183-124566-0003.flac" # Read wav file. # sr=None to preserve the native sampling rate. x, sr = librosa.load(wav_example, sr=None) print(f"Number of samples: {len(x)}.") print(f"Sampling rate: {sr} Hz.") print(f"Duration: {len(x) / sr:.2f} s.") # ~ librosa.get_duration(x, sr) # Playback: ipd.Audio(x, rate=sr) # Spectrogram: D = librosa.amplitude_to_db(np.abs(librosa.stft(x)), ref=np.max) librosa.display.specshow(D, sr=sr, y_axis="hz", x_axis="time") plt.colorbar(format="%+2.0f dB") plt.title("Log-frequency power spectrogram") ###Output _____no_output_____ ###Markdown Подготовка данных ###Code # Загрузка покадровой фонемной разметки в словарь: def load_phone_aligment(aligment_file): ali_dict = {} with open(aligment_file, encoding="utf-8") as fn: for line in fn: line = line.strip().split() ali_dict[line[0]] = line[1:] return ali_dict ali_file = "data/train_librispeech_10h/ali_phones.txt" ali_dict = load_phone_aligment(ali_file) # Подсчёт акустиеских признаков для записей из папки: def load_data_dict(dir_path, count=None): data_dict = {} for step, wav_name in tqdm(enumerate(os.listdir(dir_path))): x, sr = librosa.load(os.path.join(dir_path, wav_name), sr=None) mfcc = librosa.feature.mfcc(x, sr=sr, n_mfcc=13, n_fft=int(sr * 0.025), hop_length=int(sr * 0.01)) if count and step == count: print(f"Early stop at {step}th file.") break data_dict[wav_name.split('.')[0]] = mfcc.T return data_dict train_records_dir = "data/train_librispeech_10h/wav_dir" train_data_dict = load_data_dict(train_records_dir, count=100) # Инициализация списка допустимых фонем из предустановленного файла: def init_phones_dict(phones_file): phones_dict = {} with open(phones_file, encoding="utf-8") as fn: for line in fn: phones_dict[line.strip()] = [] return phones_dict phones_file = "data/train_librispeech_10h/phones" phones_dict = init_phones_dict(phones_file) # Присвоим каждой фонеме набор её признаков в соответствии с фонемной разметкой # обучающей выборки. Количество кадров признаков может немного отличаться # от количества размеченных фонем; это связано с особенностми построения # разметки в Kaldi. 
for file_name in train_data_dict.keys(): file_feats = train_data_dict[file_name] file_ali = ali_dict[file_name] for frame in range(min(len(file_feats), len(file_ali))): phones_dict[file_ali[frame]].append(file_feats[frame]) # Выведем статистику по каждой фонеме (некоторые фонемы вообще не используются, # это особенности разметки в Kaldi): count = 0 for phone in phones_dict.keys(): if len(phones_dict[phone]) == 0: count +=1 print(f"{phone:<3}: {len(phones_dict[phone])}") print(f"Number of empty phones is: {count}.") # Соберём статистику на основе среднего значения для каждой фонемы: mean_phones_value = {} for phone in phones_dict.keys(): if phones_dict[phone]: mean_phones_value[phone] = np.mean(phones_dict[phone], axis=0) #-----------------------------TODO №2----------------------------------- # построить GMM модель для каждой фонемы (смотри задание 2) #----------------------------------------------------------------------- ###Output _____no_output_____ ###Markdown Инициализация фонемного графа ###Code # Теперь будем распознавать по словарю, состоящему из конечных слов. # Для этого нужно загрузить файл лексикона, который определяет, # из каких фонем состоят наши слова: def load_lexicon_dict(lexicon_file): lexicon_dict = {} with open(lexicon_file, encoding="utf-8") as fn: for line in fn: line = line.strip().split() lexicon_dict[line[0]] = line[1:] return lexicon_dict lexicon_file = "data/lexicon_yes_no.txt" lexicon_dict = load_lexicon_dict(lexicon_file) ###Output _____no_output_____ ###Markdown Узлы графа теперь представляют собой отдельные фонемы с переходом только в себя и следующий узел (переходов через несколько узлов уже не будет, так как пропуск фонемы в слове нежелателен). Схема такого графа для да/нет (для цифр по аналогии) представлена ниже:![](https://drive.google.com/uc?export=view&id=1IJhR0l0YCPDge05ohigs0_P5gDX66fdM)В качестве model мы будем хранить простой класс AcousticModel, который хранит в себе имя фонемы и ее среднее значение. Также у этого класса есть метод вычисления евклидовой дистанции от хранящегося в нем среднего значения признака фонемы до текущего признака распознаваемой записи, который передается этому методу. 
###Code class AcousticModel: def __init__(self, phone_name, phone_feats): self.phone_name = phone_name self.phone_feats = phone_feats def distance(self, input_feats): return float(np.sqrt(sum(pow(self.phone_feats - input_feats, 2)))) #-----------------------------TODO №2------------------------------------------ # Заменить акустическую модель на основе среднего значения признака # на GMM модель: class GmmAcousticModel: def __init__(self, phone_name, phone_feats): self.phone_name = phone_name if not phone_feats: self.phone_feats = None elif len(phone_feats) == 1: self.phone_feats = GaussianMixture(1).fit(phone_feats) else: self.phone_feats = GaussianMixture(3).fit(phone_feats) def distance(self, input_feats): input_feats = np.asarray(input_feats).reshape(1, -1) return -self.phone_feats.score(input_feats) #------------------------------------------------------------------------------ class State: def __init__(self, model, index): # model: node feature vector # is_final: whether the node is final in the word # word: etalon word (only present for the final node) # best_token: token with the least distance in the node # current_word: current etalon word # next_states: the list of next nodes # index: node index self.model = model self.is_final = False self.word = None self.best_token = None self.current_word = None self.next_states = [] self.index = index def load_graph(lexicon_dict, mean_phones_value): start_state = State(AcousticModel(None, None), 0) graph = [start_state, ] state_index = 1 for word in lexicon_dict.keys(): previous_state = start_state for phone in lexicon_dict[word]: state = State(AcousticModel(phone, mean_phones_value[phone]), state_index) # Etalon word will now be stored in each node: state.current_word = word state.next_states.append(state) # add loop previous_state.next_states.append(state) previous_state = state graph.append(state) state_index += 1 if state: state.word = word state.is_final = True return graph def print_graph(graph): if not os.path.exists("exp"): os.mkdir("exp") with open("exp/graph.txt", "w") as fn: np.set_printoptions(formatter={"float": "{: 0.1f}".format}) for state in graph: next_state_indexes = [s.index for s in state.next_states] fn.write( "State: index={} word={} is_final={} " \ "next_state_indexes={} phone={} \n".format( state.index, state.word, state.is_final, next_state_indexes, state.model.phone_name ) ) print("*** SEE exp/graph.txt ***") graph = load_graph(lexicon_dict, mean_phones_value) print_graph(graph) class Token: def __init__(self, state, dist=0.0, word=""): # state: graph state that the given token has at the moment # dist: total accumulated distance traveled by the token # word: the word that was recognized by the token # alive: whether the token is alive self.state = state self.dist = dist self.word = word self.alive = True def beam_pruning(next_tokens, threshold): best_token = next_tokens[np.argmin([token.dist for token in next_tokens])] for token in next_tokens: if token.dist > best_token.dist + threshold: token.alive = False return next_tokens def state_pruning(next_tokens): for token in next_tokens: if not token.state.best_token: token.state.best_token = token else: if token.dist <= token.state.best_token.dist: token.state.best_token.alive = False token.state.best_token = token else: token.alive = False # сбрасываем best_token на None для всеx узлов графа: for token in next_tokens: if token.state.best_token: token.state.best_token = None return next_tokens def recognize(filename, features, graph, recognition_results, 
beam_threshold): start_state = graph[0] active_tokens = [Token(start_state), ] next_tokens = [] # for ftr_frame in tqdm(features, desc="recognition..."): for ftr_frame in features: for token in active_tokens: if token.alive: for transition_state in token.state.next_states: new_token = Token(transition_state, token.dist, token.word) new_token.dist += transition_state.model.distance(ftr_frame) next_tokens.append(new_token) # State and beam prunings: next_tokens = state_pruning(next_tokens) next_tokens = beam_pruning(next_tokens, beam_threshold) active_tokens = next_tokens next_tokens = [] # Поиск финальных токенов: final_tokens = [] for token in active_tokens: if token.state.is_final and token.alive: final_tokens.append(token) # если нет финальных, то берем лучший из выживших: if len(final_tokens) != 0: win_token = final_tokens[ np.argmin([token.dist for token in final_tokens]) ] else: alive_tokens = [token for token in active_tokens if token.alive] win_token = alive_tokens[ np.argmin([token.dist for token in alive_tokens]) ] win_token.state.word = win_token.state.current_word # Вывод результата DTW: print(f"Result: {filename:<27} ==> {win_token.state.word}.") # Совпадает ли запись с полученным эталоном: record_word = filename.split("_")[0] etalon_word = win_token.state.word.split("_")[0] recognition_results.append(etalon_word.lower() == record_word) return recognition_results def run_recognizer(records_data_dict, graph, beam_threshold=10): start_time = time.time() recognition_results = [] for filename in records_data_dict.keys(): recognition_results = recognize(filename, records_data_dict[filename], graph, recognition_results, beam_threshold) print("-" * 60) wer = (1 - sum(recognition_results) / len(recognition_results)) * 100 total_time = time.time() - start_time print(f"WER is: {wer:.2f}%.") print(f"Total time is: {total_time:.2f} sec.") print("-" * 60) return wer, total_time def load_data_dict(dir_path): data_dict = {} for wav_name in os.listdir(dir_path): x, sr = librosa.load(os.path.join(dir_path, wav_name), sr=None) mfcc = librosa.feature.mfcc(x, sr=sr, n_mfcc=13, n_fft=int(sr * 0.025), hop_length=int(sr * 0.01)) data_dict[wav_name] = mfcc.T return data_dict ###Output _____no_output_____ ###Markdown Запустим наше распознавание на базе YES_NO ###Code lexicon_file = "data/lexicon_yes_no.txt" lexicon_dict = load_lexicon_dict(lexicon_file) graph = load_graph(lexicon_dict, mean_phones_value) records_data_dict = load_data_dict("data/yes_no/records") wer_yes_no_mean, time_yes_no_mean = run_recognizer(records_data_dict, graph, beam_threshold=5) ###Output Result: yes_39a6b995_nohash_0.wav ==> NO. Result: no_4abb2400_nohash_0.wav ==> NO. Result: no_ad89eb1e_nohash_0.wav ==> NO. Result: no_4394fc7c_nohash_0.wav ==> NO. Result: yes_39a12648_nohash_0.wav ==> YES. Result: yes_fc94edb0_nohash_0.wav ==> YES. Result: yes_ff63ab0b_nohash_0.wav ==> YES. Result: no_49af4432_nohash_0.wav ==> NO. Result: yes_fe1916ba_nohash_0.wav ==> NO. Result: no_41474817_nohash_0.wav ==> NO. ------------------------------------------------------------ WER is: 20.00%. Total time is: 0.08 sec. 
------------------------------------------------------------ ###Markdown Запустим наше распознавание на базе Digits ###Code lexicon_file = "data/lexicon_digits.txt" lexicon_dict = load_lexicon_dict(lexicon_file) graph = load_graph(lexicon_dict, mean_phones_value) records_data_dict = load_data_dict("data/digits/records") wer_digits_mean, time_digits_mean = run_recognizer(records_data_dict, graph, beam_threshold=100) ###Output Result: nine_fbe51750_nohash_0.wav ==> NINE. Result: seven_fe5c4a7a_nohash_0.wav ==> THREE. Result: four_ffbb695d_nohash_0.wav ==> FOUR. Result: two_fce65496_nohash_0.wav ==> SIX. Result: seven_fd32732a_nohash_0.wav ==> SIX. Result: one_fc2411fe_nohash_0.wav ==> ONE. Result: eight_daf230ac_nohash_0.wav ==> THREE. Result: three_feb1d305_nohash_0.wav ==> FIVE. Result: five_f852895b_nohash_0.wav ==> THREE. Result: zero_f92e49f3_nohash_3.wav ==> TWO. Result: eight_da584bc0_nohash_4.wav ==> SEVEN. Result: nine_ccea893d_nohash_0.wav ==> NINE. Result: five_ffd2ba2f_nohash_0.wav ==> FIVE. Result: two_fb2f3242_nohash_0.wav ==> TWO. Result: one_ffa76c4a_nohash_0.wav ==> ONE. Result: four_fce65496_nohash_0.wav ==> FOUR. Result: six_ffd2ba2f_nohash_0.wav ==> SIX. Result: three_fce96bac_nohash_0.wav ==> FIVE. Result: six_ffbb695d_nohash_0.wav ==> FIVE. Result: zero_f852895b_nohash_0.wav ==> THREE. ------------------------------------------------------------ WER is: 55.00%. Total time is: 0.39 sec. ------------------------------------------------------------ ###Markdown Задание №1 (5 баллов):Отрисовать кластеры признаков фонем "Y EH1 S N OW1" в двухмерном пространстве. Для понижения размерности можно воспользоваться алгоритмом t-SNE -- https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html. Все пять кластеров должны помещаться на один рисунок и иметь разные цвета. Также укажите на графике какой цвет какой фонеме соответствует. ###Code def load_data_dict(dir_path, count=None): data_dict = {} for step, wav_name in tqdm(enumerate(os.listdir(dir_path))): x, sr = librosa.load(os.path.join(dir_path, wav_name), sr=None) mfcc = librosa.feature.mfcc(x, sr=sr, n_mfcc=13, n_fft=int(sr * 0.025), hop_length=int(sr * 0.01)) if count and step == count: print(f"Early stop at {step}th file.") break data_dict[wav_name.split('.')[0]] = mfcc.T return data_dict phones_dict = init_phones_dict(phones_file) train_data_dict = load_data_dict(train_records_dir, count=5) for file_name in train_data_dict.keys(): file_feats = train_data_dict[file_name] file_ali = ali_dict[file_name] for frame in range(min(len(file_feats), len(file_ali))): phones_dict[file_ali[frame]].append(file_feats[frame]) phonemes = ["Y", "EH1", "S", "N", "OW1"] limit = 75 features = [] labels = [] for index, label in enumerate(phonemes): for feature in phones_dict[label][:limit]: features.append(feature) labels.append(index) tsne = TSNE(n_components=2, random_state=4) y_tsne = tsne.fit_transform(features) plt.figure(figsize=(14, 8)) scatter = plt.scatter(y_tsne[:, 0], y_tsne[:, 1], c=labels, cmap="hsv") plt.legend(handles=scatter.legend_elements()[0], labels=phonemes, fontsize="large", loc="best") plt.grid() plt.show() ###Output ###Markdown Задание №2 (5 баллов):Заменить модель среднего значения фонемы на GMM. Количестов смесей для каждой фонемы можно выставить в 3. Теперь вместо расстояния мы будем измерять вероятность принадлежности кадра признаков к GMM модели фонемы, находящейся в конкретном узле графа. 
Чтобы использовать это значение в парадигме нашего алгоритма, в роле дистанции нам нужно брать значение $-log(x)$, где $x$ — предсказанная вероятность. Тогда чем выше вероятность, тем такая импровизированная дистанция будет меньше. Ссылка на питоновскую реализацию GMM — https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html.Что стало с качеством распознавания на двух тестах? ###Code phones_dict = init_phones_dict(phones_file) train_data_dict = load_data_dict(train_records_dir, count=100) for file_name in train_data_dict.keys(): file_feats = train_data_dict[file_name] file_ali = ali_dict[file_name] for frame in range(min(len(file_feats), len(file_ali))): phones_dict[file_ali[frame]].append(file_feats[frame]) def load_graph(lexicon_dict, gmm_phones_value): start_state = State(GmmAcousticModel(None, None), 0) graph = [start_state,] state_index = 1 for word in lexicon_dict.keys(): previous_state = start_state for phone in lexicon_dict[word]: state = State(GmmAcousticModel(phone, phones_dict[phone]), state_index) # Etalon word will now be stored in each node: state.current_word = word state.next_states.append(state) # add loop previous_state.next_states.append(state) previous_state = state graph.append(state) state_index += 1 if state: state.word = word state.is_final = True return graph # YES_NO: lexicon_file = "data/lexicon_yes_no.txt" lexicon_dict = load_lexicon_dict(lexicon_file) graph = load_graph(lexicon_dict, phones_dict) records_data_dict = load_data_dict("data/yes_no/records") wer_yes_no_gmm, time_yes_no_gmm = run_recognizer(records_data_dict, graph, beam_threshold=100) # DIGITS: lexicon_file = "data/lexicon_digits.txt" lexicon_dict = load_lexicon_dict(lexicon_file) graph = load_graph(lexicon_dict, phones_dict) records_data_dict = load_data_dict("data/digits/records") wer_digits_gmm, time_digits_gmm = run_recognizer(records_data_dict, graph, beam_threshold=100) print(f"WER for YES_NO improved by {wer_yes_no_mean - wer_yes_no_gmm:.2f}%.") print(f"Time for YES_NO increased by " \ f"{time_yes_no_gmm / time_yes_no_mean:.1f} times.") print(f"WER for DIGITS improved by {wer_digits_mean - wer_digits_gmm:.2f}%.") print(f"Time for DIGITS increased by " \ f"{time_digits_gmm / time_digits_mean:.1f} times.") ###Output _____no_output_____
Set 2/exercise 1/adaptive.ipynb
###Markdown Στην πρωτη ασκηση μας ζητειται να επεξεργαστουμε το εγγραφο trikoupi_low.png με τον αλγοριθμο otsu. Ο αλγοριθμος θα δεχεται την εικονα και θα την κατωφλιωνει οχι σε ολοκληρο το ευρος της αλλα ανα window_size * window_size γειτονιες. Οι βιβλιοθηκες που θα χρειαστουμε: ###Code import sys import numpy as np from PIL import Image from math import ceil ###Output _____no_output_____ ###Markdown Τα δεδομενα εισοδου: ###Code #argv[1] is the input photo #argv[2] is the output photo #argv[3] is the window size input_image = Image.open(sys.argv[1]) out_image = sys.argv[2] window_size = int(sys.argv[3]) #image as numpy array image_array = np.asarray(input_image) image_array_cp = image_array.copy() images_cropped=[] images=[] width, height = input_image.size print('Dimensions of the image (height,width):', image_array.shape) ###Output _____no_output_____ ###Markdown Η συναρτηση που αποφασιζει αν εχουμε greyscale ή rbg εικονα: ###Code #Decide if greyscale or rgb array as input def is_greyscale_array(array): if(len(array.shape) == 2): return True else: return False ###Output _____no_output_____ ###Markdown Η συναρτηση που υπολογιζει την αντικειμενικη otsu: ###Code def ypologise_antikeimeniki_otsu(A, k): pixels_tmima1 = A[A < k] pixels_tmima2 = A[A >=k] mu1 = np.mean(pixels_tmima1) mu2 = np.mean(pixels_tmima2) mu_synoliko = np.mean(A.flatten()) pi1 = len(pixels_tmima1) / (len(pixels_tmima1) + len(pixels_tmima2)) pi2 = len(pixels_tmima2) / (len(pixels_tmima1) + len(pixels_tmima2)) antikeimeniki_synartisi = pi1 * (mu1 - mu_synoliko)**2 + pi2 * (mu2 - mu_synoliko)**2 return(antikeimeniki_synartisi) ###Output _____no_output_____ ###Markdown Αναλογα με το αποτελεσμα της *is_greyscale_array* αλλαζει η *otsu_thresholder* ###Code def otsu_thresholder(image): kalytero_katwfli = 0 kalyterh_timi = 0 for i in range(1, 256): obj_otsu = ypologise_antikeimeniki_otsu(cropped_array, i) if(obj_otsu > kalyterh_timi): kalytero_katwfli = i kalyterh_timi = obj_otsu if (is_greyscale_array(cropped_array)): res = katwfliwsh_eikonas(image, kalytero_katwfli) else: res = katwfliwsh_eikonas_rgb(image, kalytero_katwfli) return(res) ###Output _____no_output_____ ###Markdown Οι κατωφλιωσεις αναλογα με την μορφη της εικονας: ###Code #greyscale def katwfliwsh_eikonas(image, threshold): res = np.zeros_like(image) res[image < threshold] = 0 res[image >=threshold] = 255 return( np.uint8(res) ) #rgb def katwfliwsh_eikonas_rgb(image, threshold): for i in range(len(image)): for j in range(len(image[i])): rgb = image[i][j] red = rgb[0] green = rgb[1] blue=rgb[2] #The average color average=((int(red))+(int(green))+(int(blue)))/3 if average>threshold: image[i][j]=255 return (np.uint8(image)) else: image[i][j]=0 return (np.uint8(image)) ###Output _____no_output_____ ###Markdown Παρακατω κοβουμε την εικονα σε μικροτερες εικονες, εφαρμοζουμε την *otsu_thresholder* στις επιμερους εικονες και στη συνεχεια τις επανασυναρμολογουμε για να φτιαξουμε την αρχικη. Τελος εμφανιζουμε και σωζουμε την τελικη εικονα. 
###Code #Crop image into smaller ones, apply otsus thresholding #And rebuild the original image from the crops for j in range(0,j_increment): for i in range(0,i_increment): wi = window_size*i wj = window_size*j wi_plus = wi + window_size wj_plus = wj + window_size #bigger than the whole image if (wi_plus > width): wi_plus = width if (wj_plus > height): wj_plus = height #the area to crop area = (wi, wj, wi_plus, wj_plus) cropped_img = input_image.crop(area) cropped_array = np.array(cropped_img) A_otsu = otsu_thresholder(cropped_array) images_cropped.append(A_otsu) if (is_greyscale_array(cropped_array)): # greyscale print("It's greyscale") for x in images_cropped: images.append(Image.fromarray(x,'L')) new_im = Image.new('L', (width,height)) else: #rgb print("It's RGB") for x in images_cropped: images.append(Image.fromarray(x,'RGBA')) new_im = Image.new('L', (width,height)) #Rebuild the final image i=0 for x in range(0,j_increment): for y in range(0,i_increment): new_im.paste(images[i], (y*window_size, x*window_size)) i+=1 #Show and save new_im.show() new_im.save(out_image) ###Output _____no_output_____
camilo_torres_botero/Ejercicios 1.4 Power Law & Scale Free Networks.ipynb
###Markdown Power Law & Scale Free Network Exercises Power Law DistributionPlot, on a log-log scale, the degree distribution of a scale-free network (a real or self-created network).Compute the coefficient of the plotted power law. ###Code
import networkx as nx
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy.stats import powerlaw
%matplotlib inline
sns.set()
edges = []
for line in open('CA-HepTh.txt'):
    if line[0] != '#':
        edge = line.replace('\n','').split('\t')
        edges.append((edge[0],edge[1]))
G=nx.Graph()
G.add_edges_from(edges)
degrees = list(G.degree().values())
sns.distplot(degrees)
logs = np.log(degrees)
plt.hist(logs, log=True)
e, l, s = powerlaw.fit(degrees)
e, l, s
sns.distplot(powerlaw.rvs(e, l, s, size=100000))
###Output
C:\Users\Camil\Anaconda3\lib\site-packages\statsmodels\nonparametric\kdetools.py:20: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
  y = X[:m/2+1] + np.r_[0,X[m/2+1:],0]*1j
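###Markdown One caveat worth noting when computing the exponent: `scipy.stats.powerlaw` models a density of the form $a x^{a-1}$ on $[0, 1]$, which is not the heavy-tailed $P(k) \propto k^{-\gamma}$ usually meant for degree distributions. A rough, binning-dependent alternative is a straight-line fit in log-log space, sketched below on the same `degrees` list (the dedicated `powerlaw` package would give a more careful maximum-likelihood estimate). ###Code
# Rough sketch: estimate gamma in P(k) ~ k**(-gamma) via a log-log linear fit.
# The binning below is crude and the estimate depends on it.
import numpy as np

degrees_arr = np.asarray(degrees)
counts = np.bincount(degrees_arr)
k_vals = np.nonzero(counts)[0]
k_vals = k_vals[k_vals > 0]              # drop degree 0 so log() is defined
pk = counts[k_vals] / counts[k_vals].sum()

slope, intercept = np.polyfit(np.log(k_vals), np.log(pk), deg=1)
print(f"estimated exponent gamma ~ {-slope:.2f}")
plt.loglog(k_vals, pk, 'o')
plt.xlabel('degree k')
plt.ylabel('P(k)')
###Output
 _____no_output_____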
Python Basic/Python Basic Assignment - 3.ipynb
###Markdown Python Basic Assignment Assignment - 3 --------------- 1. Why are functions advantageous to have in your programs? Ans:Advantages:1. They increase modularity2. They increase code reusability3. They reduce the amount of duplicated code ----------------- 2. When does the code in a function run: when it's specified or when it's called? Ans:The code in a function runs (executes) when the function is called. When the function is called, control flow jumps to the first line of code inside the function. ----------------- 3. What statement creates a function? Ans:The def keyword followed by the function name and parentheses creates a function:def function_name(): ###Code
def function3():
    pass
###Output
 _____no_output_____
###Markdown ----------------- 4. What is the difference between a function and a function call? Ans:- Function: a named block of code that produces some result; it does nothing until it is invoked (called).- Function call: the expression that invokes the function and executes the code inside it. ###Code
def function4(): # Function
    return "Inside the function 4"

function4() # Function call
###Output
 _____no_output_____
###Markdown ----------------- 5. How many global scopes are there in a Python program? How many local scopes? Ans:There is 1 global scope in a Python program; a new local scope is created every time a function is called. ----------------- 6. What happens to variables in a local scope when the function call returns? Ans:The local variables are destroyed when the function call returns. ----------------- 7. What is the concept of a return value? Is it possible to have a return value in an expression? Ans:A return value is the value that a function call evaluates to.Yes, like any other value, a return value can be used as part of an expression. ###Code
def function7(a,b):
    return a+b+10 ## Returning the expression

function7(10, 20)
###Output
 _____no_output_____
###Markdown ----------------- 8. If a function does not have a return statement, what is the return value of a call to that function? Ans:If a function does not have a return statement, it does not return any explicit value.In this case, after the function has executed completely, the call returns None (of type NoneType). ###Code
def function8():
    a = 10
    b = 80

type(function8())
###Output
 _____no_output_____
###Markdown ----------------- 9. How do you make a function variable refer to the global variable? Ans:This can be achieved by declaring the variable with the global keyword inside the function, so that assigning to it changes the global variable. ###Code
var1 = 0
def function9():
    global var1
    var1 = 10

print(var1)
function9()
print(var1)
###Output
0
10
###Markdown ----------------- 10. What is the data type of None? Ans:None is the null object; its data type is the class NoneType. ----------------- 11. What does the sentence import areallyourpetsnamederic do? Ans:The statement "import areallyourpetsnamederic" will import the module or package named areallyourpetsnamederic. ----------------- 12. If you had a bacon() feature in a spam module, what would you call it after importing spam? Ans:After importing spam, you would call it as spam.bacon() ###Code
# import spam
# spam.bacon()
###Output
 _____no_output_____
###Markdown ----------------- 13. What can you do to save a programme from crashing if it encounters an error? Ans:To keep the program from crashing on a runtime error, we can wrap the code in a try and except statement.This only protects against runtime errors, not against compile-time (syntax) errors. ###Code
try:
    pass
except:
    pass
###Output
 _____no_output_____
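###Markdown As a slightly fuller illustration of the answer to question 13 (not part of the original assignment), the cell below shows a runtime error being caught so the program keeps running: ###Code
# Illustration for question 13: catching a runtime error instead of crashing.
def safe_divide(a, b):
    try:
        return a / b
    except ZeroDivisionError:
        print("Cannot divide by zero - returning None instead of crashing.")
        return None

print(safe_divide(10, 2))   # 5.0
print(safe_divide(10, 0))   # prints the message, then None
###Output
 _____no_output_____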
examples/1. First order irreversible kinetics simulation.ipynb
###Markdown Irreversible first order reaction simulationIrreversible first order reactions are perhaps the most common type of kinteic situations. The general form of these reactions is the following:$$A \rightarrow B$$The rate of change in the concentrations of each species in this system ([A] and [B]) is defined by a series of ordinary differential equations (ODEs). $$ \frac{d[A]}{dt} = -k[A] $$$$ \frac{d[B]}{dt} = k[A] $$Where $k$ is the forward rate constant for the given reaction. For these simple systems, analytical solutions to these equations are easily derived and can be found in any introductory chemistry text or [on the web]. For example, the integrated rate law for a first order reaction is as follows:$$ [A] = [A]_{0}e^{-kt} $$Where $[A]_0$ is the initial concentration of compound *A*. In the example below, we are going to fit some simulated first order kinetic data using traditional fitting techniques with the analytical solution above and with the module *odenlls*, which fits the kinetic data using numerical simulations of the reaction ODEs.To start, we'll need to import some extra modules.[on the web]: https://chem.libretexts.org/Core/Physical_and_Theoretical_Chemistry/Kinetics/Reaction_Rates/First-Order_Reactions ###Code import io import numpy as np import matplotlib.pyplot as plt np.random.seed(1) ###Output _____no_output_____ ###Markdown The *odenlls* module only defines one object class **ODEnlls**, which we'll use here. ###Code from odenlls import ODEnlls ###Output _____no_output_____ ###Markdown Traditional Analytical SolutionsLet's define a function that caculates our first order analytical solution. This saves us a lot of typing in the future, and we can use it for fitting later. ###Code def first_order(times, c_A0, k1): return c_A0*np.exp(-k1*times) ###Output _____no_output_____ ###Markdown Let's simulate this function to get a sense of how the data should look. ###Code times = np.linspace(0, 100, 1000) A0 = 0.1 # Initial concentration k = 0.05 # rate constant A = first_order(times, A0, k) # Concentrations of A at any time plt.plot(times, A) ###Output _____no_output_____ ###Markdown In order to simulate the behavior of the product, we must define a second analytical equation. ###Code def first_order_back(times, c_A0, k1): return c_A0*(1 - np.exp(-k1*times)) B = first_order_back(times, A0, k) plt.plot(times, B) ###Output _____no_output_____ ###Markdown odenlls SimulationsIn order to use an **ODEnlls** object for simulations, we must create a plain text file that contains lines for all of the reactions we expect for the system. There are some rules for how these files should be formatted. 1. Each reaction should be on its own line.2. Compounds can have any name that you'd like; however, you may want to avoid names that are subsets of each other. For example, avoid `butane` and `isobutane`. In addition, do not use any of the following special characters, detailed below, in compound names. You will be using these names in some of the later code, so you might want to keep the names simple.3. Compound names should be separated by `+` if they are on the same side of the reaction arrow. 4. If two or more of the same compound are reacting/forming, write them as follows `n*compound_name`, where `n` is the number of identical compuonds, and `compound_name` is simply the name you want to use for the compound.5. The starting materials and products should be separated by either `->` for irreversible reactions or `=` for equilibrium reactions.6. Blank lines are ignored. 
Comment lines can be added as long as they begin with a `` character.In order to simulate a reaction file, I'll create a `io.StringIO` object that behaves like a typical Python file object that you would get with the `open` function. Here we'll just create a single irreversible reaction of compound *A* being transformed into compound *B*. ###Code rxn = 'A -> B' f = io.StringIO('w+') f.write(rxn) f.seek(0) ###Output _____no_output_____ ###Markdown Here we are creating an **ODEnlls** object instance, and using the `read_rxns` method to read our reaction "file". Alternatively, the name of the reaction file could be passed as a string to this function, which will internally generate the appropriate file object. ###Code x = ODEnlls() x.read_rxns(f) ###Output _____no_output_____ ###Markdown After exectuting this function, several new attributes are created inside of our **ODEnlls** object. For example, the `rxns` attribute is simply a list of the reactions that were read in from the reaction file. ###Code x.rxns ###Output _____no_output_____ ###Markdown There is a also another list attribute called `odes` that shows all of the ordinary differential equations for each chemcial species. To see how these ODEs line up with the different chemical species, we will print the ODEs with the associated compounds, which are in a semi-private compound list attirbuted named `_cpds`. The ODEs have a lot of parentheses and multipliers, which are a result of the automated reaction processing. ###Code for cpd, ode in zip(x._cpds, x.odes): print('d['+cpd+']/dt =', ode) ###Output d[A]/dt = 1.00*(-1*k1*(A)) d[B]/dt = 1.00*(k1*(A)) ###Markdown Most importantly, a table of simulation parameters is generated as a `Pandas.DataFrame` called `params`. There is a column for *guess* parameters, which will be optimized when we do fitting, and a column *fix* for parameters that will not change during fitting. ###Code x.params ###Output _____no_output_____ ###Markdown Initially, all of the parameters are set as empty `numpy.nan` values. Every parameter will need to have either a *guess* or *fix* parameter associated with it. The **ODEnlls** object has a `set_param` method, which can assist in this process. (Alternatively, if you are familiar with Pandas DataFrames, you can set the values directly. They must be added as floating point values, though.)The `set_param` method can be used in a couple of different ways. The first way is to provide a string name of the first compound followed by the associated value that you'd like to use. The default is to set this as a *guess* parameter. We'll see how to fix parameters later. ###Code x.set_param('A', A0) x.params ###Output _____no_output_____ ###Markdown Alternatively, you can pass in a dictionary of paramters. They dictionary keys need to be compound names, and the values should be the associated parameter values. ###Code param_dict = {'B': 0.0, 'k1': k} x.set_param(param_dict) x.params ###Output _____no_output_____ ###Markdown The **ODEnlls** object also has a `plot` method for visualizing simulations and data. The first argument to this method is a string that determines the plot type. In this case, we only want to visualize the ODE simulation, so we pass the `'sim'` argument. We can also set the times over which we want to plot the simulation using the `times` keyword argument. The `colorlines` keyword argument causes the simulation lines to be colored (otherwise they will both be black).*Note*: The `plot` method uses Matplotlib internally to generate the plots. 
There are only `pyplot.plot` function calls, so if this is being used in an non-interactive environment, you'll need to use an explicit `pyplot.show` command, at the least. ###Code x.plot('sim', times=times, colorlines=True) ###Output _____no_output_____ ###Markdown As you can see, using the ODE simulation generates concentration data for both compounds *A* and *B* simultaneously, which is a benefit to this method. The ODE simulations are identical to the analytical solutions, as we would expect. ###Code x.plot('sim', times=times, colorlines=True) plt.plot(times, A, 'k--') plt.plot(times, B, 'k--') ###Output _____no_output_____
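###Markdown As a small editorial addition (not part of the original odenlls walkthrough), the analytical `first_order` function defined earlier can also be used for a conventional nonlinear least-squares fit with `scipy.optimize.curve_fit`. The sketch below adds Gaussian noise to the simulated concentrations of compound *A* and refits them; the noise level and the starting guesses in `p0` are arbitrary values chosen only for illustration, and the fitted parameters should land close to the true $[A]_0 = 0.1$ and $k = 0.05$. ###Code
from scipy.optimize import curve_fit

# Simulated "experimental" data: add Gaussian noise to the concentrations of A
# (the noise level is an assumed value, purely for demonstration)
noisy_A = A + np.random.normal(scale=0.002, size=A.size)

# Fit the analytical first-order model; p0 holds initial guesses for [A]0 and k1
popt, pcov = curve_fit(first_order, times, noisy_A, p0=[0.08, 0.03])

# Compare the noisy data with the refit curve
plt.plot(times, noisy_A, '.', markersize=3, label='noisy data')
plt.plot(times, first_order(times, *popt), 'r-',
         label='curve_fit: A0={:.3f}, k={:.3f}'.format(popt[0], popt[1]))
plt.legend()
###Output _____no_output_____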
aula_06_04.ipynb
###Markdown Aula 06/04 - Iris Dataset ###Code #Bibliotecas utilizadas para classificação e exportação da árvore gerada from sklearn.tree import DecisionTreeClassifier, export_graphviz #Biblioteca para facilitar a separação dos dataset em dados de traino e teste from sklearn.model_selection import train_test_split from sklearn.metrics import plot_confusion_matrix import matplotlib.pyplot as plt #Pandas é uma biblioteca que facilita a ingestão e manipulação de dados import pandas as pd import seaborn as sns sns.set(color_codes=True) #enabling to plot using plotlib in the notebook %matplotlib inline %pylab inline df = pd.read_csv( "/content/drive/My Drive/Colab Notebooks/ecm514_datasets/iris_data.txt") df df[' rotulo'].unique() df[' rotulo'] = df[' rotulo'].map( {'Iris-setosa' : 0, 'Iris-versicolor' : 1, 'Iris-virginica' : 2} ) df nomes_features = list(df.columns[:4]) nomes_features x = df[ nomes_features ] x.head() y = df[ df.columns[4] ] y.head() x_treino_dummy = x[:105] x_teste_dummy = x[105:] y_treino_dummy = y[:105] y_teste_dummy = y[105:] #classificador classificador_dummy = DecisionTreeClassifier() #treinando o modelo classificador_dummy.fit( x_treino_dummy, y_treino_dummy ) classificador_dummy.score(x_treino_dummy, y_treino_dummy) classificador_dummy.score(x_teste_dummy, y_teste_dummy) x_treino, x_teste, y_treino, y_teste = train_test_split(x, y, test_size = 0.3, random_state = 2556) x_treino.head() #classificador classificador = DecisionTreeClassifier() #treinando o modelo classificador.fit( x_treino, y_treino ) classificador.score( x_treino, y_treino) classificador.score(x_teste, y_teste) plot_confusion_matrix(estimator=classificador, X=x_teste, y_true=y_teste, display_labels=df[' rotulo'].unique(), cmap=plt.cm.Blues, normalize='true') plot_confusion_matrix(estimator=classificador_dummy, X=x_teste_dummy, y_true=y_teste_dummy, display_labels=df[' rotulo'].unique(), cmap=plt.cm.Blues, normalize=None) import graphviz dot_graph = export_graphviz(classificador, out_file='/content/drive/My Drive/Colab Notebooks/iris.dot') dot_graph = export_graphviz(classificador_dummy, out_file='/content/drive/My Drive/Colab Notebooks/irisdummy.dot') ###Output _____no_output_____
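###Markdown One optional follow-up (an added sketch, not part of the original class notebook): on recent scikit-learn versions (0.21+) the fitted tree can be drawn directly in the notebook with `sklearn.tree.plot_tree`, which avoids exporting and converting the `.dot` files; alternatively, calling `export_graphviz(..., out_file=None)` returns the DOT source as a string that `graphviz.Source` can render. The feature names and class labels below reuse the variables and the 0/1/2 mapping defined above. ###Code
from sklearn.tree import plot_tree

# Draw the fitted decision tree with the original feature names and
# the class labels in the same order as the 0/1/2 mapping used earlier
plt.figure(figsize=(16, 10))
plot_tree(classificador,
          feature_names=nomes_features,
          class_names=['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'],
          filled=True)
plt.show()
###Output _____no_output_____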
notebooks/final_model_characteristics.ipynb
###Markdown Final Model SpecificationsHere, we do not fine tune the model anymore, but explore ways to validate how to choose our decision making thresholds via profit curves. By decision making threshold I mean "how confident do I need to be to assign a given label to a prediction." ###Code import json import numpy as np import scipy.stats as scs import matplotlib.pyplot as plt import pandas as pd %matplotlib inline from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = 'all' from sklearn.pipeline import make_pipeline from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, ExtraTreesClassifier from sklearn.linear_model import LogisticRegression, SGDClassifier from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.preprocessing import LabelBinarizer, StandardScaler from sklearn.base import TransformerMixin, BaseEstimator from sklearn.metrics import (precision_recall_fscore_support, roc_auc_score, precision_recall_curve, average_precision_score, roc_curve) from sklearn.externals import joblib from sklearn.base import clone from sklearn.neighbors import KNeighborsClassifier np.random.seed(369) from scipy.interpolate import BSpline, splrep, make_interp_spline from frag_tools import (DropColumns, AddressLatLong, CurrentMakeDummies, Binarize, ChangeTypes, custom_zip_cleaning) from various_tools import (roc, plot_roc, cross_val_metrics, make_metric_df) df = pd.read_csv('/mnt/c/Users/kurtrm/Downloads/Predictive_Maintenance_Transformer_Overload_PA.csv', sep=';') pipe = make_pipeline(CurrentMakeDummies(['Manufacturer', 'Repairs', 'Overloads', 'AssetType']), ChangeTypes(['Age', 'AssetZip'], [lambda x: float(x.replace(',', '.')), custom_zip_cleaning]), Binarize(['VegMgmt', 'PMLate', 'WaterExposure', 'MultipleConnects', 'Storm']), AddressLatLong(), DropColumns(['AssetCity', 'AssetId', 'AvgRepairCost', 'AssetState', 'MilesFromOcean', 'AssetLocation', 'locationID', 'Latitude1', 'Longitude1', 'Latitude', 'Longitude', 'Overloads_Below 100%', 'Overloads_100-120%', 'Overloads_120-150%', 'Overloads_Above 150%', 'AssetZip'])) transformed = pipe.fit_transform(df) transformed.columns X_train, X_test, y_train, y_test = train_test_split(transformed.drop('Status', axis=1).values, transformed['Status'].values, test_size=.2) _, gbc_final = joblib.load('refined_models.pkl') gbc_final.fit(X_train, y_train) probs = gbc_final.predict_proba(X_test) fpr, tpr, thresholds = roc_curve(y_test, probs[:, 1]) fig, ax = plt.subplots(figsize=(12, 8)) ax.plot(fpr, tpr) roc_auc_score(y_test, probs[:, 1]) line = np.linspace(0, 1, len(thresholds)) listy = [] for fp, tp, thresh, lin in zip(fpr, tpr, thresholds, line): listy.append({'fpr': fp, 'tpr': tp, 'thresh': thresh, 'lin': lin}) # with open('../src/static/data/roc_data.json', 'w') as f: # json.dump(listy, f) # joblib.dump(gbc_final, '../src/static/models/final_grad_boost.pkl') def threshold_prediction(model, X, threshold=0.5): """ Return predictions based on threshold. """ return np.where(model.predict_proba(X)[:, 1] > threshold, model.classes_[1], model.classes_[0]) def confusion_matrix(model, X_test, y_test, threshold=0.5): cf = pd.crosstab(y_test, threshold_prediction(model, X_test, threshold)) cf.index.name = 'actual' cf.columns.name = 'predicted' return cf.values cf = confusion_matrix(gbc_final, X_test, y_test, .5) costs = np.array([[-5, -20], [5, 10]]) ###Output _____no_output_____ ###Markdown Sklearn's `predict` methods use .5 as the default threshold. 
This isn't ideal, as our choices may depend heavily on the consequences. Choosing a lower or higher threshold depends heavily on what's at stake. Though the moral consequences may way heavily in this, the most discrete way to do this is through cost calculation, i.e. how much does a good prediction cost vs. a bad prediction vs. etc... ###Code def calculate_payout(cb_matrix, model, X, y, threshold): return (confusion_matrix(model, X, y, threshold) * cb_matrix).sum() ###Output _____no_output_____ ###Markdown I also explored some ways to smooth out curves using splines. This was put on the back burner in favor of implementing a fully working dashboard. ###Code gen_thresholds = np.linspace(0, 1, 50) fig, ax = plt.subplots(figsize=(10, 8)) profits = [] for thr in gen_thresholds: profits.append(calculate_payout(costs, gbc_final, X_test, y_test, thr)) ax.plot(gen_thresholds, np.array(profits)) ax.set_xlabel('thresholds') ax.set_ylabel('losses') ax.set_title('Profit Curve') fig.tight_layout() splrep(gen_thresholds, np.array(profits)) splines = BSpline(*splrep(gen_thresholds, np.array(profits))) plt.plot(np.linspace(0, 1, 10), splines(np.linspace(0, 1, 10), extrapolate=False)) # roc_splines = BSpline(*splrep(fpr_new, tpr_new)) # plt.plot(np.linspace(0, 1, 10), roc_splines(np.linspace(0, 1,10), extrapolate=False)) def sum_payout(cost_matrix, confusion_matrix): """ Calculate the profit from cost and confusion matrices. """ return (confusion_matrix * cost_matrix).sum() def generate_profit_curve(cost_matrix, model, X_test, y_test, n_thresholds=100): """ Generate the profit curve with a given cost matrix. """ thresholds = np.linspace(0, 1, n_thresholds) totals = [] for threshold in thresholds: iter_conf_matrix = confusion_matrix(model, X_test, y_test, threshold) totals.append(sum_payout(cost_matrix, iter_conf_matrix)) return thresholds, np.array(totals) def diff_generate_profit_curve(cost_matrix, model, X_test, y_test, n_thresholds=100): """ Generate the profit curve with a given cost matrix. """ thresholds = np.linspace(0, 1, n_thresholds) totals = np.empty(n_thresholds) for i, threshold in enumerate(thresholds): iter_conf_matrix = confusion_matrix(model, X_test, y_test, threshold) totals[i] = sum_payout(cost_matrix, iter_conf_matrix) return thresholds, totals fig, ax = plt.subplots(figsize=(12, 8)) precision, recall, threshold = precision_recall_curve(y_test, gbc_final.predict_proba(X_test)[:, 1]) avg_prec = average_precision_score(y_test, gbc_final.predict_proba(X_test)[:, 1]) ax.plot(recall, precision, label=f'{gbc_final.__class__.__name__}: {avg_prec:.3f}') ax.set_ylim(0, 1.1) ax.set_ylabel('Precision') ax.set_xlabel('Recall') ax.set_title('Precision Recall Curve') ax.legend(); ###Output _____no_output_____
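###Markdown A natural next step, sketched here as an addition to the saved notebook, is to read the operating point directly off the profit curve: using the `generate_profit_curve` helper and the `costs` matrix defined above, the threshold that maximizes the total payout is simply the argmax over the evaluated grid. ###Code
# Pick the payout-maximizing threshold using the helpers defined above
threshold_grid, payouts = generate_profit_curve(costs, gbc_final, X_test, y_test, n_thresholds=100)
best_threshold = threshold_grid[np.argmax(payouts)]

fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(threshold_grid, payouts)
ax.axvline(best_threshold, color='red', linestyle='--',
           label='best threshold = {:.2f}'.format(best_threshold))
ax.set_xlabel('threshold')
ax.set_ylabel('total payout')
ax.set_title('Payout-Maximizing Decision Threshold')
ax.legend()
###Output _____no_output_____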
Code/.ipynb_checkpoints/XGB_FraudDetection-checkpoint.ipynb
###Markdown Gradient Boosted Trees applied to Fraud detection ###Code from pyspark.ml import Pipeline from pyspark.ml.classification import RandomForestClassifier from pyspark.ml.evaluation import MulticlassClassificationEvaluator from pyspark.sql.functions import col, countDistinct from pyspark.sql import SparkSession from pyspark.sql.functions import col, explode, array, lit # Import VectorAssembler and Vectors from pyspark.ml.linalg import Vectors from pyspark.ml.feature import VectorAssembler from pyspark.ml.classification import GBTClassifier spark = SparkSession.builder.appName('FraudTreeMethods').getOrCreate() ###Output _____no_output_____ ###Markdown Read Data ###Code # Load and parse the data file, converting it to a DataFrame. #data = sqlContext.sql("SELECT * FROM fraud_train_sample_csv") data = spark.read.csv('train_sample.csv', inferSchema=True, header=True) data.show(5) ###Output +------+---+------+---+-------+-------------------+---------------+-------------+ | ip|app|device| os|channel| click_time|attributed_time|is_attributed| +------+---+------+---+-------+-------------------+---------------+-------------+ | 87540| 12| 1| 13| 497|2017-11-07 09:30:38| null| 0| |105560| 25| 1| 17| 259|2017-11-07 13:40:27| null| 0| |101424| 12| 1| 19| 212|2017-11-07 18:05:24| null| 0| | 94584| 13| 1| 13| 477|2017-11-07 04:58:08| null| 0| | 68413| 12| 1| 1| 178|2017-11-09 09:00:09| null| 0| +------+---+------+---+-------+-------------------+---------------+-------------+ only showing top 5 rows ###Markdown Convert the click time to day and hour and add it to data. ###Code import datetime from pyspark.sql.functions import year, month, dayofmonth from pyspark.sql.functions import hour, minute, dayofmonth data = data.withColumn('hour',hour(data.click_time)).\ withColumn('day',dayofmonth(data.click_time)) data.show(5) ###Output +------+---+------+---+-------+-------------------+---------------+-------------+----+---+ | ip|app|device| os|channel| click_time|attributed_time|is_attributed|hour|day| +------+---+------+---+-------+-------------------+---------------+-------------+----+---+ | 87540| 12| 1| 13| 497|2017-11-07 09:30:38| null| 0| 9| 7| |105560| 25| 1| 17| 259|2017-11-07 13:40:27| null| 0| 13| 7| |101424| 12| 1| 19| 212|2017-11-07 18:05:24| null| 0| 18| 7| | 94584| 13| 1| 13| 477|2017-11-07 04:58:08| null| 0| 4| 7| | 68413| 12| 1| 1| 178|2017-11-09 09:00:09| null| 0| 9| 9| +------+---+------+---+-------+-------------------+---------------+-------------+----+---+ only showing top 5 rows ###Markdown FeatheringFeathering, grouping-merging as follow. 
###Code gp = data.select("ip","day","hour", "channel")\ .groupBy("ip","day","hour")\ .agg({"channel":"count"})\ .withColumnRenamed("count(channel)", "*ip_day_hour_count_channel")\ .sort(col("ip")) gp.show(5)in # df3 = data.join(gp, 'ip', "outer") # a # df3.show() # over sampling major_df = data.filter(col("is_attributed") == 0) minor_df = data.filter(col("is_attributed") == 1) ratio = int(major_df.count()/minor_df.count()) print("ratio: {}".format(ratio)) a = range(ratio) # duplicate the minority rows oversampled_df = minor_df.withColumn("dummy", explode(array([lit(x) for x in a]))).drop('dummy') # combine both oversampled minority rows and previous majority rows combined_df = major_df.unionAll(oversampled_df) combined_df = major_df.unionAll(oversampled_df) combined_df.show() data = combined_df data = data.drop('click_time','attributed_time') # Split the data into training and test sets (30% held out for testing) (trainingData, testData) = data.randomSplit([0.7, 0.3]) assembler = VectorAssembler(inputCols=['ip', 'app', 'device', 'os', 'channel'],outputCol="features") trainingData = assembler.transform(trainingData) testData = assembler.transform(testData) ###Output ratio: 439 +------+---+------+---+-------+-------------------+---------------+-------------+ | ip|app|device| os|channel| click_time|attributed_time|is_attributed| +------+---+------+---+-------+-------------------+---------------+-------------+ | 87540| 12| 1| 13| 497|2017-11-07 09:30:38| null| 0| |105560| 25| 1| 17| 259|2017-11-07 13:40:27| null| 0| |101424| 12| 1| 19| 212|2017-11-07 18:05:24| null| 0| | 94584| 13| 1| 13| 477|2017-11-07 04:58:08| null| 0| | 68413| 12| 1| 1| 178|2017-11-09 09:00:09| null| 0| | 93663| 3| 1| 17| 115|2017-11-09 01:22:13| null| 0| | 17059| 1| 1| 17| 135|2017-11-09 01:17:58| null| 0| |121505| 9| 1| 25| 442|2017-11-07 10:01:53| null| 0| |192967| 2| 2| 22| 364|2017-11-08 09:35:17| null| 0| |143636| 3| 1| 19| 135|2017-11-08 12:35:26| null| 0| | 73839| 3| 1| 22| 489|2017-11-08 08:14:37| null| 0| | 34812| 3| 1| 13| 489|2017-11-07 05:03:14| null| 0| |114809| 3| 1| 22| 205|2017-11-09 10:24:23| null| 0| |114220| 6| 1| 20| 125|2017-11-08 14:46:16| null| 0| | 36150| 2| 1| 13| 205|2017-11-07 00:54:09| null| 0| | 72116| 25| 2| 19| 259|2017-11-08 23:17:45| null| 0| | 5314| 2| 1| 2| 477|2017-11-09 07:33:41| null| 0| |106598| 3| 1| 20| 280|2017-11-09 03:44:35| null| 0| | 72065| 20| 2| 90| 259|2017-11-06 23:14:08| null| 0| | 37301| 14| 1| 13| 349|2017-11-06 20:07:00| null| 0| +------+---+------+---+-------+-------------------+---------------+-------------+ only showing top 20 rows ###Markdown Train the model ###Code # Train a GBT model. gbt = GBTClassifier(labelCol="is_attributed", featuresCol="features", maxIter=20, maxDepth=4) # Train model. This also runs the indexers. model = gbt.fit(trainingData) # Make predictions. predictions = model.transform(testData) # Select example rows to display. 
predictions.select("prediction", "is_attributed", "features").show(5) # Select (prediction, true label) and compute test error evaluator = MulticlassClassificationEvaluator(labelCol="is_attributed", predictionCol="prediction", metricName="accuracy") accuracy = evaluator.evaluate(predictions) print("Test Error = %g" % (1.0 - accuracy)) print("Test accuracy = %g" % (accuracy)) predictions.groupBy('prediction').count().show() ###Output +----------+-----+ |prediction|count| +----------+-----+ | 0.0|31620| | 1.0|28359| +----------+-----+ ###Markdown Apply to test, predict ###Code test = spark.read.csv('test.csv', inferSchema=True, header=True) #test.show(5) assembler = VectorAssembler(inputCols=['ip', 'app', 'device', 'os', 'channel'],outputCol="features") test = assembler.transform(test) #test.show(3) predictions = model.transform(test) #predictions.show(2) data_to_submit = predictions.select(['click_id','prediction']) data_to_submit.show(3) data_to_submit = data_to_submit.withColumnRenamed('prediction','is_attributed') data_to_submit.show(3) data_to_submit.groupBy('is_attributed').count().show() print('it is runing now') ###Output it is runing now
03-glue-job-examples/04-glue-job-best-practices/data-validation/validate.ipynb
###Markdown Data Validation ###Code import sys import pyspark.sql.functions as sql_funcs from pyspark.context import SparkContext from awsglue.job import Job from awsglue.utils import getResolvedOptions from awsglue.context import GlueContext from awsglue.dynamicframe import DynamicFrame from awsglue.transforms import * # Create SparkContext sparkContext = SparkContext.getOrCreate() # Create Glue Context glueContext = GlueContext(sparkContext) # Get spark session spark = glueContext.spark_session # Resolve job parameters # Uncomment this in Glue ETL job # args = getResolvedOptions(sys.argv, ["JOB_NAME" # job = Job(glueContext) # job.init(args['JOB_NAME'], args) %pip install -q -r requirements.txt from s3pathlib import S3Path from marshmallow import fields, Schema, validates, ValidationError class Config: bucket = "aws-data-lab-sanhe-for-everything-us-east-2" prefix = "poc/learn-big-data-on-aws/glue-job-examples/04-glue-job-best-practice/data-validation" @property def s3path_prefix(self) -> S3Path: return S3Path(self.bucket, self.prefix) config = Config() ###Output _____no_output_____ ###Markdown Define Data Validation Schema ###Code class RowSchema(Schema): id = fields.Int() name = fields.Str(required=True) balance = fields.Int() @validates("balance") def validate_balance(self, value): if value < 0: raise ValidationError("Balance must be greater than 0.") schema = RowSchema() def filter_failed(row: dict) -> bool: """ Return True if the data failed the validation. """ res = schema.validate(row) if res: return True else: return False def filter_passed(row: dict) -> bool: """ Return True if the data passed the validation. """ return not filter_failed(row) schema.validate({"id": 1, "name": "user1", "balance": 100}) # unknown field schema.validate({"id": 15, "name": "user15", "balance": 1500, "password": "123456"}) # field missing schema.validate({"id": 20, "balance": 2000}) # type error schema.validate({"id": 23, "name": "user23", "balance": "2,300"}) # value range error schema.validate({"id": 26, "name": "user26", "balance": -2600}) # not null value schema.validate({"id": 30, "name": None, "balance": 3000}) ###Output _____no_output_____ ###Markdown Define Dynamodb Validation Tracker ###Code import pynamodb from pynamodb.models import Model from pynamodb.connection import Connection from pynamodb.attributes import UnicodeAttribute, NumberAttribute, JSONAttribute connection = Connection(region="us-east-2") class ValidationTracker(Model): class Meta: """ declare metadata about the table """ table_name = "learn_big_data_on_aws_glue_validation" region = "us-east-2" # billing mode # doc: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html # pay as you go mode billing_mode = pynamodb.models.PAY_PER_REQUEST_BILLING_MODE # provisioned mode # write_capacity_units = 10 # read_capacity_units = 10 # define attributes s3uri = UnicodeAttribute(hash_key=True) status = NumberAttribute(default=0) # set default value for attribute details = JSONAttribute(default={}) # Create dynamodb table if not exists, if already exists, this code won't do anything ValidationTracker.create_table(wait=True) gdf1 = glueContext.create_dynamic_frame.from_options( connection_type="s3", connection_options=dict( paths=[ S3Path(config.s3path_prefix, "1.json").uri, ], recurse=True, ), format="json", format_options=dict(multiLine=True), transformation_ctx="datasource", ) gdf1.toDF().show() ###Output _____no_output_____ ###Markdown File 2```python[ {"id": 11, "name": "user11", 
"balance": 1100}, {"id": 12, "name": "user12", "balance": 1200}, {"id": 13, "name": "user13", "balance": 1300}, {"id": 14, "name": "user14", "balance": 1400}, {"id": 15, "name": "user15", "balance": 1500, "password": "123456"}, unknown field {"id": 16, "name": "user16", "balance": 1600}, {"id": 17, "name": "user17", "balance": 1700}, {"id": 18, "name": "user18", "balance": 1800}, {"id": 19, "name": "user19", "balance": 1900}, {"id": 20, "balance": 2000}, field missing]``` ###Code gdf2 = glueContext.create_dynamic_frame.from_options( connection_type="s3", connection_options=dict( paths=[ S3Path(config.s3path_prefix, "2.json").uri, ], recurse=True, ), format="json", format_options=dict(multiLine=True), transformation_ctx="datasource", ) for row in gdf2.toDF().collect(): print(row.asDict(recursive=True)) gdf2_selected = gdf2.select_fields(["id", "name", "balance"]) gdf2_selected.toDF().show() gdf2_filtered = gdf2_selected.filter(validate_row) pdf2_filtered = gdf2_filtered.toDF() pdf2_filtered_sorted = pdf2_filtered.sort(pdf2_filtered.id.asc()) pdf2_filtered_sorted.show() ###Output _____no_output_____ ###Markdown File 3 ###Code gdf3 = glueContext.create_dynamic_frame.from_options( connection_type="s3", connection_options=dict( paths=[ S3Path(config.s3path_prefix, "3.json").uri, ], recurse=True, ), format="json", format_options=dict(multiLine=True), transformation_ctx="datasource", ) for row in gdf3.toDF().collect(): print(row.asDict(recursive=True)) gdf3_filtered = gdf3.filter(validate_row) gdf3_filtered.toDF().show() ###Output _____no_output_____ ###Markdown Put them together ###Code gdf = glueContext.create_dynamic_frame.from_options( connection_type="s3", connection_options=dict( paths=[ S3Path(config.s3path_prefix).uri, ], recurse=True, ), format="json", format_options=dict(multiLine=True), transformation_ctx="datasource", ) gdf = DynamicFrame.fromDF( gdf.toDF().withColumn("input_file_name", sql_funcs.input_file_name()), glueContext, "datasource_with_input_file_name", ) gdf.toDF().show(30) gdf = glueContext.create_dynamic_frame.from_catalog( name_space="learn_big_data_on_aws", table_name="data_validation", transformation_ctx="datasource" ) gdf = DynamicFrame.fromDF( gdf.toDF().withColumn("input_file_name", sql_funcs.input_file_name()), glueContext, "datasource_with_input_file_name", ) # gdf.toDF().show(30) for row in gdf.toDF().collect(): print(row.asDict(recursive=True)) gdf.count() gdf_selected = gdf.select_fields(["id", "name", "balance"]) gdf_selected.toDF().show(3) gdf_filtered = gdf_selected.filter(validate_row) pdf_filtered = gdf_filtered.toDF() pdf_filtered_sorted = pdf_filtered.sort(pdf_filtered.id.asc()) pdf_filtered_sorted.show(30) pdf_filtered_sorted.count() ###Output _____no_output_____
project/reports/global_warming/E_Simou.ipynb
###Markdown Final Project for "A Network Tour of Data Science"- Global Warming Effrosyni Simou 1. Aim of the ProjectSince the 2016 Presidential Elections in the USA, the interest of people with regards to climate change and the correct environmental policy has reached an all-time high. In this project the aim is to use a [dataset](https://www.kaggle.com/berkeleyearth/climate-change-earth-surface-temperature-data) with temperature data from 1750 to 2015 [1] and check whether global warming is a fact or a speculation. The dataset is nicely packaged and allows for slicing into interesting subsets (by country, by city, global temperatures e.t.c.). It was put together by Berkeley Earth, which is affiliated with Lawrence Berkeley National Laboratory. 2. Data Acquisition ###Code import numpy as np # Show matplotlib graphs inside the notebook. %matplotlib inline import os.path import pandas as pd import matplotlib.pyplot as plt %matplotlib inline import plotly import plotly.offline as py py.init_notebook_mode(connected=True) import plotly.graph_objs as go import plotly.tools as tls from sklearn import linear_model from statsmodels.tsa.arima_model import ARIMA from myutils import makeTimeSeries from myutils import differenciate from myutils import test_stationarity import warnings warnings.filterwarnings("ignore") ###Output _____no_output_____ ###Markdown 2.1 Importing the data ###Code folder = os.path.join('data', 'temperatures','GlobalLandTemperatures') filename_ByCity = os.path.join(folder, 'GlobalLandTemperaturesByCity.csv') filename_ByCountry = os.path.join(folder, 'GlobalLandTemperaturesByCountry.csv') filename_ByMajorCity = os.path.join(folder, 'GlobalLandTemperaturesByMajorCity.csv') filename_ByState = os.path.join(folder, 'GlobalLandTemperaturesByState.csv') filename_Global = os.path.join(folder, 'GlobalTemperatures.csv') ByCity=pd.read_csv(filename_ByCity) ByCountry=pd.read_csv(filename_ByCountry) ByMajorCity=pd.read_csv(filename_ByMajorCity) ByState=pd.read_csv(filename_ByState) Global=pd.read_csv(filename_Global) ###Output _____no_output_____ ###Markdown 2.2 Looking at the data ###Code ByCity[:10000].to_html('ByCity.html') ByCountry[:10000].to_html('ByCountry.html') ByMajorCity[:10000].to_html('ByMajorCity.html') ByState[:10000].to_html('ByState.html') Global.to_html('Global.html') ###Output _____no_output_____ ###Markdown Export part of the dataset as HTML files for inspection [ByCity](./ByCity.html), [ByCountry](./ByCountry.html), [ByMajorCity](ByMajorCity.html), [ByState](./ByState.html), [Global](./Global.html) .As we can see by following the links above, there is a need to clean our data: * __There are missing data.__ For instance, in the case of the _global_ temperatures there are no measurements for maximum/minimum land temperatures as well as no measurements for land and ocean temperatures before 1850.* __There are duplicates in our data.__ This makes sense since the dataset was created by combining 16 pre-existing archives. For instance, in the case of temperatures _by country_ the temperatures for Denmark, France, Netherlands and United Kingdom are duplicate. Also, in the case of temperatures _by city_ the temperatures for Guatemala City are duplicate.* __Older measurements are less reliable.__ The measurements in this dataset date as back as 1743. It is expected that older measurements will be noisy and therefore less reliable. We will visualize the uncertainty of the measurements in the next section. 
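Before cleaning, it also helps to quantify how much data is actually missing; the short added check below (not part of the original report) simply counts the null values per column. ###Code
# Quick added check: how many values are missing in each table?
print(Global.isnull().sum())
print(ByCountry.isnull().sum())
###Output _____no_output_____ ###Markdown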
2.3 Cleaning the data 2.3.1 Removing duplicates ###Code #Removing duplicates from ByCountry ByCountry_clear = ByCountry[~ByCountry['Country'].isin( ['Denmark', 'France', 'Europe', 'Netherlands', 'United Kingdom'])] #ByCountry_clear.loc[ByCountry_clear['Country'] == 'Denmark (Europe)'] ByCountry_clear = ByCountry_clear.replace( ['Denmark (Europe)', 'France (Europe)', 'Netherlands (Europe)', 'United Kingdom (Europe)'], ['Denmark', 'France', 'Netherlands', 'United Kingdom']) #countries = np.unique(ByCountry_clear['Country']) #np.set_printoptions(threshold=np.inf) #print(countries) #Removing duplicates from ByCity ByCity_clear = ByCity[~ByCity['City'].isin( ['Guatemala'])] ByCity_clear = ByCity_clear.replace(['Guatemala City'],['Guatemala']) #cities = np.unique(ByCity_clear['City']) #print(cities) ###Output _____no_output_____ ###Markdown 2.3.2 Working with the missing dataAs far as the missing data is concerned we can chose to either:* Ignore the missing values* Use the values we have in order to fill in the missing values (e.g. pad, interpolate e.t.c.).For example, if we chose to ignore the global temperature measurement for a month where the value for LandAverageTemperature is missing we can do it as follows: ###Code Global.dropna(subset=['LandAverageTemperature']).head() ###Output _____no_output_____ ###Markdown Or, if we chose to ignore the global temperature measurements for which we don't have all of the 8 fields: ###Code Global.dropna(axis=0).head() ###Output _____no_output_____ ###Markdown If we chose to fill in the missing values with the values of the previous corresponding measurement: ###Code Global.fillna(method='pad').head() ###Output _____no_output_____ ###Markdown The method we will use will depend on the problem we will try to solve with our data. 2.3.3 Uncertainty of measuremets with time ###Code mean_Global= [] mean_Global_uncertainty = [] years = np.unique(Global['dt'].apply(lambda x: x[:4])) for year in years: mean_Global.append(Global[Global['dt'].apply( lambda x: x[:4]) == year]['LandAverageTemperature'].mean()) mean_Global_uncertainty.append(Global[Global['dt'].apply( lambda x: x[:4]) == year]['LandAverageTemperatureUncertainty'].mean()) #print(years.dtype) x=years.astype(int) minimum=np.array(mean_Global) + np.array(mean_Global_uncertainty) y=np.array(mean_Global) maximum=np.array(mean_Global) - np.array(mean_Global_uncertainty) plt.figure(figsize=(16,8)) plt.plot(x,minimum,'b') plt.hold plt.plot(x,y,'r') plt.hold plt.plot(x,maximum,'b') plt.hold plt.fill_between(x,y1=minimum,y2=maximum) plt.xlabel('years',fontsize=16) plt.xlim(1748,2017) plt.ylabel('Temperature, °C',fontsize=16) plt.title('Yearly Global Temperature',fontsize=24) ###Output _____no_output_____ ###Markdown As it can be observed the uncertainty of the measurements in the 18th and 19th century was very high. Early data was collected by technicians using mercury thermometers, where any variation in the visit time impacted measurements. In the 1940s, the construction of airports caused many weather stations to be moved. In the 1980s, there was a move to electronic thermometers that are said to have a cooling bias. One can chose to ignore or give smaller weights to older, less reliable measurements. For the data exploitation part we will consider data from 1900 onward. 3. Data Exploration 3.1 Which countries are warmer? We now draw a map with the average temperature of each country over all years. This serves as a quick way to check that our data make sense. 
We can see that the warmest countries are the ones along the Equator and that the coldest countries are Greenland, Canada and Russia. Countries for which the data was missing are depicted as white. One can hover above counties to see their name and average temperatures. ###Code countries = np.unique(ByCountry_clear['Country']) mean_temp = [] for country in countries: mean_temp.append(ByCountry_clear[ByCountry_clear['Country'] == country]['AverageTemperature'].mean()) #when taking the mean the missing data are automatically ignored=>see data cleaning section #use choropleth map provided by pyplot data = [ dict( type = 'choropleth', locations = countries, z = mean_temp, locationmode = 'country names', text = countries, colorbar = dict(autotick = True, tickprefix = '', title = '\n °C') ) ] layout = dict( title = 'Average Temperature in Countries', geo = dict( showframe = False, showocean = True, oceancolor = 'rgb(0,255,255)', ), ) fig = dict(data=data, layout=layout) py.iplot(fig,validate=False) ###Output _____no_output_____ ###Markdown 3.2 Which cities have experienced the biggest change of temperature the last 50 years? We now look at the change of temperature in the major cities over the last 50 years. We subtract the oldest temperature $T_{old }$ from the most recent temperature $T_{new}$. Therefore if $dT=T_{new}-T_{old }>0 \rightarrow$ the temperature has increased. It can be observed that for almost all (95%) of the major cities there has been an increase in the temperature in the last 50 years. One can zoom into the map and see the name and the coordinates of the cities. ###Code years_in_MajorCities=np.unique(ByMajorCity['dt'].apply(lambda x: x[:4])) cities = np.unique(ByMajorCity['City']) dt=[years_in_MajorCities[-51],years_in_MajorCities[-1]] T1=[] T2=[] lon=[] lat=[] for city in cities: T1.append(ByMajorCity[(ByMajorCity['City'] == city) & (ByMajorCity['dt'].apply(lambda x: x[:4]) == dt[0])]['AverageTemperature'].mean()) T2.append(ByMajorCity[(ByMajorCity['City'] == city) & (ByMajorCity['dt'].apply(lambda x: x[:4]) == dt[1])]['AverageTemperature'].mean()) lon.append(ByMajorCity[ByMajorCity['City'] == city]['Longitude'].iloc[1]) lat.append(ByMajorCity[ByMajorCity['City'] == city]['Latitude'].iloc[1]) lon=np.array(lon) lat=np.array(lat) for i in range(0,lon.size): if lon[i].endswith('W'): west=lon[i] west=float(west[:-1]) east=str(360-west) lon[i]=east+'E' for i in range(0,lat.size): if lat[i].endswith('S'): south=lat[i] south=float(south[:-1]) north=str(1-south) lat[i]=north+'N' lon=pd.DataFrame(lon) lat=pd.DataFrame(lat) long=lon[0].apply(lambda x: x[:-1]) lati=lat[0].apply(lambda x: x[:-1]) dT=np.array(T2)-np.array(T1) data = [ dict( type = 'scattergeo', lon = long, lat = lati, text=cities, mode = 'markers', marker = dict( size = 8, opacity = 0.8, reversescale = True, autocolorscale = False, symbol = 'square', line = dict( width=1, color='rgba(102, 102, 102)' ), color = dT, colorbar=dict( title="\n °C" ) ))] layout = dict( title = 'Change in the temperature the last 50 years', colorbar = True, geo = dict( showland = True, landcolor = "rgb(250, 250, 250)", subunitcolor = "rgb(217, 217, 217)", countrycolor = "rgb(217, 217, 217)", showocean = True, oceancolor = 'rgb(0,255,255)', ), ) fig = dict( data=data, layout=layout ) py.iplot( fig, validate=False) ###Output _____no_output_____ ###Markdown 4. Data Expoitation We now want to build a model that predicts the global temperature based on the temperatures of the previous years. 
It can be obsereved from the figure below that the mean of the global temperature data has a positive trend. Therefore the yearly global temperature is a non-stationary process (the joint probability distribution changes when shifted in time). In order to produce reliable results and good prediction the process must be converted to a stationary process. ###Code mean_Global=pd.DataFrame(mean_Global) mean_Global['dt']=years ts=makeTimeSeries(mean_Global) #print(ts) plt.figure(figsize=(16,8)) plt.plot(ts) plt.xlabel('time',fontsize=16) plt.ylabel('Temperature, °C',fontsize=16) plt.title('Yearly Global Temperature',fontsize=24) ###Output _____no_output_____ ###Markdown 4.1 Making the process a stationary process 4.1.1 Differencing An easy way to detrend a time series is by differencing. For a non-stationary time series $X$, its corresponding time series after differencing $X_{diff}$ can be calculated as:$$X_{diff}(i)=X(i)-X(i-1)$$$X_{diff}$ will obviously have one sample less than $X$. ###Code X = ts[0]['1900':'2000'] #training set, temporal split #print(X) X_diff=differenciate(X) #print(X_diff) plt.figure(figsize=(16,8)) plt.plot(X_diff) plt.xlabel('years',fontsize=16) plt.ylabel('Temperature, °C',fontsize=16) plt.title('Yearly Global Temperature (after differencing)',fontsize=24) ###Output _____no_output_____ ###Markdown Now we can check if in fact the process after differencing is stationary with the **Dickey-Fuller Test**. Here the null hypothesis is that the time series is non-stationary. The test results comprise of a Test Statistic and some Critical Values for different confidence levels. If the ‘Test Statistic’ is less than the ‘Critical Value’, we can reject the null hypothesis and say that the series is stationary. ###Code test_stationarity(X_diff) ###Output _____no_output_____ ###Markdown 4.1.2 Detrend by Model Fitting Another way is to try to model the trend and then subtract it from the data. ###Code regresor = linear_model.LinearRegression() y=np.array(X.dropna()) t=np.arange(y.size) y=y.reshape(-1,1) t=t.reshape(-1,1) regresor.fit(t,y) trend=regresor.predict(t) # detrend detrended = [y[i]-trend[i] for i in range(0, y.size)] y=pd.DataFrame(y) y.index=X.index trend=pd.DataFrame(trend) trend.index=X.index detrended=pd.DataFrame(detrended) detrended.index=X.index print('Coefficients: \n', regresor.coef_) print("Mean of error: %.2f" % np.mean((trend - y) ** 2)) # plot trend plt.figure(figsize=(16,8)) plt.plot(y,color='blue',label='time series') plt.plot(trend,color='green',label='trend') plt.xlabel('years',fontsize=16) plt.ylabel('Temperature, °C',fontsize=16) plt.title('Trend of Yearly Global Temperature',fontsize=24) plt.legend() plt.show() # plot detrended plt.figure(figsize=(16,8)) plt.plot(detrended) plt.xlabel('years',fontsize=16) plt.ylabel('Temperature, °C',fontsize=16) plt.title('Detrended Yearly Global Temperature',fontsize=24) plt.show() test_stationarity(detrended[0]) ###Output _____no_output_____ ###Markdown Looking at the results we get from the Dickey-Fuller Test for the two methods of making the time series stationary, we can see that we got better results here through the method of differencing. Therefore, in what follows we will use $X_{diff}$. We could have gotten better results for the method based on modeling the trend if we had allowed a more complex model than the linear one. 4.2 Modeling For the modeling we will use the **Auto-Regressive Integrated Moving Averages (ARIMA)** model. 
For our training set we will use the global temperatures from 1900 to 2000.The ARIMA provided by statsmodels differenciates the time series. Therefore, we give first in the figure below the results for the differenciated time series. ###Code model = ARIMA(ts[0]['1900':'2000'], order=(1, 1, 2)) results_ARIMA = model.fit(disp=-1) plt.figure(figsize=(16,8)) plt.plot(X_diff,color='blue',label='original') plt.plot(results_ARIMA.fittedvalues, color='red',label='predicted') plt.title('RSS: %.4f'% sum((results_ARIMA.fittedvalues-X_diff)**2),fontsize=20) plt.legend(loc='best') plt.xlabel('years',fontsize=16) plt.ylabel('Temperature, °C',fontsize=16) predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True) #print(predictions_ARIMA_diff.head()) ###Output _____no_output_____ ###Markdown We now take it back to the original scale (no differencing). ###Code predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum() #print (predictions_ARIMA_diff_cumsum.head()) predictions_ARIMA = pd.Series(X.ix[0], index=X.index) predictions_ARIMA = predictions_ARIMA.add(predictions_ARIMA_diff_cumsum,fill_value=0) #predictions_ARIMA.head() ###Output _____no_output_____ ###Markdown 5. Evaluation 5.1 In-sample performance We now plot the actual and the predicted time series by our model for our training set. It is not a perfect prediction, but the root mean square error is relatively small. ###Code plt.figure(figsize=(16,8)) plt.plot(X,color='blue',label='original') plt.plot(predictions_ARIMA,color='green',label='predicted') plt.title('RMSE= %.4f'% np.sqrt(sum((predictions_ARIMA-X)**2)/len(X)),fontsize=24) plt.legend(loc='best') plt.xlabel('years',fontsize=16) plt.ylabel('Temperature, °C',fontsize=16) ###Output _____no_output_____ ###Markdown 5.2 Out-of-sample performance We now look at the accuracy of our model in predicting the future. We test on the temperatures from 2001 to 2015. Again, the model is not perfectly accurate, but the root mean square error is relatively small. ###Code X_test = ts[0]['2001':] #test set, temporal split #print(X_test) preds=results_ARIMA.predict('2001-01-01','2015-01-01') #preds.head preds_cumsum = preds.cumsum() preds=preds_cumsum+X[-1] #print (preds) #print(X_test) plt.figure(figsize=(16,8)) plt.plot(X_test,color='blue',label='original') plt.plot(preds, color='red',label='predicted') plt.title('RMSE= %.4f'% np.sqrt(sum((preds-X_test)**2)/len(X_test)),fontsize=24) plt.legend(loc='best') plt.xlabel('years',fontsize=16) plt.ylabel('Temperature, °C',fontsize=16) ###Output _____no_output_____
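###Markdown 5.3 Comparison with a Naive Baseline (added sketch) A quick sanity check on the ARIMA forecast, added here and not part of the original report, is to compare its out-of-sample RMSE against a persistence baseline that simply repeats the last observed training value for every test year. ###Code
# Naive persistence baseline: forecast the last training value for every test year
naive_preds = pd.Series(np.repeat(X[-1], len(X_test)), index=X_test.index)

rmse_arima = np.sqrt(((preds - X_test) ** 2).mean())
rmse_naive = np.sqrt(((naive_preds - X_test) ** 2).mean())

plt.figure(figsize=(16,8))
plt.plot(X_test, color='blue', label='original')
plt.plot(preds, color='red', label='ARIMA forecast')
plt.plot(naive_preds, color='green', label='naive baseline')
plt.title('ARIMA RMSE= %.4f vs naive RMSE= %.4f' % (rmse_arima, rmse_naive), fontsize=24)
plt.legend(loc='best')
plt.xlabel('years', fontsize=16)
plt.ylabel('Temperature, °C', fontsize=16)
###Output _____no_output_____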
predictions/Prediction_Offense_Final1.ipynb
###Markdown ###Code # Installs %%capture !pip install pmdarima !pip install category_encoders==2.0.0 # Imports import numpy as np import pandas as pd from statsmodels.tsa.arima_model import ARIMA import pmdarima as pm from sklearn import preprocessing import category_encoders as ce # Import data original_df = pd.read_csv('https://raw.githubusercontent.com/JimKing100/nfl-test/master/data-actuals/actuals_offense.csv') kickers_df = pd.read_csv('https://raw.githubusercontent.com/JimKing100/nfl-test/master/data-revised/rookies_non_kicker.csv') offense_df = pd.read_csv('https://raw.githubusercontent.com/JimKing100/nfl-test/master/data-revised/rookies_non_offense.csv') player_df = pd.concat([kickers_df, offense_df], ignore_index=True) # The dataframe of actual offensive points for each game from 2000-2019 original_df.head() # The dataframe of all 2019 offensive players (kickers and offense) player_df.head(50) # Add a row to the final_df dataframe # Each row represents the predicted points for each team def add_row(df, p, f, l, n, pos, pred, act): df = df.append({'player': p, 'first': f, 'last': l, 'name': n, 'position': pos, 'week1-pred': pred, 'week1-act': act }, ignore_index=True) return df # The main code for iterating through the player(offense and kicker) list, calculating the points and adding the rows # to the final_df dataframe. column_names = ['player', 'first', 'last', 'name', 'position', 'week1-pred', 'week1-act' ] player_list = offense_df['player'].tolist() final_df = pd.DataFrame(columns = column_names) for player in player_list: first = player_df['first'].loc[(player_df['player']==player)].iloc[0] last = player_df['last'].loc[(player_df['player']==player)].iloc[0] name = player_df['name'].loc[(player_df['player']==player)].iloc[0] position1 = player_df['position1'].loc[(player_df['player']==player)].iloc[0] start_year = player_df['start'].loc[(player_df['player']==player)].iloc[0] row = original_df.index[(original_df['player']==player)][0] if start_year < 2000: start_year = 2000 col = ((start_year - 2000) * 16) + 5 train_data = original_df.iloc[row, col:309] actuals = original_df.iloc[row, 309:325] act_points = actuals.sum() print(player) if (start_year < 2016) & (player != 'GG-0310'): # ARIMA model model = pm.auto_arima(train_data, start_p=1, start_q=1, test='adf', # use adftest to find optimal 'd' max_p=3, max_q=3, # maximum p and q m=1, # frequency of series d=None, # let model determine 'd' seasonal=False, # No Seasonality start_P=0, D=0, trace=False, error_action='ignore', suppress_warnings=True, stepwise=True) # Forecast n_periods = 16 fc = model.predict(n_periods=n_periods, return_conf_int=False) index_of_fc = np.arange(len(train_data), len(train_data)+n_periods) fc_series = pd.Series(fc, index=index_of_fc) pred_points = fc_series.sum() else: pred_points = train_data.mean() * 16 final_df = add_row(final_df, player, first, last, name, position1, pred_points, act_points) # The final_df dataframe final_df['week1-diff'] = final_df['week1-pred'] - final_df['week1-act'] final_df['week1-pct'] = final_df['week1-diff']/final_df['week1-pred'] final_df['week1-pred'] = final_df['week1-pred'].astype(int) # Calculate the metrics pred_median_error = final_df['week1-pct'].median() print('Median Error - %.4f%%' % (pred_median_error * 100)) final_df.head(50) # Save the results to .csv file final_df.to_csv('/content/week1-pred-offense-norookies.csv', index=False) ###Output _____no_output_____
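###Markdown As a small added sketch on top of the median percentage error computed above, a couple of absolute error summaries over `final_df` make it easier to compare future revisions of the model; they reuse the `week1-diff` column already present (predicted minus actual points). ###Code
# Absolute error summaries across all predicted players
mae = final_df['week1-diff'].abs().mean()
rmse = np.sqrt((final_df['week1-diff'] ** 2).mean())
print('MAE  - %.2f points' % mae)
print('RMSE - %.2f points' % rmse)

# Largest over-predictions (positive diff means the model predicted too many points)
final_df.sort_values('week1-diff', ascending=False).head(10)
###Output _____no_output_____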
Stock price prediction.ipynb
###Markdown Preview of Data ###Code data.head() data.info() data.describe() data['Price Change'].value_counts() ###Output _____no_output_____ ###Markdown Modeling with Scikit-Learn ###Code data.columns X = data.drop(['Unnamed: 0', 'symbol', 'date', 'Price Change'], axis=1) y = data['Price Change'] print(X.shape) print(y.shape) # print(X.head()) # print(y.head()) ###Output (10201, 6) (10201,) ###Markdown Split the dataset into training and testing set ###Code X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=5) print(X_train.shape) print(y_train.shape) print(X_test.shape) print(y_test.shape) ###Output (7140, 6) (7140,) (3061, 6) (3061,) ###Markdown Expermenting with most common algorithms ###Code #Normal classification with decision tree clf = DecisionTreeClassifier(max_depth=3) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=5) clf.fit(X_train, y_train) score = clf.score(X_test, y_test) print("Accuracy : " , score) y_pred = clf.predict(X_test) precision = metrics.precision_score(y_test, y_pred, average='binary') print("Precision : " , precision) classifiers = [ LogisticRegression(), KNeighborsClassifier(3), LinearSVC(random_state=0, tol=1e-5), SVC(gamma=2, C=1), DecisionTreeClassifier(max_depth=5), RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1), MLPClassifier(alpha=1, max_iter=1000), AdaBoostClassifier(), GaussianNB(), QuadraticDiscriminantAnalysis()] names = ["Logistic Regression", "Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree", "Random Forest", "Neural Net", "AdaBoost", "Naive Bayes", "QDA"] # iterate over classifiers for name, clf in zip(names, classifiers): clf.fit(X_train, y_train) score = clf.score(X_test, y_test) print(name , " : ", score) ###Output Logistic Regression : 0.5249918327344005 Nearest Neighbors : 0.5223783077425678 ###Markdown Stratified K-Fold Cross validation ###Code sss = StratifiedShuffleSplit(n_splits=5, test_size=0.5, random_state=0) sss.get_n_splits(X, y) X_numpy = X.to_numpy() y_numpy = y.to_numpy() clf = RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1) scores = [] precisions = [] for train_index, test_index in sss.split(X_numpy,y_numpy): # print("Train Index: ", train_index) # print("Test Index: ", test_index) X_train, X_test, y_train, y_test = X_numpy[train_index], X_numpy[test_index], y_numpy[train_index], y_numpy[test_index] # print(X_train.shape) # print(X_test.shape) # print(y_train.shape) # print(y_test.shape) clf.fit(X_train, y_train) scores.append(clf.score(X_test, y_test)) y_pred = clf.predict(X_test) precisions.append(precision_score(y_test, y_pred, average='binary', zero_division=0)) print("Average Score : " , np.mean(scores)) print("Average precision: ", np.mean(precisions)) ###Output Average Score : 0.5779258968829641 Average precision: 0.5321925678948123
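###Markdown The manual StratifiedShuffleSplit loop above can be written more compactly with `cross_val_score`, reusing the same splitter as the CV strategy; the sketch below is an editorial addition that scores every candidate classifier on precision so the comparison matches the metric used earlier. ###Code
from sklearn.model_selection import cross_val_score

# Reuse the StratifiedShuffleSplit object defined above as the CV strategy
for name, clf in zip(names, classifiers):
    cv_precision = cross_val_score(clf, X_numpy, y_numpy, cv=sss, scoring='precision')
    print('%s : mean precision %.3f (+/- %.3f)' % (name, cv_precision.mean(), cv_precision.std()))
###Output _____no_output_____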
results_ipynb/plots/supp/rtc_on_specific_neuron.ipynb
###Markdown this notebook tries to replicate what's in ###Code %matplotlib inline import numpy as np import matplotlib.pyplot as plt import h5py import os.path from torchvision.utils import make_grid from torch import FloatTensor from scipy.stats import pearsonr from skimage.io import imsave as imsave_ski from torch.backends import cudnn cudnn.enabled = False # for deterministic. from strflab import rta, rtc from numpy.random import RandomState from torch.autograd import Variable from tang_jcompneuro import dir_dictionary from tang_jcompneuro.io import load_split_dataset, split_file_name_gen, load_image_dataset from tang_jcompneuro.model_fitting_cnn import (opt_configs_to_explore, models_to_train, init_config_to_use_fn, train_one_case, save_one_model) from tang_jcompneuro.cnn import CNN import time imsave_global_dir = os.path.join(dir_dictionary['plots'], 'supp', 'rtc') os.makedirs(imsave_global_dir, exist_ok=True) def imsave(x, y): imsave_ski(os.path.join(imsave_global_dir, x), y) # let's train a model. # neuron 553, split 0, all stimuli. monkey A. # let me check what config worked best during training. # and I will just use that. # def load_data(): datasets_local = load_split_dataset('MkA_Shape', 'all', True, 553) return datasets_local datasets = load_data() def load_data_idx(): # get testing index. datafile_x = split_file_name_gen(None) with h5py.File(datafile_x, 'r') as f_x: index_this = f_x[f'/MkA_Shape/all/with_val/100/0/test'].attrs['index'] return index_this dataset_test_idx = load_data_idx() assert dataset_test_idx.shape == (1900,) def load_trained_model(): with h5py.File(os.path.join(dir_dictionary['models'], 'cnn', 'b.4', 'MkA_Shape', 'all', '100', '0', '550_600.hdf5'), 'r') as f_out: grp_this = f_out['/MkA_Shape/all/100/0/cnn/b.4/553'] best_config, best_corr_ref = grp_this.attrs['best_config'], grp_this['corr'][()] config_to_use = '1e-3L2_1e-3L2_adam002_mse' assert config_to_use == best_config # ok. l opt_config = opt_configs_to_explore[config_to_use] arch_config = models_to_train['b.4'] # print(opt_config, arch_config) model = CNN(arch_config, init_config_to_use_fn(), mean_response=datasets[1].mean(axis=0), # change seed if you get unlucky for unstable input... # this is the case especially for MkE2_Shape. # i think this was an issue before as well. # except that pytorch 0.2.0 doesn't report such errors. # check /inf_debug_script.py # seed=42, seed=0, # last ditch # for some avg_sq # scale_hack=0.9, # for other avg_sq # as well as other models. scale_hack=None, # for MLP model, use PCAed data. input_size=20, # scale_hack = 0.0 ) model.cuda() t1 = time.time() y_val_cc, y_test_hat, new_cc = train_one_case(model, datasets, opt_config, seed=2, show_every=1000, return_val_perf=True, max_epoch=20000) t2 = time.time() print(t2-t1, 'sec') print('ref corr', best_corr_ref, 'current corr', new_cc) return y_test_hat, new_cc, model y_test_hat_global, new_cc_global, model_global = load_trained_model() model_save_dir = os.path.join(dir_dictionary['models'], 'cnn_cherrypick', 'MkA_all_0_b4') os.makedirs(model_save_dir, exist_ok=True) def save_this_model(): with h5py.File(os.path.join(model_save_dir, '553.hdf5')) as f_out: f_out.require_group('model') save_one_model(model_global, f_out['model']) # save ref ytest_hat, ytest_cc. 
if 'ytest_hat' not in f_out: f_out.create_dataset('ytest_hat', data=y_test_hat_global) if 'corr' not in f_out: f_out.create_dataset('corr', data=new_cc_global) save_this_model() y_test_hat_global = y_test_hat_global[:,0] weight_values = None for x, y in model_global.named_parameters(): if x == 'conv.conv0.weight': weight_values = y.data.cpu().numpy().copy() break def imshow(npimg, figsize=(8, 6), save_name=None): plt.close('all') plt.figure(figsize=figsize) img_to_show = np.transpose(npimg.numpy(), (1, 2, 0)) plt.imshow(img_to_show) if save_name is not None: imsave(save_name, img_to_show) plt.show() weight_values.shape imshow(make_grid(FloatTensor(weight_values),normalize=True, scale_each=True), save_name='original_filters.png') # let's recover it. I think this makes more sense than my previous experiments, # as here all filters look sensible. num_stimulus_to_study = (2000, 5000, 10000, 20000, 50000, 100000, 200000, 500000, ) model_global.cuda() model_global.eval() # double check it can recover the y_recon stored. # also, I need to show this neuron's tuning curve and the fitting anyway. X_ref = datasets[2] y_ref = datasets[3][:,0] # save top 20 stimuli. X_ref_big = load_image_dataset('Shape_9500', trans=False)[dataset_test_idx] print(X_ref_big.shape) y_sort_idx = np.argsort(y_ref)[::-1] X_ref_big_top = X_ref_big[y_sort_idx[:20]] X_ref_big_top = make_grid(FloatTensor(X_ref_big_top)[:20], nrow=10, normalize=False, scale_each=False) X_ref_big_top = np.transpose(X_ref_big_top.numpy(), (1, 2, 0)) print(X_ref_big_top.shape) imsave('top_20_stimuli.png', X_ref_big_top) y_recon_debug = model_global(Variable(FloatTensor(X_ref).cuda())).data.cpu().numpy()[:,0] y_recon_debug.shape pearsonr(y_recon_debug, y_test_hat_global)[0], abs(y_test_hat_global-y_recon_debug).max() assert pearsonr(y_recon_debug, y_ref.astype(np.float32))[0] == new_cc_global # show tuning curve def show_tuning_curve(y_original, y_fitted, figsize=(3,2.5), top=None): assert y_original.shape == y_fitted.shape == (y_original.size,) if top is None: top = y_original.size plt.close('all') fig_this = plt.figure(figsize=figsize) sort_idx = np.argsort(y_original)[::-1] slice_to_use = slice(top) plt.plot(np.arange(top), y_original[sort_idx][slice_to_use], label='raw', color='red', alpha=0.8) plt.plot(np.arange(top), y_fitted[sort_idx][slice_to_use], label='fitted', color='blue', alpha=0.8) plt.legend() plt.xlabel('stimulus rank') plt.ylabel('response') plt.ylim(0, y_original.max()+0.1) plt.xlim(0, top) r_this = pearsonr(y_original, y_fitted)[0] plt.text(x=125,y=1.0,s='r={:.2f}'.format(r_this), horizontalalignment='center', verticalalignment='center',fontsize='larger') fig_this.subplots_adjust(top=0.95, bottom=0.2, left=0.2, right=0.95, hspace=0.1, wspace=0.1) fig_this.savefig(os.path.join(imsave_global_dir, 'tuning_curve.pdf'), dpi=300) plt.show() show_tuning_curve(y_ref, y_test_hat_global, top=100) # show top stimuli. 
def batch_input(net_this, X, batch_size=1000): num_batch = len(X) // batch_size assert num_batch*batch_size == len(X) y_all = [] for i_batch in range(num_batch): start_point = i_batch*batch_size end_point = (i_batch+1)*batch_size X_this = X[start_point:end_point] y_this = net_this(Variable(FloatTensor(X_this).cuda())).data.cpu().numpy() y_all.append(y_this) return np.concatenate(y_all, axis=0) def show_rta(rta_this, title, num_iter): plt.close('all') plt.figure(figsize=(2,2)) # rta_this_for_show = rta_this/abs(rta_this).max() grid_image = make_grid(FloatTensor(rta_this.reshape(1, 1, 20, 20)), nrow=1, normalize=True, scale_each=True) grid_image = np.transpose(grid_image.numpy(), (1, 2, 0)) plt.imshow(grid_image) plt.title('{} min {:.2f} max {:.2f}'.format(title, rta_this.min(), rta_this.max())) imsave(f'rta_{num_iter}.png', grid_image) plt.show() def show_rtc(vector_original_list, eig_list, title, num_iter, eigenvalue_band=None): assert eig_list.shape == (1, eig_list.size) plt.close('all') fig, axes = plt.subplots(1, 3, figsize=(20, 8)) fig.suptitle(title) axes[0].plot(np.arange(eig_list.size)+1, eig_list[0], label='raw') axes[0].set_title('eigen values') if eigenvalue_band is not None: assert eigenvalue_band.shape == (2, 1, 400) eigenvalue_band = eigenvalue_band[:, 0] # these two colors shouldn't be the default one... axes[0].plot(np.arange(eig_list.size)+1, eigenvalue_band[0], label='lower') axes[0].plot(np.arange(eig_list.size)+1, eigenvalue_band[1], label='upper') axes[0].set_title('eigen values with 95% CI') axes[0].legend(loc='best') axes[0].set_xlim(0, 100) # for second one, let's just show all filters. grid_image = make_grid(FloatTensor(vector_original_list[0].reshape(400, 1, 20, 20))[:10], nrow=10, normalize=True, scale_each=True) grid_image = np.transpose(grid_image.numpy(), (1, 2, 0)) axes[1].imshow(grid_image) axes[1].set_title('large eig directions. row major order') imsave(f'rtc_top_{num_iter}.png', grid_image) grid_image = make_grid(FloatTensor(vector_original_list[0].reshape(400, 1, 20, 20))[-10:], nrow=10, normalize=True, scale_each=True) grid_image = np.transpose(grid_image.numpy(), (1, 2, 0)) axes[2].imshow(grid_image) axes[2].set_title('small eig directions. row major order') imsave(f'rtc_bottom_{num_iter}.png', grid_image) plt.show() for num_stimulus in num_stimulus_to_study: # first, generate stimulus. rng_state = RandomState(seed=0) # I will just use standard Gaussian, as this seems to drive neurons well enough, by checking the histogram. X = 10*rng_state.randn(num_stimulus, 1, 20, 20) y_all = batch_input(model_global, X) print(X.shape, y_all.shape, y_all.mean(), y_all.std()) plt.close('all') plt.hist(y_all.ravel(), bins=30) plt.show() # ok. let's first do RTA. rta_this = rta.rta(X.reshape(num_stimulus, -1), y_all) rta_this = rta_this.reshape(20, 20) # show rta, normalized show_rta(rta_this, title=f'{num_stimulus} stimuli, RTA', num_iter=num_stimulus) vector_original_list, eig_list, *_ = rtc.rtc(X.reshape(num_stimulus, -1), y_all) print(vector_original_list.shape, eig_list.shape) # go cheap. only 200 trials. # 1. still expensive even for 200 trials. # 2. not that useful. results show that nearly all eigenvalues are above upper bound CI. # eigenvalue_band = determine_eigenvalue_bands(X.reshape(num_stimulus, -1), y_all, trials=200) # print(eigenvalue_band.shape) show_rtc(vector_original_list, eig_list, title=f'{num_stimulus} stimuli, RTC', eigenvalue_band=None,num_iter=num_stimulus) ###Output (2000, 1, 20, 20) (2000, 1) 65.309 8.73878
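###Markdown As a cross-check on the strflab helpers, the response-triggered average can also be computed directly with numpy and compared against `rta_this` from the last iteration of the loop above. This is an added sketch: the plain response-weighted average below may differ from strflab's `rta` in centering or normalization, so only the correlation between the two estimates is inspected. ###Code
# Plain response-weighted average of the stimuli (spike-triggered-average style)
X_flat = X.reshape(num_stimulus, -1)
rta_manual = (y_all * X_flat).sum(axis=0) / y_all.sum()
rta_manual = rta_manual.reshape(20, 20)

# Correlation between the manual estimate and the strflab result
print('corr with strflab rta:', pearsonr(rta_manual.ravel(), rta_this.ravel())[0])
###Output _____no_output_____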
Python_Dictionary_Comprehension.ipynb
###Markdown **Python Dictionary Comprehension**- In this tutorial, we will learn about Python dictionary comprehension and how to use it with the help of examples. - Dictionaries are data types in Python which allows us to store data in **key/value pair**. For example: ###Code my_dict = {1 : 'apple' , 2 : 'ball'} my_dict ###Output _____no_output_____ ###Markdown - To learn more about them visit: [Python Dictionary](https://www.programiz.com/python-programming/dictionary) **1. Python Dictionary Comprehension**- Dictionary comprehension is an elegant and concise way to create a new dictionary from an iterable in Python.- Dictionary comprehension consists of an expression pair **(key: value)** followed by a `for` statement inside curly braces `{}`.- Here is an example to make a dictionary with each item being a pair of a number and its square. ###Code # Dictionary Comprehension squares = {x : x*x for x in range(6)} squares ###Output _____no_output_____ ###Markdown This code is equivalent to ###Code squares = {} for x in range(6): squares[x] = x*x print(squares) ###Output {0: 0, 1: 1, 2: 4, 3: 9, 4: 16, 5: 25} ###Markdown - A dictionary comprehension can optionally contain more [for](https://www.programiz.com/python-programming/for-loop) or [if](https://www.programiz.com/python-programming/if-elif-else) statements.- An optional `if` statement can filter out items to form the new dictionary.- Here are some examples to make a dictionary with only odd items. ###Code # Dictionary Comprehension with if conditional odd_squares = {x : x*x for x in range(11) if x%2 == 1} odd_squares ###Output _____no_output_____ ###Markdown https://www.programiz.com/python-programming/dictionary To learn more dictionary comprehensions, visit [Python Dictionary Comprehension](https://www.programiz.com/python-programming/dictionary-comprehension). **2. What is Dictionary Comprehension in Python?**- **Dictionary comprehension** is an elegant and concise way to create dictionaries. **Example 1: Dictionary Comprehension**- Consider the following code: ###Code square_dict = dict() for num in range(1, 11): square_dict[num] = num*num print(square_dict) ###Output {1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64, 9: 81, 10: 100} ###Markdown Now, let's create the dictionary in the above program using dictionary comprehension. ###Code # Dictionary comprehension example square_dict = {num : num*num for num in range(1,11)} print(square_dict) ###Output {1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64, 9: 81, 10: 100} ###Markdown - The output of both programs will be the same. - In both programs, we have created a dictionary `square_dict` with **number-square key/value pair**.- However, using dictionary comprehension allowed us to **create a dictionary in a single line**. **3. Using Dictionary Comprehension**- From the above example, we can see that dictionary comprehension should be written in a specific pattern.- The minimal syntax for dictionary comprehension is:- `dictionary = {key: value for vars in iterable}`- Let's compare this syntax with dictionary comprehension from the above example. ![Dictionary Comprehension](https://cdn.programiz.com/sites/tutorial2program/files/Python-dictionary-comprehension.png) Now, let's see how we can use dictionary comprehension using data from another dictionary. 
**Example 3: How to use Dictionary Comprehension** ###Code #item price in dollars old_price = {'milk':1.02, 'coffee':2.5, 'bread':2.5} dollar_to_pound = 0.76 new_price = {item : value*dollar_to_pound for (item, value) in old_price.items()} print(new_price) ###Output {'milk': 0.7752, 'coffee': 1.9, 'bread': 1.9} ###Markdown - Here, we can see that we retrieved the item prices in dollars and converted them to pounds. Using dictionary comprehension makes this task much simpler and shorter. **4. Conditionals in Dictionary Comprehension**- We can further customize dictionary comprehension by adding conditions to it. Let's look at an example. **Example 4: If Conditional Dictionary Comprehension** ###Code original_dict = {'jack': 38, 'michael': 48, 'guido': 57, 'john': 33} even_dict = {k: v for (k, v) in original_dict.items() if v % 2 == 0} print(even_dict) ###Output {'jack': 38, 'michael': 48} ###Markdown - As we can see, only the items with even value have been added, because of the `if` clause in the dictionary comprehension. **Example 5: Multiple if Conditional Dictionary Comprehension** ###Code original_dict = {'jack': 38, 'michael': 48, 'guido': 57, 'john': 33} new_dict = {k: v for (k, v) in original_dict.items() if v % 2 != 0 if v < 40} print(new_dict) ###Output {'john': 33} ###Markdown - In this case, only the items with an odd value of less than 40 have been added to the new dictionary.- It is because of the multiple `if` clauses in the dictionary comprehension. They are equivalent to `and` operation where both conditions have to be true. **Example 6: if-else Conditional Dictionary Comprehension** ###Code original_dict = {'jack': 38, 'michael': 48, 'guido': 57, 'john': 33} new_dict_1 = {k: ('old' if v > 40 else 'young') for (k, v) in original_dict.items()} print(new_dict_1) ###Output {'jack': 'young', 'michael': 'old', 'guido': 'old', 'john': 'young'} ###Markdown - In this case, a new dictionary is created via dictionary comprehension.- The items with a value of 40 or more have the value of 'old' while others have the value of 'young'. **5. Nested Dictionary Comprehension**- We can add dictionary comprehensions to dictionary comprehensions themselves to create nested dictionaries. Let's look at an example. 
**Example 7: Nested Dictionary with Two Dictionary Comprehensions** ###Code
dictionary = {
    k1: {k2: k1 * k2 for k2 in range(1, 6)} for k1 in range(2, 5)
}
print(dictionary)
###Output
{2: {1: 2, 2: 4, 3: 6, 4: 8, 5: 10}, 3: {1: 3, 2: 6, 3: 9, 4: 12, 5: 15}, 4: {1: 4, 2: 8, 3: 12, 4: 16, 5: 20}}
###Markdown
- As you can see, we have constructed a multiplication table in a nested dictionary, for numbers from 2 to 4.
- Whenever nested dictionary comprehension is used, Python first starts from the outer loop and then goes to the inner one.
- So, the comprehension above is equivalent to an explicit outer loop like the one below (shown here for outer keys 11 to 15 instead of 2 to 4):
###Code
dictionary = dict()
for k1 in range(11, 16):
    dictionary[k1] = {k2: k1*k2 for k2 in range(1, 6)}
print(dictionary)
###Output
{11: {1: 11, 2: 22, 3: 33, 4: 44, 5: 55}, 12: {1: 12, 2: 24, 3: 36, 4: 48, 5: 60}, 13: {1: 13, 2: 26, 3: 39, 4: 52, 5: 65}, 14: {1: 14, 2: 28, 3: 42, 4: 56, 5: 70}, 15: {1: 15, 2: 30, 3: 45, 4: 60, 5: 75}}
###Markdown
- It can be unfolded further into two explicit loops:
###Code
dictionary = dict()
for k1 in range(11, 16):
    dictionary[k1] = dict()
    for k2 in range(1, 6):
        dictionary[k1][k2] = k1*k2
print(dictionary)
###Output
{11: {1: 11, 2: 22, 3: 33, 4: 44, 5: 55}, 12: {1: 12, 2: 24, 3: 36, 4: 48, 5: 60}, 13: {1: 13, 2: 26, 3: 39, 4: 52, 5: 65}, 14: {1: 14, 2: 28, 3: 42, 4: 56, 5: 70}, 15: {1: 15, 2: 30, 3: 45, 4: 60, 5: 75}}
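###Markdown As one more small illustration of the `{key: value for ...}` pattern, a comprehension can also build a new dictionary by swapping the keys and values of an existing one. This sketch assumes the values are hashable and distinct (otherwise some entries would be overwritten), and the names below are chosen just for the example. ###Code
prices = {'milk': 0.7752, 'coffee': 1.9}
# swap keys and values, e.g. {0.7752: 'milk', 1.9: 'coffee'}
items_by_price = {value: item for (item, value) in prices.items()}
###Output _____no_output_____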
Data/Create_db.ipynb
###Markdown Create db with Python ###Code import numpy as np #import descartes import pandas as pd import matplotlib.pyplot as pp import geopandas as gpd #%matplotlib inline %matplotlib widget import contextily as ctx import geoplot as gpt import mapclassify from math import radians, cos,sin,asin,sqrt pd.set_option('display.max_columns',30) from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets from IPython.display import display from ipywidgets import GridspecLayout from ipywidgets import TwoByTwoLayout import psycopg2 from psycopg2 import Error from shapely.wkt import dumps,loads # Medical facility data (name,coordinate) medical_df=pd.read_csv('Medical_service_info_corrected.csv') # Population data (district) & Population data (chiefdom, not necessary is population data is detailed) district_population_df=pd.read_csv('Population_info.csv') chiefdom_population_df=pd.read_csv('Chiefdom data_Bo and Kenema.csv') # Shape file (district) & Shape file (chiefdom) district_shape_df=gpd.read_file('sle_admbnda_adm2_1m_gov_ocha/') # chiefdom_shape_df=gpd.read_file('sle_admbnda_adm4_1m_gov_ocha/') # EEZ boundaries file eez_shape_df=gpd.read_file('World_EEZ_v11_20191118/') eez_shape_df=eez_shape_df[eez_shape_df['TERRITORY1']=='Sierra Leone'] # City medical facility data (Not necessary if medical facility data is accurate and complete) # Hospitals in Bo and Kenema hospital_partial_df=pd.read_csv('Site_location_programing.csv') def change_coordinates(facility_name,x,y): """ change facility coordinates in medical_df """ medical_gdf.loc[medical_gdf.name==facility_name,'latitude']=x medical_gdf.loc[medical_gdf.name==facility_name,'longitude']=y facility=medical_gdf.loc[medical_gdf.name==facility_name] facility=gpd.GeoDataFrame(facility,geometry=gpd.points_from_xy(facility.longitude,facility.latitude)) # Create district info dataframe and GeoDataFrames district_info_df=pd.merge(district_shape_df,district_population_df, left_on=['admin2RefN'],right_on=['district']) medical_gdf=gpd.GeoDataFrame(medical_df,geometry=gpd.points_from_xy(medical_df.longitude,medical_df.latitude)) # Many coordinates are not accurate in this 'Medical_service_info_corrected.csv' file change_coordinates('Ola During (Epi)',8.490446, -13.216735) # Create population calculation dataframe population_cal_df=pd.DataFrame(chiefdom_population_df.groupby('section').latitude.mean()).reset_index() population_cal_df['longitude']=chiefdom_population_df.groupby('section').longitude.mean().values population_cal_df['population']=chiefdom_population_df.groupby('section').total.sum().values hospital_gdf=gpd.GeoDataFrame(hospital_partial_df,geometry=gpd.points_from_xy(hospital_partial_df.longitude,hospital_partial_df.latitude)) chiefdom_population_df=gpd.GeoDataFrame(chiefdom_population_df,geometry=gpd.points_from_xy(chiefdom_population_df.longitude,chiefdom_population_df.latitude)) ###Output _____no_output_____ ###Markdown Medical_db ###Code medical_g=medical_gdf[['district','name','address','facility_type','Population','geometry']] medical_g.head(10) medical_g.index.values #medical_g.to_csv('medical.csv',index=False) conn=psycopg2.connect(host="localhost",database='sierra_geo',user='postgres') cursor=conn.cursor() cursor.execute("DROP TABLE IF EXISTS medical") cursor.execute("CREATE TABLE medical (id SERIAL PRIMARY KEY, district VARCHAR, name VARCHAR, address VARCHAR, type VARCHAR, population INTEGER, point GEOMETRY)") conn.commit() cursor.execute("DELETE FROM medical") for i in medical_g.index.values: 
#cursor.execute(sql,(medical_g.iloc[i].district)) cursor.execute("INSERT INTO medical (district,type,population) VALUES ('{}','{}','{}')".format(medical_g.iloc[i].district,medical_g.iloc[i].facility_type,medical_g.iloc[i].Population)) conn.commit() #cursor.execute("DELETE FROM medical") #cursor.execute("INSERT INTO medical (geometry) VALUES (ST_AsEWKB('{}'))".format(medical_g.iloc[0].geometry.wkt)) for i in medical_g.index.values: cursor.execute("UPDATE medical SET point='{}' WHERE id={};".format(medical_g.iloc[i].geometry.wkt,i+1)) conn.commit() ###Output _____no_output_____ ###Markdown District_db ###Code district_info_df.head(5) district_df=district_info_df[['admin2Name','admin1Name','geometry','Population']] district_df conn=psycopg2.connect(database='sierra_geo',host='localhost',user='postgres') cursor=conn.cursor() cursor.execute("DROP TABLE IF EXISTS district") cursor.execute("CREATE TABLE district (id SERIAL PRIMARY KEY, district_name VARCHAR, region_name VARCHAR, population INTEGER, polygon GEOMETRY)") conn.commit() #cursor.execute('DELETE FROM district') for i in district_df.index.values: cursor.execute("INSERT INTO district(district_name,region_name,population,polygon) VALUES('{}','{}','{}','{}')".format(district_df.iloc[i].admin2Name, district_df.iloc[i].admin1Name, district_df.iloc[i].Population, district_df.iloc[i].geometry)) conn.commit() ###Output _____no_output_____ ###Markdown Population_cal_db ###Code population_cal_df.head(2) population_cal_df.to_csv('pop_cal.csv',index=False) ###Output _____no_output_____ ###Markdown Chiefdom_population_df ###Code #chiefdom_population_df=chiefdom_population_df[['district','chiefdom','section','total','latitude','longitude','geometry']] chiefdom_population_df.info() conn=psycopg2.connect(host='localhost',database='sierra_geo',user='postgres') cursor=conn.cursor() cursor.execute('DROP TABLE IF EXISTS chiefdom_population') cursor.execute('CREATE TABLE chiefdom_population (id SERIAL PRIMARY KEY, district VARCHAR, chiefdom VARCHAR,section VARCHAR,total INTEGER,latitude NUMERIC,longitude NUMERIC,point GEOMETRY)') conn.commit() #cursor.execute('DELETE FROM chiefdom_population') for i in chiefdom_population_df.index.values: cursor.execute("INSERT INTO chiefdom_population(district,chiefdom,section,total,latitude,longitude,point) VALUES('{}','{}','{}','{}','{}','{}','{}')".format(chiefdom_population_df.iloc[i].district, chiefdom_population_df.iloc[i].chiefdom, chiefdom_population_df.iloc[i].section, chiefdom_population_df.iloc[i].total, chiefdom_population_df.iloc[i].latitude, chiefdom_population_df.iloc[i].longitude, chiefdom_population_df.iloc[i].geometry)) conn.commit() conn.close() ###Output _____no_output_____ ###Markdown eez_shape_df ###Code eez_shape_df.head(4) conn=psycopg2.connect(host='localhost',database='sierra_geo',user='postgres') cursor=conn.cursor() cursor.execute('DROP TABLE IF EXISTS eez') cursor.execute('CREATE TABLE eez (id SERIAL PRIMARY KEY, line_name VARCHAR, line GEOMETRY)') conn.commit() #cursor.execute('DELETE FROM chiefdom_population') for i in range(3): cursor.execute("INSERT INTO eez(line_name,line) VALUES('{}','{}')".format(eez_shape_df.iloc[i].LINE_NAME, eez_shape_df.iloc[i].geometry )) conn.commit() conn.close() ###Output _____no_output_____ ###Markdown hospital_partial_df ###Code hospital_partial_df.head(2) ###Output _____no_output_____
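###Markdown A side note on the INSERT statements above: building the SQL strings with `.format()` works, but psycopg2 can also pass the values as parameters, which handles quoting and escaping for us and needs only one round trip. Below is a minimal sketch, assuming an open `conn` and the `medical_g` GeoDataFrame from the cells above, and assuming PostGIS parses the WKT text into the geometry column just as it does in the UPDATE statements earlier. ###Code
from psycopg2.extras import execute_values

rows = [
    (row.district, row.facility_type, row.Population, row.geometry.wkt)
    for row in medical_g.itertuples(index=False)
]
with conn.cursor() as cur:
    # values are passed as parameters instead of being formatted into the SQL string
    execute_values(
        cur,
        "INSERT INTO medical (district, type, population, point) VALUES %s",
        rows,
    )
conn.commit()
###Output _____no_output_____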
pytides_examples/demo_noaa_constituents.ipynb
###Markdown demo_noaa_constituents -- Example Pytides UsageBased loosely on the example by Sam Cox on the Pytides wiki, [How to use the NOAA's published Harmonic Constituents in Python with Pytides](https://github.com/sam-cox/pytides/wiki/How-to-use-the-NOAA's-published-Harmonic-Constituents-in-Python-with-Pytides).This example uses the [NOAA constituents published for King's Point, NY](https://tidesandcurrents.noaa.gov/harcon.html?unit=0&timezone=0&id=8516945&name=Kings+Point&state=NY). These have been typed into this notebook below. Code at the bottom of this notebook illustrates how to download the harmonic consituents directly, a better approach in general since it is easier, less error-prone, and provides more digits of precision than the published tables.To find the station number and the webpages for other stations, see [this NOAA page](https://tidesandcurrents.noaa.gov/stations.html?type=Water%20Levels). After going to one of the station webpages, you will find links to the harmonic constituents and datums near the bottom of the page. Make sure you select "meters" as the units and the desired time zone (local or GMT) and then refresh the page if necessary.For more about tidal constituents, see for example: - [wikipedia page](https://en.wikipedia.org/wiki/Theory_of_tidesTidal_constituents) - [NOAA page](https://tidesandcurrents.noaa.gov/about_harmonic_constituents.html) ###Code from datetime import datetime import matplotlib.pyplot as plt import numpy as np ###Output _____no_output_____ ###Markdown Import local submodule version of PytidesThe clawpack version includes some fixes to the original code needed to make it work in Python3. ###Code # Here's what we'd like to do: (?) #from clawpack.pytides.tide import Tide #import clawpack.pytides.constituent as cons # # For now, hardwire in the path... import sys, os CLAW = os.environ['CLAW'] # path to Clawpack files pathstr = os.path.join(CLAW, 'tidal-examples/pytides') assert os.path.isdir(pathstr), '*** Need clawpack/tidal-examples/pytides ***' print('Using Pytides from: %s' % pathstr) if pathstr not in sys.path: sys.path.insert(0,pathstr) from pytides.tide import Tide import pytides.constituent as cons ###Output _____no_output_____ ###Markdown Here are the NOAA constituents, in the order presented on their website for this particular station.We omit the Z0 component, which is 0 relative to MSL and will be adjusted below to present results relative to a different datum, e.g. MLLW. ###Code constituents = [c for c in cons.noaa if c != cons._Z0] #Phases and amplitudes (relative to GMT and in degrees and metres) published_phases = [115.7,140.7,92.6,192,145.5,220.6,159.9,202.8,152.3,\ 117.2,92,0,0,69.7,224.5,141.7,121.9,\ 228.4,252.1,0,60.1,135.5,0,0,204.5,212.2,112.3,\ 141.8,249.1,211.1,75.1,181.4,140.4,202.4,141.8,155,160.9] published_amplitudes = [1.142,0.189,0.241,0.1,0.036,0.066,0.08,0.01,0.004,\ 0.022,0.052,0,0,0.03,0.007,0.025,0.009,\ 0.005,0.008,0,0.024,0.065,0,0,0.004,0.017,0.015,\ 0.002,0.002,0.032,0.003,0.007,0.07,0.009,0.053,\ 0.007,0.008] ###Output _____no_output_____ ###Markdown Print out the constintuents for easy comparison with the [NOAA constituents page for station 8516945, Kings Point, NY](https://tidesandcurrents.noaa.gov/harcon.html?unit=0&timezone=0&id=8516945&name=Kings+Point&state=NY). 
Note that some of the names are slightly different from the NOAA names: ###Code print('# Name Amplitude Phase') for k,c in enumerate(constituents): print('%s %s %.3f %7.3f' \ % (str(k+1).ljust(4), c.name.ljust(7), published_amplitudes[k], published_phases[k])) ###Output _____no_output_____ ###Markdown We can add a constant offset. The published constituents are relative to MSL (not MTL as stated in the Pytides wiki example). Here we set the offset so that the plots will be relative to MLLW instead. These values can be found on the [NOAA datums page for this station](https://tidesandcurrents.noaa.gov/datums.html?datum=STND&units=1&epoch=0&id=8516945&name=Kings+Point&state=NY). Note that these values are relative to the station datum (STND) although the offset computed should be the same as long as the values of both `MSL` and `MLLW` used are relative to the same datum. Also make sure `meters` is selected when looking at datums (and at constituents). ###Code MSL = 5.113 MLLW = 3.927 offset = MSL - MLLW constituents.append(cons._Z0) published_phases.append(0) published_amplitudes.append(offset) ###Output _____no_output_____ ###Markdown Build the model, and a tide instance: ###Code assert(len(constituents)==len(published_phases)==len(published_amplitudes)) model = np.zeros(len(constituents), dtype = Tide.dtype) model['constituent'] = constituents model['amplitude'] = published_amplitudes model['phase'] = published_phases #Build a TIDE INSTANCE called tide from the MODEL called model tide = Tide(model=model,radians=False) print('Predicted tide on January 1, 2013 relative to MLLW...') print(' at 00:00 GMT: %.3fm\n at 06:00 GMT: %.3fm' \ % tuple(tide.at([datetime(2013,1,1,0,0,0), datetime(2013,1,1,6,0,0)]))) ###Output _____no_output_____ ###Markdown The [actual NOAA prediction](https://tidesandcurrents.noaa.gov/waterlevels.html?id=8516945&units=metric&bdate=20130101&edate=20130102&timezone=GMT&datum=MLLW&interval=6&action=) for 0000 and 0600 GMT on January 1 2013 are -0.079m and 2.206m relative to MLLW. Produce plots over a time range:We can produce plots similar to the [actual NOAA prediction](https://tidesandcurrents.noaa.gov/waterlevels.html?id=8516945&units=metric&bdate=20130101&edate=20130102&timezone=GMT&datum=MLLW&interval=6&action=) over a couple of days. ###Code prediction_t0 = datetime(2013,1,1,0,0,0) prediction_end= datetime(2013,1,3,0,0,0) hrs=((prediction_end - prediction_t0).total_seconds())/3600. print ('The data started at datetime: ',prediction_t0) print ('The data ended at datetime: ',prediction_end) print ('The data spanned %5i hours' %int(hrs)) print (' ') hours = 0.1*np.arange(int(hrs)*10) times = Tide._times(prediction_t0, hours) ###Output _____no_output_____ ###Markdown Evaluate the tide instance at the specified times ###Code my_prediction = tide.at(times) ###Output _____no_output_____ ###Markdown Find the high tides and low tides, and print out a tide table: ###Code # Find the highs and lows using tide.extrema generator function # of the tidal instance called tide. Save ext_hrs and ext_hts # as numpy arrays for plotting later. ext_vals=[t for t in tide.extrema(prediction_t0,prediction_end)] print ('High and Low tides, relative to MLLW: ') n_ext=len(ext_vals) ext_hts=[]; ext_hilo=[]; ext_hrs=[]; ext_datetimes=[]; for i in range(n_ext): ext_tuple=ext_vals[i] ext_datetimes.append(ext_tuple[0]) ext_hts.append(ext_tuple[1]) ext_hilo.append(ext_tuple[2]) ext_hrs.append( ((ext_tuple[0] - prediction_t0).total_seconds())/3600.) 
ext_hrs=np.array(ext_hrs) ext_hts=np.array(ext_hts) #Print the extrema information print (' ') print (' Date time Hrs Elevation Hi-Low ') for i in range(n_ext): print ('%s %8.3f %8.3f m %8s ' %\ (ext_datetimes[i].strftime('%Y-%m-%d %H:%M:%S'), ext_hrs[i],ext_hts[i],ext_hilo[i]) ) ###Output _____no_output_____ ###Markdown Plot the predicted tide ###Code titlestr = 'January 1-2, 2013 Example Tides \n' +\ 'Kings Point, NY (Station 8516945)' plt.figure(figsize=(13,6)) plt.plot(hours, my_prediction, label="The data (38 NOAA)") plt.plot(ext_hrs,ext_hts,'ro',label="Extrema") plt.xticks(np.arange(0,49,12)) plt.xlabel('Hours since ' + str(prediction_t0) + '(GMT)') plt.ylabel('Meters above MLLW') plt.axis([-1,49,-1,4]) plt.legend(loc='upper left') plt.grid(True) plt.title(titlestr); ###Output _____no_output_____ ###Markdown Download harmonic consituentsRather than typing in the harmonic constituents as done above, it is much easier and less prone to error to download them directly from the NOAA website. The code below should produce the same constituents as used above.We use a function in the GeoClaw `tidetools` module to do this. Eventually this will be moved to geoclaw, but is local to this repository for development purposes... ###Code pathstr = os.path.abspath('..') if pathstr not in sys.path: sys.path.insert(0,pathstr) import tidetools print('Using tidetools from: %s' % tidetools.__file__) station = 8516945 # Kings point, NY print('Fetching harmonic constituents for station %s, standard 37 -- no Z0' % station) harcon, harcon_info = tidetools.fetch_harcon(station, units='meters', verbose=False) numbers = list(range(1,38)) harcon_numbers = [h['number'] for h in harcon] # make sure there are the expected number and in the right order: assert harcon_numbers == numbers, \ '*** unexpected harcon_numbers = %s' % numbers ###Output _____no_output_____ ###Markdown Note that `harcon` is now a dictionary with keys such as `number`, `name`, `amplitude`, etc. Print out these constituents, same as above but with names that agree with the [NOAA page](https://tidesandcurrents.noaa.gov/harcon.html?unit=0&timezone=0&id=8516945&name=Kings+Point&state=NY), and with more digits of precision in the amplitudes: ###Code print('# Name Amplitude Phase') for k,h in enumerate(harcon): print('%s %s %.5f %9.4f' \ % (str(h['number']).ljust(4), h['name'].ljust(7), h['amplitude'], h['phase_GMT'])) print("Note that: harcon_info['units'] = %s" % harcon_info['units']) ###Output _____no_output_____ ###Markdown But our `tidetools` function converted the units to meters in computing `harcon`, since that's what we requested above. 
Translate harcon into a pytides model:We can translate the `harcon` dictionary into the model needed by pytides as follows: ###Code NOAA_constituents = [c for c in cons.noaa if c != cons._Z0] #Set the amplitudes and phases lists that will be needed for Pytides NOAA_amplitudes = [h['amplitude'] for h in harcon] #in meters #These are relative to GMT (0 deg West time meridan) NOAA_phases_GMT = [h['phase_GMT'] for h in harcon] MSL = 5.113 MLLW = 3.928 offset = MSL - MLLW NOAA_constituents.append(cons._Z0) NOAA_phases_GMT.append(0) NOAA_amplitudes.append(offset) assert(len(constituents) == len(NOAA_phases_GMT) \ == len(NOAA_amplitudes)) NOAA_model = np.zeros(len(NOAA_constituents), dtype = Tide.dtype) NOAA_model['constituent'] = NOAA_constituents NOAA_model['amplitude'] = NOAA_amplitudes NOAA_model['phase'] = NOAA_phases_GMT #Build a TIDE INSTANCE called tide from the MODEL called model NOAA_tide = Tide(model=NOAA_model,radians=False) print('Predicted tide on January 1, 2013 relative to MLLW...') print(' at 00:00 GMT: %.3fm\n at 06:00 GMT: %.3fm' \ % tuple(NOAA_tide.at([datetime(2013,1,1,0,0,0), datetime(2013,1,1,6,0,0)]))) ###Output _____no_output_____
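###Markdown As a quick sanity check, the hand-typed model `tide` from earlier in this notebook and the downloaded-constituent model `NOAA_tide` can be evaluated at the same instants. The two predictions should agree closely; any small differences come from the extra digits of precision in the downloaded amplitudes and the slightly different MLLW value used above. ###Code
check_times = [datetime(2013, 1, 1, h, 0, 0) for h in (0, 6, 12, 18)]
typed_pred = tide.at(check_times)
downloaded_pred = NOAA_tide.at(check_times)
for t, a, b in zip(check_times, typed_pred, downloaded_pred):
    # difference between the two models at the same instant
    print('{}  typed: {:.3f} m  downloaded: {:.3f} m  diff: {:.4f} m'.format(t, a, b, a - b))
###Output _____no_output_____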
vgg16_transfer_learning.ipynb
###Markdown Transfer Learning to detect cats / dogs using Vgg16 DefinitionsRe-run this cell when starting from a checkpoint ###Code #reset python environment %reset -f import time default_device = '/gpu:0' # default_device = '/cpu:0' num_hidden_neurons = 256 vgg_mean = [103.939, 116.779, 123.68] classes = [l.strip() for l in open('synset.txt').readlines()] training_dataset_dir = './datasets/dogs-vs-cats-redux-kernels-edition/train/' test_dataset_dir = './datasets/dogs-vs-cats-redux-kernels-edition/test/' #model_version = int(time.time()) model_version = 3 model_path = 'models/model-{}/'.format(model_version) def get_batches(x, y, batch_size=32): num_rows = y.shape[0] num_batches = num_rows // batch_size if num_rows % batch_size != 0: num_batches = num_batches + 1 for batch in range(num_batches): yield x[batch_size * batch: batch_size * (batch + 1)], y[batch_size * batch: batch_size * (batch + 1)] ###Output _____no_output_____ ###Markdown Vgg16 Model Class ###Code import tensorflow as tf class Vgg16Model: def __init__(self, weights_path='./vgg16.npy'): self.weights = np.load('vgg16.npy', encoding='latin1').item() self.activation_fn = tf.nn.relu self.conv_padding = 'SAME' self.pool_padding = 'SAME' self.use_bias = True def build(self, input_tensor, trainable=False): self.conv1_1 = self.conv2d(input_tensor, 'conv1_1', 64, trainable) self.conv1_2 = self.conv2d(self.conv1_1, 'conv1_2', 64, trainable) # Max-pooling is performed over a 2 × 2 pixel window, with stride 2. self.max_pool1 = tf.layers.max_pooling2d(self.conv1_2, (2, 2), (2, 2), padding=self.pool_padding) self.conv2_1 = self.conv2d(self.max_pool1, 'conv2_1', 128, trainable) self.conv2_2 = self.conv2d(self.conv2_1, 'conv2_2', 128, trainable) self.max_pool2 = tf.layers.max_pooling2d(self.conv2_2, (2, 2), (2, 2), padding=self.pool_padding) self.conv3_1 = self.conv2d(self.max_pool2, 'conv3_1', 256, trainable) self.conv3_2 = self.conv2d(self.conv3_1, 'conv3_2', 256, trainable) self.conv3_3 = self.conv2d(self.conv3_2, 'conv3_3', 256, trainable) self.max_pool3 = tf.layers.max_pooling2d(self.conv3_3, (2, 2), (2, 2), padding=self.pool_padding) self.conv4_1 = self.conv2d(self.max_pool3, 'conv4_1', 512, trainable) self.conv4_2 = self.conv2d(self.conv4_1, 'conv4_2', 512, trainable) self.conv4_3 = self.conv2d(self.conv4_2, 'conv4_3', 512, trainable) self.max_pool4 = tf.layers.max_pooling2d(self.conv4_3, (2, 2), (2, 2), padding=self.pool_padding) self.conv5_1 = self.conv2d(self.max_pool4, 'conv5_1', 512, trainable) self.conv5_2 = self.conv2d(self.conv5_1, 'conv5_2', 512, trainable) self.conv5_3 = self.conv2d(self.conv5_2, 'conv5_3', 512, trainable) self.max_pool5 = tf.layers.max_pooling2d(self.conv5_3, (2, 2), (2, 2), padding=self.pool_padding) reshaped = tf.reshape(self.max_pool5, shape=(-1, 7 * 7 * 512)) self.fc6 = self.fc(reshaped, 'fc6', 4096, trainable) self.fc7 = self.fc(self.fc6, 'fc7', 4096, trainable) self.fc8 = self.fc(self.fc7, 'fc8', 1000, trainable) self.predictions = tf.nn.softmax(self.fc8, name='predictions') def conv2d(self, layer, name, n_filters, trainable, k_size=3): return tf.layers.conv2d(layer, n_filters, kernel_size=(k_size, k_size), activation=self.activation_fn, padding=self.conv_padding, name=name, trainable=trainable, kernel_initializer=tf.constant_initializer(self.weights[name][0], dtype=tf.float32), bias_initializer=tf.constant_initializer(self.weights[name][1], dtype=tf.float32), use_bias=self.use_bias) def fc(self, layer, name, size, trainable): return tf.layers.dense(layer, size, activation=self.activation_fn, 
name=name, trainable=trainable, kernel_initializer=tf.constant_initializer(self.weights[name][0], dtype=tf.float32), bias_initializer=tf.constant_initializer(self.weights[name][1], dtype=tf.float32), use_bias=self.use_bias) ###Output _____no_output_____ ###Markdown Images conversion for Vgg16Images have to be of dimension (224, 224, 3). The last dimension is ordered BGR (blue, green, red) ###Code import skimage import skimage.io import skimage.transform # https://github.com/machrisaa/tensorflow-vgg/blob/master/utils.py def load_image(image_path, mean=vgg_mean): image = skimage.io.imread(image_path) image = image.astype(float) short_edge = min(image.shape[:2]) yy = int((image.shape[0] - short_edge) / 2) xx = int((image.shape[1] - short_edge) / 2) crop_image = image[yy: yy + short_edge, xx: xx + short_edge] resized_image = skimage.transform.resize(crop_image, (224, 224), mode='constant') bgr = resized_image[:,:,::-1] - mean return bgr ###Output _____no_output_____ ###Markdown Extract Vgg16 features ###Code import time import os import math def extract_codes(image_directory, batch_size=32): tf.reset_default_graph() # create mapping of filename -> vgg features codes_fc6 = {} codes_fc7 = {} predictions = {} filenames = os.listdir(image_directory) num_files = len(filenames) num_batches = int(math.ceil(num_files / batch_size)) with tf.device(default_device): with tf.Session(graph = tf.Graph()) as sess: _input = tf.placeholder(tf.float32, shape=(None, 224, 224, 3), name="images") vgg = Vgg16Model() vgg.build(_input) sess.run(tf.global_variables_initializer()) for i in range(num_batches): batch_filenames = filenames[i*batch_size : ((i+1)*batch_size)] print("batch {} of {}".format(i+1, num_batches)) start = time.time() images = np.array([load_image(image_directory + f) for f in batch_filenames]) end = time.time() print("\timage loading took {:.4f} sec".format(end-start)) start = end batch_codes_fc6, batch_codes_fc7 = sess.run( [vgg.fc6, vgg.fc7], feed_dict={ _input: images } ) end = time.time() print("\tprediction took {:.4f} sec".format(end-start)) for i, filename in enumerate(batch_filenames): codes_fc6[filename] = batch_codes_fc6[i] codes_fc7[filename] = batch_codes_fc7[i] return codes_fc6, codes_fc7 import numpy as np print('Extracting training codes for fc6 and fc7') training_codes_fc6, training_codes_fc7 = extract_codes(training_dataset_dir) np.save('training_codes_fc6.npy', training_codes_fc6) np.save('training_codes_fc7.npy', training_codes_fc7) print('Extracting test codes for fc6 and fc7') test_codes_fc6, test_codes_fc7 = extract_codes(test_dataset_dir, batch_size=16) np.save('test_codes_fc6.npy', test_codes_fc6) np.save('test_codes_fc7.npy', test_codes_fc7) ###Output _____no_output_____ ###Markdown Checkpoint - Vgg16 features extracted and serialized ###Code import numpy as np import tensorflow as tf ###Output _____no_output_____ ###Markdown Load previously stored training codes (fc6) ###Code from collections import OrderedDict training_codes = np.load('training_codes_fc6.npy') training_codes = OrderedDict(training_codes.item()) ###Output _____no_output_____ ###Markdown Preprocess training data ###Code keys = list(training_codes.keys()) labels = np.array([ (1, 0) if name[:3] == 'dog' else (0,1) for name in keys]) # one hot encode labels images = np.array(list(training_codes.values())) # extract images for i,key in enumerate(keys): assert (training_codes.get(key) == images[i]).all() ###Output _____no_output_____ ###Markdown Split into training and validation set ###Code from 
sklearn.model_selection import StratifiedShuffleSplit splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.1) train_indices, val_indices = next(splitter.split(images, labels)) train_images, train_labels = images[train_indices], labels[train_indices] val_images, val_labels = images[val_indices], labels[val_indices] ###Output _____no_output_____ ###Markdown Transfer Learning Step - Use a small NN with a single hidden layer ###Code import os import time from tensorflow.python.saved_model import builder as saved_model_builder from tensorflow.python.saved_model.signature_def_utils import predict_signature_def from tensorflow.python.saved_model.tag_constants import SERVING from tensorflow.python.saved_model.signature_constants import DEFAULT_SERVING_SIGNATURE_DEF_KEY from tensorflow.python.saved_model.signature_constants import PREDICT_INPUTS from tensorflow.python.saved_model.signature_constants import PREDICT_OUTPUTS if(os.path.exists(model_path)): raise Exception('directory "{}" already exists. Delete or move it'.format(model_path)) num_epochs = 5 learning_rate = 0.01 keep_prob = 0.5 batch_size = 64 accuracy_print_steps = 10 iteration = 0 tf.reset_default_graph() with tf.device(default_device): with tf.Session(graph=tf.Graph()) as sess: with tf.name_scope("inputs"): _images = tf.placeholder(tf.float32, shape=(None, 4096), name='images') _keep_prob = tf.placeholder(tf.float32, name='keep_probability') with tf.name_scope("targets"): _labels = tf.placeholder(tf.float32, shape=(None, 2), name='labels') with tf.name_scope("hidden_layer"): hidden_weights = tf.Variable( initial_value = tf.truncated_normal([4096, num_hidden_neurons], mean=0.0, stddev=0.01), dtype=tf.float32, name="hidden_weights" ) hidden_bias = tf.Variable( initial_value = tf.zeros(num_hidden_neurons), dtype=tf.float32, name="hidden_bias" ) hidden = tf.matmul(_images, hidden_weights) + hidden_bias hidden = tf.nn.relu(hidden, name="hidden_relu") hidden = tf.nn.dropout(hidden, keep_prob=_keep_prob, name='hidden_dropout') tf.summary.histogram("hidden_weights", hidden_weights) tf.summary.histogram("hidden_bias", hidden_bias) with tf.name_scope("outputs"): output_weights = tf.Variable( initial_value=tf.truncated_normal(shape=(num_hidden_neurons, 2), mean=0.0, stddev=0.01), dtype=tf.float32, name="output_weights" ) output_bias = tf.Variable(initial_value=tf.zeros(2), dtype=tf.float32, name="output_bias") logits = tf.matmul(hidden, output_weights) + output_bias predictions = tf.nn.softmax(logits, name='predictions') tf.summary.histogram("output_weights", output_weights) tf.summary.histogram("output_bias", output_bias) tf.summary.histogram("predictions", predictions) with tf.name_scope("cost"): cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=_labels, name='cross_entropy') cost = tf.reduce_mean(cross_entropy, name='cost') tf.summary.scalar("cost", cost) with tf.name_scope("train"): optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) correct_predictions = tf.equal(tf.argmax(predictions, 1), tf.argmax(_labels, 1), name='correct_predictions') accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name='accuracy') ### merge summaries merged_summaries = tf.summary.merge_all() ### Save training and validation logs for tensorboard train_writer = tf.summary.FileWriter('./logs/train/{}'.format(model_version), sess.graph) val_writer = tf.summary.FileWriter('./logs/val/{}'.format(model_version)) sess.run(tf.global_variables_initializer()) for epoch in range(num_epochs): for 
batch_train_images, batch_train_labels in get_batches(train_images, train_labels, batch_size=batch_size): train_loss, _, p, summary = sess.run( [cost, optimizer, logits, merged_summaries], feed_dict = { _images: batch_train_images, _labels: batch_train_labels, _keep_prob: keep_prob }) train_writer.add_summary(summary, iteration) iteration = iteration + 1 if iteration % accuracy_print_steps == 0: val_acc, val_summary = sess.run([accuracy, merged_summaries], feed_dict ={ _images: val_images, _labels: val_labels, _keep_prob: 1. }) val_writer.add_summary(val_summary, iteration) print('{} / {} Accuracy: {} Loss: {}'.format(epoch + 1, num_epochs, val_acc, train_loss)) ### Save graph and trained variables builder = saved_model_builder.SavedModelBuilder(model_path) builder.add_meta_graph_and_variables( sess, [SERVING], signature_def_map = { DEFAULT_SERVING_SIGNATURE_DEF_KEY: predict_signature_def( inputs = { PREDICT_INPUTS: _images }, outputs = { PREDICT_OUTPUTS: predictions } ) } ) builder.save() ###Output _____no_output_____ ###Markdown Interlude - Try to find optimal hyperparametersRun training with different hyperparameters and use tensorboard to investigate the best solution ###Code import os import time import math from tensorflow.python.saved_model import builder as saved_model_builder from tensorflow.python.saved_model.signature_def_utils import predict_signature_def from tensorflow.python.saved_model.tag_constants import SERVING from tensorflow.python.saved_model.signature_constants import DEFAULT_SERVING_SIGNATURE_DEF_KEY from tensorflow.python.saved_model.signature_constants import PREDICT_INPUTS from tensorflow.python.saved_model.signature_constants import PREDICT_OUTPUTS accuracy_print_steps = 100 def train(writer, num_epochs, hidden_layer_size, learning_rate, num_hidden=1, keep_prob=0.5, batch_size=64, training=True, saved_model_path=None): with tf.device(default_device): with tf.Session(graph=tf.Graph()) as sess: with tf.name_scope("inputs"): _images = tf.placeholder(tf.float32, shape=(None, 4096), name='images') _is_training = tf.placeholder(tf.bool, name='is_training') _keep_prob = tf.placeholder(tf.float32, name='keep_probability') with tf.name_scope("targets"): _labels = tf.placeholder(tf.float32, shape=(None, 2), name='labels') prev_size = 4096 next_input = _images for i in range(num_hidden): with tf.variable_scope("hidden_layer_{}".format(i)): hidden_weights = tf.Variable( initial_value = tf.truncated_normal([prev_size, hidden_layer_size], mean=0.0, stddev=0.01), dtype=tf.float32, name="hidden_weights" ) hidden_bias = tf.Variable( initial_value = tf.zeros(hidden_layer_size), dtype=tf.float32, name="hidden_bias" ) hidden = tf.matmul(next_input, hidden_weights) + hidden_bias hidden = tf.layers.batch_normalization(hidden, training=_is_training) hidden = tf.nn.relu(hidden, name="hidden_relu") hidden = tf.nn.dropout(hidden, keep_prob=_keep_prob, name='hidden_dropout') tf.summary.histogram("hidden_weights_{}".format(i), hidden_weights) tf.summary.histogram("hidden_bias_{}".format(i), hidden_bias) next_input = hidden prev_size = hidden_layer_size with tf.name_scope("outputs"): output_weights = tf.Variable( initial_value=tf.truncated_normal(shape=(hidden_layer_size, 2), mean=0.0, stddev=0.01), dtype=tf.float32, name="output_weights" ) output_bias = tf.Variable(initial_value=tf.zeros(2), dtype=tf.float32, name="output_bias") logits = tf.matmul(next_input, output_weights) + output_bias predictions = tf.nn.softmax(logits, name='predictions') tf.summary.histogram("output_weights", 
output_weights) tf.summary.histogram("output_bias", output_bias) tf.summary.histogram("predictions", predictions) with tf.name_scope("cost"): cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=_labels, name='cross_entropy') cost = tf.reduce_mean(cross_entropy, name='cost') tf.summary.scalar("cost", cost) with tf.name_scope("train"): with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) correct_predictions = tf.equal(tf.argmax(predictions, 1), tf.argmax(_labels, 1), name='correct_predictions') accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name='accuracy') ### merge summaries merged_summaries = tf.summary.merge_all() sess.run(tf.global_variables_initializer()) iteration = 0 for epoch in range(num_epochs): for batch_train_images, batch_train_labels in get_batches(train_images, train_labels, batch_size=batch_size): train_loss, _, p, summary = sess.run( [cost, optimizer, logits, merged_summaries], feed_dict = { _images: batch_train_images, _labels: batch_train_labels, _keep_prob: keep_prob, _is_training: training }) iteration = iteration + 1 if iteration % accuracy_print_steps == 0: if not writer == None: writer.add_summary(summary, iteration) if iteration % accuracy_print_steps == 0: val_acc, val_summary = sess.run([accuracy, merged_summaries], feed_dict ={ _images: val_images, _labels: val_labels, _keep_prob: 1., _is_training: False }) print('\tEpoch {}/{} Iteration {} Accuracy: {} Loss: {}'.format(epoch + 1, num_epochs, iteration, val_acc, train_loss)) if not saved_model_path == None: ### Save graph and trained variables builder = saved_model_builder.SavedModelBuilder(saved_model_path) builder.add_meta_graph_and_variables( sess, [SERVING], signature_def_map = { DEFAULT_SERVING_SIGNATURE_DEF_KEY: predict_signature_def( inputs = { PREDICT_INPUTS: _images }, outputs = { PREDICT_OUTPUTS: predictions } ) } ) builder.save() batch_size = 64 for num_epochs in [1, 5]: for keep_prob in [0.5, 0.8, 1.0]: for num_hidden_layers in [1, 2]: for hidden_layer_size in [512, 1024, 2048]: for learning_rate in [0.01, 0.001]: log_string = 'logs/{}/e={},lr={},hl={},hs={},kp={},bs={}'.format(model_version, num_epochs, learning_rate, num_hidden_layers, hidden_layer_size, keep_prob, batch_size) writer = tf.summary.FileWriter(log_string) print("\n\nStarting {}".format(log_string)) train(writer, num_epochs, hidden_layer_size, learning_rate, num_hidden_layers, keep_prob, batch_size) ###Output _____no_output_____ ###Markdown Save model with promising hyperparameters ###Code # e=5, lr=0.001, hs=1024,hl=11,kp=0.5,bs=64 # train(None, 5, 1024, 0.001, 1, 0.5, 64, "{}/test1/".format(model_path)) # e=5,lr=0.01,hl=1,hs=512,kp=0.8,bs=64 train(None, 5, 512, 0.01, 1, 0.8, 64, True, "{}/test2/".format(model_path)) ###Output _____no_output_____ ###Markdown Checkpoint - Load previously stored test codes (fc6) ###Code keys = list(test_codes.keys()) # images = np.array(list(test_codes.values())) keys = list(map(lambda k: k[:-4], keys)) keys = np.array(sorted(keys, key=int)) examples = keys[2:6] images = [] for i,key in enumerate(keys): images.append(test_codes.get(key+'.jpg')) images = np.array(images) examples = test_keys[2:6] ###Output _____no_output_____ ###Markdown Example images ###Code %matplotlib inline import matplotlib.pyplot as plt import skimage.io fig = plt.figure(figsize=(20, 10)) for i, example in enumerate(examples): a = fig.add_subplot(1,len(examples), i+1) 
plt.imshow(skimage.io.imread(test_dataset_dir + example + '.jpg')) # a.set_title(codes[examples[0]]) ###Output _____no_output_____ ###Markdown Create predictions for test images ###Code import numpy as np import tensorflow as tf from tensorflow.python.saved_model import loader from tensorflow.python.saved_model.tag_constants import SERVING tf.reset_default_graph() # target_model_path = model_path #target_model_path = "{}/test2/".format(model_path) target_model_path = "{}test2/".format(model_path) with tf.device(default_device): with tf.Session(graph=tf.Graph()) as sess: loader.load(sess, [SERVING], target_model_path) with open('out6.csv', 'w') as f: f.write('id,label\n') for b_images, b_keys in get_batches(images, keys): s_keep_probability = sess.graph.get_tensor_by_name('inputs/keep_probability:0') s_images = sess.graph.get_tensor_by_name('inputs/images:0') s_is_training = sess.graph.get_tensor_by_name('inputs/is_training:0') s_predictions = sess.graph.get_tensor_by_name('outputs/predictions:0') preds = sess.run(s_predictions, feed_dict={ s_images: b_images, s_keep_probability: 1., s_is_training: False }) for idx,pred in enumerate(preds): s = '{},{:.5f}\n'.format(b_keys[idx], np.clip(pred[0], 0.05, 0.95)) f.write(s) ###Output _____no_output_____
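###Markdown A short aside on the `np.clip(pred[0], 0.05, 0.95)` used when writing the submission file: the Kaggle competition this dataset comes from is scored on log loss, and the clipping presumably serves to bound the penalty of a confidently wrong prediction. The numbers below only illustrate that trade-off; they are not a claim that 0.05 is the optimal clip value. ###Code
import numpy as np

# If the true label is 1 and we submit probability p, the per-example log loss is -log(p).
# Clipping therefore caps the worst-case penalty at -log(clip).
for clip in (1e-15, 0.02, 0.05, 0.10):
    print('clip = {:<7} worst-case log loss = {:.2f}'.format(clip, -np.log(clip)))
###Output _____no_output_____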
content/notebooks/05-classes-oop.ipynb
###Markdown Classes and Object Oriented Programming We have looked at functions which take input and return output (or do things to the input). However, sometimes it is useful to think about *objects* first rather than the actions applied to them. Think about a polynomial, such as the cubic$$ p(x) = 12 - 14 x + 2 x^3. $$This is one of the standard forms that we would expect to see for a polynomial. We could imagine representing this in Python using a container containing the coefficients, such as: ###Code p_normal = (12, -14, 0, 2) ###Output _____no_output_____ ###Markdown The order of the polynomial is given by the number of coefficients (minus one), which is given by `len(p_normal)-1`. However, there are many other ways it could be written, which are useful in different contexts. For example, we are often interested in the roots of the polynomial, so would want to express it in the form$$ p(x) = 2 (x - 1)(x - 2)(x + 3). $$This allows us to read off the roots directly. We could imagine representing this in Python using a container containing the roots, such as: ###Code p_roots = (1, 2, -3) ###Output _____no_output_____ ###Markdown combined with a single variable containing the leading term, ###Code p_leading_term = 2 ###Output _____no_output_____ ###Markdown We see that the order of the polynomial is given by the number of roots (and hence by `len(p_roots)`). This form represents the same polynomial but requires two pieces of information (the roots and the leading coefficient). The different forms are useful for different things. For example, if we want to add two polynomials the standard form makes it straightforward, but the factored form does not. Conversely, multiplying polynomials in the factored form is easy, whilst in the standard form it is not. But the key point is that the object - the polynomial - is the same: the representation may appear different, but it's the object itself that we really care about. So we want to represent the object in code, and work with that object. Classes Python, and other languages that include *object oriented* concepts (which is most modern languages) allow you to define and manipulate your own objects. Here we will define a *polynomial* object step by step. ###Code class Polynomial(object): explanation = "I am a polynomial" def explain(self): print(self.explanation) ###Output _____no_output_____ ###Markdown We have defined a *class*, which is a single object that will represent a polynomial. We use the keyword `class` in the same way that we use the keyword `def` when defining a function. The definition line ends with a colon, and all the code defining the object is indented by four spaces.The name of the object - the general class, or type, of the thing that we're defining - is `Polynomial`. The convention is that class names start with capital letters, but this convention is frequently ignored.The type of object that we are building on appears in brackets after the name of the object. The most basic thing, which is used most often, is the `object` type as here. Class variables are defined in the usual way, but are only visible inside the class. Variables that are set outside of functions, such as `explanation` above, will be common to all class variables.Functions are defined inside classes in the usual way (using the `def` keyword, indented by four additional spaces). They work in a special way: they are not called directly, but only when you have a member of the class. 
This is what the `self` keyword does: it takes the specific *instance* of the class and uses its data. Class functions are often called *methods*.Let's see how this works on a specific example: ###Code p = Polynomial() print(p.explanation) p.explain() p.explanation = "I change the string" p.explain() ###Output I am a polynomial I am a polynomial I change the string ###Markdown The first line, `p = Polynomial()`, creates an *instance* of the class. That is, it creates a specific `Polynomial`. It is assigned to the variable named `p`. We can access class variables using the "dot" notation, so the string can be printed via `p.explanation`. The method that prints the class variable also uses the "dot" notation, hence `p.explain()`. The `self` variable in the definition of the function is the instance itself, `p`. This is passed through automatically thanks to the dot notation.Note that we can change class variables in specific instances in the usual way (`p.explanation = ...` above). This only changes the variable for that instance. To check that, let us define two polynomials: ###Code p = Polynomial() p.explanation = "Changed the string again" q = Polynomial() p.explanation = "Changed the string a third time" p.explain() q.explain() ###Output Changed the string a third time I am a polynomial ###Markdown We can of course make the methods take additional variables. We modify the class (note that we have to completely re-define it each time): ###Code class Polynomial(object): explanation = "I am a polynomial" def explain_to(self, caller): print("Hello, {}. {}.".format(caller,self.explanation)) ###Output _____no_output_____ ###Markdown We then use this, remembering that the `self` variable is passed through automatically: ###Code r = Polynomial() r.explain_to("Alice") ###Output Hello, Alice. I am a polynomial. ###Markdown At the moment the class is not doing anything interesting. To do something interesting we need to store (and manipulate) relevant variables. The first thing to do is to add those variables when the instance is actually created. We do this by adding a special function (method) which changes how the variables of type `Polynomial` are created: ###Code class Polynomial(object): """Representing a polynomial.""" explanation = "I am a polynomial" def __init__(self, roots, leading_term): self.roots = roots self.leading_term = leading_term self.order = len(roots) def explain_to(self, caller): print("Hello, {}. {}.".format(caller,self.explanation)) print("My roots are {}.".format(self.roots)) ###Output _____no_output_____ ###Markdown This `__init__` function is called when a variable is created. There are a number of special class functions, each of which has two underscores before and after the name. This is another Python *convention* that is effectively a rule: functions surrounded by two underscores have special effects, and will be called by other Python functions internally. So now we can create a variable that represents a specific polynomial by storing its roots and the leading term: ###Code p = Polynomial(p_roots, p_leading_term) p.explain_to("Alice") q = Polynomial((1,1,0,-2), -1) q.explain_to("Bob") ###Output Hello, Alice. I am a polynomial. My roots are (1, 2, -3). Hello, Bob. I am a polynomial. My roots are (1, 1, 0, -2). ###Markdown It is always useful to have a function that shows what the class represents, and in particular what this particular instance looks like. 
We can define another method that explicitly `display`s the `Polynomial`: ###Code class Polynomial(object): """Representing a polynomial.""" explanation = "I am a polynomial" def __init__(self, roots, leading_term): self.roots = roots self.leading_term = leading_term self.order = len(roots) def display(self): string = str(self.leading_term) for root in self.roots: if root == 0: string = string + "x" elif root > 0: string = string + "(x - {})".format(root) else: string = string + "(x + {})".format(-root) return string def explain_to(self, caller): print("Hello, {}. {}.".format(caller,self.explanation)) print("My roots are {}.".format(self.roots)) p = Polynomial(p_roots, p_leading_term) print(p.display()) q = Polynomial((1,1,0,-2), -1) print(q.display()) ###Output 2(x - 1)(x - 2)(x + 3) -1(x - 1)(x - 1)x(x + 2) ###Markdown Where classes really come into their own is when we manipulate them as objects in their own right. For example, we can multiply together two polynomials to get another polynomial. We can create a method to do that: ###Code class Polynomial(object): """Representing a polynomial.""" explanation = "I am a polynomial" def __init__(self, roots, leading_term): self.roots = roots self.leading_term = leading_term self.order = len(roots) def display(self): string = str(self.leading_term) for root in self.roots: if root == 0: string = string + "x" elif root > 0: string = string + "(x - {})".format(root) else: string = string + "(x + {})".format(-root) return string def multiply(self, other): roots = self.roots + other.roots leading_term = self.leading_term * other.leading_term return Polynomial(roots, leading_term) def explain_to(self, caller): print("Hello, {}. {}.".format(caller,self.explanation)) print("My roots are {}.".format(self.roots)) p = Polynomial(p_roots, p_leading_term) q = Polynomial((1,1,0,-2), -1) r = p.multiply(q) print(r.display()) ###Output -2(x - 1)(x - 2)(x + 3)(x - 1)(x - 1)x(x + 2)
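###Markdown A natural refinement, sketched here under the assumption that we keep the same factored representation, is to implement the special method `__mul__`, so that two polynomials can be combined with the `*` operator instead of an explicit `multiply` call. Only the constructor and the new method are shown, for brevity: ###Code
class Polynomial(object):
    """Representing a polynomial by its roots and leading term."""

    def __init__(self, roots, leading_term):
        self.roots = roots
        self.leading_term = leading_term
        self.order = len(roots)

    def __mul__(self, other):
        # Same logic as the multiply method above, but invoked by the * operator.
        return Polynomial(self.roots + other.roots,
                          self.leading_term * other.leading_term)

p = Polynomial((1, 2, -3), 2)
q = Polynomial((1, 1, 0, -2), -1)
r = p * q   # equivalent to p.multiply(q)
###Output _____no_output_____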
Week 10/SLU17_2 - Exam Prep I/Exercise notebook.ipynb
###Markdown SLU 17 - Exam Prep I Batch 2 - Wave 1 Python Exam This is the Python exam from batch 2 - wave 1. Exam Duration: 2h ###Code from operator import itemgetter #used for evaluation import utils ###Output _____no_output_____ ###Markdown Question 1If you took a look at the dataset, you may have noticed that some of the numeric values are "polluted" with characters.In this task you'll have to write a function, `clean_percentage`, that should do the following:- Receive as argument a list of dictionaries; - Each of the dictionaries keeps the % of female students in higher education per area, for a specific year;- Remove all the characters and spaces from the numeric values;- Return the cleaned data;Hint: mind the naughty (R)'s :) ###Code def clean_percentage(data): # YOUR CODE HERE raise NotImplementedError() utils.b2w1_exerc_1_grading(clean_percentage) print("Answer is correct. Good Job.") ###Output _____no_output_____ ###Markdown Question 2In this task, we want to clean the names of the education areas, so they are all lower case and without white spaces.For instance, Social Sciences, Management and Law should become social_sciences_management_and_law.So you'll have to write two functions.The first function, `clean_header_string`, should do the following:- Receive a string as input;- Remove all the commas from the string;- Replace the white spaces in the string with underscores;- Convert the string to lower case;- Return the transformed string;The second function, `clean_header`, should do the following:- Receive as input, a list of dictionaries, similar to the one in the last question;- Use the first function to transform the education area names;- Return the transformed data; ###Code def clean_header_string(string): # YOUR CODE HERE raise NotImplementedError() def clean_header(d): # YOUR CODE HERE raise NotImplementedError() utils.b2w1_exerc_2_grading(clean_header_string, clean_header) print("Answer is correct. Good Job.") ###Output _____no_output_____ ###Markdown Question 3In this task, we want to convert our data to the right data types.For this you'll have to write two functions.The first function, `commas`, should do the following:- Receive as input a list of dictionaries, similar to the previous questions;- Replace the commas with dots in the numeric values;- Return the transformed data;The second function `data_types` should do the following:- Receive as input a list of dictionaries (specifically the one returned by the first function);- Convert the year values to integer;- Convert the % of female students values to float;- Return the transformed data; ###Code def commas(d): # YOUR CODE HERE raise NotImplementedError() def data_types(d): # YOUR CODE HERE raise NotImplementedError() utils.b2w1_exerc_3_grading(commas, data_types) print("Answer is correct. Good Job.") ###Output _____no_output_____ ###Markdown Question 4In this task, we want to focus on data about a specific education area.So you'll have to write a function, `education_years` that should do the following:- Receive as input a list of dictionaries (similar to the ones of the previous questions), and an education area (string);- Return a list of tuples where the first value of the tuple is the year and the second value is the % of females in the given education area;In this task you're required to use the following:- `map` function;- `lambda` function; ###Code def education_years(data, education_area): # YOUR CODE HERE raise NotImplementedError() utils.b2w1_exerc_4_grading(education_years) print("Answer is correct. 
Good Job.")
###Output
_____no_output_____
###Markdown
Question 5
In this task, for a certain education area, we want to find the years in which the percentage of females enrolled in higher education was highest.

So you'll have to write a function named `female_enrolled` that should do the following:
- Receive as input a list of tuples with year and % of females (like the output of the last task) and a threshold;
- Filter **out** all the tuples that correspond to a % of females strictly below the threshold;
- Sort the remaining tuples from higher to lower % of females;
- Return the resulting list of tuples;

In this task you're required to use the following:
- `sorted` function

Hint: `itemgetter` may be handy
###Code
def female_enrolled(data, threshold):
    # YOUR CODE HERE
    raise NotImplementedError()

utils.b2w1_exerc_5_grading(female_enrolled, 58.1)
print("Answer is correct. Good Job.")
###Output
_____no_output_____
###Markdown
Question 6
In this task we'll create a class to play the lottery!
First, get to know the `raffle_key`. The `raffle_key` is a dictionary, associating ticket numbers with prizes. All the numbers that are not in the raffle_key won't get any prize.
###Code
raffle_key = {1:"10 euros", 5:"100 euros", 9:"40 euros"}
###Output
_____no_output_____
###Markdown
Step 1:
- Create a `Lottery` class that takes one constructor parameter called `raffle_key`;
- In the `Lottery` class create a method named `get_lucky` that receives the lucky_number as a parameter and returns the string "{X} euros" or "Better luck next time!", depending on the presence of the lucky number in the raffle key;
- In our example above, the lucky number 9 corresponds to the response string "40 euros". If we get a lucky number that doesn't exist in the raffle key, the method should return "Better luck next time!";

Step 2:
- Instantiate the `Lottery` class with name `simple_lottery` and with the given raffle key;
- Call the method `get_lucky` with the given lucky number and print out the result;

In this task you're required to use the following:
- `try/except` statements;
###Code
class Lottery():
    # YOUR CODE HERE
    raise NotImplementedError()

utils.b2w1_exerc_6_grading(Lottery, simple_lottery, raffle_key)
print("Answer is correct. Good Job.")
###Output
_____no_output_____
###Markdown
Last but not least, submit your work! To submit your work, fill your slack ID in the `slack_id` variable (as a string).
Example: `slack_id = "x-men"`
Help: if you forgot your slack ID, [read this](https://moshfeu.medium.com/how-to-find-my-member-id-in-slack-workspace-d4bba942e38c).
###Code
# Submit your work!
#slack_id =
# YOUR CODE HERE
raise NotImplementedError()

from submit import submit
assert isinstance(slack_id, str)
slu = 17_2
submit(slack_id, slu)
###Output
_____no_output_____
Archive/solve_model.ipynb
###Markdown Model description
The simple specification of our model in discrete time is given by the recursive problem
\begin{equation}
\begin{aligned}
v(a_t) &= \max_{c} u(c_t) + \beta \mathrm{E}_t \big[ v(a_{t+1})\big] \\
& \text{s.t.} \\
a_{t+1} &= y_{t+1} + (1+r)(a_t - c_t). \\
a_t & \geq 0
\end{aligned}
\end{equation}
where
\begin{equation}
y_t = \begin{cases} y_1 \quad \text{w. prob.} \quad \pi \\ y_2 \quad \text{w. prob.} \quad 1-\pi \end{cases}
\end{equation}
and instantaneous utility is given by
\begin{equation}
u(c_t) = \begin{cases} \frac{c^{1-\eta} - 1}{1-\eta} \quad & \text{for} \quad \eta \neq 1 \\ \log c_t \quad & \text{for} \quad \eta = 1 \end{cases}
\end{equation}
First we set up all parameters used in the solution algorithms. ###Code par = model.setup() ###Output _____no_output_____ ###Markdown Solve by value function iteration (VFI)
We can then solve the model using value function iteration: starting from a guess $v^0$, we repeatedly apply the Bellman operator on a grid for assets, $v^{n+1}(a) = \max_{c} u(c) + \beta \mathrm{E}\big[v^{n}(a')\big]$, and stop once successive iterates are sufficiently close; the contraction mapping theorem guarantees convergence. ###Code sol_vfi = vfi.solve_VFI(par) ###Output _____no_output_____ ###Markdown Check how many iterations are used to find the converged solution ###Code print("Using value function iteration required " + str(sol_vfi.it) +" iterations before convergence") ###Output Using value function iteration required 267 iterations before convergence ###Markdown Solve by the endogenous grid method (EGM)
To solve the model with the endogenous grid method we again rely on the contraction mapping theorem; the difference from VFI is that each update is computed more efficiently. The idea of EGM is to use the Euler equation to solve the problem:
\begin{equation}
c_t^{-\eta} = (1+r)\beta \mathrm{E}_t \Big[ \big(c_{t+1}(a_{t+1})\big)^{-\eta} \Big] = (1+r)\beta \mathrm{E}_t \Big[ \Big(c_{t+1}\big(y_{t+1} + (1+r)(a_t-c_t)\big) \Big)^{-\eta} \Big]
\end{equation}
In the RHS, I've inserted the transition rule for assets. Note that in our model assets are the same as the state. ****** (Not quite sure what is meant here.) Run the EGM algorithm ###Code par = model.setup() sol_egm = egm.solve_EGM(par) ###Output _____no_output_____ ###Markdown Again, we check the number of iterations used ###Code print("Using value function iteration required " + str(sol_egm.it) +" iterations before convergence") ###Output Using value function iteration required 56 iterations before convergence ###Markdown Besides using considerably fewer iterations, each EGM iteration should also be much quicker, since we can back out optimal consumption from the previous iterate WITHOUT using a numerical solver. To check that our algorithms have run correctly, we can compare the converged policy functions derived from each algorithm ###Code fig = plt.figure(figsize=(14,5)) ax = fig.add_subplot(1,2,1) ax.plot(sol_egm.a, sol_egm.c, linestyle = ':', color = '0.4') ax.plot(sol_vfi.a, sol_vfi.c, linestyle = '-', color = '0.7') ax.set_xlabel(f"Assets, $a_t$") ax.set_ylabel(f"Consumption, $c^\star_t$") ax.set_title(f'Policy function') ax.set_xlim([-1,20]) plt.show() ###Output _____no_output_____ ###Markdown We can see that the two solutions are quite close. We now extend the EGM algorithm to solve a general Markov process for wages. The model is therefore
\begin{equation}
\begin{aligned}
v_j(a_t) &= \max_{c} u(c_t) + \beta \Big[ P_{jj} v_j(a_{t+1}) + (1-P_{jj})v_{-j}(a_{t+1}) \Big] \\
& \text{s.t.} \\
a_{t+1} &= y_j + (1+r)(a_t - c_t). \\
a_t & \geq 0
\end{aligned}
\end{equation}
for $j \in \{1,2\}$.
We have the transition matrix between states\begin{equation}\mathbf{P} = \begin{pmatrix}P_{11} & 1 - P_{11} \\1 - P_{22} & P_{22}\end{pmatrix}\end{equation}where $P_{11}$ is the probability of staying in state 1 conditional on being in state 1 and $P_{22}$ is the probability of staying in state 2 conditional on being in state 2. The two states can be interpreted as the employment and unemployment states of the income process. **Run the new and improved EGM solver** ###Code par = model.setup() sol_egm_2d = egm.solve_EGM_2d(par) fig = plt.figure(figsize=(8,6)) ax = fig.add_subplot(1,1,1) ax.plot(sol_egm_2d.a[0,:], sol_egm_2d.c[0,:], linestyle = ':', color = 'red', label = '$y_1$') ax.plot(sol_egm_2d.a[1,:], sol_egm_2d.c[1,:], linestyle = ':', color = 'blue', label = '$y_2$') # ax.plot(sol_egm_2d.a[1,:10], sol_egm_2d.a[1,:10], linestyle = '--', color = '0.6') # Check with 45 degree line. Seems correct ax.set_xlabel(f"Assets, $a_t$") ax.set_ylabel(f"Consumption, $c^\star_t$") ax.set_title(f'Policy function') ax.set_xlim([-1,20]) ax.legend(frameon=True) plt.plot() print("We again used " + str(sol_egm_2d.it) +" iterations before convergence") ###Output We again used 56 iterations before convergence ###Markdown Finite Difference (FD) Below is some preliminary test code for the FD solution: the first figure shows the sparsity pattern of the matrix $A$, and the second plots the resulting consumption functions for the two states. ###Code par = model.setup() sol_fd = fd.solve_fd(par) fig = plt.figure(figsize=(14,5)) ax = fig.add_subplot(1,2,1) ax.spy(sol_fd.A,markersize=1) plt.show() fig = plt.figure(figsize=(14,5)) ax = fig.add_subplot(1,2,1) ax.plot(sol_fd.a[:-1], sol_fd.c[0,:], linestyle = ':', color = '0.4') ax.plot(sol_fd.a[:-1], sol_fd.c[1,:], linestyle = ':', color = '0.4') # ax.plot(sol_vfi.a, sol_vfi.c, linestyle = '-', color = '0.7') # ax.set_xlabel(f"Assets, $a_t$") # ax.set_ylabel(f"Consumption, $c^\star_t$") # ax.set_title(f'Policy function') # ax.set_xlim([-1,20]) # ax.set_ylim([-.2,3.5]) plt.show() ###Output _____no_output_____
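###Markdown As a self-contained reference for the EGM update described above (and independent of the `egm` module), here is a minimal sketch of one endogenous grid step for the two-state version. The parameter values, grids and names below are illustrative placeholders rather than the calibration in `model.setup()`, a standard buffer-stock timing is assumed for when income arrives, and the borrowing constraint is only handled crudely. ###Code
import numpy as np

# Illustrative parameters (placeholders, not the values in model.setup()).
beta, r, rho = 0.96, 0.03, 2.0
y = np.array([1.0, 0.5])                    # income in state 1 and 2
P = np.array([[0.90, 0.10],
              [0.20, 0.80]])                # transition matrix between states
s_grid = np.linspace(1e-8, 20.0, 500)       # end-of-period savings grid

def egm_step(a_endo, c_pol):
    """One EGM update: given last iteration's endogenous asset grid and
    consumption policy per state, invert the Euler equation analytically."""
    a_new = np.empty((2, s_grid.size))
    c_new = np.empty((2, s_grid.size))
    for j in range(2):
        marg_u_next = np.zeros(s_grid.size)
        for jp in range(2):
            a_next = y[jp] + (1.0 + r) * s_grid                  # implied next-period assets
            c_next = np.interp(a_next, a_endo[jp], c_pol[jp])    # interpolated next-period consumption
            marg_u_next += P[j, jp] * c_next ** (-rho)
        c_now = ((1.0 + r) * beta * marg_u_next) ** (-1.0 / rho)  # inverted Euler equation
        c_new[j] = c_now
        a_new[j] = s_grid + c_now                                 # endogenous current asset grid
    return a_new, c_new

# Iterate the update from a rough initial guess until it stabilises.
a_endo = np.tile(s_grid, (2, 1))
c_pol = 0.1 + 0.5 * a_endo
for _ in range(500):
    a_endo, c_pol = egm_step(a_endo, c_pol) ###Output _____no_output_____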
Notebooks/5_Model_Data_Fixing.ipynb
###Markdown Swish-based classifier using cosine-annealed LR with restarts and data fixing- Swish activation, 4 layers, 100 neurons per layer- LR using cosine-annealing with restarts and cycle multiplicity of 2- Data is fixed with primary lepton at phi=0 and postive eta and the tau in the positive phi region- Validation score use ensemble of 10 models weighted by loss Import modules ###Code %matplotlib inline from __future__ import division import sys import os sys.path.append('../') from Modules.Basics import * from Modules.Class_Basics import * ###Output /home/giles/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. from ._conv import register_converters as _register_converters /home/giles/anaconda3/lib/python3.6/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead. from pandas.core import datetools Using TensorFlow backend. ###Markdown Options ###Code with open(dirLoc + 'features.pkl', 'rb') as fin: classTrainFeatures = pickle.load(fin) nSplits = 10 patience = 2 maxEpochs = 200 ensembleSize = 10 ensembleMode = 'loss' compileArgs = {'loss':'binary_crossentropy', 'optimizer':'adam'} trainParams = {'epochs' : 1, 'batch_size' : 256, 'verbose' : 0} modelParams = {'version':'modelSwish', 'nIn':len(classTrainFeatures), 'compileArgs':compileArgs, 'mode':'classifier'} print ("\nTraining on", len(classTrainFeatures), "features:", [var for var in classTrainFeatures]) ###Output Training on 30 features: ['DER_mass_MMC', 'DER_mass_transverse_met_lep', 'DER_mass_vis', 'DER_pt_h', 'DER_deltaeta_jet_jet', 'DER_mass_jet_jet', 'DER_prodeta_jet_jet', 'DER_deltar_tau_lep', 'DER_pt_tot', 'DER_sum_pt', 'DER_pt_ratio_lep_tau', 'DER_met_phi_centrality', 'DER_lep_eta_centrality', 'PRI_met_pt', 'PRI_met_sumet', 'PRI_jet_num', 'PRI_jet_all_pt', 'PRI_tau_px', 'PRI_tau_py', 'PRI_tau_pz', 'PRI_lep_px', 'PRI_lep_pz', 'PRI_jet_leading_px', 'PRI_jet_leading_py', 'PRI_jet_leading_pz', 'PRI_jet_subleading_px', 'PRI_jet_subleading_py', 'PRI_jet_subleading_pz', 'PRI_met_px', 'PRI_met_py'] ###Markdown Import data ###Code with open(dirLoc + 'inputPipe.pkl', 'rb') as fin: inputPipe = pickle.load(fin) trainData = BatchYielder(h5py.File(dirLoc + 'train.hdf5', "r+")) ###Output _____no_output_____ ###Markdown Determine LR ###Code lrFinder = batchLRFind(trainData, getModel, modelParams, trainParams, lrBounds=[1e-5,1e-1], trainOnWeights=True, verbose=0) ###Output 2 classes found, running in binary mode ###Markdown Train classifier ###Code results, histories = batchTrainClassifier(trainData, nSplits, getModel, {**modelParams, 'compileArgs':{**compileArgs, 'lr':2e-3}}, trainParams, trainOnWeights=True, maxEpochs=maxEpochs, cosAnnealMult=2, plotLR=1, reduxDecay=1, patience=patience, verbose=1, amsSize=250000) ###Output Using cosine annealing Training using weights Running fold 1 / 10 2 classes found, running in binary mode 1 New best found: 3.86929836739415e-05 2 New best found: 3.647943555592361e-05 3 New best found: 3.4577553436234576e-05 4 New best found: 3.455136837034314e-05 5 New best found: 3.3895907057180085e-05 6 New best found: 3.338360659878356e-05 7 New best found: 3.333653244419613e-05 11 New best found: 3.303178689870165e-05 12 New best found: 3.263184744256568e-05 14 New best found: 
3.255256915649959e-05 15 New best found: 3.253520906974089e-05 23 New best found: 3.250460929359209e-05 25 New best found: 3.242873912967296e-05 27 New best found: 3.230578908041312e-05 28 New best found: 3.2223146359115345e-05 29 New best found: 3.219880008701448e-05 30 New best found: 3.2189436271668564e-05 48 New best found: 3.217142636198869e-05 52 New best found: 3.2128781575826826e-05 53 New best found: 3.197256877764561e-05 57 New best found: 3.1970203616740196e-05 58 New best found: 3.1932453862306575e-05 59 New best found: 3.1907703030136986e-05 107 New best found: 3.1843169386139114e-05 CosineAnneal stalling after 255 epochs, entering redux decay at LR=0.000444946536711969 Early stopping after 265 epochs Score is: {'loss': 3.1843169386139114e-05, 'wAUC': 0.0637505589449926, 'AUC': 0.0978213835899111, 'AMS': 3.7283668484442973, 'cut': 0.9459168314933777} ###Markdown Comparing to 5_Model_Data_Augmentation, fixing the data allows the models to train much quicker, however comparison metrics are slightly worse. Construct ensemble ###Code with open('train_weights/resultsFile.pkl', 'rb') as fin: results = pickle.load(fin) ensemble, weights = assembleEnsemble(results, ensembleSize, ensembleMode, compileArgs) ###Output Choosing ensemble by loss Model 0 is 4 with loss = 3.14401249110233e-05 Model 1 is 3 with loss = 3.167605252674548e-05 Model 2 is 5 with loss = 3.1753759254934266e-05 Model 3 is 1 with loss = 3.1784107508802465e-05 Model 4 is 0 with loss = 3.1843169386139114e-05 Model 5 is 8 with loss = 3.19125038302915e-05 Model 6 is 2 with loss = 3.191284927864411e-05 Model 7 is 6 with loss = 3.195041188446339e-05 Model 8 is 7 with loss = 3.2213814113592564e-05 Model 9 is 9 with loss = 3.2241796406653954e-05 ###Markdown Response on validation data ###Code valData = BatchYielder(h5py.File(dirLoc + 'val.hdf5', "r+")) batchEnsemblePredict(ensemble, weights, valData, ensembleSize=ensembleSize, verbose=1) print('Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)), roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))) amsScanSlow(convertToDF(valData.source)) %%time bootstrapMeanAMS(convertToDF(valData.source), N=512) ###Output 50000 candidates loaded Mean AMS=4.0+-0.2, at mean cut of 0.96+-0.01 Exact mean cut 0.9631114136427641, corresponds to AMS of 3.9258569465289823 CPU times: user 1.75 s, sys: 9.31 s, total: 11.1 s Wall time: 2min 47s ###Markdown Comparing to 5_Model_Data_Augmentation, the validation metrics when fixing the data appear to be better with slight increases in maximal AMS and AMS corresponding to mean cut. The prediciton time is also much quicker due to lack of test-time augmentation Test scoring ###Code testData = BatchYielder(h5py.File(dirLoc + 'testing.hdf5', "r+")) %%time batchEnsemblePredict(ensemble, weights, testData, ensembleSize=ensembleSize, verbose=1) scoreTestOD(testData.source, 0.9631114136427641) ###Output Public:Private AMS: 3.5601902746277 : 3.665386798953047 ###Markdown The score on the test set, however in a lot worse for both public and private Save/Load ###Code name = "weights/Swish_CLR_Fixed" saveEnsemble(name, ensemble, weights, compileArgs, overwrite=1) ensemble, weights, compileArgs, _, _ = loadEnsemble(name) ###Output _____no_output_____
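###Markdown For reference, here is a standalone sketch of what an AMS cut scan does, independent of the `amsScanSlow` and `scoreTestOD` helpers used above. The AMS definition below is the HiggsML one with regularisation term $b_r = 10$, and the arrays at the bottom are random stand-ins for real predictions, targets and event weights. ###Code
import numpy as np

def ams(s, b, b_r=10.0):
    """Approximate Median Significance for summed signal weight s and background weight b."""
    return np.sqrt(2.0 * ((s + b + b_r) * np.log(1.0 + s / (b + b_r)) - s))

def scan_cuts(preds, targets, weights, n_cuts=100):
    """Scan prediction thresholds and return the (cut, AMS) pair with the highest AMS."""
    best_cut, best_ams = 0.0, 0.0
    for cut in np.linspace(0.0, 1.0, n_cuts, endpoint=False):
        sel = preds >= cut
        s = weights[sel & (targets == 1)].sum()   # weighted signal passing the cut
        b = weights[sel & (targets == 0)].sum()   # weighted background passing the cut
        score = ams(s, b)
        if score > best_ams:
            best_cut, best_ams = cut, score
    return best_cut, best_ams

# Toy usage with random stand-ins for (preds, targets, weights).
rng = np.random.RandomState(0)
preds = rng.rand(1000)
targets = rng.randint(0, 2, 1000)
weights = rng.rand(1000)
print(scan_cuts(preds, targets, weights)) ###Output _____no_output_____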
ProyectoModulo1_MaganaJ_PinedaB.ipynb
###Markdown Simulación de procesos financieros. **Nombres:** - Juan Pablo Ruíz Magaña.- Bryan Azahel Juárez Pineda.**Fecha:** 26 de abril del 2021.**Expediente** :- 721093.- 722176. **Profesor:** Oscar David Jaramillo Zuluaga. **Link Github**: [Repositorio proyecto.](https://github.com/Bryan722176/Proyecto_modulo1) **Link a la base de datos**: [Base de datos original.](https://www.kaggle.com/kapoorshivam/credit-analysis) Proyecto TEMA-2 1.1 Título del trabajo.**`Análisis de solicitud de productos bancarios de crédito.`** 1.2 Objetivos. Objetivo general.* Determinar las características de los productos bancarios de crédito que solicitarán futuros clientes. Objetivo secundarios.* Simular una cantidad considerable de escenarios que nos permita conocer posibles valores a futuro de las variables de interés.* Evaluar las simulaciones con el método Montecarlo con la finalidad de encontrar el valor esperado de las variables de interés.* Obtener las probabilidades de ocurrencia de ciertos eventos relacionados a nuestras variables de interés.* Definir los parámetros que caracterizan a las solicitudes de crédito a partir de las variables de interés. 1.3 Definición del problema. En las estructuras capitalistas que caracterizan al sistema económico mundial, el consumo forma parte elemental del día a día de las personas, la cuestión de interés surge en el momento en que la productividad / salario no corresponde a la capacidad adquisitiva que los individuos requieren para adquirir determinados bienes y servicios en el corto plazo. De ahí nace la necesidad y figura de los créditos.Situandonos en la posición de una institución bancaria, otorgar créditos conlleva un riesgo considerable en el modelo de negocios de la institución. Asumir el riesgo de impago en un crédito puede tener consecuencias graves si esto se llega a dar en escalas masivas, por ende quienes otorgan crédito deben realizar un análisis minucioso respecto a la capacidad crediticia de las personas o empresas que lo soliciten.Con el fin de cuidar los intereses del banco y maximizar las oportunidades de negocio de la institución, nos interesa analizar las características de las aplicaciones previas de diferentes productos bancarios de crédito, con la intención de que la institución bancaria pueda perfilar clientes pontenciales de manera más efectiva y a su vez gestione de manera más óptima sus recursos destinados a estas oportunidades de negocio.Para conocer lo antes descrito, y con nuestra base de datos, definimos los nodos que se deciden simular como los siguientes:* **Monto solicitado del crédito**: Esto para poder definir las expectativas crediticias de los potenciales clientes. Conocer el monto solicitado nos da una pauta para identificar y poder seccionar clientes a partir de su potencial capacidad adquisitiva y su necesidad.* **Monto aprobado del crédito**: Después de un análisis por parte de la institución, esta variable representa el monto designado a una solicitud en particular. Conocer el monto aprobado nos ayuda a determinar no solo la capacidad real de pago del cliente, sino también nos ayuda a saber si el banco cubre las necesidades de capitalización del cliente. * **Canal de venta**: Conducto por el cual se establece la relación entre el cliente y la institución, y se inicializa el trámite. 
Conocer esta variable nos ayudará a entender como se distribuye la captación de clientes para el banco, esto generará que se pueda saber cuales son los canales a fortalecer y en cuales continuar con la inercia descrita.* **Categoría de productos que se adquieren con instrumentos de crédito**: Esta variable representa el destino del crédito solicitado. Conocer la categoría de destino nos ayuda a entender no solo las necesidades y el por qué se solicitan los créditos, también nos puede ayudar a evaluar que sectores de consumo requieren de mayor capacidad crediticia. Diagrama de proceso.![image.png](attachment:image.png) Descripción del diagrama:En el diagrama se describe el proceso a modelar:1. Comenzando por el canal de venta elegido por el cliente, por medio del cual se hace el primer contacto entre la institución y el cliente.2. Consecuentemente el cliente solicita un monto determinado a manera de crédito.3. Posteriormente, la institución realiza un analisís crediticio y opta por asignar un monto a dicho al cliente.4. Por último el cliente hace uso del crédito adquiriendo bienes y servicios.A manera de conclusión de este diagrama lo que obtenemos es la caracterización de las solicitudes de crédito de los clientes de la institución. 1.4 Nodos y variables que se decidieron simular y porqué. Resumiendo lo antes descrito, los nodos a simular serán los siguientes:* **Monto solicitado del crédito**.* **Monto aprobado del crédito**.* **Canal de venta**.* **Categoría de productos adquirimos con instrumentos de crédito**.Y las posibles complicaciones que podriamos encontrar en su simulación, de manera generalizada son las siguientes:1. **Escalabilidad de los datos:** Es posible que se presenten valores extremos, por lo tanto debemos realizar un correcto análisis exploratorio de los datos y de ser necesario modificar su escala.2. **Ausencia de valores:** En datasets tan grandes es posibles que algunas variables reporten valores faltantes o simplemente se encuentren en una categoría desconocida, por lo tanto será conveniente observar su frecuencia en la muestra. 1.5 Definición de hipótesis y supuestos. De manera generalizada podemos definir los siguientes supuestos constantes en el proceso para conocer los parámetros de los créditos solicitados:1. **Se asume la aprobación del crédito.**2. **El monto aprobado del crédito no necesariamente corresponde al solicitado.**3. **El destino del crédito es rastreable.**4. **El monto de aplicación es mayor a 0.**En cuanto al desarrollo de hipótesis respecto al desarrollo del proyecto podemos definir lo siguiente:1. **El monto de crédito aprobado es independiente del canal de ventas por el cuál se haya tramitado el crédito.** 1.6 Obtención de bases de datos La base que se obtuvo a través de la plataforma web [_Kaggle_](https://www.kaggle.com/) describe las aplicaciones créditicias de un una institución financiera, el problema se presenta al momento de que las aplicaciones se aprueban de manera que el corporativo llega a otorgar créditos a individuos que no necesariamente terminan pagando en tiempo y forma.La base de datos original está conformada por $1'670,214$ filas y $37$ columnas. 
La realidad es que para el análisis particular que se desea realizar, existen variables y valores que no aportan demasiado, y con la finalidad de eficientar costos computacionales y dejar exclusivamente la información relevante al objetivo descrito previamente, realizaremos un análisis exploratorio y una limpieza de datos, la cual se podrá observar el notebook llamado "**`EDA_BD`**" presente en este mismo repositorio. 1.7 Visualización de resultados de simulación. Entonces, las variables a modelar serán las siguientes:- Canal de venta.- Monto solicitado.- Monto aprobado.- Bienes adquiridos. ###Code # Librerías a utilizar. import numpy as np import pandas as pd from scipy.optimize import minimize import seaborn as sns from statsmodels.nonparametric.kde import KDEUnivariate import matplotlib.pyplot as plt import scipy.stats as st import statsmodels.api as sm import warnings from fitter import Fitter import sklearn from sklearn.neighbors import KernelDensity %matplotlib inline # Importamos nuestro dataframe data = pd.read_csv('approval_clean.csv') data.head() ###Output _____no_output_____ ###Markdown Al observar nuestro dataset, nos encontramos con una gran incongruencia. El hecho de que un crédito se autorice implica que el **monto aprobado** de la línea de crédito debe ser mayor a 0, sin embargo existen valores que cuentan con está situación. Con la finalidad de ser congruentes a nuestro supuesto de aprobación, eliminaremos todas las filas que contengan estos valores. Los manejaremos como un _error de captura_. ###Code # Eliminar montos de crédito aprobados iguales a 0. Es un error de captura. data = data[data.AMT_CREDIT != 0] ###Output _____no_output_____ ###Markdown Comenzamos a explorar nuestras variables de interés. Primero observaremos los valores únicos y la cuenta de los mismos en el **canal de venta**. ###Code # Cuenta de valores únicos del canal de venta. data.CHANNEL_TYPE.value_counts() ###Output _____no_output_____ ###Markdown En el caso particular del **canal de venta** nos podemos percatar que existen 8 valores únicos, los cuales categorizamos como los distintos canales de venta sobre los cuales los clientes llegan a nosotros. A simple vista, observamos que el más popular es la categoria _Country-wide_.Ahora, visualizaremos un poco sobre nuestros **montos solicitados**. Para una visualización rápida lo observamos a partir de un histograma. ###Code # Visualizamos el monto solicitado. sns.histplot(data.AMT_APPLICATION, color='y', bins=10); print(f'El valor mínimo del monto de aplicación es de: ${int(min(data.AMT_APPLICATION))}') print(f'El valor máximo del monto de aplicación es de: ${int(max(data.AMT_APPLICATION))}') ###Output El valor mínimo del monto de aplicación es de: $0 El valor máximo del monto de aplicación es de: $5850000 ###Markdown En el caso de los **montos solicitados**, graficamente se observa que la escala sobre la cual se está trabajando no resulta del todo óptima en la visualización, esto porque la mayoría de las cantidades están abajo de un millón de dólares, sin embargo, existen algunos montos que superan los cinco millones, como lo es el máximo, el cual es de $5,850,00$. De está situación observamos otra cuestión que resulta interesante y que contrasta con uno de los supuestos planteados, y es que existen **montos solicitados** iguales a 0. A pesar de que no nos interesan para nuestra análisis, si se puede derivar un análisis particular del mismo. ###Code # Definimos caso particular. Monto de aplicación igual a 0. 
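# Nota ilustrativa (añadida): al filtrar conviene usar .copy() si luego se asignarán
# columnas sobre el resultado, para evitar el SettingWithCopyWarning de pandas,
# por ejemplo: data_particular = data[data['AMT_APPLICATION'] == 0].copy()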
data_particular = data[data['AMT_APPLICATION']==0] print(f'El caso particular, representa un: {(len(data_particular) / len(data))*100}% del total de la muestra.') data_particular.head() ###Output El caso particular, representa un: 4.226403794325899% del total de la muestra. ###Markdown Como podemos observar, este conjunto de datos representan aproximadamente un $4.23\%$ de la muestra. Por lo tanto su eliminación del análisis principal no afectará a los objetivos inicialmente planteados. Ahora, para analizar este caso particular, las variables que nos interesa observar a partir del nuevo subconjunto de datos serán el **tipo de contrato**, **tipo de instrumento**, **categoria de gasto**, **tipo de cliente** y **monto aprobado**. ###Code # Observamos el tipo de contrato. data_particular.NAME_CONTRACT_TYPE.value_counts() ###Output _____no_output_____ ###Markdown Se observa que el tipo de contrato que se da en estos casos, solo corresponde a un **préstamo revolvente**, siendo que este abarca la totalidad de la muestra. ###Code # Observamos el tipo de instrumento utilizado. data_particular.NAME_PORTFOLIO.value_counts() ###Output _____no_output_____ ###Markdown Para el caso del instrumento otorgado, vemos nuevamente que existe solo uno, las **tarjetas de crédito**. ###Code # Observamos la categoria de gasto de la línea. data_particular.NAME_GOODS_CATEGORY.value_counts() ###Output _____no_output_____ ###Markdown En el caso de la categoría de gasto observamos que solo existe una, **XNA** la cual representa una ausencia de valor. Con esto podemos deducir que estas líneas de crédito no informan la categoria de bienes adquiridos, o al menos esta información pareciera no resultar determinante para la institución. ###Code # Observamos el tipo de cliente. data_particular.NAME_CLIENT_TYPE.value_counts() ###Output _____no_output_____ ###Markdown En esta variable encontramos otra cuestión interesante, y es que la mayoría de personas con este tipo particular de crédito o son clientes, o lo llegaron a ser en algun momento. Esto se representa en casi todos los casos, donde solo $214$ clientes fueron nuevos para la institución. Esto nos da pauta a pensar que este tipo de líneas no se otorgan a cualquier cliente, probablemente sea una línea preferencial. ###Code # Visualizamos rápidamente, con un histograma el monto aprobado. sns.histplot(data_particular.AMT_CREDIT, color='y', bins=10); print(f'El valor mínimo del monto de aprobación es de: ${int(min(data_particular.AMT_CREDIT))}') print(f'El valor máximo del monto de aprobación es de: ${int(max(data_particular.AMT_CREDIT))}') ###Output El valor mínimo del monto de aprobación es de: $22500 El valor máximo del monto de aprobación es de: $1350000 ###Markdown En el caso del **monto aprobado** observamos que las cantidades oscilan entre $\$22,500$ y $\$1,350,000$. 
Tomando en cuenta que el máximo monto registrado en el dataset ronda los seis millones, podemos decir que este tipo de líneas de crédito no se caracterizan por ser altas.Entonces con todo lo antes mencionado podemos definir las carácteristicas de este caso particular, donde el **monto solicitado** del crédito es igual a 0, es decir, la institución otorga estas líneas sin que exista una prevía solicitud por parte de un cliente:- Es específico a una línea de crédito conocidad como **crédito revolvente**.- El instrumento que se otorga para ejercer la línea es exclusivamente la **tarjeta de crédito**.- Los bienes adquiridos, no representan o al menos no son del interés de la institución financiera.- El tipo de cliente al cual están dirigidas estas líneas es en su mayoría para aquellos que ya tienen un historial con la institución.- Los montos de estas líneas de crédito son más bajos, respecto a otro tipo de solicitudes. ###Code # Ya con el caso particular descrito, nos quedamos exclusivamente con los montos solicitados mayores a 0. data = data[data['AMT_APPLICATION'] !=0] data.head() ###Output _____no_output_____ ###Markdown Ahora ya con un exploratorio de datos más completo, comenzamos con la simulación correspondiente a los nodos descritos en el objetivo principal. `Nodo: Canal de venta`. `Visualización de datos`. ###Code # Datos del canal de venta. data['CHANNEL_TYPE'].unique() ###Output _____no_output_____ ###Markdown Al observar nuestras categorías de canal de venta, vemos que tenemos valores tipo "string". Con la intención de trabajar de una manera más cómoda, lo que haremos será transformar los valores únicos tipo "string" por valores numéricos.Estos estarán categorizados de manera que en el dominio de la frecuencia nos permitan observar alguna distribución de probabilidad que sea más moldeable. ###Code # Categorización de lo datos. data['CHANNEL_TYPE']=data['CHANNEL_TYPE'].replace('Country-wide',0) data['CHANNEL_TYPE']=data['CHANNEL_TYPE'].replace('Contact center',5) data['CHANNEL_TYPE']=data['CHANNEL_TYPE'].replace('Credit and cash offices',1) data['CHANNEL_TYPE']=data['CHANNEL_TYPE'].replace('Stone',2) data['CHANNEL_TYPE']=data['CHANNEL_TYPE'].replace('Regional / Local',3) data['CHANNEL_TYPE']=data['CHANNEL_TYPE'].replace('AP+ (Cash loan)',4) data['CHANNEL_TYPE']=data['CHANNEL_TYPE'].replace('Car dealer',7) data['CHANNEL_TYPE']=data['CHANNEL_TYPE'].replace('Channel of corporate sales',6) data['CHANNEL_TYPE'].head() # Visualizando nuestros datos. sns.boxplot(x=data['CHANNEL_TYPE'], saturation=.5, color='r'); ###Output _____no_output_____ ###Markdown En nuestro **Boxplot**, podemos observar que el primer cuartil de nuestros datos pertenecen a la categoria 0 la cual representa el canal _Country-wide_.En el segundo cuartil, es decir la media de nuestros datos se encuentra en la categoria 1 la cual representa el canal _Credit and cash offices_.En nuestro tercer cuartil podemos observar que los datos se encuentran en la categoria 2 la cual representa el canal _Stone_.Por último nuestro rango intercuartilico llega hasta la categoría 5 _Contact center_ mientrás que la categoría 6 _Channel of corporate sales_ y la categoría 7 _Car dearler_ salen del rango intercuartílico y se pueden llegar a considerarse outliers. ###Code # Histograma de nuestros datos de canal de venta. sns.histplot(data['CHANNEL_TYPE'], color='y', bins=8); #Generamos una muestra de 10000 proveniente de nuestros datos originales. n = 10000 channel = data['CHANNEL_TYPE'].sample(n) # Histograma de la muestra. 
sns.histplot(channel, color='r', bins=8).set_title('Muestra de Canal de Venta'); ###Output _____no_output_____ ###Markdown A simple vista podemos observar que con una muestra de $10,000$ valores, masomenos podemos representar de buena manera nuestra distribución original. Con el fin de eficientar costos computacionales, trabajaremos sobre esta muestra significativa al **Canal de Venta**. ###Code # Boxplot de la muestra. sns.boxplot(x=channel, saturation=.5, color='r'); ###Output _____no_output_____ ###Markdown Nuevamente con el **Boxplot** reiteramos lo anterior. La muestra sí es significativa y representa el comportamiento de la distribución real. `Kernel Density Estimation`. Lo que sigue a continuación será estimar un **Kernel Density Estimation** (**KDE**) para nuestro conjunto de datos.Para el desarrollo de esta parte tenemos diferentes librerías que nos ayudan a realizar un **KDE**, sin embargo, tomando en cuenta las características de nuestros datos (univariados) y además debido al algoritmo de la Transformada Rápida de Fourier (**FFT**), el cual disminuye los costos computacionales de un problema de tamaño $N$ a un tamaño $N\text{log}_{2}N$ Esto se encuentra programado en la librería _Statsmodels KDEUnivariate_. Por lo tanto utilizaremos esta estimación para este apartado. ###Code # Semilla np.random.seed(4555) # Definimos función. Regresa la evaluación de la pdf. def kde_statsmodels_u(x, x_grid, **kwargs): """Univariate Kernel Density Estimation with Statsmodels""" kde = KDEUnivariate(x) kde.fit(bw='normal_reference', **kwargs) return kde.evaluate(x_grid) # Definición del modelo para encontar la distribución kde_univariate = KDEUnivariate(channel) # Histograma de nuestros datos discretos. y,x,_=plt.hist(channel,bins=8,density=True,label='data') # en un hist puedo almacenar y,x x = x[1:] # dominio de la función x_grid = np.arange(0,8,1) # Guardamos el bw óptimo para este modelo bw_op = kde_univariate.fit(bw='normal_reference').bw #error y_hat = kde_statsmodels_u(channel, x_grid) err0 = ((y-y_hat)**2).sum() # Graficamos la distribución junto con la estimación. plt.plot(x_grid,kde_statsmodels_u(channel, x_grid)) plt.hist(channel,bins=8,density=True) plt.title('KDE univariate') plt.show() print(f'El bandwidth óptimo es de: {bw_op}') ###Output _____no_output_____ ###Markdown Con la intención de hacer una comparación del ajuste de nuestro **KDE**, respecto a nuestros datos reales, lo que haremos será comparar su aproximación a través de una métrica de error, en este caso particular utilizaremos el **SSE** como referencia. ###Code # Definimos nuestro kde en una función. f_uni_kde_d = lambda x: kde_univariate.evaluate(x) # Definimos métrica de error. # Almacenamos la distribución real. y_real, x_real = np.histogram(data['CHANNEL_TYPE'], bins=8, density=True) x_real = x_real[1:] # Definimos la distribución generada por el KDE. y_est = f_uni_kde_d(x_real) # Calculamos el error. error_kde =((y_real-y_est)**2).sum() print(f'El error de la suma de la resta de cuadrados que se obtuvo entre nuestro KDE y la distribución real es de: {((error_kde)).round(4)}') ###Output El error de la suma de la resta de cuadrados que se obtuvo entre nuestro KDE y la distribución real es de: 0.0577 ###Markdown Observamos que el error de aproximación ronda en el $6\%$, sin embargo, visualmente no parece representar de buena manera la distribución real de los datos. `Buscando PDF´s`. 
Debido a los resultados que obtuvimos al estimar el kernel, donde pudimos observarque la pdf estimada parecía una exponencial decidimos utilizar la función de probabilidad continua de probabilidad exponencial para contrastar. ###Code # A fin de comparar. Visualizamos nuestros datos respecto a una distribución exponencial continua. plt.hist(channel,bins=8,density=True,label='data') error = pd.DataFrame(index=['expon'], columns=['error']) params = st.expon.fit(channel) y_hat = st.expon.pdf(x_real) error['error']=((y-y_hat)**2).sum() #SSE plt.title('PDF exponencial') plt.plot(x_real,y_hat,label='') plt.legend() error ###Output _____no_output_____ ###Markdown Ahora, con la intención de encontrar distribuciones de probabilidad existentes a nuestro conjunto de datos, realizaremos el test de ajuste con **Kolmogorov-Smirnov**. Este método lo que hace es determinar la bondad de ajuste de dos distribuciones de probabilidad entre sí. En nuestro caso una de ellas será el **Canal de Venta** y la otra será la que estaremos variando.Para conocer que tan buenos es el ajuste de la distribución exponencial que hemos hecho aplicamos la prueba **Kolmogorov-Smirnov**.Definimos un nivel de significancia ${\alpha} = 0.01$. Por lo tanto queremos un nivel de confianza $1-{\alpha} = 99\%$.$H_{0}$: Nuestro conjunto de datos sigue una distribución Exponencial.$H_{a}$: Nuestro conjunto de datos NO sigue una distribución Exponencial.Si el "p-value" es menor que ${\alpha}$ se rechaza $H_{0}$.Si el "p-value" es mayor que el nivel de significancia no se puede rechazar $H_{0}$. ###Code # Prueba de Kolmogorov-Smirnov para la distribución exponencial. st.kstest(channel,lambda x: st.expon.cdf(x,(1/np.mean(channel)),(1/np.mean(channel)**2))) ###Output _____no_output_____ ###Markdown El "p-value" resultó menor que ${\alpha}$ por lo tanto se rechaza ${H_{0}}$. `Prueba de bondad y ajuste con chi cuadrada` Ya que observamos que la _Distribución Exponencial_ no se ajusta a nuestro conjunto de datos, lo que haremos será ajustarlo para distribuciones discretas.Decidimos utilizar las siguientes funciones discretas para realizar el test de prueba de bondad y ajuste con chi cuadrada.1. Poisson2. Binomial3. Geometrica **Distirbución Binomial** **Formulación de hipótesis**$H_{0}$: Nuestro conjunto de datos tienen una distribución binomial.$H_{a}$: Nuestro conjunto de datos no sigue una distribución binomial. **Plan de análisis.**Para este análisis nuestro nivel de significancia es 5%.Por lo tanto nuestro nivel de confianza es del 95%.${\alpha} = 0.05$$1-{\alpha} = 0.95$ **Generación de datos**Como muestra observada utilizamos los "_counts_" del histograma de la muestra tomada de nuestro conjunto de datos original.Como muestra esperada utilizamos los "_counts_" del histograma de una muestra que se distribuya de manera binomial. ###Code # Parametros para la distribución binomial n, p = 8,.1452 # Muestra binomial bi = st.binom.rvs(n,p,size=10000) # Graficamos las distribuciones plt.title('Muestra Binomial vs Muestra Original') y2,x2,_=plt.hist(bi,bins=8,label='Binomial') y1,x1,_=plt.hist(channel,bins=8,label='Original') plt.legend() plt.show() warnings.filterwarnings("ignore") # Realizamos la prueba de bondad y ajuste. chi_bin = st.chisquare(f_obs=y1,f_exp=y2) chi_bin ###Output _____no_output_____ ###Markdown El "p-value" resulto menor que ${\alpha}$ por lo tanto se rechaza ${H_{0}}$ **Distribución de Poisson**. 
**Formulación de hipótesis**$H_{0}$: Nuestro conjunto de datos tienen una distribución de Poisson.$H_{a}$: Nuestro conjunto de datos no sigue una distribución de Poisson. **Plan de análisis.**Para este análisis nuestro nivel de significancia es 5%.Por lo tanto nuestro nivel de confianza es del 95%.${\alpha} = 0.05$$1-{\alpha} = 0.95$ **Generación de datos**Como muestra observada utilizamos los "_counts_" del histograma de la muestra tomada de nuestro conjunto de datos original.Como muestra esperada utilizamos los "_counts_" del histograma de una muestra que se distribuya como Poisson. **Obtención de parámetros**Para conocer si nuestra muestra se distribuye como **_Poisson_** nosotros identificamps los parametros que distribuyen esta distribución. En el caso de **_Poisson_** tanto ${\mu}$ y ${\sigma}^{2}$ son iguales al parametro ${\lambda}$.El parametro ${\lambda}$ es igual a la media de la los datos. ###Code # Calculando la media de la muestra. mu = np.mean(channel) # Vector de enteros con los valores a generar. k = x_grid # Muestra Poisson. poi = st.poisson.rvs(mu,size=len(channel)) # Graficamos las distribuciones. plt.title('Muestra Poisson vs Muestra Original') y2,x2,_=plt.hist(poi,bins=8,label='Poisson',density=True) y1,x1,_=plt.hist(channel,bins=8,label='Original',density=True) plt.legend() plt.show() warnings.filterwarnings("ignore") # Realizamos la prueba de bondad y ajuste. chi_poi = st.chisquare(f_obs=y1,f_exp=y2) chi_poi ###Output _____no_output_____ ###Markdown El "p-value" resultó mayor que ${\alpha}$ por lo tanto no se rechaza ${H_{0}}$ **Distribución Geométrica**. **Formulación de hipótesis**$H_{0}$: Nuestro conjunto de datos tienen una distribución geométrica.$H_{a}$: Nuestro conjunto de datos no sigue una distribución geométrica **Obtención de Parámetros**La media de la distribución geometrica se calcula de la siguiente manera:$$ E[X] = \frac{1-p}{p} $$Recordando que la media de neustra muestra es igual a: 1.1616. Podemos despejar la ecuación para obtener ${p}$.$$ 1.1616p = 1-p $$$$ 2.1616p = 1 $$$$ p = .4626 $$ ###Code # Definición de parámetros. p_g = 0.4626 g = st.geom.rvs(p_g,size=len(channel)) # Graficamos las distribuciones. plt.title('Muestra Geómetrica vs Muestra Original') y2,x2,_=plt.hist(g,bins=8,label='Geómetrica',density=True) y1,x1,_=plt.hist(channel,bins=8,label='Original',density=True) plt.legend() plt.show() warnings.filterwarnings("ignore") # Realizamos la prueba de bondad y ajuste. chi_geom = st.chisquare(f_obs=y1,f_exp=y2) chi_geom ###Output _____no_output_____ ###Markdown El "p-value" resultó mayor que ${\alpha}$ por lo tanto no se rechaza ${H_{0}}$ **Elección de distribución y KDE**Optamos por descartar el **KDE** ya que nuestra variable discreta no tiene tantas categorias como para utilizar un KDE el cualsería más útil para datos continuos. Por otra parte la distribución exponencial fue rechazada con el test de **Kolmogorov-Sminorv**. Finalmente optamos por utilizar la distribución de Poisson la cual tuvo el "p-value" más alto, modela todas nuestras categorias y visualmente parece ser la que mejor se ajusta a nuestros datos. `Transformada inversa`. ###Code # Definimos las funciones necesarias para evaluar la transformada inversa. # Comenzamos con la función para graficar histogramas discretos. 
def plot_histogram_discrete(distribucion:'distribución a graficar histograma', label:'label del legend'): # len(set(distribucion)) cuenta la cantidad de elementos distintos de la variable 'distribucion' plt.figure(figsize=[8,4]) y,x = np.histogram(distribucion,density = True,bins = len(set(distribucion))) plt.bar(list(set(distribucion)),y,label=label) plt.legend() plt.show() # Función que genera variables aleatorias discretas. def Gen_distr_discreta(p_acum: 'P.Acumulada de la distribución a generar', indices: 'valores reales a generar aleatoriamente', U: 'cantidad de números aleatorios a generar'): U =np.random.rand(U) # Diccionario de valores aleatorios rand2reales = {i: idx for i, idx in enumerate(indices)} # Series de los valores aletorios y = pd.Series([sum([1 for p in p_acum if p < ui]) for ui in U]).map(rand2reales) return y # Función que regresa la distribucion de Poisson. def poisson(mu,k): p=np.zeros(len(k)) p[0]=(np.exp(-mu)*mu**k[0])/1 # sabiendo que 0! es 1 def fill(i): nonlocal mu,p p[i+1]=(mu/k[i+1])*p[i] [fill(i) for i in range(len(k)-1)] return np.cumsum(p) # Definimos variables aleatorias. p_acum = poisson(mu,k) N = 1000 poisson_dist = Gen_distr_discreta(p_acum,k,N) plot_histogram_discrete(poisson_dist,'Label muestra Poisson Trans Inv.') ###Output _____no_output_____ ###Markdown `Reducción de Varianza` **Estimando la media**Fórmula para estimar la media de una distribución discreta$$\mathbb {E} [X]=x_{1}p(X=x_{1})+...+x_{n}p(X=x_{n})=\sum _{i=1}^{n}x_{i}p(x_{i})$$ ###Code # Media teórica probs=np.array([channel.value_counts()/10000]) vals = channel.value_counts().index.values media_teorica = np.mean(np.dot(probs,vals)) media_teorica ###Output _____no_output_____ ###Markdown **Montecarlo** ###Code print('monte carlo crudo', poisson_dist.mean()) ###Output monte carlo crudo 1.155 ###Markdown **Estratificado igualmente espaciado** ###Code def estra_igualmente_espaciado(B): """ Función que ingresa el número de estratos y retorna un conjunto de variables aleatorias con estratos igualmente espaciados """ U = np.random.rand(B) i = np.arange(0,B) v = (U+i)/B return v N=10000 U = estra_igualmente_espaciado(N) estra2 = Gen_distr_discreta(p_acum,k,U) print('Igualmente Estratificado =',np.mean(estra2)) N = 10000 u1 = np.random.rand(N) u2 = 1-u1 comp_dist = Gen_distr_discreta(p_acum,k,np.concatenate([u1,u2])) print('Complementario:',np.mean(comp_dist)) red_var = pd.DataFrame({'Media teo':media_teorica, 'monte carlo':poisson_dist.mean(), 'estra igual':np.mean(estra2), 'complementarios':np.mean(comp_dist)},index=range(0,1)) red_var=red_var.T red_var['error absoluto']=[np.abs((media_teorica-i)) for i in red_var[0].values] red_var.sort_values(by='error absoluto') ###Output _____no_output_____ ###Markdown Con lo antes descrito, observamos que el estrato igualmente espaciado resultó ser el mejor método de reducción de varianza.Ahora definimos un intervalo de confianza al $95\%$. ###Code print('El intervalo de confianza es', (mu-(1.96*(mu**2/len(poisson_dist))**0.5),mu+(1.96*(mu**2/len(poisson_dist))**0.5))) ###Output El intervalo de confianza es (1.0492684537029788, 1.1879315462970212) ###Markdown `Conclusiones`. En conclusión pudimos comprobar que una dsitribución discreta modelaba mejor nuestros datos que los métodos para variables continuas como el kde. Igualmente tanto con montecarlo como con los métdos de reducción de varianza pudimos aproximarnos a la media de la muestra proveninete de los datos orignales. 
Comprobando que la media de nuestros datos esta en la categoría 1 _credit and cash offices_.Por lo tanto como institución bancaria podriamos prever que la medía de los trámites comenzaran por este canal de venta. `Nodo: Monto solicitado`. `Visualización de los datos`. ###Code # Visualizando nuestros datos. sns.boxplot(x=data['AMT_APPLICATION'], saturation=.5, color='r'); ###Output _____no_output_____ ###Markdown Observamos que existen valores atipicos que excenden el rango intercuartilico en el boxplot, por lo tanto optaremos por utilizar una escala logaritmica que nos permita trabajar escalar los datos que pueden ser extremos. ###Code # Transformamos el monto de aplicación a una escala logaritmica. data_log = np.log(data['AMT_APPLICATION']) sns.boxplot(x=data_log, saturation=.5, color='r'); ###Output _____no_output_____ ###Markdown Con la escala logaritmica, si bien aún existen valores que exceden el rango intercuartilico, ya son menos. Además de manera visual podemos observar de mejor manera la distribución de nuestros datos. ###Code # Propiedades estadísticas de los datos en escala logarítmica. data_log.describe() # Visualizamos los datos en un histograma sns.histplot(data_log, color='y', bins=20); ###Output _____no_output_____ ###Markdown Ahora, podemos observar que los datos en escala logaritmica parecen seguir una distribución más pareja, de hecho pareciera aproximarse al comportamiento de una normal. Ya con nuestros datos preparados, proseguimos a buscar un **KDE**. ###Code # Trabajaremos con un kernel univariado. # Límites donde nosotros queremos graficar nuestra distribución. x_grid = np.linspace(9,15,1000) # Datos de prueba, tomaremos 100,000. x = data_log.sample(n=100000, random_state=1) # Datos reales. Muestra completa pdf_true = data_log # Con la intención de saber si nuestra muestra representa el comportamiento de la distribución, obtendremos su histograma. sns.histplot(x, color='r', bins=20).set_title('Muestra de 100,000 datos'); ###Output _____no_output_____ ###Markdown Al observar el histograma de la distribución, y comparandolo con la muestra aleatoria generada de 100,000. Podemos concluir que 100,000 muestras aleatorias del conjunto de datos, **sí** representa en buena parte la distribución. `Kernel Density Estimation`. ###Code # Definimos función. Regresa la evaluación de la pdf. # Es una estimación para un kernel univariado. def kde_statsmodels_u(x, x_grid, **kwargs): """Univariate Kernel Density Estimation with Statsmodels""" kde = KDEUnivariate(x) kde.fit(bw='normal_reference', **kwargs) return kde.evaluate(x_grid) # Definición del modelo para la distribución total. kde_univariate = KDEUnivariate(x) # Guardamos el bw óptimo para este modelo bw_optimo = kde_univariate.fit(bw='normal_reference').bw # Graficamos la distribución junto con la estimación. plt.plot(x_grid,kde_statsmodels_u(x, x_grid)) plt.hist(x,bins=20,density=True) plt.title('KDE univariate') plt.show() print(f'El bandwidth óptimo es de: {bw_optimo}') ###Output _____no_output_____ ###Markdown De la anterior gráfica podemos observar que nuestro **KDE** parece definir de una buena manera la función de distribución de probabilidad real de nuestros datos. Una cuestión que resalta a simple vista es que parece existir un problema de _overfitting_, donde el **KDE** esta sobreestimando el comportamiento de la distribución real. Con la intención de generar una función de distribución de probabilidad más suavizada, cambiaremos el parámetro bw a un $0.20$. 
###Code # Definimos la función con un bandwidth más suave def kde_statsmodels_u20(x, x_grid, **kwargs): """Univariate Kernel Density Estimation with Statsmodels""" kde = KDEUnivariate(x) kde.fit(bw=0.20, **kwargs) return kde.evaluate(x_grid) # Graficamos la distribución junto con la estimación. plt.plot(x_grid,kde_statsmodels_u20(x, x_grid)) plt.hist(x,bins=20,density=True) plt.title('KDE univariate') plt.show() print(f'El bandwidth es de: 0.20') # Definimos la función de distribución de probabilidad a partir del KDEUnivariate. kde_univariate.fit(bw=0.20) f_uni_kde = lambda x: kde_univariate.evaluate(x) # Graficamos la función. plt.plot(x_grid, f_uni_kde(x_grid)) plt.title('PDF data'); ###Output _____no_output_____ ###Markdown Con la intención de hacer una comparación del ajuste de nuestro **KDE**, respecto a nuestros datos reales, lo que haremos será comparar su aproximación a través de una métrica de error, en este caso particular utilizaremos el **SSE** como referencia. ###Code # Definimos métrica de error. # Almacenamos la distribución real. y_real, x_real = np.histogram(data_log, bins=20, density=True) x_real = x_real[1:] # Definimos la distribución generada por el KDE. y_est = f_uni_kde(x_real) # Calculamos el error. error_kde =((y_real-y_est)**2).sum() print(f'El error de la suma de la resta de cuadrados que se obtuvo entre nuestro KDE y la distribución real es de: {((error_kde)).round(4)}') ###Output El error de la suma de la resta de cuadrados que se obtuvo entre nuestro KDE y la distribución real es de: 0.0264 ###Markdown Observando el **SSE** obtenido, podemos concluir que el **KDE** univariado que se obtuvo, parece aproximar de una buena manera la distribución que sigue nuesto **Monto Solicitado**, sin embargo, con la intención de generar un análisis más profundo, evaluaremos cuales de las funciones de distribución de probabilidad existentes se ajustan bien a nuestros datos. `Buscando PDF's`. ###Code # Tratamos de ajustar las distribuciones de probabilidad existentes a nuestro Monto Solicitado. # Comenzamos evaluando el método fit de la librería estadística. # Obtenemos las distribuciones continuas existentes en la librería estadística. dist_con = [d for d in dir(st) if isinstance(getattr(st,d),getattr(st,'rv_continuous'))] # Definimos una función que itere sobre las distribuciones que se indiquen y regrese su error respecto a la distribucion real. def give_error(data: 'Datos de la distribución real', distributions: 'Lista de distribuciones a evaluar'): # Definimos un data frame donde se almacena el MSE. errores = pd.DataFrame(index=distributions, columns=['MSE']) # Definimos los parámetros de la distribución real. y, x = np.histogram(data, bins=20, density=True) x = x[1:] # Iteramos en las distribuciones. for d in distributions: params = getattr(st,d).fit(data) y_hat =getattr(st,d)(*params).pdf(x) errores.loc[d] = ((y-y_hat)**2).mean() return errores ###Output _____no_output_____ ###Markdown Al explorar nuestra función _give_error_, nos dimos cuenta que el costo computacional de ajustar nuestro conjunto de datos en cada distribución continua, es demasiado alto. Además de esto observamos que existen distribuciones que parecen no ajustarse del todo bien, lanzando errores y deteniendo la ejecución. 
Entonces decidimos investigar respecto a la paquetería **Fitter** presente en python, y aquí obtuvimos una ventaja en tema de costos computacionales pues al varíar nosotros el parámetro "timeout", decidimos el tiempo de ejecución máximo al ajustar una distribución a los datos, con lo cual eficientamos y ajustamos solo aquellas que responden a las necesidades del problema. ###Code np.random.seed(4355) %%time warnings.filterwarnings("ignore") # Calculamos el costo computacional de la paquetería fitter. # Definimos el tiempo de ajuste máximo de 30 segundos. f_fitter = Fitter(x, bins=20, timeout=30) f_fitter.fit() error_fitter = f_fitter.summary(plot=False) error_fitter # Definimos el top 3 de las distribuciones que según el sse mejor se ajustan. best_error = error_fitter.sort_values(by='sumsquare_error').head(3) best_error ###Output _____no_output_____ ###Markdown Tomando en cuenta el **SSE**, con la paquetería **Fitter**, obtenemos que el top 3 de distribuciones que mejor se ajustan a nuestra muestra son las siguientes:- **Chi2**- **Erlang**- **Gamma** ###Code %%time # Con la intención de comparar el error, ahora utilizaremos nuestra función give_error. # En ella evaluamos el error medio cuadrático, esto con la intención de ver si evaluando otro error obtenemos el mismo orden. give_error(x, ['chi2', 'erlang', 'gamma']).sort_values(by='MSE') ###Output Wall time: 2min 53s ###Markdown Al observar los errores obtenidos tanto con el **SSE** y el **MSE**, en ambos casos observamos que los errores siguen el mismo orden, sin embargo, recordemos que el error solo nos sirve como una métrica de referencia. Ahora, con la intención de encontrar cuál de ellas resulta ser la que mejor se ajusta a nuestro conjunto de datos, realizaremos un análisis para cada una de las distribuciones descritas. Esto lo haremos a través de la prueba de **Kolmogorov-Smirnov** y una prueba **Cuantil-Cuantil**. Comenzamos comparando con la _distribución Chi2_, de manera que nuestra prueba de hipótesis se describe de la siguiente manera:- $H_{0}$: Los datos se distribuyen como una distribución de Chi2.- $H_{a}$: Los datos no se distribuyen como una distribución de Chi2. ###Code # Comenzamos para la distribución chi2. Prueba Kolmogorov-Smirnov. # Definimos los parámetros para la distribución. chi2_params = st.chi2.fit(x) # Definimos prueba de hipotesis. st.kstest(x, lambda x: st.chi2.cdf(x,*chi2_params)) ###Output _____no_output_____ ###Markdown Observando nuestro "p-value", vemos como al ser muy pequeño nos da indicios para rechazar la hipótesis nula, de manera que podemos decir que estadísticamente nuestro **Monto Solicitado** **NO** se distribuye como Chi2. Ahora realizamos la prueba **Cuantil-Cuantil**. ###Code # Definimos una función que nos grafique la distribución real, la ajustada y el q-q plot. def plot_pdf_qq(v_ale_real: 'Variables aleatorias de la distribución real', bins: 'Número de bins del histograma', distribution: 'Nombre de la distribución a evaluar'): # Histograma de las variables reales del conjunto de datos. 
divisiones = bins # Cantidad de barras en el histograma fig, ax = plt.subplots(2,1, figsize=(14,8)) fig.tight_layout(pad=3.0) y, x, _ = ax[0].hist(v_ale_real, bins, density=True, label='Histograma mediciones') # Función de distribución de probabilidad la cual creemos que mis datos distribuyen dist = distribution params = getattr(st, dist).fit(v_ale_real) # Graficar PDF de la distribución que queremos probar y_hat = getattr(st, dist).pdf(x, *params) ax[0].plot(x, y_hat,'r--', label='Distribución ajustada') ax[0].set_ylabel('Probability') ax[0].grid() # Gráfica de Q-Q entre mis datos y la curva que quiero probar que sigue mi distribución (dist) grap2 = st.probplot(v_ale_real, dist=dist, sparams=getattr(st, dist).fit(x)[:-2], plot=ax[1], fit=True) ax[1].grid() ax[1].set_title('Q-Q Plot') plt.show() # Graficamos la distribución y el q-q plot. # Comenzamos para la distribución chi2 v_ale_real = x bins_norm = 20 distribution_name = 'chi2' # Graficamos plot_pdf_qq(v_ale_real, bins_norm, distribution_name) ###Output _____no_output_____ ###Markdown Observando nuestro gráfico **Cuantil - Cuantil**, podemos confirmar lo descrito por nuestra prueba de **Kolmogorov-Smirnov**. Nuestros datos no se distribuyen de manera normal. Ahora evaluaremos la _distribución erlang_. Entonces tenemos la siguiente prueba de hipótesis:- $H_{0}$: Los datos se distribuyen como una distribución erlang.- $H_{a}$: Los datos no se distribuyen como distribución erlang. ###Code # Prueba Kolmogorov-Smirnov. Para la distribución de erlang. # Definimos los parámetros para la distribución. erlang_params = st.erlang.fit(x) # Definimos prueba de hipotesis. st.kstest(x, lambda x: st.erlang.cdf(x,*erlang_params)) ###Output _____no_output_____ ###Markdown En el caso de la _distribución erlang_ obsevamos que contamos con un "p-value" que ronda el $0$, el cual, ya nos dice de antemano que rechazemos la hipótesis nula ($H_{0}$), de manera que podemos decir que estadísticamente nuestro conjunto de datos no ditribuye como erlang.Ahora realizamos la prueba **Cuantil-Cuantil**. ###Code # Graficamos la distribución y el q-q plot. # Ahora para la distribución de erlang distribution_name2 = 'erlang' # Graficamos. plot_pdf_qq(v_ale_real, bins_norm, distribution_name2) ###Output _____no_output_____ ###Markdown Aquí nuevamente confirmamos los descrito por nuestra prueba de **Kolmogorov-Smirnov**, los datos no se distribuyen como una _distribución erlang_. Por último, para terminar de evaluar las distribuciones con menor error de aproximación, tenemos a la _distribución gamma_. La prueba de hipótesis se ve de la siguiente manera:- $H_{0}$: Los datos se distribuyen como una _distribución gamma_.- $H_{a}$: Los datos no se distribuyen como una _distribución gamma_. ###Code # Prueba Kolmogorov-Smirnov. Para la distribución de uniforme. # Definimos los parámetros para la distribución. gamma_params = st.gamma.fit(y_real) # Definimos prueba de hipotesis. st.kstest(y_real, lambda x: st.gamma.cdf(x,*gamma_params)) ###Output _____no_output_____ ###Markdown De esta distribución con su "p-value" pequeño, podemos decir de manera inmediata que rechazamos la hipótesis nula ($H_{0}$). Por lo tanto nuestro conjunto de datos, **NO** se distribuye como una _distribución gamma_. Ahora realizamos la prueba **Cuantil-Cuantil**. ###Code # Graficamos la distribución y el q-q plot. # Ahora para la distribución de uniforme. distribution_name3 = 'gamma' # Graficamos. 
plot_pdf_qq(v_ale_real, bins_norm, distribution_name3) ###Output _____no_output_____ ###Markdown Para el caso de la _distribución gamma_ confirmamos nuevamente la situación que se viene repitiendo. Nuestro conjunto de datos **Monto Solicitado** no se distribuyen como gamma. Entonces, después de analizar el ajuste de la distribución tanto para un **KDE** como para distribuciones de probabilidad hipóteticas, en este caso particular, para las distribuciones **Chi2**, **Erlang** y **Gamma** ninguna se ajustó de manera óptima a nuestro conjunto de datos, con lo cual asumiremos la estimación por **KDE** como la función óptima para generar variables aleatorias que distribuyen como $f(x)$ del **Monto Solicitado**. Para generar variables aleatorias que distribuyen como nuestra $f(x)$, utilizaremos el **Método de Aceptación y Rechazo**. `Aceptación y Rechazo`. ###Code # Método de aceptación rechazo. # Este metodo genera N variables aleatorias def values_acep_rechazo(N:'Cantidad de variables aleatorias a generar', f:'Función objetivo a generar', t:'Función que mayorea a f', lim_inf:'Límite inferior para R2', lim_sup:'Límite superior para R2'): c = [] i = 0 agregador = c.append # Iteramos hasta lograr N variables aleatorias. while i <= N: # Generación de variables aleatorias bajo los parámetros establecidos R1 = np.random.uniform(lim_inf,lim_sup,size=1) R2 = np.random.rand(1) f_x = f(R1) t_x = t(R1) # Condición de aceptación. if R2 * t_x <= f_x: agregador(float(R1)) i+=1 return c ###Output _____no_output_____ ###Markdown Ya con las función del **Método de Aceptación y Rechazo** programadas, ahora lo que haremos será definir los parámetros necesarios para generar los aleatorios. Definiremos como la función que mayora $t(x)$ como una constante, la cual será el máximo de la función $f(x)$. ###Code # Definimos el máximo de la función de distribución de probabilidad. max_f = max(f_uni_kde(x_grid)) print('El máximo de nuestra f(x) es:',max_f) # Transformamos el máximo en una función constante definida en el dominio de nuestra PDF. t = lambda x: max_f * np.ones([len(x)]) # Gráficamos nuestra PDF junto con la función que la mayora. plt.plot(x_grid, f_uni_kde(x_grid), label='f(x)') plt.plot(x_grid, t(x_grid), label='t(x)') plt.title('$t(x) \geq f(x)$') plt.legend(); ###Output El máximo de nuestra f(x) es: 0.3841074592743751 ###Markdown `Simulación`. Hasta ahora, ya tenemos como se generan variables aleatorias que distribuyen como $f(x)$ que es el función de distribución de probabilidad del **Monto Solicitado**. Lo que haremos a continuación será simular escenarios _a posteriori_ para el **Monto Solicitado**. ###Code # Calculamos los valores aleatorios. Generaremos 1,000 aleatorios. New_VA_MS = values_acep_rechazo(1000, f_uni_kde, t, min(x_real), max(x_real)) # A manera de comprobación, graficaremos un histograma de las variables aleatorias generadas. Se espera distribuyan como f(x). sns.histplot(New_VA_MS, color='r', bins=20).set_title('V.A.Generadas'); ###Output _____no_output_____ ###Markdown Del histograma antes descrito, podemos observar que masomenos distribuye como nuestra función $f(x)$, con lo cual podemos aceptar el **Método de Aceptación y Rechazo** cómo válido y nuestra simulación también. ###Code # Utilizando el metodo de numeros complementarios. # Complemento de variables aleatorias. a = min(New_VA_MS) b = max(New_VA_MS) U_sC = b - np.array(New_VA_MS) + a # Buscamos la media para este método. 
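# Nota ilustrativa (añadida): reflejar cada valor como a + b - x es la idea de las
# variables antitéticas (complementarias): cada pareja queda negativamente correlacionada
# y el promedio conjunto tiene menor varianza que con muestras independientes; la
# reflexión preserva la distribución original solo si ésta es simétrica en [a, b].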
complementario = np.concatenate([New_VA_MS, U_sC]) media_complementario = np.mean(complementario) print(f'La media utilizando el método de números complementarios es de: {media_complementario}') ###Output La media utilizando el método de números complementarios es de: 12.247596951974685 ###Markdown Ahora definimos un intervalo de confianza para nuestro **Monto solicitado**. ###Code # Definimos un intervalo al 95 % de confianza. confianza = 0.95 muestra_generada = complementario intervalo_MS = st.t.interval(confianza, len(muestra_generada)-1, loc=np.mean(muestra_generada), scale=st.sem(muestra_generada)) print(f'Con una confianza del 95% nuestros datos se encontrarán en el intervalo: {intervalo_MS}') ###Output Con una confianza del 95% nuestros datos se encontrarán en el intervalo: (12.192777675298423, 12.302416228650948) ###Markdown Ya con nuestro intervalo definido, ahora desescalamos los datos. ###Code # Hacemos intervalo deescalado. intervalo_MS_N = ((np.exp(intervalo_MS[0]), np.exp(intervalo_MS[1]))) print(f'Con una confianza del 95% nuestros datos se encontrarán en el intervalo: {intervalo_MS_N}') ###Output Con una confianza del 95% nuestros datos se encontrarán en el intervalo: (197358.6035043664, 220227.46624156003) ###Markdown `Conclusiones`. En cuanto al valor esperado para la siguiente solicitud de crédito, observamos que para el **Monto solicitado** del crédito, este se define entre $\$197,358$ y $\$220,227$.Entonces nosotros como institución bancaria, a falta de conocer el **Monto Aprobado** de los créditos. Podemos saber aproximadamente como será el comportamiento medio de las solicitudes, con lo cual podemos irnos dando una idea de los recursos que serán necesario emplear a este rubro de la institución. `Nodo: Monto aprobado`. `Visualización de los datos`. ###Code # Visualizando nuestros datos. sns.boxplot(x=data['AMT_CREDIT'], saturation=.5, color='r'); ###Output _____no_output_____ ###Markdown De igual manera como se puede observar en el **monto solicitado**, existen valores extremos que excenden el rango intercuatílico de nuestro **boxplot**, con lo cual tenemos una pauta para pensar en alguna transformación matemática a nuestros datos, en este caso la más sencilla resultaría en una transformación logarítmica. ###Code # Transformamos el monto aprobado a una escala logaritmica. data_approved = np.log(data['AMT_CREDIT']) sns.boxplot(x=data_approved, saturation=.5, color='r'); ###Output _____no_output_____ ###Markdown Con la transformación logarítmica se puede observar que se logra reducir significativamente el sesgo generado por los valores outliers, y si bien aún siguen existiendo la tendencia generalizada ya parece ser modelable. ###Code # Propiedades estadísticas de los datos en escala logarítmica. data_approved.describe() ###Output _____no_output_____ ###Markdown Dentro de las propiedades estadísticas podemos ver que tenemos alrededor de $991,477$ datos, la media de la distribución ronda por el $11.58$ y el valor mínimo es de $8.54$ mientrás que el máximo es de $15.32$. ###Code # Visualizamos los datos en un histograma. sns.histplot(data_approved, color='y', bins=20).set_title('Monto aprobado. Muestra completa'); ###Output _____no_output_____ ###Markdown Recordemos que estamos trabajando con $991,477$ datos, y si tomamos en cuenta los costos computacionales sería recomendable reducir la muestra a alguna cantidad de datos que siga representando significativamente la distribución. 
Si obsevamos el comportamiento del **monto aprobado** vemos que es muy similar al del **monto solicitado**, nodo sobre el cual se trabajó con $100,000$ muestras aleatorias, por lo tanto realizaremos un proceso similar con este nodo. ###Code # Datos de prueba, tomaremos 100,000. x_approved = data_approved.sample(n=100000, random_state=1) # Con la intención de saber si nuestra muestra representa el comportamiento de la distribución, obtendremos su histograma. sns.histplot(x_approved, color='r', bins=20).set_title('Muestra de 100,000 datos'); ###Output _____no_output_____ ###Markdown Del histograma descrito se puede observar que tanto la muestra completa como la muestra de $100,000$ datos se comportan de una manera muy similar, con lo cual se puede aceptar está muestra como válida para poder trabajar sobre ella.Ya con nuestros datos preparados, proseguiremos a realizar una estimación por **KDE**. `Kernel Density Estimation`. ###Code # Trabajaremos con un kernel univariado. # Límites donde nosotros queremos graficar nuestra distribución. x_grid_approved = np.linspace(9,15,1000) # Datos reales. Muestra completa pdf_true_approved = data_approved # Definición del modelo para la distribución total. kde_univariate_approved = KDEUnivariate(x_approved) # Guardamos el bw óptimo para este modelo bw_optimo_approved = kde_univariate_approved.fit(bw='normal_reference').bw # Graficamos la distribución junto con la estimación. plt.plot(x_grid_approved,kde_statsmodels_u(x_approved, x_grid_approved)) plt.hist(x_approved,bins=20,density=True) plt.title('KDE univariate') plt.show() print(f'El bandwidth óptimo es de: {bw_optimo_approved}') ###Output _____no_output_____ ###Markdown Al observar nuestro ajuste por **KDE** podemos decir que con el bandwidth óptimo que es de $0.1138$ pareciera que estamos sobre estimando el comportamiento de la distribución real. Entonces al igual que lo hicimos con el **monto solicitado** lo que haremos será tratar de suavizar este ajuste aumentando el bandwidth. Utilizaremos uno de $0.20$. ###Code # Graficamos la distribución junto con la estimación. plt.plot(x_grid_approved,kde_statsmodels_u20(x_approved, x_grid_approved)) plt.hist(x_approved,bins=20,density=True) plt.title('KDE univariate') plt.show() print(f'El bandwidth es de: 0.20') ###Output _____no_output_____ ###Markdown Con un bandwidth de $0.20$ se puede observar que el ajuste es mucho más suave lo que hace que nuestro **KDE** sea más manejable, sobre todo en temas de costos computacionales.Ahora, con la intención de hacer una comparación del ajuste de nuestro **KDE**, respecto a nuestros datos reales, lo que haremos será comparar su aproximación a través de una métrica de error, en este caso particular utilizaremos el **SSE** como referencia. ###Code # Definimos la función de distribución de probabilidad a partir del KDEUnivariate. kde_univariate_approved.fit(bw=0.20) f_uni_kde_a = lambda x: kde_univariate_approved.evaluate(x) # Definimos métrica de error. # Almacenamos la distribución real. y_real_a, x_real_a = np.histogram(data_approved, bins=20, density=True) x_real_a = x_real_a[1:] # Definimos la distribución generada por el KDE. y_est_a = f_uni_kde_a(x_real_a) # Calculamos el error. 
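# Nota descriptiva: el SSE de abajo compara las alturas del histograma (20 bins, density=True)
# con la densidad del KDE evaluada en los bordes derechos de cada bin; es solo una métrica
# de referencia y su valor depende del número de bins elegido.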
error_kde_a =((y_real_a-y_est_a)**2).sum() print(f'El error de la suma de la resta de cuadrados que se obtuvo entre nuestro KDE y la distribución real es de: {((error_kde_a)).round(4)}') ###Output El error de la suma de la resta de cuadrados que se obtuvo entre nuestro KDE y la distribución real es de: 0.0168 ###Markdown Observando el **SSE** obtenido, podemos concluir que el **KDE** univariado que se obtuvo, parece aproximar de una buena manera la distribución que sigue nuesto **Monto Aprobado**, sin embargo, con la intención de generar un análisis más profundo, evaluaremos cuales de las funciones de distribución de probabilidad existentes se ajustan bien a nuestros conjunto de datos, esto para determinar si existe alguna otra que se ajuste de mejor manera. `Buscando PDF's`. Utilizaremos la paquetería **Fitter** para encontrar de las distribuciones de probabilidad continuas cuales son las que mejor se adecuan a nuestro **monto aprobado**. ###Code %%time # Calculamos el costo computacional de la paquetería fitter. # Definimos el tiempo de ajuste máximo de 30 segundos. f_fitter_a = Fitter(x_approved, bins=20, timeout=30) f_fitter_a.fit() error_fitter_a = f_fitter_a.summary(plot=False) error_fitter_a # Definimos el top 3 de las distribuciones que según el sse mejor se ajustan. best_error_a = error_fitter_a.sort_values(by='sumsquare_error').head(3) best_error_a ###Output _____no_output_____ ###Markdown De nuestro ajuste con **Fitter** podemos ver que según el **SSE** el top 3 de distribuciones que mejor se ajustan son las siguientes:- Beta- F- ErlangRecordemos que el error de aproximación solamente sirve como una métrica de referencia, para poder definir cual **PDF** se ajusta mejor a nuestro conjunto de datos realizaremos la prueba de **Kolmogorov-Smirnov** y una prueba **Cuantil-Cuantil**.Trabajaremos con un nivel de significancia del $5\%$ $\rightarrow \alpha = 0.05$ Comenzamos comparando con la _distribución Beta_, de manera que nuestra prueba de hipótesis se describe de la siguiente manera:- $H_{0}$: Los datos se distribuyen como una distribución Beta.- $H_{a}$: Los datos no se distribuyen como una distribución Beta. ###Code # Comenzamos para la distribución beta. Prueba Kolmogorov-Smirnov. # Definimos los parámetros para la distribución. beta_params = st.beta.fit(x_approved) # Definimos prueba de hipotesis. st.kstest(x_approved, lambda x: st.beta.cdf(x,*beta_params)) ###Output _____no_output_____ ###Markdown Recordando que estamos trabajando con $\alpha = 0.05$ observamos que nuestro "p-value" es muy pequeño ni siquiera supera el nivel de significancia, por lo tanto podemos rechazar la $H_{0}$ de manera que el **Monto Aprobado** **NO** se distribuye como una _Distribución Beta_. Ahora realizamos la prueba **Cuantil-Cuantil**. ###Code # Graficamos la distribución y el q-q plot. # Comenzamos para la distribución beta. v_ale_real_a = x_approved bins_norm = 20 distribution_name4 = 'beta' # Graficamos plot_pdf_qq(v_ale_real_a, bins_norm, distribution_name4) ###Output _____no_output_____ ###Markdown De la prueba **Cuantil-Cuantil** se observa que el ajuste a lo largo de la recta es muy bueno, sin embargo, la diferencia principal se da en las colas de la distribución donde el ajuste no es muy bueno. 
A pesar de lo antes descrito, el ajuste de la prueba **Cuantil-Cuantil** sí difiere a los resultados obtenidos por la prueba de **Kolmogorov-Smirnov**, entonces con la intención de tener una mayor certeza sobre si nuestro **Monto Aprobado** se distribuye como una Beta, realizaremos una prueba de hipótesis con $\chi^{2}$ Prueba de $\chi^{2}$. ###Code # Probability Density Function. pdf_beta = st.beta.pdf(x_real_a,*beta_params) # Cálculo de la esperanza usando la expresión teórica. Ei_beta = x_real_a * pdf_beta # Cálculo usando la librería estadística de la chi cuadrada. X2_beta = st.chisquare(y_real_a, Ei_beta) print('Valor de chi cuadrado librería = ', X2_beta) ###Output Valor de chi cuadrado librería = Power_divergenceResult(statistic=28.56808519399228, pvalue=0.07308943294731723) ###Markdown Despues de realizar la prueba de $\chi^{2}$ se obtiene un "p-value" que ronda en el $0.0730$, si recordamos que nuestro nivel de significancia es de $5\%$, entonces eso significa que nuestro "p-value" es mayor que nuestro $\alpha$ con lo cual teóricamente no podríamos rechazar $H_{0}$, sin embargo, la probabilidad (p-value) sigue siendo significativamente pequeña, con lo cual se decide rechazar la _Distribución Beta_ como la que mejor se ajusta al **Monto Aprobado**. Ahora lo compararemos con la _distribución F_, entonces tenemos la siguiente prueba de hipótesis.- $H_{0}$: Los datos se distribuyen como una distribución F.- $H_{a}$: Los datos no se distribuyen como una distribución F. ###Code # Definimos los parámetros para la distribución. f_params = st.f.fit(x_approved) # Definimos prueba de hipotesis. st.kstest(x_approved, lambda x: st.f.cdf(x,*f_params)) ###Output _____no_output_____ ###Markdown Nuevamente obtenemos un "p-value" muy pequeño, con lo cual rechazamos nuestra $H_{0}$ de manera que podemos decir que el **Monto Aprobado** **NO** se distribuye como una _Distribución F_. Ahora realizamos la prueba **Cuantil-Cuantil**. ###Code # Graficamos distribution_name5 = 'f' plot_pdf_qq(v_ale_real_a, bins_norm, distribution_name5) ###Output _____no_output_____ ###Markdown De la prueba de **Cuantil-Cuantil** para la _Distribución F_ se observa que no se ajusta bien a la recta, con lo cual podemos concluir que la _Distribución F_ no se ajusta correctamente a nuestro conjunto de datos del **Monto Aprobado**. Por último tenemos a la _distribución Erlang_, donde definimos la siguiente prueba de hipótesis.- $H_{0}$: Los datos se distribuyen como una distribución Erlang.- $H_{a}$: Los datos no se distribuyen como una distribución Erlang. ###Code # Definimos los parámetros para la distribución. erlang_params2 = st.erlang.fit(x_approved) # Definimos prueba de hipotesis. st.kstest(x_approved, lambda x: st.erlang.cdf(x,*erlang_params2)) ###Output _____no_output_____ ###Markdown Tenemos un "p-value" muy pequeño por lo cual rechazamos nuevamente $H_{0}$ de manera que el **Monto Aprobado** **NO** se distribuye como una _Distribución Erlang_. Ahora realizamos la prueba **Cuantil-Cuantil**. ###Code # Graficamos distribution_name6 = 'erlang' plot_pdf_qq(v_ale_real_a, bins_norm, distribution_name6) ###Output _____no_output_____ ###Markdown En cuanto a la prueba **Cuantil-Cuantil** de la _Distribución Erlang_ se puede percibir que el ajuste a la recta no es bueno, con lo cual rechazamos que la _Distribución de Erlang_ se ajuste de manera óptima a nuestro conjunto de datos del **Monto Aprobado**. 
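A modo de resumen, y reutilizando los parámetros ya ajustados, las tres pruebas de Kolmogorov-Smirnov anteriores pueden condensarse en un solo ciclo (bosquejo ilustrativo, no forma parte del flujo original): ###Code
# Resumen ilustrativo de las pruebas KS ya realizadas para el Monto Aprobado.
for nombre, dist, params in [('beta', st.beta, beta_params),
                             ('f', st.f, f_params),
                             ('erlang', st.erlang, erlang_params2)]:
    resultado = st.kstest(x_approved, lambda x, d=dist, p=params: d.cdf(x, *p))
    print(f'{nombre}: estadístico = {resultado.statistic:.4f}, p-value = {resultado.pvalue:.2e}')
###Output _____no_output_____ ###Markdown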
Entonces, después de analizar el ajuste de la distribución tanto para un **KDE** como para distribuciones de probabilidad hipóteticas, en este caso particular, para las distribuciones **Beta**, **F** y **Erlang** ninguna se ajustó de manera óptima a nuestro conjunto de datos, con lo cual asumiremos la estimación por **KDE** como la función óptima para generar variables aleatorias que distribuyen como $f(x)$ del **Monto Aprobado**. Para generar variables aleatorias que distribuyen como nuestra $f(x)$, utilizaremos el **Método de Aceptación y Rechazo**. `Aceptación y Rechazo`. Ahora lo que haremos será definir los parámetros necesarios para generar los aleatorios. Definiremos como la función que mayora $t(x)$ como una constante, la cual será el máximo de la función $f(x)$. ###Code # Definimos el máximo de la función de distribución de probabilidad. max_f_a = max(f_uni_kde_a(x_grid_approved)) print('El máximo de nuestra f(x) es:',max_f_a) # Transformamos el máximo en una función constante definida en el dominio de nuestra PDF. t_a = lambda x: max_f_a * np.ones([len(x)]) # Gráficamos nuestra PDF junto con la función que la mayora. plt.plot(x_grid_approved, f_uni_kde_a(x_grid_approved), label='f(x)') plt.plot(x_grid_approved, t_a(x_grid_approved), label='t(x)') plt.title('$t(x) \geq f(x)$') plt.legend(); ###Output El máximo de nuestra f(x) es: 0.3618579237402727 ###Markdown `Simulación`. Hasta ahora, ya tenemos como se generan variables aleatorias que distribuyen como $f(x)$ que es el función de distribución de probabilidad del **Monto Aprobado**. Lo que haremos a continuación será simular escenarios _a posteriori_ para el **Monto Aprobado**. ###Code # Calculamos los valores aleatorios. Generaremos 1,000 aleatorios. Values_Approved = values_acep_rechazo(1000, f_uni_kde_a, t_a, min(x_real_a), max(x_real_a)) # A manera de comprobación, graficaremos un histograma de las variables aleatorias generadas. Se espera distribuyan como f(x). sns.histplot(Values_Approved, color='r', bins=20).set_title('V.A.Generadas'); ###Output _____no_output_____ ###Markdown Del histograma que se describe se puede observar que el comportamiento de nuestros $1,000$ aleatorios generados sí se distribuyen como nuestra función $f(x)$ con lo cual podemos aceptar el **Método de Aceptación y Rechazo** como válido para el **Monto Aprobado**.Ahora utilizaremos algún **Método de Reducción de Varianza** para poder obtener una muestra aún mejor. En este caso se eligió el **Método de Números Complementarios**. ###Code # Utilizando el metodo de numeros complementarios. # Complemento de variables aleatorias. a_2 = min(Values_Approved) b_2 = max(Values_Approved) U_sC_a = b_2 - np.array(Values_Approved) + a_2 # Buscamos la media para este método. complementario_a = np.concatenate([Values_Approved, U_sC_a]) media_complementario_a = np.mean(complementario_a) print(f'La media utilizando el método de números complementarios es de: {media_complementario_a}') ###Output La media utilizando el método de números complementarios es de: 12.001799466800012 ###Markdown Ahora definiremos un intervalo de confianza para el **Monto Aprobado**. ###Code # Definimos un intervalo al 95 % de confianza. 
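# Nota descriptiva: st.t.interval construye el intervalo como media ± t_{alfa/2, n-1} * error estándar
# (st.sem); al usar la muestra original junto con su complementaria, n es grande y el intervalo
# resultante es estrecho.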
confianza = 0.95 muestra_generada_a = complementario_a intervalo_MS_a = st.t.interval(confianza, len(muestra_generada_a)-1, loc=np.mean(muestra_generada_a), scale=st.sem(muestra_generada_a)) print(f'Con una confianza del 95% nuestros datos se encontrarán en el intervalo: {intervalo_MS_a}') ###Output Con una confianza del 95% nuestros datos se encontrarán en el intervalo: (11.949987001165324, 12.0536119324347) ###Markdown Ahora lo que haremos será desescalar los datos. Recordemos están en escala logarítmica. ###Code # Hacemos intervalo deescalado. intervalo_MS_a_N = ((np.exp(intervalo_MS_a[0]), np.exp(intervalo_MS_a[1]))) print(f'Con una confianza del 95% nuestros datos se encontrarán en el intervalo: {intervalo_MS_a_N}') ###Output Con una confianza del 95% nuestros datos se encontrarán en el intervalo: (154815.13414681912, 171718.5249480553) ###Markdown `Conclusiones`. En cuanto al valor esperado para la siguiente solicitud de crédito, observamos que para el **Monto Aprobado** del crédito, este se define entre $\$154,815$ y $\$171,718$.Si recordamos que el **Monto Solicitado** se mueve en el intervalo $[\$197,358$ y \$220,227]$, entonces se puede comenzar a observar una tendencia generalizada. Estadísticamente y tomando en cuenta los resultados de la simulación de $1,000$ aleatorios, podemos ver que la tendencia general es que el **Monto Aprobado** sea menor al **Monto Solicitado**, esto nos da pauta a ir estableciendo relaciones entre estos 2 nodos, comenzando por el hecho de que se espera que la cantidad que solicite no necesariamente se vea reflejado en la cantidad de crédito que se otorgue.Esta relación nos da pauta a preguntarnos **¿Cuál es la probabilidad de que el monto solicitado del crédito corresponda con la cantidad que se aprueba para la línea?** Para tratar de resolver está interrogante se utilizará un enfoque frecuentista de la probabilidad sobre los datos originales, es decir, veremos a través de los históricos que tenemos cual es la probabilidad de que el **Monto Aprobado** $=$ **Monto Solicitado**. ###Code # Almacenamos en un vector los valores donde son iguales is_equal = [1 if data.AMT_APPLICATION[i] == data.AMT_CREDIT[i] else 0 for i in data.index] # Calculamos la probabilidad de que sean iguales. prob_is_equal = sum(is_equal) / len(data) print(f'La probabilidad de que el Monto Solicitado sea igual al Monto Aprobado es de: {(prob_is_equal * 100)}%') ###Output La probabilidad de que el Monto Solicitado sea igual al Monto Aprobado es de: 19.699700547768632% ###Markdown Observamos que la probabilidad respalda lo representado en los aleatorios generados, existe una tendencia en donde aproximadamente en el $80\%$ de los casos de las solicitudes el **Monto Aprobado** no corresponderá al **Monto Solicitado**. `Nodo: Bienes adquiridos`. `Exploración y Visualización de los datos`. ###Code #observamos los valores unicos que contiene la variable data['NAME_GOODS_CATEGORY'].unique() #observamos los valores en función de su frecuencia data['NAME_GOODS_CATEGORY'].value_counts() ###Output _____no_output_____ ###Markdown Podemos observar que existe una categoría llamada _XNA_ lo cual significa que desconocemos el bien en el que se se gasto el crédito.Esta información no es útil para nuestro analisís, ya que nos interesa saber el fin en el que se empleo el dinero para así ayudar al banco a conocer los hábitos de consumo de sus clientes. Sin embargo, realizaremos un analisís rápido de este caso. 
###Code particular2=data[data['NAME_GOODS_CATEGORY']=='XNA'] particular2.head() particular2['NAME_CONTRACT_TYPE'].value_counts() ###Output _____no_output_____ ###Markdown La mayoría pertenecen a los portafolios de Efectivo y tarjetas de crédito. ###Code particular2['NAME_CASH_LOAN_PURPOSE'].value_counts() ###Output _____no_output_____ ###Markdown Es interesante observar que igualmente la gran mayoría tampoco tiene un proposito de crédito especifíco a la hora que solicitaron el crédito. ###Code particular2['NAME_CLIENT_TYPE'].value_counts() ###Output _____no_output_____ ###Markdown La mayoría de los clientes que hicieron gastos en esta categoría son clientes que previamente ya han solicitado un servicio a la institución. ###Code #Definimos los datos con los que trabajaremos goods = data['NAME_GOODS_CATEGORY'][data['NAME_GOODS_CATEGORY'] != 'XNA'] goods ###Output _____no_output_____ ###Markdown Al observar nuestras categorías de la categoría de bienes, observamos que tenemos datos del tipo "string". Con la intención de trabajar de una manera más cómoda, lo que haremos será transformar los datos únicos tipo "string" por datos numéricos.Estos estarán categorizados de manera que en el dominio de la frecuencia nos permitan observar alguna distribución de probabilidad que sea más moldeable. ###Code goods.value_counts() goods = goods.replace('Mobile',0) goods = goods.replace('Consumer Electronics',1) goods = goods.replace('Audio/Video',2) goods = goods.replace('Computers',3) goods = goods.replace('Furniture',4) goods = goods.replace('Construction Materials',5) goods = goods.replace('Clothing and Accessories',6) goods = goods.replace('Photo / Cinema Equipment',7) goods = goods.replace('Auto Accessories',8) goods = goods.replace('Jewelry',9) goods = goods.replace('Homewares',10) goods = goods.replace('Medical Supplies',11) goods = goods.replace('Vehicles',12) goods = goods.replace('Sport and Leisure',13) goods = goods.replace('Gardening',14) goods = goods.replace('Other',15) goods = goods.replace('Office Appliances',16) goods = goods.replace('Tourism',17) goods = goods.replace('Medicine',18) goods = goods.replace('Direct Sales',19) goods = goods.replace('Fitness',20) goods = goods.replace('Additional Service',21) goods = goods.replace('Education',22) goods = goods.replace('Weapon',23) goods = goods.replace('Insurance',24) goods = goods.replace('Animals',25) # Visualización utilizando un histograma con los datos ordenados sns.histplot(x=goods,bins=26,color='y').set_title('NAME_GOODS_CATEGORY'); # Visualización utilizando un boxplot con los datos ordenados sns.boxplot(x=goods,color='r').set_title('NAME_GOODS_CATEGORY'); ###Output _____no_output_____ ###Markdown Tomamos una muestra aleatoria más pequeña para trabajar con mayor facilidad.La muestra que tomaremos será de 300,000 ###Code sample = goods.sample(300000) ###Output _____no_output_____ ###Markdown Observamos con el histograma y el boxplot que esta refleje el comportamiento de nuestros datos originales. ###Code sns.histplot(x=sample,bins=25,color='y').set_title('Muestra'); ###Output _____no_output_____ ###Markdown `Aceptación y Rechazo`. ###Code sns.boxplot(x=sample,color='r').set_title('Muestra'); ###Output _____no_output_____ ###Markdown `Kernel Density Estimation`. ###Code np.random.seed(455) # Definimos función. Regresa la evaluación de la pdf. 
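# Nota descriptiva: bw='normal_reference' selecciona el ancho de banda con la regla de
# referencia normal (tipo Silverman), calculada a partir de la dispersión y el tamaño de la muestra.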
def kde_statsmodels_u(x, x_grid, **kwargs): """Univariate Kernel Density Estimation with Statsmodels""" kde = KDEUnivariate(x) kde.fit(bw='normal_reference', **kwargs) return kde.evaluate(x_grid) # Definición del modelo para encontar la distribución kde_univariate = KDEUnivariate(sample) # Histograma de nuestros datos discretos. y,x,_=plt.hist(sample,bins=26,density=True,label='data') # en un hist puedo almacenar y,x x = x[1:] # dominio de la función x_grid = np.arange(0,26,1) # Guardamos el bw óptimo para este modelo bw_op = kde_univariate.fit(bw='normal_reference').bw #error y_hat = kde_statsmodels_u(sample, x_grid) err0 = ((y-y_hat)**2).sum() # Graficamos la distribución junto con la estimación. plt.plot(x_grid,kde_statsmodels_u(sample, x_grid)) plt.hist(sample,bins=25,density=True) plt.title('KDE univariate') plt.show() print(f'El bandwidth óptimo es de: {bw_op}') ###Output _____no_output_____ ###Markdown Con la intención de hacer una comparación del ajuste de nuestro **KDE**, respecto a nuestros datos reales, lo que haremos será comparar su aproximación a través de una métrica de error, en este caso particular utilizaremos el **SSE** como referencia. ###Code # Definimos nuestro kde en una función. f_uni_kde_d = lambda x: kde_univariate.evaluate(x) # Definimos métrica de error. # Almacenamos la distribución real. y_real, x_real = np.histogram(goods, bins=26, density=True) x_real = x_real[1:] # Definimos la distribución generada por el KDE. y_est = f_uni_kde_d(x_real) # Calculamos el error. error_kde =((y_real-y_est)**2).sum() print(f'El error de la suma de la resta de cuadrados que se obtuvo entre nuestro KDE y la distribución real es de: {((error_kde)).round(4)}') ###Output El error de la suma de la resta de cuadrados que se obtuvo entre nuestro KDE y la distribución real es de: 0.0247 ###Markdown `Buscando PDF´s`. ###Code # A fin de comparar. Visualizamos nuestros datos respecto a una distribución exponencial continua. plt.hist(sample,bins=26,density=True,label='data') error = pd.DataFrame(index=['expon'], columns=['error']) params = st.expon.fit(sample) y_hat = st.expon.pdf(x_real) error['error']=((y-y_hat)**2).sum() #SSE plt.title('PDF exponencial') plt.plot(x_real,y_hat,label='') plt.legend() error ###Output _____no_output_____ ###Markdown `Q-Q Plot`Mediante está grafica buscamos una confirmación visual con la cual podamos saber si nuestros ajuste es suficientemente bueno como para asegurar que nuestros datos se distribuyen de esta forma. ###Code v_ale_real_dis = sample bins = 26 distribution = 'expon' plot_pdf_qq(v_ale_real_dis,bins,distribution) ###Output _____no_output_____ ###Markdown Podemos observar que aunque nuestros datos son discretos el hecho de tener más categorías hace que una distribución continua pueda ajustarse mejor (en comparación con el **canal de ventas**), sin embargo podemos observar que tenemos la cola derecha es pesada y presenta outliers por lo que graficamente no parece ser un buen ajuste. Ahora, con la intención de encontrar distribuciones de probabilidad existentes a nuestro conjunto de datos, realizaremos el test de ajuste con **Kolmogorov-Smirnov**. Este método lo que hace es determinar la bondad de ajuste de dos distribuciones de probabilidad entre sí. En nuestro caso una de ellas será la **Categoría del bien** y la otra será la que estaremos variando.Para conocer que tan buenos es el ajuste de la distribución exponencial que hemos hecho aplicamos la prueba **Kolmogorov-Smirnov**.Definimos un nivel de significancia ${\alpha} = 0.01$. 
Por lo tanto queremos un nivel de confianza $1-{\alpha} = 99\%$.$H_{0}$: Nuestro conjunto de datos sigue una distribución Exponencial.$H_{a}$: Nuestro conjunto de datos NO sigue una distribución Exponencial.Si el "p-value" es menor que ${\alpha}$ se rechaza $H_{0}$.Si el "p-value" es mayor que el nivel de significancia no se puede rechazar $H_{0}$. ###Code # Prueba de Kolmogorov-Smirnov para la distribución exponencial. st.kstest(sample,lambda x: st.expon.cdf(x,(1/np.mean(sample)),(1/np.mean(sample)**2))) ###Output _____no_output_____ ###Markdown El "p-value" resultó menor que ${\alpha}$ por lo tanto se rechaza ${H_{0}}$. `Prueba de bondad y ajuste con chi cuadrada` Ya que observamos que la _Distribución Exponencial_ no se ajusta a nuestro conjunto de datos, lo que haremos será ajustarlo para distribuciones discretas.Decidimos utilizar las siguientes funciones discretas para realizar el test de prueba de bondad y ajuste con chi cuadrada.1. Poisson2. Binomial3. Geometrica **Distirbución Binomial** **Formulación de hipótesis**$H_{0}$: Nuestro conjunto de datos tienen una distribución binomial.$H_{a}$: Nuestro conjunto de datos no sigue una distribución binomial. **Plan de análisis.**Para este análisis nuestro nivel de significancia es 5%.Por lo tanto nuestro nivel de confianza es del 95%.${\alpha} = 0.05$$1-{\alpha} = 0.95$ **Parametros para la distribución binomial**n = 25 $$E[X] = np$$La esperanza de la muestra $E[X] =2.5118$$$2.5118= 26p$$$$p = 0.0966$$ **Generación de datos**Como muestra observada utilizamos los "_counts_" del histograma de la muestra tomada de nuestro conjunto de datos original.Como muestra esperada utilizamos los "_counts_" del histograma de una muestra que se distribuya de manera binomial. ###Code # Parametros para la distribución binomial n, p = 26,0.0966 # Muestra binomial bi = st.binom.rvs(n,p,size=10000) # Graficamos las distribuciones plt.title('Muestra Binomial vs Muestra Original') y2,x2,_=plt.hist(bi,bins=26,label='Binomial') y1,x1,_=plt.hist(sample,bins=26,label='Original') plt.legend() plt.show() warnings.filterwarnings("ignore") # Realizamos la prueba de bondad y ajuste. chi_bin = st.chisquare(f_obs=y1,f_exp=y2) chi_bin ###Output _____no_output_____ ###Markdown El "p-value" resulto menor que ${\alpha}$ por lo tanto se rechaza ${H_{0}}$ **Distribución de Poisson**. **Formulación de hipótesis**$H_{0}$: Nuestro conjunto de datos tienen una distribución de Poisson.$H_{a}$: Nuestro conjunto de datos no sigue una distribución de Poisson. **Plan de análisis.**Para este análisis nuestro nivel de significancia es 5%.Por lo tanto nuestro nivel de confianza es del 95%.${\alpha} = 0.05$$1-{\alpha} = 0.95$ **Generación de datos**Como muestra observada utilizamos los "_counts_" del histograma de la muestra tomada de nuestro conjunto de datos original.Como muestra esperada utilizamos los "_counts_" del histograma de una muestra que se distribuya como Poisson. **Obtención de parámetros**Para conocer si nuestra muestra se distribuye como **_Poisson_** nosotros identificamps los parametros que distribuyen esta distribución. En el caso de **_Poisson_** tanto ${\mu}$ y ${\sigma}^{2}$ son iguales al parametro ${\lambda}$.El parametro ${\lambda}$ es igual a la media de la los datos. ###Code # Calculando la media de la muestra. mu = np.mean(sample) # Vector de enteros con los valores a generar. k = x_grid # Muestra Poisson. poi = st.poisson.rvs(mu,size=len(sample)) # Graficamos las distribuciones. 
plt.title('Muestra Poisson vs Muestra Original') y2,x2,_=plt.hist(poi,bins=26,label='Poisson',density=True) y1,x1,_=plt.hist(sample,bins=26,label='Original',density=True) plt.legend() plt.show() warnings.filterwarnings("ignore") # Realizamos la prueba de bondad y ajuste. chi_poi = st.chisquare(f_obs=y1,f_exp=y2) chi_poi ###Output _____no_output_____ ###Markdown **Distribución Geométrica**. **Formulación de hipótesis**$H_{0}$: Nuestro conjunto de datos tiene una distribución geométrica.$H_{a}$: Nuestro conjunto de datos no sigue una distribución geométrica. **Obtención de Parámetros**La media de la distribución geométrica se calcula de la siguiente manera:$$ E[X] = \frac{1-p}{p} $$Recordando que la media de nuestra muestra es igual a:2.5118 Podemos despejar la ecuación para obtener ${p}$.$$ 2.5118p = 1-p $$$$ 3.5118p = 1 $$$$ p = .2847 $$ ###Code # Definición de parámetros. p_g = 0.2847 g = st.geom.rvs(p_g,size=len(sample)) # Graficamos las distribuciones. plt.title('Muestra Geómetrica vs Muestra Original') y2,x2,_=plt.hist(g,bins=25,label='Geómetrica',density=True) y1,x1,_=plt.hist(sample,bins=25,label='Original',density=True) plt.legend() plt.show() warnings.filterwarnings("ignore") # Realizamos la prueba de bondad y ajuste. chi_geom = st.chisquare(f_obs=y1,f_exp=y2) chi_geom ###Output _____no_output_____ ###Markdown El "p-value" resultó menor que ${\alpha}$, por lo tanto se rechaza ${H_{0}}$, en línea con lo que ya habíamos observado en la prueba de Kolmogorov-Smirnov. Como ninguna de las distribuciones teóricas probadas se ajustó bien, trabajaremos con la estimación por **KDE**; por lo tanto, después de simular redondearemos al entero más cercano para mantener el resultado de la simulación como una variable discreta. `Aceptación y Rechazo`. ###Code max_d2 = max(f_uni_kde_d(x_grid)) print('El máximo de nuestra f(x) es:',max_d2) # Transformamos el máximo en una función constante definida en el dominio de nuestra PDF. t_d2 = lambda x: max_d2*np.ones([len(x)]) # Gráficamos nuestra PDF junto con la función que la mayora. plt.plot(x_grid, f_uni_kde_d(x_grid), label='f(x)') plt.plot(x_grid, t_d2(x_grid), label='t(x)') plt.title('$t(x) \geq f(x)$') plt.legend(loc='best'); ###Output El máximo de nuestra f(x) es: 0.6262605142204624 ###Markdown `Simulación`. ###Code def values_acep_rechazo_Discrete(N:'Cantidad de variables aleatorias a generar', f:'Función objetivo a generar', t:'Función que mayorea a f', lim_inf:'Límite inferior para R2', lim_sup:'Límite superior para R2'): c = [] i = 0 agregador = c.append # Iteramos hasta lograr N variables aleatorias. while i < N: # Generación de variables aleatorias bajo los parámetros establecidos R1 = np.random.random_integers(lim_inf,lim_sup,size=1) R2 = np.random.rand(1) f_x = f(int(R1)) t_x = t(R1) # Condición de aceptación. if R2 * t_x <= f_x: agregador(int(R1)) i+=1 return c N = 1000 new_goods = values_acep_rechazo_Discrete(N,f_uni_kde_d,t_d2,0,25) new_goods = np.array(new_goods) ###Output _____no_output_____ ###Markdown `Reducción de Varianza` **Números Complementarios** ###Code f_ar = lambda x: values_acep_rechazo_Discrete(x,f_uni_kde_d,t_d2,0,25) def values_acep_rechazo_Discrete_Comp(N:'Cantidad de variables aleatorias a generar', f:'Función objetivo a generar', t:'Función que mayorea a f', lim_inf:'Límite inferior para R2', lim_sup:'Límite superior para R2', f_ar: 'función aceptación rechazo normal'): b = f_ar(N/2) c = [] i = 0 agregador = c.append # Iteramos hasta lograr N variables aleatorias.
while i < N/2: # Generación de variables aleatorias bajo los parámetros establecidos R1 = np.random.random_integers(lim_inf,lim_sup,size=1) R1_C = lim_sup - R1 + lim_inf R2 = np.random.rand(1) f_x = f(int(R1_C)) t_x = t((R1_C)) # Condición de aceptación. if R2 * t_x <= f_x: agregador(int(R1_C)) i+=1 return np.concatenate([b,c]) c2=values_acep_rechazo_Discrete_Comp(N,f_uni_kde_d,t_d2,0,25,f_ar) # Función para graficar histogramas de una distribución discreta. def plot_histogram_discrete(distribucion_TI:'señal de varibles aleatorias de un distribución DISCRETA dada.', title:'título del histograma', label:'label de los legends a aparecer en el gráfica'): # Parametros del histograma de la muestra generada. plt.figure(figsize=[10,5]) labels1, counts1 = np.unique(distribucion_TI, return_counts=True) # Grafica plt.bar(labels1, (counts1 / len(distribucion_TI)), color='#F4D03F', align='center', label=label[0]) plt.gca().set_xticks(labels1) plt.title(title) plt.legend() plt.show() plot_histogram_discrete(c2,'Histograma Bienes Adquiridos','Prob asiganda') ###Output _____no_output_____ ###Markdown **Media del Complementario** ###Code np.round(np.mean(c2)) #redondeamos al entero más cercano ya que son variables discretas ###Output _____no_output_____ ###Markdown **Intervalo de confianza** ###Code confianza = 0.95 intervalo = st.t.interval(confianza,len(c2)-1,loc=np.mean(c2),scale=st.sem(c2)) print(f'Con una confianza del 95% nuestros datos se encuentran en el intervalo: {intervalo}') ###Output Con una confianza del 95% nuestros datos se encuentran en el intervalo: (2.323075776734061, 2.6909242232659394) ###Markdown En conclusión con una confianza del 95% podemos esperar que el el siguiente cliente que pida un crédito lo utilizara para adquirir bienes que pertenecen a la categoría de Audio/Video. `Simulación Montecarlo`. En este apartado lo que haremos será generar una **simulación montecarlo** para cada nodo seleccionado. A partir de los análisis anteriores se pudo determinar una manera óptima de generar aleatorios que distribuyan como cada conjunto de datos que fue de nuestro interés, los cuales recordemos son los siguientes:- **Canal de Venta**.- **Monto Solicitado**.- **Monto Aprobado**.- **Bienes Adquiridos**.Entonces ya conociendo las características de estos nodos, realizaremos una simulación con las siguientes características:- $1,000$ escenarios.- $10$ solicitudes.Esto implica que al nosotros realizar esta **simulación montecarlo** lo que haremos será conocer los principales atributos de las siguientes $10$ solicitudes de crédito que nosotros como institución bancaria recibiremos.Cómo técnica de reducción de varianza utilizaremos el **Método de Números Complementarios** esto porque a lo largo del desarrollo del proyecto hemos encontrado este método muy efectivo, a excepción del nodo de **Canal de Venta** donde utilizaremos el **Muestreo Estratificado Igualmente Espaciado**. Comenzamos para el nodo de **Canal de Venta**. En este caso utilizaremos el muestreo estratificado igualmente espaciado modificando nuestra función de Generación dedistribuciones discretas, agregando un nuevo argumento el cual es una función _lambda_ que llama a la función de muestras estratificadas (la cual definimos anteriormente). 
###Code def Gen_distr_discreta2(p_acum: 'P.Acumulada de la distribución a generar', indices: 'valores reales a generar aleatoriamente', U: 'cantidad de números aleatorios a generar', f: 'función de estratificados'): U =f(U) # Diccionario de valores aleatorios rand2reales = {i: idx for i, idx in enumerate(indices)} # Series de los valores aletorios y = pd.Series([sum([1 for p in p_acum if p < ui]) for ui in U]).map(rand2reales) return y ###Output _____no_output_____ ###Markdown Definimos nuestros argumentos: ###Code # Simulación montecarlo. # Definimos probabilidad acumulada. p_acum_monte = poisson(mu,k) # Generamos muestra. N_aleatorios = 1000 #función que llama a la función de estratificados para variables discretas estra = lambda x: estra_igualmente_espaciado(x) ###Output _____no_output_____ ###Markdown Simulación ###Code # Realizamos la simulación montecarlo. n = 10 # solicitudes sim1 = np.asmatrix([Gen_distr_discreta2(p_acum,k,N_aleatorios,estra) for i in range(n)]) m1=np.mean(sim1, axis = 1) # obteniendo la media m1 ###Output _____no_output_____ ###Markdown Ahora para el nodo de **Monto Solicitado**. En el caso del **Monto Solicitado** y el **Monto Aprobado** estamos ante variables continuas, entonces con la intención de agilizar el proceso de simulación lo que haremos será definir directamente una función que nos permita generar la muestra con **Aceptación y Rechazo** y con reducción de varianza, específicamente para el **Método de Números Complementarios**. ###Code # Definimos función. Genera la muestra aleatoria. def random_values_cn(values: 'Muestra obtenida por aceptación rechazo regular'): # Definimos los parámetros para aplicar reducción de varianza. # Complemento de variables aleatorias. a = min(values) b = max(values) U_s = b - np.array(values) + a # Definimos el vector con la muestra complementaria complementario = np.concatenate([values, U_s]) return complementario ###Output _____no_output_____ ###Markdown Ya con nuestra función definida lo que haremos ahora será simular. ###Code # Simulamos monto_solicitado = np.asmatrix([random_values_cn(values_acep_rechazo(N_aleatorios, f_uni_kde, t, min(x_real), max(x_real))) for i in range(n)]) m2 = np.mean(monto_solicitado, axis=1) m2 # Desescalamos los datos. np.array(np.exp(m2)) ###Output _____no_output_____ ###Markdown Ahora para el nodo de **Monto Aprobado**. Realizamos la simulación montecarlo. ###Code # Simulamos monto_aprobado = np.asmatrix([random_values_cn(values_acep_rechazo(N_aleatorios, f_uni_kde_a, t_a, min(x_real_a), max(x_real_a))) for i in range(n)]) m3 = np.mean(monto_aprobado, axis=1) m3 # Desescalamos los datos. np.array(np.exp(m3)) ###Output _____no_output_____ ###Markdown Para el Nodo **Bienes Adquiridos** ###Code # Simulamos. n = 10 # solicitudes sim4 = np.asmatrix([values_acep_rechazo_Discrete_Comp(n,f_uni_kde_d,t_d2,0,25,f_ar) for i in range(n)]) m4=np.mean(sim4, axis = 1) # obteniendo la media m4=np.array(m4).flatten() m4=[round(i) for i in m4] # Visualizamos. m4 ###Output _____no_output_____ ###Markdown Estructuramos toda la información en un **dataframe**. ###Code # Definimos el data frame. final_results = pd.DataFrame(columns=['Canal de Venta', 'Monto Solicitado', 'Monto Aprobado', 'Bienes Adquiridos'], index=np.arange(1,11)) # Llenamos el data frame. final_results['Canal de Venta'] = (m1.round()) final_results['Monto Solicitado'] = np.array(np.exp(m2)) final_results['Monto Aprobado'] = np.array(np.exp(m3)) final_results['Bienes Adquiridos'] = m4 # Visualizamos dara frame. 
final_results ###Output _____no_output_____ ###Markdown `Prueba de hipótesis`. Recordando que uno de los postulados sobre los cuales se trabajó es que **el monto de crédito aprobado es independiente del canal de ventas por el cual se haya tramitado el crédito.** Con la intención de conocer si esto es real o no, realizaremos una prueba de hipótesis.Para esto utilizaremos el _test-t_ para demostrar independencia entre los diferentes **Montos Aprobados** por categoría. Si observamos las variables que se desean comparar (**Canal de Venta** y **Monto Aprobado**) nos podemos percatar que estamos ante un conjunto de datos discreto y otro de ellos es continuo. Entonces para poder llevar a cabo la prueba de hipótesis lo que haremos será encontrar los **Montos Aprobados** por categoría y sobre dichos vectores compararemos.Recordando la categorización del **Canal de Venta**:- 'Country-wide': 0- 'Credit and cash offices': 1- 'Stone': 2- 'Regional / local': 3- 'AP+ (cash loan)': 4- 'Contact center': 5- 'Channel of corporate sales': 6- 'Car dealer': 7Entonces lo que se hará será encontrar los valores correspondientes a los **Montos Aprobados** de cada canal y posteriormente se realizará el _test-t_ para cada una de las combinaciones, de manera que la prueba de hipótesis se verá de la siguiente manera:- $H_{0} : \mu_{0} = \mu_{1} = \mu_{2} = \mu_{3} = \mu_{4} = \mu_{5} = \mu_{6} = \mu_{7}$- $H_{a}: \mu_{0} \neq \mu_{1} \neq \mu_{2} \neq \mu_{3} \neq \mu_{4} \neq \mu_{5} \neq \mu_{6} \neq \mu_{7}$Trabajaremos con un nivel de significancia del $5\%$ de manera que $\rightarrow \alpha = 0.05$ ###Code # Almacenamos en vectores los valores de montos aprobados de cada canal de venta. # Trabajaremos sobre escala logarítmica para no sesgar la comparación con los valores extremos, igual que en el análisis anterior. canal_0 = np.array(np.log(data[data['CHANNEL_TYPE']==0]['AMT_CREDIT'])) canal_1 = np.array(np.log(data[data['CHANNEL_TYPE']==1]['AMT_CREDIT'])) canal_2 = np.array(np.log(data[data['CHANNEL_TYPE']==2]['AMT_CREDIT'])) canal_3 = np.array(np.log(data[data['CHANNEL_TYPE']==3]['AMT_CREDIT'])) canal_4 = np.array(np.log(data[data['CHANNEL_TYPE']==4]['AMT_CREDIT'])) canal_5 = np.array(np.log(data[data['CHANNEL_TYPE']==5]['AMT_CREDIT'])) canal_6 = np.array(np.log(data[data['CHANNEL_TYPE']==6]['AMT_CREDIT'])) canal_7 = np.array(np.log(data[data['CHANNEL_TYPE']==7]['AMT_CREDIT'])) # Realizamos el test-t de manera iterativa. res_canal_0 = [st.ttest_ind(canal_0, i, equal_var=False) for i in (canal_1, canal_2, canal_3, canal_4, canal_5, canal_6, canal_7)] res_canal_0 ###Output _____no_output_____
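###Markdown Como complemento del _test-t_ por pares, un contraste conjunto de la igualdad de las ocho medias puede hacerse con un ANOVA de una vía; si se duda de la homogeneidad de varianzas, la alternativa no paramétrica es `st.kruskal`. El siguiente bloque es un bosquejo ilustrativo que no forma parte del análisis original: ###Code
# Bosquejo ilustrativo: contraste conjunto de las medias del Monto Aprobado (escala log) por canal.
anova_canales = st.f_oneway(canal_0, canal_1, canal_2, canal_3,
                            canal_4, canal_5, canal_6, canal_7)
print(f'ANOVA de una vía: F = {anova_canales.statistic:.4f}, p-value = {anova_canales.pvalue:.4e}')
###Output _____no_output_____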
example/01-Keras-tutorial.ipynb
###Markdown Load Dataset ###Code from keras.datasets import mnist ((X_train, y_train), (X_test, y_test)) = mnist.load_data() print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) ###Output (60000, 28, 28) (60000,) (10000, 28, 28) (10000,) ###Markdown Visualize ###Code %matplotlib inline import matplotlib.pyplot as plt plt.gray() print(y_train[0:10]) figures, axes = plt.subplots(nrows=2, ncols=5) figures.set_size_inches(18, 8) axes[0][0].matshow(X_train[0]) axes[0][1].matshow(X_train[1]) axes[0][2].matshow(X_train[2]) axes[0][3].matshow(X_train[3]) axes[0][4].matshow(X_train[4]) axes[1][0].matshow(X_train[5]) axes[1][1].matshow(X_train[6]) axes[1][2].matshow(X_train[7]) axes[1][3].matshow(X_train[8]) axes[1][4].matshow(X_train[9]) ###Output [5 0 4 1 9 2 1 3 1 4] ###Markdown Preprocessing ###Code X_train = X_train.reshape(60000, 28 * 28) X_test = X_test.reshape(10000, 28 * 28) print(X_train.shape, X_test.shape) from keras.utils import to_categorical # Perform one-hot encoding on the labels. # Equivalent to np.eye(10)[y_train]. y_train_hot = to_categorical(y_train) # Equivalent to np.eye(10)[y_test]. y_test_hot = to_categorical(y_test) print(y_train_hot.shape, y_test_hot.shape) ###Output (60000, 10) (10000, 10) ###Markdown Build a Model Single-layer Neural Network ###Code from keras.models import Sequential from keras.layers import Dense, Activation from keras.initializers import RandomUniform from keras.optimizers import SGD model = Sequential() model.add(Dense(units=10, kernel_initializer=RandomUniform(minval=0.0, maxval=0.001), input_shape=(28 * 28,))) model.add(Activation('sigmoid')) optimizers = SGD(lr=0.00001) model.compile(optimizer=optimizers, loss='categorical_crossentropy', metrics=['accuracy']) model.fit(X_train, y_train_hot, epochs=20, validation_data=(X_test, y_test_hot)) import numpy as np import pandas as pd predictions = model.predict(X_test) predictions = np.argmax(predictions, axis=1) result = pd.DataFrame({'actual': y_test, 'predict': predictions}) accuracy = (result['actual'] == result['predict']).mean() print("Accuracy = {0:.6f}".format(accuracy)) result.head(10) ###Output Accuracy = 0.918000 ###Markdown Multi-layer Neural Network ###Code from keras.models import Sequential from keras.layers import Dense, Activation from keras.initializers import RandomUniform from keras.optimizers import SGD model = Sequential() model.add(Dense(units=1000, kernel_initializer=RandomUniform(minval=-0.058, maxval=0.058), input_shape=(28 * 28,))) model.add(Activation('sigmoid')) model.add(Dense(units=10, kernel_initializer=RandomUniform(minval=-0.077, maxval=0.077))) model.add(Activation('sigmoid')) optimizers = SGD(lr=0.0001) model.compile(optimizer=optimizers, loss='categorical_crossentropy', metrics=['accuracy']) model.fit(X_train, y_train_hot, epochs=20, validation_data=(X_test, y_test_hot)) import pandas as pd predictions = model.predict(X_test) predictions = np.argmax(predictions, axis=1) result = pd.DataFrame({'actual': y_test, 'predict': predictions}) accuracy = (result['actual'] == result['predict']).mean() print("Accuracy = {0:.6f}".format(accuracy)) result.head(10) ###Output _____no_output_____
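###Markdown As a side note, the same test metrics can be read directly from Keras with `model.evaluate`, which avoids the manual argmax/compare step. This is an illustrative sketch that assumes the multi-layer model and the one-hot encoded test labels defined above are still in memory: ###Code
# model.evaluate returns [loss, accuracy] because the model was compiled with metrics=['accuracy'].
test_loss, test_acc = model.evaluate(X_test, y_test_hot, verbose=0)
print("Test loss = {0:.6f}, Test accuracy = {1:.6f}".format(test_loss, test_acc))
###Output _____no_output_____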
Keras_TF_CNN_DeployModel_IoTEdge.ipynb
###Markdown Deploy ML Model to Azure IoT EdgeIn this exercise, we introduce the steps of deploying an ML module through [Azure IoT Edge](https://docs.microsoft.com/en-us/azure/iot-edge/how-iot-edge-works). The purpose is to deploy a trained image classification model to the edge device. When the image data is generated from a particular process pipeline and fed into the edge device, the deployed model is able to make predictions right on the edge device without accessing to the cloud. Outline- [Prerequisite](prerequisite)- [Step 1: Build the trained ML Model into Docker Image](step1)- [Step 2: Provision and Configure IoT Edge Device](step2)- [Step 3: Deploy ML Module on IoT Edge Device](step3)- [Step 4: Test ML Module](step4) Prerequisite Before starting this notebook, you should finish [Keras_TF_CNN_DeployModel.ipynb](Keras_TF_CNN_DeployModel.ipynb) in the same repository (except the last section "Clean up resources"). As a recap, we have created following resources in step "Deploy model as a Web Service" in this previous execercise: - Resource group defined in variable YOUR_RESOURCE_GROUP * Machine Learning Model Management * cluster environment (Microsoft.MachineLearningCompute/operationalizationClusters) - Resource group created during the cluster environment provision (YOUR_RESOURCE_GROUP plus "-azureml-xxxxx") * Container registry * Container service * .... a bunch of other automatically provisoned resourcesIn this notebook, we will be using *Machine Learning Model Managment*, and *Container registry* in later instructions. You need to find relevant information about these resources from [Azure portal](https://portal.azure.com). Also keep in mind that they are from two differnt resource group when you locate them from the Azure portal. Step 1: Build the trained ML Model into Docker Image If you have finished [Prerequisite](prerequisite), you can skip this step. Otherwise, you can follow Section *Create the Azure ML container* in [Deploy Azure Machine Learning as an IoT Edge module - preview](https://docs.microsoft.com/en-us/azure/iot-edge/tutorial-deploy-machine-learning) to deploy your own ML model. The expected output of this step include: 1. A docker image hosted on ACR (Azure Container Registry). This image will be used to create a docker container running on the edge device. 2. A web service. This web service can be used for testing purpose. Step 2: Provision and Configure IoT Edge Device In [Azure portal](https://portal.azure.com) we need to create following resources in YOUR_RESOURCE_GROUP.Resource 1: An IoT hub. Please follow the sections *Create an IoT hub* and *Register an IoT Edge device* in document [Deploy Azure IoT Edge on a simulated device in Linux or MacOS - preview](https://docs.microsoft.com/en-us/azure/iot-edge/tutorial-simulate-device-linux). After finishing these instructions, copy the value for **Connection string—primary key** from the IoT Edge Device page and save it to *ConnectionString* in the next cell. Notice that we need to have double quote in the command. You will need to execute this command in later instructions. ###Code ConnectionString = '"Your Connection string"' print('sudo iotedgectl setup --connection-string ', ConnectionString, ' --nopass') ###Output sudo iotedgectl setup --connection-string "Your Connection string" --nopass ###Markdown Resource 2: A Linux VM serving as the edge device. 
You can use the same Linux deep learning VM you have provisioned for running Keras_TF_CNN_DeployModel.ipynb (Or alternatively, you can use another Linux VM, e.g. Ubuntu server 16.04 LTS). The goal is to configure the VM so that it can run IoT Edge runtime and Docker. To finish the configuration, please follow the Sections *Prerequisites* and *Install and start the IoT Edge runtime* in document [Deploy Azure IoT Edge on a simulated device in Linux or MacOS - preview](https://docs.microsoft.com/en-us/azure/iot-edge/tutorial-simulate-device-linux). 【Tips】: When you are at [this page](https://docs.docker.com/install/linux/docker-ce/ubuntu/prerequisites), you probably only need to finish sections *Prerequisites - Uninstall old versions* and *Install Docker CE - Install using the repository*. You will also need to use the command generated from the previous cell when following *Install and start the IoT Edge runtime* in document [Deploy Azure IoT Edge on a simulated device in Linux or MacOS - preview](https://docs.microsoft.com/en-us/azure/iot-edge/tutorial-simulate-device-linux). Step 3: Deploy ML Module on IoT Edge Device In an IoT application, modules are function units on an edge device. The output of one module can serve as the input of another module. We often need multiple modules on the same IoT Edge device working together to achieve the realtime data analytics pipeline. ML module is typically one of them. The ML module takes input from a data module, and produces the prediction result as the output.We need following two steps to deploy ML module on IoT Edge device.a: Configure iot edge devce. Since our docker image is hosted on a private container registry (i.e. Azure container registry), please follow Section *Add registry credentials to your Edge device* in document [Deploy Azure Machine Learning as an IoT Edge module - preview](https://docs.microsoft.com/en-us/azure/iot-edge/tutorial-deploy-machine-learning). 【Tips】: You need to go to Azure portal to find out information about your container registry. Notice that the name of your resource group is likely to be YOUR_RESOURCE_GROUP plus "-azureml-xxxxx". Please see below screenshots for your reference. Save relevant information including *Login server*, *User name*, and *password* into the variables in the next cell. You will need to use the command generated from the next cell to finish this step.![containerregistry](./imgs/containerRegistry.PNG)![containerregistry2](./imgs/containerRegistry2.PNG) ###Code registryLoginServer = "Login_server" # replace with the real Login Server registryUsername = "User_name" # replace with the real User name registryPassword = "password" # replace with the real password # No double quotes are needed in this command. print('sudo iotedgectl login --address', registryLoginServer, '--username ', registryUsername, '--password', registryPassword) ###Output sudo iotedgectl login --address Login_server --username User_name --password password ###Markdown b: Deploy ML module Essentially, the objective is to deploy the ML container to the IoT Edge device.1. On the Azure portal, navigate to your IoT hub.2. Click *IoT Edge (preview)* and select your IoT Edge device.3. Select *Set modules*.4. Select *Add IoT Edge Module*.5. In the Name field, enter a name, `yourmodulename`. 6. In the Image field, enter your image location; for example `mlcrpacrdf78885275b6.azurecr.io/yanzimgclussrvc:1`. 
【Tips】: You can find the image location in your machine learning account manamgement - `Model Management` - `Images` - click the image you have created - copy the address from `Location` field. Please see below screenshots for your reference. 7. In the *Container Create Options* field, set the following configuration. You can change the HostPort Binding port number to your desired port number. { "HostConfig": { "PortBindings": { "5001/tcp": [ { "HostPort": "5001" } ] } } }8. Click *Save*.9. Back in the *Add Modules* step, click *Next*.10. In the *Specify Routes* step. Put the following: {} 11. Select Next.12. In the *Review Deployment* step, click *Submit*.13. Return to the device details page and click *Refresh*. You should see the new `yourmodulename` running.![mm](./imgs/modelmanagement.PNG)![mm2](./imgs/modelmanagement2.PNG) An alternative example you can reference is the section *Run the solution* in document [Deploy Azure Machine Learning as an IoT Edge module - preview](https://docs.microsoft.com/en-us/azure/iot-edge/tutorial-deploy-machine-learning). ###Code # Check if the image, scoring script and model are in the same folder. os.listdir(model_path) # Change the current working directory to model_path os.chdir(model_path) #list files in current working directory os.listdir(os.curdir) ###Output _____no_output_____ ###Markdown Step 4: Test ML Module Now we have deployed your ML model as a module on the IoT edge device. How to test it and make sure it functions correctly? First of all, we should test the deployed web service and make sure it works. We assume this step has been completed in step "Test Web Service" in our previous exercise [Keras_TF_CNN_DeployModel.ipynb](./Keras_TF_CNN_DeployModel.ipynb). Secondly, we can test to score the deployed ML module from the IoT edge device with the service endpoint url address. We will show detailed instructions for this approach in the remaining of this section. ###Code # make sure you have correct working directory settings import os local_path = os.getcwd() o16n_path = os.path.join(local_path,'o16n') model_path = os.path.join(o16n_path,'kerastfmodel') model_path # Change the current working directory to model_path os.chdir(model_path) #list files in current working directory os.listdir(os.curdir) # service endpoint url address url = 'Your Scoring URL Here!!' #url = 'http://137.117.32.176:5001/score' #url = 'http://localhost:5001/score' ###Output _____no_output_____ ###Markdown The service endpoint url address should have the format of "http://localhost:[port_number]/score" or "http://[edge_device_ip]:[port_number]/score". If your ipython notebook and ML module are running from the same VM, you can use `localhost`. Otherwise you should use the ip address of the edge device VM. The `port_number` is predefined in step b of [Deploy ML Module on IoT Edge Device](step3). ###Code from PIL import Image # the test image to predict on test_img_name = 'automobile8.png' Image.open(test_img_name) import base64 import json img_file_name = os.path.split(test_img_name)[1] print(img_file_name) # prepare a test image with open(test_img_name, 'rb') as file: encoded = base64.b64encode(file.read()) img_dict = {img_file_name: encoded.decode('utf-8')} body = json.dumps(img_dict) import requests # call the web service end point headers = {'Content-Type':'application/json'} response = requests.post(url, headers=headers, data=body) response ###Output _____no_output_____ ###Markdown The output from above cell should be ``. 
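In other words, a successful call returns an HTTP 200 response object (rendered by `requests` as `<Response [200]>`). As a quick robustness check (illustrative only, not part of the original flow), you can fail fast before decoding the payload: ###Code
# Illustrative guard: stop early if the scoring call did not succeed.
assert response.status_code == 200, \
    "Scoring call failed with status {}: {}".format(response.status_code, response.text)
###Output _____no_output_____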
###Code prediction = json.loads(response.content.decode('ascii')) prediction # The firt part is the test image's name, and the second part is the predicted category. ###Output _____no_output_____ ###Markdown Clean up resources When you finish this example, you may want to avoid unnecessary cost by cleaning up the Azure resources you have provisioned. You need to delete two resource groups: YOUR_RESOURCE_GROUP and YOUR_RESOURCE_GROUP plus"-azureml-xxxxx". The exact name for the second resource group can be found in your Azure portal. For example, my resource group name is YOUR_RESOURCE_GROUP = "yanzimgrg" and the other system created resource group name is "yanzimgrg-azureml-a0c61". I then need to execute following commands to delete these two resource groups. az group delete -n yanzimgrg az group delete -n yanzimgrg-azureml-a0c61 ###Code # Delete resource group. Execute this command in the console. # Execute below command in CLI console, at the prompt type "y" (Q: Are you sure you want to perform this operation? (y/n):) # az group delete -n $YOUR_RESOURCE_GROUP print("az group delete -n ", YOUR_RESOURCE_GROUP) ###Output az group delete -n yanzimgrg
scripts/cm_work/four_steps_reran.ipynb
###Markdown Permutation Tests ###Code from data_viz import histogram_plot histogram_plot('rf_permutation_test.csv', 'Random Forest', 'log loss') histogram_plot('xgb_permutation_test.csv', 'XGBoost', 'log loss') histogram_plot('rf_permutation_acc.csv', 'Random Forest', 'accuracy') histogram_plot('xgb_permutation_acc.csv', 'XGBoost', 'accuracy') ###Output _____no_output_____ ###Markdown Data Prep ###Code X_train, y_train, X_test, y_test, X, y = create_x_y_data() print(X_train.shape) X_test.shape columns = X_train.columns ###Output _____no_output_____ ###Markdown Round 1 (Simple Train/Test Split) ###Code from sklearn.linear_model import LogisticRegression from sklearn.metrics import log_loss log_model = LogisticRegression(penalty='l1', solver='saga', max_iter=10000) log_model.fit(X_train, y_train) y_pred = log_model.predict_proba(X_test) log_model_ll = log_loss(y_test, y_pred) lasso_mask = log_model.coef_ != 0 lasso_columns = columns[lasso_mask[0]] print(f'Number of Features: {len(lasso_columns)}') print(f'Lasso Log Loss: {log_model_ll}') from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=1000, max_depth=20, random_state=8, n_jobs=-1) rf.fit(X_train, y_train) y_pred = rf.predict_proba(X_test) rf_ll = log_loss(y_test, y_pred) rf_mask = rf.feature_importances_ != 0 rf_columns = columns[rf_mask] print(f'Number of Features: {len(rf_columns)}') print(f'Random Forest Log Loss: {rf_ll}') from xgboost import XGBClassifier xgb = XGBClassifier(learning_rate = 0.01, max_depth = 3, n_estimators = 700, random_state=8, n_jobs=-1) xgb.fit(X_train, y_train) y_pred = xgb.predict_proba(X_test) xgb_ll = log_loss(y_test, y_pred) xgb_mask = xgb.feature_importances_ != 0 xgb_columns = columns[xgb_mask] print(f'Number of Features: {len(xgb_columns)}') print(f'XGBoost Log Loss: {xgb_ll}') model_performance = pd.DataFrame({'Model': ['Lasso Regression', 'Random Forest', 'XGBoost'], 'Log Loss Performance': [round(log_model_ll, 3), round(rf_ll, 3), round(xgb_ll, 3)], 'Number of Weighted Features': [88, 1649, 241]}) model_performance ###Output _____no_output_____ ###Markdown finding intersection of all three models ###Code #gene names round_1_important = set.intersection(set(lasso_columns), set(xgb_columns), set(rf_columns)) len(round_1_important) # arg position to get the weights round_1_mask = [x in round_1_important for x in columns] # using the mask to get the weights of lasso beta coefficients round_1_weights = log_model.coef_[0][round_1_mask] # 34 features in xgb found important in the others xgb_feat_importance = xgb.feature_importances_[round_1_mask] round_1_df = pd.DataFrame({'Genes': list(round_1_important), 'Lasso Weights': round_1_weights, 'XGB Feature Importance': xgb_feat_importance}) round_1_df = round_1_df.sort_values('Lasso Weights') round_1_df.head(20) len(set.intersection(set(xgb_columns), set(rf_columns))) ###Output _____no_output_____ ###Markdown come back here... maybe figure out SHAP values or partial dependency plots... Round 2 (Kfold Validation)Here we are doing kfold with X_train as X_. 
There are only 88 columns and with five kfolds there are about 70 rows per fold Lasso ###Code # copy X_train and y_train as X_ and y_ for kfold X_ = X_train.copy() y_ = y_train.copy() X_.shape from sklearn.model_selection import train_test_split, KFold #lasso with kfold validation log_model = LogisticRegression(penalty='l1', solver='saga', max_iter=10000) kf = KFold(n_splits=5, shuffle=True) ll_performance = [] model_weights = [] #kfold split on X_ (which is X_train of len 110) for train_index, test_index in kf.split(X_): X_train, X_test = X_.iloc[train_index], X_.iloc[test_index] y_train, y_test = y_.iloc[train_index], y_.iloc[test_index] log_model.fit(X_train, y_train) y_pred = log_model.predict_proba(X_test) log_ll = log_loss(y_test, y_pred) ll_performance.append(log_ll) model_weights.append(log_model.coef_) # note that this average performance is terrible display(ll_performance) print(np.mean(ll_performance)) def important_gene_mask(columns, coefs): """ inputs ------ columns: columns of df coefs: beta weights of lasso results ------ important_genes: name of genes with weight != 0 gene_weights: beta weights of genes """ mask = coefs[0] != 0 gene_weights = coefs[0][mask] important_genes = columns[mask] return dict(zip(important_genes, gene_weights)) # find the genes with weight != 0 + their weights l1_dict = important_gene_mask(columns, model_weights[0]) l2_dict = important_gene_mask(columns, model_weights[1]) l3_dict = important_gene_mask(columns, model_weights[2]) l4_dict = important_gene_mask(columns, model_weights[3]) l5_dict = important_gene_mask(columns, model_weights[4]) # find the genes found important in all five kfolds lasso_gene_intersection = set.intersection(set(l1_dict.keys()), set(l2_dict.keys()), set(l3_dict.keys()), set(l5_dict.keys()), set(l4_dict.keys())) len(lasso_gene_intersection) # find the average weight for the intersecting genes lasso_average_weight = {} for gene in lasso_gene_intersection: lasso_average_weight[gene] = l1_dict[gene] lasso_average_weight[gene] += l2_dict[gene] lasso_average_weight[gene] += l3_dict[gene] lasso_average_weight[gene] += l4_dict[gene] lasso_average_weight[gene] += l5_dict[gene] lasso_average_weight[gene] = lasso_average_weight[gene]/5 lasso_average_weight ###Output _____no_output_____ ###Markdown Random Forest/XGB ###Code xgb = XGBClassifier(learning_rate = 0.01, max_depth = 3, n_estimators = 500, random_state=8, n_jobs=-1) rf = RandomForestClassifier(n_estimators=1000, max_depth=15, random_state=8, n_jobs=-1) kf = KFold(n_splits=5, shuffle=True) rf_ll_performance = [] xgb_ll_performance = [] rf_weights = [] xgb_weights = [] for train_index, test_index in kf.split(X_): X_train, X_test = X_.iloc[train_index], X_.iloc[test_index] y_train, y_test = y_.iloc[train_index], y_.iloc[test_index] rf.fit(X_train, y_train) xgb.fit(X_train, y_train) p_rf = rf.predict_proba(X_test) p_xgb = xgb.predict_proba(X_test) rf_ll = log_loss(y_test, p_rf) xgb_ll = log_loss(y_test, p_xgb) rf_ll_performance.append(rf_ll) xgb_ll_performance.append(xgb_ll) rf_weights.append(rf.feature_importances_) xgb_weights.append(xgb.feature_importances_) display(rf_ll_performance) print(f'Average RF Log Loss: {np.mean(rf_ll_performance)}') display(xgb_ll_performance) print(f'Average XGB Log Loss: {np.mean(xgb_ll_performance)}') ###Output _____no_output_____ ###Markdown can tune these later... ###Code def important_gene_mask_tree(columns, coefs): """ gene finder for tree based models since coef_ and feature_importances work differently. 
inputs ------ columns: columns of df coefs: feature importances from a tree model results ------ important_genes: name of genes with importance != 0 gene_weights: feature importances of genes """ mask = coefs != 0 gene_weights = coefs[mask] important_genes = columns[mask] return dict(zip(important_genes, gene_weights)) ###Output _____no_output_____ ###Markdown XGBoost ###Code xgb1_dict = important_gene_mask_tree(columns, xgb_weights[0]) xgb2_dict = important_gene_mask_tree(columns, xgb_weights[1]) xgb3_dict = important_gene_mask_tree(columns, xgb_weights[2]) xgb4_dict = important_gene_mask_tree(columns, xgb_weights[3]) xgb5_dict = important_gene_mask_tree(columns, xgb_weights[4]) # find the genes found important in all five kfolds xgb_gene_intersection = set.intersection(set(xgb1_dict.keys()), set(xgb2_dict.keys()), set(xgb3_dict.keys()), set(xgb4_dict.keys()), set(xgb5_dict.keys())) #number of genes in each round print(len(xgb_gene_intersection)) # find the average feature importance for the intersecting xgb genes xgb_average_weight = {} for gene in xgb_gene_intersection: xgb_average_weight[gene] = xgb1_dict[gene] xgb_average_weight[gene] += xgb2_dict[gene] xgb_average_weight[gene] += xgb3_dict[gene] xgb_average_weight[gene] += xgb4_dict[gene] xgb_average_weight[gene] += xgb5_dict[gene] xgb_average_weight[gene] = xgb_average_weight[gene]/5 xgb_average_weight ###Output 3 ###Markdown Random Forest ###Code rf1_dict = important_gene_mask_tree(columns, rf_weights[0]) rf2_dict = important_gene_mask_tree(columns, rf_weights[1]) rf3_dict = important_gene_mask_tree(columns, rf_weights[2]) rf4_dict = important_gene_mask_tree(columns, rf_weights[3]) rf5_dict = important_gene_mask_tree(columns, rf_weights[4]) # find the genes found important in all five kfolds rf_gene_intersection = set.intersection(set(rf1_dict.keys()), set(rf2_dict.keys()), set(rf3_dict.keys()), set(rf4_dict.keys()), set(rf5_dict.keys())) #number of genes in each round print(len(rf_gene_intersection)) # find the average feature importance for the intersecting rf genes rf_average_weight = {} for gene in rf_gene_intersection: rf_average_weight[gene] = rf1_dict[gene] rf_average_weight[gene] += rf2_dict[gene] rf_average_weight[gene] += rf3_dict[gene] rf_average_weight[gene] += rf4_dict[gene] rf_average_weight[gene] += rf5_dict[gene] rf_average_weight[gene] = rf_average_weight[gene]/5 rf_average_weight # no intersection between 11 lasso and 4 xgb features display(set.intersection(set(xgb_gene_intersection), set(lasso_gene_intersection))) # union of xgb and lasso features set.union(set(xgb_gene_intersection), set(lasso_gene_intersection)) # check for large weights display(xgb_average_weight) display(lasso_average_weight) kfold_model_performance = pd.DataFrame({'Model': ['Lasso Regression', 'Random Forest', 'XGBoost'], 'Log Loss Performance': [round(np.mean(ll_performance), 3), round(np.mean(rf_ll_performance), 3), round(np.mean(xgb_ll_performance), 3)], 'KFold Features': [11, 640, 4]}) kfold_model_performance ###Output _____no_output_____ ###Markdown With all X and y data ###Code log_model = LogisticRegression(penalty='l1', solver='saga', max_iter=10000) xgb = XGBClassifier(learning_rate = 0.01, max_depth = 3, n_estimators = 500, random_state=8, n_jobs=-1) rf = RandomForestClassifier(n_estimators=1000, max_depth=15, random_state=8, n_jobs=-1) kf = KFold(n_splits=5, shuffle=True) lasso_performance = [] rf_ll_performance = [] xgb_ll_performance = [] lasso_weights = [] rf_weights = [] xgb_weights = [] #kfold split on the full X and y data for train_index, test_index in kf.split(X): X_train, X_test = 
X.iloc[train_index], X.iloc[test_index] y_train, y_test = y.iloc[train_index], y.iloc[test_index] log_model.fit(X_train, y_train) rf.fit(X_train, y_train) xgb.fit(X_train, y_train) p_lr = log_model.predict_proba(X_test) p_rf = rf.predict_proba(X_test) p_xgb = xgb.predict_proba(X_test) log_ll = log_loss(y_test, p_lr) rf_ll = log_loss(y_test, p_rf) xgb_ll = log_loss(y_test, p_xgb) lasso_performance.append(log_ll) rf_ll_performance.append(rf_ll) xgb_ll_performance.append(xgb_ll) lasso_weights.append(log_model.coef_) rf_weights.append(rf.feature_importances_) xgb_weights.append(xgb.feature_importances_) display(lasso_performance) print(np.mean(lasso_performance)) display(rf_ll_performance) print(np.mean(rf_ll_performance)) display(xgb_ll_performance) print(np.mean(xgb_ll_performance)) # all x data lasso weights lk1_dict = important_gene_mask(columns, lasso_weights[0]) lk2_dict = important_gene_mask(columns, lasso_weights[1]) lk3_dict = important_gene_mask(columns, lasso_weights[2]) lk4_dict = important_gene_mask(columns, lasso_weights[3]) lk5_dict = important_gene_mask(columns, lasso_weights[4]) lasso_kfold_intersection = set.intersection(set(lk1_dict.keys()), set(lk2_dict.keys()), set(lk3_dict.keys()), set(lk4_dict.keys()), set(lk5_dict.keys())) print(len(lasso_kfold_intersection)) lasso_average_weight1 = {} for gene in lasso_kfold_intersection: lasso_average_weight1[gene] = lk1_dict[gene] lasso_average_weight1[gene] += lk2_dict[gene] lasso_average_weight1[gene] += lk3_dict[gene] lasso_average_weight1[gene] += lk4_dict[gene] lasso_average_weight1[gene] += lk5_dict[gene] lasso_average_weight1[gene] = lasso_average_weight1[gene]/5 # lasso_average_weight1 # all rf weights rfk1_dict = important_gene_mask_tree(columns, rf_weights[0]) rfk2_dict = important_gene_mask_tree(columns, rf_weights[1]) rfk3_dict = important_gene_mask_tree(columns, rf_weights[2]) rfk4_dict = important_gene_mask_tree(columns, rf_weights[3]) rfk5_dict = important_gene_mask_tree(columns, rf_weights[4]) # find the genes found important in all five kfolds rf_kf_intersection = set.intersection(set(rfk1_dict.keys()), set(rfk2_dict.keys()), set(rfk3_dict.keys()), set(rfk4_dict.keys()), set(rfk5_dict.keys())) #number of genes in each round print(len(rf_kf_intersection)) # find the average feature importance for the intersecting rf genes rfk_average_weight = {} for gene in rf_kf_intersection: rfk_average_weight[gene] = rfk1_dict[gene] rfk_average_weight[gene] += rfk2_dict[gene] rfk_average_weight[gene] += rfk3_dict[gene] rfk_average_weight[gene] += rfk4_dict[gene] rfk_average_weight[gene] += rfk5_dict[gene] rfk_average_weight[gene] = rfk_average_weight[gene]/5 # rfk_average_weight xgbk1_dict = important_gene_mask_tree(columns, xgb_weights[0]) xgbk2_dict = important_gene_mask_tree(columns, xgb_weights[1]) xgbk3_dict = important_gene_mask_tree(columns, xgb_weights[2]) xgbk4_dict = important_gene_mask_tree(columns, xgb_weights[3]) xgbk5_dict = important_gene_mask_tree(columns, xgb_weights[4]) # find the genes found important in all five kfolds xgb_kf_intersection = set.intersection(set(xgbk1_dict.keys()), set(xgbk2_dict.keys()), set(xgbk3_dict.keys()), set(xgbk4_dict.keys()), set(xgbk5_dict.keys())) #number of genes in each round print(len(xgb_kf_intersection)) # find the average feature importance for the intersecting xgb genes xgbk_average_weight = {} for gene in xgb_kf_intersection: xgbk_average_weight[gene] = xgbk1_dict[gene] xgbk_average_weight[gene] += xgbk2_dict[gene] xgbk_average_weight[gene] += xgbk3_dict[gene] xgbk_average_weight[gene] += xgbk4_dict[gene] 
xgbk_average_weight[gene] += xgbk5_dict[gene] xgbk_average_weight[gene] = xgbk_average_weight[gene]/5 # xgbk_average_weight kfold_model_performance = pd.DataFrame({'Model': ['Lasso Regression', 'Random Forest', 'XGBoost'], 'Log Loss Performance': [round(np.mean(lasso_performance), 3), round(np.mean(rf_ll_performance), 3), round(np.mean(xgb_ll_performance), 3)], 'KFold Features': [23, 971, 8]}) kfold_model_performance set.intersection(set(xgb_kf_intersection), set(lasso_kfold_intersection), set(rf_kf_intersection)) print(f'xgb + rf: {len(set.intersection(set(xgb_kf_intersection), set(rf_kf_intersection)))}') print(f'rf + lasso: {len(set.intersection(set(rf_kf_intersection), set(lasso_kfold_intersection)))}') print(f'xgb + lasso: {len(set.intersection(set(xgb_kf_intersection), set(lasso_kfold_intersection)))}') ###Output xgb + rf: 7 rf + lasso: 18 xgb + lasso: 0 ###Markdown tune and then do the same with model weights Round 3: Borutahow well does Boruta select features based on X_trainthen predict RF/XGboost on those features ###Code from boruta import BorutaPy rf_boruta = RandomForestClassifier(n_jobs=-1) feat_selector = BorutaPy(rf_boruta, n_estimators='auto', verbose=2, max_iter = 200, random_state=8) feat_selector.fit(X_train.values, y_train.values) selected = X_train.values[:, feat_selector.support_] print(selected.shape) # get the name of columns that boruta thinks is important boruta_mask = feat_selector.support_ rf_boruta_features_train = columns[boruta_mask] rf_boruta = RandomForestClassifier(n_jobs=-1) feat_selector = BorutaPy(rf_boruta, n_estimators='auto', verbose=2, max_iter = 100, random_state=8) feat_selector.fit(X.values, y.values) selected = X.values[:, feat_selector.support_] print(selected.shape) # get the name of columns that boruta thinks is important boruta_mask = feat_selector.support_ rf_boruta_features = columns[boruta_mask] feat_selector.get_params() # number of genes shared between original 34 and the boruta selected len(set.intersection(set(rf_boruta_features), set(round_1_important))) ###Output _____no_output_____ ###Markdown Round 4: Bootstrapped Boruta Sampling ###Code from tqdm import tqdm import warnings warnings.filterwarnings("ignore", category=RuntimeWarning) X.shape def boruta_bootstrap(n_rounds, X, y): rf_boruta = RandomForestClassifier(n_jobs=-1) total_boruta_features = [] for n in tqdm(range(n_rounds)): bootstrap_X = X.sample(n=200, replace=True) bootstrap_y = y[bootstrap_X.index] feat_selector = BorutaPy(rf_boruta, n_estimators='auto', verbose=0, max_iter = 100, random_state=8) feat_selector.fit(bootstrap_X.values, bootstrap_y.values) boruta_mask = feat_selector.support_ rf_boruta_features = columns[boruta_mask] total_boruta_features.append(rf_boruta_features) return total_boruta_features bf = boruta_bootstrap(2, X, y) # pd.DataFrame(boruta_counter, index=[0]).T boruta = pd.read_csv('bootstrap_boruta.csv') boruta.columns = ['Genes', 'Counts'] boruta.head() plt.hist(x = boruta.Counts, bins=20) plt.title('Distribution of Boruta Selection') plt.xlabel('Times Confirmed') plt.ylabel('Number of Genes'); plt.hist(x = boruta[boruta.Counts > 500]['Counts'], bins=20) plt.title('Distribution of Boruta Selection with > 500 Counts') plt.xlabel('Times Confirmed') plt.ylabel('Number of Genes'); boruta_10 = boruta[boruta.Counts == 1000] ###Output _____no_output_____ ###Markdown what would happen if we chose the top 50 genes from here then predicted.. 
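One rough way to answer that question (this sketch was not run as part of the notebook) would be to keep the 50 genes that the bootstrapped Boruta runs confirmed most often and cross-validate a model on just that subset, assuming the gene names in the `boruta` table match the columns of `X`: ###Code
# Sketch only: evaluate the 50 most frequently confirmed Boruta genes with cross-validated log loss.
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier

top_50_genes = boruta.sort_values('Counts', ascending=False)['Genes'].head(50).tolist()
X_top = X[top_50_genes]  # subset the expression matrix to the top-50 consensus genes

rf_top = RandomForestClassifier(n_estimators=1000, max_depth=15, random_state=8, n_jobs=-1)
scores = cross_val_score(rf_top, X_top, y, scoring='neg_log_loss', cv=5)
print(f'Top-50 Boruta genes, mean log loss: {-scores.mean():.3f}')
###Output _____no_output_____ ###Markdown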
Final Gene Table Round 1 Visualization (train/test) ###Code # set of 34 genes round_1_important # 34 genes found important by all three models round_1_df.head() model_performance ###Output _____no_output_____ ###Markdown Round 2 Visualization (kfold)- no intersection between the three models with kfold ###Code kfold_model_performance def dict_weight_helper(d, x): """ inputs ----- d: dictionary x: value outputs ------ new value into dataframe if it exists in dictionary """ if x not in d: return 0.0 else: return d[x] def kfold_dataframe_edits(df, dict_list): """ inputs ----- df dict_list ----- outputs ----- df with new columns """ names = ['Lasso KF', 'XGB KF', 'RF KF'] i = 0 for d in dict_list: name = names[i] w = name + ' Weight' df[name] = df['Genes'].apply(lambda x: x in d.keys()).astype(float) df[w] = df['Genes'].apply(lambda x: dict_weight_helper(d, x)) i += 1 # df['Kfold Sum'] = df['Lasso KF'] + df['XGB KF'] + df['RF KF'] return df gc_df = round_1_df.copy() test_df = kfold_dataframe_edits(gc_df, [lasso_average_weight1, xgbk_average_weight, rfk_average_weight]) test_df.head() # dictionaries of the genes and weights lasso_average_weight1 xgbk_average_weight rfk_average_weight print('These are dictionaries') ###Output These are dictionaries ###Markdown Round 3 (Boruta 1 Round) ###Code rf_boruta_features print('These are 1 round Boruta features') def dataframe_boruta_editor(df, boruta_features): df['Boruta'] = df['Genes'].apply(lambda x: x in boruta_features).astype(float) return df t2 = dataframe_boruta_editor(test_df, rf_boruta_features) t2.head() ###Output _____no_output_____ ###Markdown Round 4 (Boruta 10000 Rounds) ###Code boruta.head() t2.sort_values('Boruta', ascending=False).head() test_merge = pd.merge(t2, boruta, how='left', on='Genes') test_merge.head() def final_counts(df): df['Bootstrap Boruta'] = df['Counts'].notnull() df['Total'] = df['Lasso KF'] + df['XGB KF'] + df['RF KF'] + df['Boruta'] + df['Bootstrap Boruta'] df['Absolute Weights'] = df['Lasso Weights'].apply(lambda x: np.abs(x)) return df final_df = final_counts(test_merge) final_df.sort_values('Total', ascending=False, inplace=True) final_df.head(10) comparison_df = final_df.loc[:, ['Genes', 'Lasso Weights', 'Absolute Weights', 'Counts', 'Total']] comparison_df.head() comparison_df.sort_values('Counts', ascending=False).head(10) comparison_df.sort_values('Absolute Weights', ascending=False).head(10) ###Output _____no_output_____
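###Markdown As a closing check that is not part of the original analysis, the agreement between the two rankings shown above can be quantified with a rank correlation between the absolute lasso weights and the bootstrap Boruta counts, restricted to the genes that appear in both. A minimal sketch: ###Code
# Sketch only: rank agreement between |lasso weight| and bootstrapped Boruta counts.
from scipy.stats import spearmanr

ranked = comparison_df.dropna(subset=['Counts'])  # keep genes that Boruta also scored
rho, p_value = spearmanr(ranked['Absolute Weights'], ranked['Counts'])
print(f'Spearman rho between |lasso weight| and Boruta counts: {rho:.2f} (p = {p_value:.3f})')
###Output _____no_output_____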
Chapter14/Chapter_14.ipynb
###Markdown Generative Adversarial Networks ###Code %tensorflow_version 2.x ###Output _____no_output_____ ###Markdown Training a Generative Adversarial Network An MLP model ###Code from tensorflow.keras.models import Model from tensorflow.keras.layers import Dense, Activation, Input, Flatten from tensorflow.keras.layers import BatchNormalization, Dropout, Reshape from tensorflow.keras.optimizers import Adam from tensorflow.keras.datasets import mnist import numpy as np import matplotlib.pyplot as plt img_dims = 28 img_chnl = 1 ltnt_dim = 100 (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. # this makes sure that each image has a third dimension x_train = np.expand_dims(x_train, axis=3) # 28x28x1 x_test = np.expand_dims(x_test, axis=3) print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) # building the generator network inpt_noise = Input(shape=(ltnt_dim,)) gl1 = Dense(256, activation='relu')(inpt_noise) gl2 = BatchNormalization()(gl1) gl3 = Dense(512, activation='relu')(gl2) gl4 = BatchNormalization()(gl3) gl5 = Dense(1024, activation='relu')(gl4) gl6 = BatchNormalization()(gl5) gl7 = Dropout(0.5)(gl6) gl8= Dense(img_dims*img_dims*img_chnl, activation='sigmoid')(gl7) gl9= Reshape((img_dims,img_dims,img_chnl))(gl8) generator = Model(inpt_noise, gl9) gnrtr_img = generator(inpt_noise) # uncomment this if you want to see the summary # generator.summary() # building the discriminator network inpt_img = Input(shape=(img_dims,img_dims,img_chnl)) dl1 = Flatten()(inpt_img) dl2 = Dropout(0.5)(dl1) dl3 = Dense(512, activation='relu')(dl2) dl4 = Dense(256, activation='relu')(dl3) dl5 = Dense(1, activation='sigmoid')(dl4) discriminator = Model(inpt_img, dl5) validity = discriminator(gnrtr_img) # uncomment this if you want to see the summary # discriminator.summary() # you can use either optimizer: # optimizer = RMSprop(0.0005) optimizer = Adam(0.0002, 0.5) # compiling the discriminator discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) # this will freeze the discriminator in gen_dis below discriminator.trainable = False gen_dis = Model(inpt_noise, validity) # full model gen_dis.compile(loss='binary_crossentropy', optimizer=optimizer) epochs = 12001 # this is up to you! 
batch_size=128 # small batches recommended sample_interval=400 # for generating samples # target vectors valid = np.ones((batch_size, 1)) fake = np.zeros((batch_size, 1)) # we will need these for plots and generated images samp_imgs = {} dloss = [] gloss = [] dacc = [] # this loop will train in batches manually for every epoch for epoch in range(epochs): # training the discriminator first >> # batch of valid images idx = np.random.randint(0, x_train.shape[0], batch_size) imgs = x_train[idx] # noise batch to generate fake images noise = np.random.uniform(0, 1, (batch_size, ltnt_dim)) gen_imgs = generator.predict(noise) # gradient descent on the batch d_loss_real = discriminator.train_on_batch(imgs, valid) d_loss_fake = discriminator.train_on_batch(gen_imgs, fake) d_loss = 0.5 * np.add(d_loss_real, d_loss_fake) # next we train the generator with the discriminator frozen >> # noise batch to generate fake images noise = np.random.uniform(0, 1, (batch_size, ltnt_dim)) # gradient descent on the batch g_loss = gen_dis.train_on_batch(noise, valid) # save performance dloss.append(d_loss[0]) dacc.append(d_loss[1]) gloss.append(g_loss) # print performance every sampling interval if epoch % sample_interval == 0: print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss)) # use noise to generate some images noise = np.random.uniform(0, 1, (2, ltnt_dim)) gen_imgs = generator.predict(noise) samp_imgs[epoch] = gen_imgs import matplotlib.pyplot as plt fig, axs = plt.subplots(6, 10, figsize=(10,7.5)) cnt = sample_interval for i in range(6): for j in [0, 2, 4, 6, 8]: img = samp_imgs[cnt] axs[i,j].imshow(img[0,:,:,0], cmap='gray') axs[i,j].axis('off') axs[i,j].set_title(cnt) axs[i,j+1].imshow(img[1,:,:,0], cmap='gray') axs[i,j+1].axis('off') axs[i,j+1].set_title(cnt) cnt += sample_interval plt.savefig('ch.14.gan.mnist.generated.png', dpi=350, bbox_inches='tight') plt.show() import matplotlib.pyplot as plt fig, ax1 = plt.subplots(figsize=(10,6)) ax1.set_xlabel('Epoch') ax1.set_ylabel('Loss') ax1.plot(range(epochs), gloss, '-.', color='#dc267f', alpha=0.75, label='Generator') ax1.plot(range(epochs), dloss, '-.', color='#fe6100', alpha=0.75, label='Discriminator') ax1.legend(loc=1) ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis ax2.set_ylabel('Discriminator Accuracy') # we already handled the x-label with ax1 ax2.plot(range(epochs), dacc, color='#785ef0', alpha=0.75, label='Accuracy') ax2.legend(loc=4) fig.tight_layout() # otherwise the right y-label is slightly clipped plt.savefig('ch.14.gan.mnist.loss.png', dpi=350, bbox_inches='tight') plt.show() import matplotlib.pyplot as plt import numpy as np plt.figure(figsize=(10,10)) samples = np.random.uniform(0.0, 1.0, size=(400,ltnt_dim)) imgs = generator.predict(samples) for cnt in range(20*20): plt.subplot(20,20,cnt+1) img = imgs[cnt] plt.imshow(img[:,:,0], cmap='gray') plt.xticks([]) plt.yticks([]) plt.savefig('ch.14.gan.mnist.latent.png', bbox_inches='tight', dpi=350) plt.show() ###Output _____no_output_____ ###Markdown A convolutional model ###Code from tensorflow.keras.models import Model from tensorflow.keras.layers import Dense, Activation, Input, Conv2DTranspose, Flatten from tensorflow.keras.layers import BatchNormalization, Dropout, Reshape, Conv2D from tensorflow.keras.optimizers import Adam from tensorflow.keras.datasets import mnist import numpy as np import matplotlib.pyplot as plt img_dims = 28 img_chnl = 1 ltnt_dim = 100 (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = 
x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. x_train = np.expand_dims(x_train, axis=3) x_test = np.expand_dims(x_test, axis=3) # building the generator convolutional network inpt_noise = Input(shape=(ltnt_dim,)) gl1 = Dense(7*7*256, activation='relu')(inpt_noise) gl2 = BatchNormalization()(gl1) gl3 = Reshape((7, 7, 256))(gl2) gl4 = Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', activation='relu')(gl3) gl5 = BatchNormalization()(gl4) gl6 = Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', activation='relu')(gl5) gl7 = BatchNormalization()(gl6) gl8 = Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', activation='sigmoid')(gl7) generator = Model(inpt_noise, gl8) gnrtr_img = generator(inpt_noise) generator.summary() # print to verify dimensions # building the critic convolutional network inpt_img = Input(shape=(img_dims,img_dims,img_chnl)) dl1 = Conv2D(64, (5, 5), strides=(2, 2), padding='same', activation='relu')(inpt_img) dl2 = Dropout(0.3)(dl1) dl3 = Conv2D(128, (5, 5), strides=(2, 2), padding='same', activation='relu')(dl2) dl4 = Dropout(0.3)(dl3) dl5 = Flatten()(dl4) dl6 = Dense(1, activation='sigmoid')(dl5) critic = Model(inpt_img, dl6) validity = critic(gnrtr_img) critic.summary() # again, print for verification optimizer = Adam(0.0002, 0.5) critic.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) critic.trainable = False gen_crt = Model(inpt_noise, validity) gen_crt.compile(loss='binary_crossentropy', optimizer=optimizer) epochs = 12001 batch_size=64 sample_interval=400 valid = np.ones((batch_size, 1)) fake = np.zeros((batch_size, 1)) samp_imgs = {} closs = [] gloss = [] cacc = [] for epoch in range(epochs): idx = np.random.randint(0, x_train.shape[0], batch_size) imgs = x_train[idx] noise = np.random.uniform(0, 1, (batch_size, ltnt_dim)) gen_imgs = generator.predict(noise) c_loss_real = critic.train_on_batch(imgs, valid) c_loss_fake = critic.train_on_batch(gen_imgs, fake) c_loss = 0.5 * np.add(c_loss_real, c_loss_fake) noise = np.random.uniform(0, 1, (batch_size, ltnt_dim)) g_loss = gen_crt.train_on_batch(noise, valid) closs.append(c_loss[0]) cacc.append(c_loss[1]) gloss.append(g_loss) if epoch % sample_interval == 0: print ("%d [C loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss)) noise = np.random.uniform(0, 1, (2, ltnt_dim)) gen_imgs = generator.predict(noise) samp_imgs[epoch] = gen_imgs import matplotlib.pyplot as plt fig, axs = plt.subplots(6, 10, figsize=(10,7.5)) cnt = sample_interval for i in range(6): for j in [0, 2, 4, 6, 8]: img = samp_imgs[cnt] axs[i,j].imshow(img[0,:,:,0], cmap='gray') axs[i,j].axis('off') axs[i,j].set_title(cnt) axs[i,j+1].imshow(img[1,:,:,0], cmap='gray') axs[i,j+1].axis('off') axs[i,j+1].set_title(cnt) cnt += sample_interval plt.savefig('ch.14.gan.cnn.mnist.generated.png', dpi=350, bbox_inches='tight') plt.show() import matplotlib.pyplot as plt fig, ax1 = plt.subplots(figsize=(10,6)) ax1.set_xlabel('Epoch') ax1.set_ylabel('Loss') ax1.plot(range(epochs), gloss, '-.', color='#dc267f', alpha=0.75, label='Generator') ax1.plot(range(epochs), closs, '-.', color='#fe6100', alpha=0.75, label='Critic') ax1.legend(loc=1) ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis ax2.set_ylabel('Critic Accuracy') # we already handled the x-label with ax1 ax2.plot(range(epochs), cacc, color='#785ef0', alpha=0.75, label='Accuracy') ax2.legend(loc=4) fig.tight_layout() # otherwise the right y-label is slightly clipped 
plt.savefig('ch.14.gan.cnn.mnist.loss.png', dpi=350, bbox_inches='tight') plt.show() ###Output _____no_output_____ ###Markdown Comparing GANs and VAEs ###Code import matplotlib.pyplot as plt import numpy as np plt.figure(figsize=(10,10)) cnt = 1 samples = np.random.uniform(0.0, 1.0, size=(400,ltnt_dim)) imgs = generator.predict(samples) for z1 in np.arange(0.05, 1.05, 0.05): for z2 in np.arange(0.05, 1.05, 0.05): plt.subplot(20,20,cnt) img = imgs[cnt-1] plt.imshow(img[:,:,0], cmap='gray') plt.xticks([]) plt.yticks([]) cnt += 1 plt.savefig('ch.14.gan.cnn.mnist.latent.png', bbox_inches='tight', dpi=350) plt.show() ###Output _____no_output_____
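###Markdown Another way to probe how smoothly the generator organizes its latent space, which is one of the usual points of comparison between GANs and VAEs, is to interpolate linearly between two random latent vectors and look at the digits generated along the path. The cell below is a sketch that is not part of the chapter; it only reuses the trained `generator` and `ltnt_dim` defined above. ###Code
# Sketch only: linear interpolation between two latent codes of the trained generator.
import numpy as np
import matplotlib.pyplot as plt

z_start = np.random.uniform(0.0, 1.0, size=(1, ltnt_dim))
z_end = np.random.uniform(0.0, 1.0, size=(1, ltnt_dim))

steps = 10
plt.figure(figsize=(10, 1.5))
for i, alpha in enumerate(np.linspace(0.0, 1.0, steps)):
    z = (1.0 - alpha) * z_start + alpha * z_end   # convex combination of the two codes
    img = generator.predict(z)
    plt.subplot(1, steps, i + 1)
    plt.imshow(img[0, :, :, 0], cmap='gray')
    plt.xticks([])
    plt.yticks([])
plt.show()
###Output _____no_output_____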
ex06-Reproject DEM.ipynb
###Markdown ex06-Reproject DEMReprojection is all about changing the coordinates in a dataset from one coordinate system to another. While reprojection is less common these days due to more advanced methods of data distribution, sometimes you will work with multiple rasters that come with different projections, and you will have to reproject them so they are in the same coordinate reference system. Another key reason is that many geospatial operations (such as watershed delineation) only support projected raster data (e.g., DEM), instead of the widely used projection of latitude and longitude (for short, EPSG:4326).Spatial data from different sources and that cover different extents are often in different Coordinate Reference Systems (CRS). Why is there an incredible variety of map projections? This is because each is designed to solve a particular problem. Every map is a compromise favoring one or more projection properties: area, form (or angle), distance, and direction.When you meet a DEM with an unexpected projection in practice and you have to reproject it, in my experience the best option is to use GIS software (such as GRASS GIS, QGIS or SAGA GIS) or to apply the ***GDAL*** utility [gdalwarp](https://gdal.org/programs/gdalwarp.html), which is really the most powerful tool (not just one of them) for reprojections. Of course, we can also write a Python script to reproject a DEM. For example, the OGR Python API provides full reprojection support using the Open Spatial Reference module (also known as osr). However, this heavy package provides little abstraction over GDAL’s C API, which means that Python programs using it tend to read and run like C programs. Luckily, another Python package, [rasterio](https://rasterio.readthedocs.io/en/stable/intro.html), expresses GDAL’s data model using fewer non-idiomatic extension classes and more idiomatic Python types and protocols, while performing as fast as GDAL’s Python bindings.This notebook will apply ***rasterio*** to reproject a demo DEM from EPSG:32613 to EPSG:4326. The demo data can be downloaded from https://ndownloader.figshare.com/articles/8259098/versions/2. See https://spatialreference.org/ref/epsg/ for more information about EPSG codes. ###Code %matplotlib inline import numpy as np import rasterio as rio from rasterio.warp import calculate_default_transform, reproject, Resampling import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") ###Output _____no_output_____ ###Markdown Reproject DEM with rasterioRasterio provides the rasterio.warp.calculate_default_transform() function to determine the optimal resolution and transform for the destination raster. Given a source dataset in a known coordinate reference system, this function returns a (transform, width, height) tuple calculated by libgdal, which makes reprojection much easier. 
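For reference, the gdalwarp route mentioned above is a one-line command, and the same operation is exposed in Python through `osgeo.gdal.Warp`. The snippet below is only an illustration of that alternative and is not used in the rest of this notebook; the file names simply match the ones defined in the next cell. ###Code
# Illustration only: the same reprojection done with GDAL instead of rasterio.
# On the command line:
#   gdalwarp -t_srs EPSG:4326 data/es_dem/pre_DTM.tif data/es_dem/pre_DTM_EPSG4326.tif
# Through the GDAL Python bindings:
from osgeo import gdal

gdal.Warp('data/es_dem/pre_DTM_EPSG4326.tif',  # destination raster
          'data/es_dem/pre_DTM.tif',           # source raster
          dstSRS='EPSG:4326',
          resampleAlg='near')
###Output _____no_output_____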
###Code infile = "data/es_dem/pre_DTM.tif" outfile = "data/es_dem/pre_DTM_EPSG4326.tif" ###Output _____no_output_____ ###Markdown ***Check original projection*** ###Code scr_tif = rio.open(infile) print(scr_tif.meta) scr_tif.close() ###Output {'driver': 'GTiff', 'dtype': 'float32', 'nodata': -3.4028234663852886e+38, 'width': 4000, 'height': 2000, 'count': 1, 'crs': CRS.from_epsg(32613), 'transform': Affine(1.0, 0.0, 472000.0, 0.0, -1.0, 4436000.0)} ###Markdown ***Reproject to EPSG:4326*** ###Code dst_crs = 'EPSG:4326' with rio.open(infile) as src: transform, width, height = calculate_default_transform( src.crs, dst_crs, src.width, src.height, *src.bounds) kwargs = src.meta.copy() kwargs.update({ 'crs': dst_crs, 'transform': transform, 'width': width, 'height': height }) with rio.open(outfile, 'w', **kwargs) as dst: for i in range(1, src.count + 1): reproject( source=rio.band(src, i), destination=rio.band(dst, i), src_transform=src.transform, src_crs=src.crs, dst_transform=transform, dst_crs=dst_crs, resampling=Resampling.nearest) ###Output _____no_output_____ ###Markdown ***Check new projection*** ###Code dst_tif = rio.open(outfile) print(dst_tif.meta) print(dst_tif.bounds) dst_tif.close() ###Output {'driver': 'GTiff', 'dtype': 'float32', 'nodata': -3.4028234663852886e+38, 'width': 4179, 'height': 1614, 'count': 1, 'crs': CRS.from_epsg(4326), 'transform': Affine(1.124234793876272e-05, 0.0, -105.32837712340124, 0.0, -1.124234793876272e-05, 40.073923431943214)} BoundingBox(left=-105.32837712340124, bottom=40.05577828237005, right=-105.28139535136515, top=40.073923431943214)
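###Markdown As a quick visual sanity check, which is not part of the original exercise, the reprojected DEM can be plotted with the nodata value from the metadata above masked out. A minimal sketch using the objects already imported in this notebook: ###Code
# Sketch only: plot the reprojected DEM, hiding the large negative nodata fill value.
with rio.open(outfile) as src:
    dem = src.read(1)
    dem = np.ma.masked_equal(dem, src.nodata)

plt.figure(figsize=(8, 4))
plt.imshow(dem, cmap='terrain')
plt.colorbar(label='elevation')
plt.title('pre_DTM reprojected to EPSG:4326')
plt.show()
###Output _____no_output_____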
Embeddings and Word2Vec/Skip-Gram word2vec.ipynb
###Markdown Skip-gram word2vecIn this notebook, I'll lead you through using TensorFlow to implement the word2vec algorithm using the skip-gram architecture. By implementing this, you'll learn about embedding words for use in natural language processing. This will come in handy when dealing with things like translations. ReadingsHere are the resources I used to build this notebook. I suggest reading these either beforehand or while you're working on this material.* A really good [conceptual overview](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/) of word2vec from Chris McCormick * [First word2vec paper](https://arxiv.org/pdf/1301.3781.pdf) from Mikolov et al.* [NIPS paper](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) with improvements for word2vec also from Mikolov et al.* An [implementation of word2vec](http://www.thushv.com/natural_language_processing/word2vec-part-1-nlp-with-deep-learning-with-tensorflow-skip-gram/) from Thushan Ganegedara* TensorFlow [word2vec tutorial](https://www.tensorflow.org/tutorials/word2vec) Word embeddingsWhen you're dealing with language and words, you end up with tens of thousands of classes to predict, one for each word. Trying to one-hot encode these words is massively inefficient, you'll have one element set to 1 and the other 50,000 set to 0. The word2vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words. Words that show up in similar contexts, such as "black", "white", and "red" will have vectors near each other. There are two architectures for implementing word2vec, CBOW (Continuous Bag-Of-Words) and Skip-gram.In this implementation, we'll be using the skip-gram architecture because it performs better than CBOW. Here, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts.First up, importing packages. ###Code import time import numpy as np import tensorflow as tf import utils ###Output _____no_output_____ ###Markdown Load the [text8 dataset](http://mattmahoney.net/dc/textdata.html), a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the `data` folder. Then you can extract it and delete the archive file to save storage space. ###Code from urllib.request import urlretrieve from os.path import isfile, isdir from tqdm import tqdm import zipfile dataset_folder_path = 'data' dataset_filename = 'text8.zip' dataset_name = 'Text8 Dataset' class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile(dataset_filename): with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar: urlretrieve( 'http://mattmahoney.net/dc/text8.zip', dataset_filename, pbar.hook) if not isdir(dataset_folder_path): with zipfile.ZipFile(dataset_filename) as zip_ref: zip_ref.extractall(dataset_folder_path) with open('data/text8') as f: text = f.read() ###Output _____no_output_____ ###Markdown PreprocessingHere I'm fixing up the text to make training easier. This comes from the `utils` module I wrote. The `preprocess` function coverts any punctuation into tokens, so a period is changed to ` `. 
In this data set, there aren't any periods, but it will help in other NLP problems. I'm also removing all words that show up five or fewer times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations. If you want to write your own functions for this stuff, go for it. ###Code words = utils.preprocess(text) print(words[:30]) print("Total words: {}".format(len(words))) print("Unique words: {}".format(len(set(words)))) ###Output Total words: 16680599 Unique words: 63641 ###Markdown And here I'm creating dictionaries to covert words to integers and backwards, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list `int_words`. ###Code vocab_to_int, int_to_vocab = utils.create_lookup_tables(words) int_words = [vocab_to_int[word] for word in words] ###Output _____no_output_____ ###Markdown SubsamplingWords that show up often such as "the", "of", and "for" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by $$ P(w_i) = 1 - \sqrt{\frac{t}{f(w_i)}} $$where $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset.I'm going to leave this up to you as an exercise. This is more of a programming challenge, than about deep learning specifically. But, being able to prepare your data for your network is an important skill to have. Check out my solution to see how I did it.> **Exercise:** Implement subsampling for the words in `int_words`. That is, go through `int_words` and discard each word given the probablility $P(w_i)$ shown above. Note that $P(w_i)$ is the probability that a word is discarded. Assign the subsampled data to `train_words`. ###Code ## Your code here from collections import Counter import random threshold = 1e-5 word_counts = Counter(int_words) total_count = len(int_words) freqs = {word: count / total_count for word, count in word_counts.items()} p_drop = {word: 1 - np.sqrt(threshold / freqs[word]) for word in word_counts} train_words = [word for word in int_words if p_drop[word] < random.random()] ###Output _____no_output_____ ###Markdown Making batches Now that our data is in good shape, we need to get it into the proper form to pass it into our network. With the skip-gram architecture, for each word in the text, we want to grab all the words in a window around that word, with size $C$. From [Mikolov et al.](https://arxiv.org/pdf/1301.3781.pdf): "Since the more distant words are usually less related to the current word than those close to it, we give less weight to the distant words by sampling less from those words in our training examples... If we choose $C = 5$, for each training word we will select randomly a number $R$ in range $$, and then use $R$ words from history and $R$ words from the future of the current word as correct labels."> **Exercise:** Implement a function `get_target` that receives a list of words, an index, and a window size, then returns a list of words in the window around the index. Make sure to use the algorithm described above, where you choose a random number of words from the window. 
###Code def get_target(words, idx, window_size=5): ''' Get a list of words in a window around an index. ''' # Your code here R = random.randint(1, window_size + 1) start = idx - R if (idx - R) > 0 else 0 stop = idx + R target_words = set(words[start:idx] + words[idx + 1:stop+1]) return target_words ###Output _____no_output_____ ###Markdown Here's a function that returns batches for our network. The idea is that it grabs `batch_size` words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function by the way, helps save memory. ###Code def get_batches(words, batch_size, window_size=5): ''' Create a generator of word batches as a tuple (inputs, targets) ''' n_batches = len(words)//batch_size # only full batches words = words[:n_batches*batch_size] for idx in range(0, len(words), batch_size): x, y = [], [] batch = words[idx:idx+batch_size] for ii in range(len(batch)): batch_x = batch[ii] batch_y = get_target(batch, ii, window_size) y.extend(batch_y) x.extend([batch_x]*len(batch_y)) yield x, y ###Output _____no_output_____ ###Markdown Building the graphFrom Chris McCormick's blog, we can see the general structure of our network.![embedding_network](./assets/skip_gram_net_arch.png)The input words are passed in as one-hot encoded vectors. This will go into a hidden layer of linear units, then into a softmax layer. We'll use the softmax layer to make a prediction like normal.The idea here is to train the hidden layer weight matrix to find efficient representations for our words. This weight matrix is usually called the embedding matrix or embedding look-up table. We can discard the softmax layer becuase we don't really care about making predictions with this network. We just want the embedding matrix so we can use it in other networks we build from the dataset.I'm going to have you build the graph in stages now. First off, creating the `inputs` and `labels` placeholders like normal.> **Exercise:** Assign `inputs` and `labels` using `tf.placeholder`. We're going to be passing in integers, so set the data types to `tf.int32`. The batches we're passing in will have varying sizes, so set the batch sizes to [`None`]. To make things work later, you'll need to set the second dimension of `labels` to `None` or `1`. ###Code train_graph = tf.Graph() with train_graph.as_default(): inputs = tf.placeholder(tf.int32, shape=[None], name="inputs") labels = tf.placeholder(tf.int32, shape=[None, None], name="labels") ###Output _____no_output_____ ###Markdown Embedding The embedding matrix has a size of the number of words by the number of neurons in the hidden layer. So, if you have 10,000 words and 300 hidden units, the matrix will have size $10,000 \times 300$. Remember that we're using one-hot encoded vectors for our inputs. When you do the matrix multiplication of the one-hot vector with the embedding matrix, you end up selecting only one row out of the entire matrix:![one-hot matrix multiplication](assets/matrix_mult_w_one_hot.png)You don't actually need to do the matrix multiplication, you just need to select the row in the embedding matrix that corresponds to the input word. 
Then, the embedding matrix becomes a lookup table, you're looking up a vector the size of the hidden layer that represents the input word.> **Exercise:** Tensorflow provides a convenient function [`tf.nn.embedding_lookup`](https://www.tensorflow.org/api_docs/python/tf/nn/embedding_lookup) that does this lookup for us. You pass in the embedding matrix and a tensor of integers, then it returns rows in the matrix corresponding to those integers. Below, set the number of embedding features you'll use (200 is a good start), create the embedding matrix variable, and use [`tf.nn.embedding_lookup`](https://www.tensorflow.org/api_docs/python/tf/nn/embedding_lookup) to get the embedding tensors. For the embedding matrix, I suggest you initialize it with a uniform random numbers between -1 and 1 using [tf.random_uniform](https://www.tensorflow.org/api_docs/python/tf/random_uniform). This [TensorFlow tutorial](https://www.tensorflow.org/tutorials/word2vec) will help if you get stuck. ###Code n_vocab = len(int_to_vocab) n_embedding = 200 # Number of embedding features with train_graph.as_default(): # create embedding weight matrix here embedding = tf.Variable(tf.random_uniform([n_vocab, n_embedding], -1.0, 1.0)) # use tf.nn.embedding_lookup to get the hidden layer output embed = tf.nn.embedding_lookup(embedding, inputs) ###Output _____no_output_____ ###Markdown Negative sampling For every example we give the network, we train it using the output from the softmax layer. That means for each input, we're making very small changes to millions of weights even though we only have one true example. This makes training the network very inefficient. We can approximate the loss from the softmax layer by only updating a small subset of all the weights at once. We'll update the weights for the correct label, but only a small number of incorrect labels. This is called ["negative sampling"](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). Tensorflow has a convenient function to do this, [`tf.nn.sampled_softmax_loss`](https://www.tensorflow.org/api_docs/python/tf/nn/sampled_softmax_loss).> **Exercise:** Below, create weights and biases for the softmax layer. Then, use [`tf.nn.sampled_softmax_loss`](https://www.tensorflow.org/api_docs/python/tf/nn/sampled_softmax_loss) to calculate the loss. Be sure to read the documentation to figure out how it works. ###Code # Number of negative labels to sample n_sampled = 100 with train_graph.as_default(): # create softmax weight matrix here softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1)) # create softmax biases here softmax_b = tf.Variable(tf.zeros(n_vocab)) # Calculate the loss using negative sampling loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b, labels, embed, n_sampled, n_vocab) cost = tf.reduce_mean(loss) optimizer = tf.train.AdamOptimizer().minimize(cost) ###Output _____no_output_____ ###Markdown ValidationThis code is from Thushan Ganegedara's implementation. Here we're going to choose a few common words and few uncommon words. Then, we'll print out the closest words to them. It's a nice way to check that our embedding table is grouping together words with similar semantic meanings. ###Code with train_graph.as_default(): ## From Thushan Ganegedara's implementation valid_size = 16 # Random set of words to evaluate similarity on. valid_window = 100 # pick 8 samples from (0,100) and (1000,1100) each ranges. 
lower id implies more frequent valid_examples = np.array(random.sample(range(valid_window), valid_size//2)) valid_examples = np.append(valid_examples, random.sample(range(1000,1000+valid_window), valid_size//2)) valid_dataset = tf.constant(valid_examples, dtype=tf.int32) # We use the cosine distance: norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True)) normalized_embedding = embedding / norm valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset) similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding)) # If the checkpoints directory doesn't exist: !mkdir checkpoints ###Output _____no_output_____ ###Markdown TrainingBelow is the code to train the network. Every 100 batches it reports the training loss. Every 1000 batches, it'll print out the validation words. ###Code epochs = 10 batch_size = 1000 window_size = 10 with train_graph.as_default(): saver = tf.train.Saver() with tf.Session(graph=train_graph) as sess: iteration = 1 loss = 0 sess.run(tf.global_variables_initializer()) for e in range(1, epochs+1): batches = get_batches(train_words, batch_size, window_size) start = time.time() for x, y in batches: feed = {inputs: x, labels: np.array(y)[:, None]} train_loss, _ = sess.run([cost, optimizer], feed_dict=feed) loss += train_loss if iteration % 100 == 0: end = time.time() print("Epoch {}/{}".format(e, epochs), "Iteration: {}".format(iteration), "Avg. Training loss: {:.4f}".format(loss/100), "{:.4f} sec/batch".format((end-start)/100)) loss = 0 start = time.time() if iteration % 1000 == 0: ## From Thushan Ganegedara's implementation # note that this is expensive (~20% slowdown if computed every 500 steps) sim = similarity.eval() for i in range(valid_size): valid_word = int_to_vocab[valid_examples[i]] top_k = 8 # number of nearest neighbors nearest = (-sim[i, :]).argsort()[1:top_k+1] log = 'Nearest to %s:' % valid_word for k in range(top_k): close_word = int_to_vocab[nearest[k]] log = '%s %s,' % (log, close_word) print(log) iteration += 1 save_path = saver.save(sess, "checkpoints/text8.ckpt") embed_mat = sess.run(normalized_embedding) ###Output Epoch 1/10 Iteration: 100 Avg. Training loss: 5.6408 0.0999 sec/batch Epoch 1/10 Iteration: 200 Avg. Training loss: 5.5806 0.0982 sec/batch Epoch 1/10 Iteration: 300 Avg. Training loss: 5.5068 0.0980 sec/batch Epoch 1/10 Iteration: 400 Avg. Training loss: 5.5900 0.0965 sec/batch Epoch 1/10 Iteration: 500 Avg. Training loss: 5.5100 0.0979 sec/batch Epoch 1/10 Iteration: 600 Avg. Training loss: 5.5333 0.0977 sec/batch Epoch 1/10 Iteration: 700 Avg. Training loss: 5.5439 0.0974 sec/batch Epoch 1/10 Iteration: 800 Avg. Training loss: 5.4934 0.0978 sec/batch Epoch 1/10 Iteration: 900 Avg. Training loss: 5.4930 0.1041 sec/batch Epoch 1/10 Iteration: 1000 Avg. 
Training loss: 5.4331 0.1066 sec/batch Nearest to was: practice, nervousness, esm, catechetical, occultations, princeton, censor, heroes, Nearest to to: transfer, vulnerabilities, get, historically, rowing, curses, bulwer, layperson, Nearest to time: abbate, fini, request, gretzky, employ, donnell, kronos, hwa, Nearest to war: thicknesses, bogie, nbi, fuss, linkin, kidnappers, accession, fddi, Nearest to such: figuring, pocock, literally, byproduct, board, mantegna, lemass, alkyl, Nearest to as: pianist, exist, dmoz, disabled, eutheria, reeve, australasia, marguerite, Nearest to united: conscript, philadelphia, household, deciphered, niger, seidel, tahrir, talked, Nearest to zero: ricardo, path, initial, yachts, uribe, contiguous, barrios, molality, Nearest to egypt: lent, rockford, cusp, brandished, sand, hydrological, died, caccia, Nearest to proposed: regulators, widening, bernicia, jauron, jackie, connective, ticker, overload, Nearest to engineering: laboratory, sholay, euros, dismemberment, originally, odilo, rusted, sykes, Nearest to primarily: congress, ops, groening, slacks, coimbra, choke, cortez, qq, Nearest to bill: mishandled, paving, hebe, agreements, complemented, hymns, suggest, capabilities, Nearest to event: consonant, insula, reestablishment, ancestors, neurotoxicity, denied, quicker, celsius, Nearest to consists: presto, senile, gpa, leucippus, inquire, affix, scandinavia, cock, Nearest to rise: rq, slice, sills, breda, bombers, deut, codons, symbolize, Epoch 1/10 Iteration: 1100 Avg. Training loss: 5.4555 0.1099 sec/batch Epoch 1/10 Iteration: 1200 Avg. Training loss: 5.3343 0.1097 sec/batch Epoch 1/10 Iteration: 1300 Avg. Training loss: 5.2859 0.1111 sec/batch Epoch 1/10 Iteration: 1400 Avg. Training loss: 5.1831 0.1107 sec/batch Epoch 1/10 Iteration: 1500 Avg. Training loss: 5.1676 0.1089 sec/batch Epoch 1/10 Iteration: 1600 Avg. Training loss: 5.1497 0.1086 sec/batch Epoch 1/10 Iteration: 1700 Avg. Training loss: 5.0648 0.1104 sec/batch Epoch 1/10 Iteration: 1800 Avg. Training loss: 5.0284 0.1106 sec/batch Epoch 1/10 Iteration: 1900 Avg. Training loss: 4.9869 0.1113 sec/batch Epoch 1/10 Iteration: 2000 Avg. 
Training loss: 4.9462 0.1111 sec/batch Nearest to was: practice, princeton, meet, nervousness, catechetical, censor, heroes, esm, Nearest to to: get, historically, transfer, vulnerabilities, digital, rowing, advisor, purely, Nearest to time: request, employ, might, abbate, gretzky, lightweight, fini, donnell, Nearest to war: remains, collectively, thicknesses, accession, nbi, broke, wrote, detentions, Nearest to such: figuring, literally, pocock, board, byproduct, spelling, clone, studies, Nearest to as: exist, custom, pace, spoke, pianist, disabled, dmoz, marguerite, Nearest to united: philadelphia, household, conscript, force, talked, step, rescinded, niger, Nearest to zero: path, ricardo, initial, uribe, contiguous, coal, yachts, count, Nearest to egypt: lent, sand, died, cusp, incidents, sections, to, regicides, Nearest to proposed: regulators, widening, jackie, jauron, bernicia, wheel, lie, steep, Nearest to engineering: laboratory, originally, euros, dismemberment, odilo, sholay, rusted, swedish, Nearest to primarily: congress, treaties, slacks, coimbra, harbin, cortez, shoulder, obstacles, Nearest to bill: suggest, mishandled, agreements, complemented, paving, hebe, capabilities, independence, Nearest to event: ancestors, consonant, reestablishment, insula, quicker, neurotoxicity, denied, resemblance, Nearest to consists: inquire, scandinavia, leucippus, gpa, clearing, senile, presto, sovereign, Nearest to rise: slice, sills, bombers, evil, notable, associate, codons, breda, Epoch 1/10 Iteration: 2100 Avg. Training loss: 4.9136 0.1133 sec/batch Epoch 1/10 Iteration: 2200 Avg. Training loss: 4.8837 0.1124 sec/batch Epoch 1/10 Iteration: 2300 Avg. Training loss: 4.8676 0.1126 sec/batch Epoch 1/10 Iteration: 2400 Avg. Training loss: 4.8484 0.1117 sec/batch Epoch 1/10 Iteration: 2500 Avg. Training loss: 4.8163 0.1125 sec/batch Epoch 1/10 Iteration: 2600 Avg. Training loss: 4.8268 0.1135 sec/batch Epoch 1/10 Iteration: 2700 Avg. Training loss: 4.7829 0.1130 sec/batch Epoch 1/10 Iteration: 2800 Avg. Training loss: 4.7870 0.1132 sec/batch Epoch 1/10 Iteration: 2900 Avg. Training loss: 4.7509 0.1130 sec/batch Epoch 1/10 Iteration: 3000 Avg. 
Training loss: 4.7663 0.1136 sec/batch Nearest to was: princeton, practice, esm, nervousness, catechetical, meet, censor, occultations, Nearest to to: get, vulnerabilities, rowing, isomorphism, transfer, digital, egypt, precision, Nearest to time: request, employ, abbate, gretzky, orchestra, lightweight, churches, exercised, Nearest to war: accession, commander, thicknesses, collectively, deliver, fddi, detentions, pat, Nearest to such: figuring, literally, pocock, byproduct, spelling, digests, fantastic, board, Nearest to as: spoke, exist, custom, dmoz, pace, marguerite, pianist, ttl, Nearest to united: philadelphia, conscript, household, talked, nave, unhappy, deciphered, deciding, Nearest to zero: path, ricardo, count, yachts, nagano, uribe, km, contiguous, Nearest to egypt: lent, died, cusp, to, incidents, brandished, sand, sections, Nearest to proposed: regulators, widening, jackie, jauron, bernicia, shifted, lie, wheel, Nearest to engineering: laboratory, euros, originally, sholay, dismemberment, odilo, proportional, rusted, Nearest to primarily: congress, groening, lester, slacks, treaties, ops, coimbra, lessons, Nearest to bill: agreements, mishandled, complemented, paving, suggest, independence, hymns, hebe, Nearest to event: consonant, ancestors, reestablishment, insula, quicker, denied, neurotoxicity, celsius, Nearest to consists: inquire, senile, gpa, sovereign, leucippus, presto, scandinavia, establishes, Nearest to rise: slice, sills, bombers, breda, evil, codons, backups, coleridge, Epoch 1/10 Iteration: 3100 Avg. Training loss: 4.7554 0.1157 sec/batch Epoch 1/10 Iteration: 3200 Avg. Training loss: 4.7873 0.1164 sec/batch Epoch 1/10 Iteration: 3300 Avg. Training loss: 4.7045 0.1143 sec/batch Epoch 1/10 Iteration: 3400 Avg. Training loss: 4.7046 0.1153 sec/batch Epoch 1/10 Iteration: 3500 Avg. Training loss: 4.7520 0.1173 sec/batch Epoch 1/10 Iteration: 3600 Avg. Training loss: 4.7180 0.1161 sec/batch Epoch 1/10 Iteration: 3700 Avg. Training loss: 4.6777 0.1128 sec/batch Epoch 1/10 Iteration: 3800 Avg. Training loss: 4.7321 0.1141 sec/batch Epoch 1/10 Iteration: 3900 Avg. Training loss: 4.7006 0.1147 sec/batch Epoch 1/10 Iteration: 4000 Avg. 
Training loss: 4.6688 0.1160 sec/batch
Nearest to was: esm, princeton, occultations, catechetical, meet, nervousness, bellarmine, practice,
Nearest to to: vulnerabilities, rowing, get, isomorphism, egypt, geomagnetic, burbank, transfer,
Nearest to time: request, abbate, finns, gretzky, might, mediated, employ, churches,
Nearest to war: commander, herschel, detentions, sncf, thicknesses, accession, fddi, repudiating,
Nearest to such: figuring, literally, pocock, byproduct, digests, fantastic, authorize, tasks,
Nearest to as: spoke, dmoz, custom, exist, ttl, reeve, cavern, chari,
Nearest to united: philadelphia, conscript, unhappy, cortland, niger, general, deciphered, force,
Nearest to zero: count, km, nagano, tokyo, politician, five, subjecting, ricardo,
Nearest to egypt: lent, cusp, to, died, brandished, occupied, sections, heterodox,
Nearest to proposed: regulators, widening, jauron, jackie, bernicia, steep, shifted, atrocities,
Nearest to engineering: laboratory, euros, sholay, dismemberment, originally, odilo, rusted, visualizing,
Nearest to primarily: congress, lester, ops, cortez, choke, groening, coimbra, harbin,
Nearest to bill: agreements, complemented, paving, mishandled, independence, suggest, hebe, patanjali,
Nearest to event: consonant, insula, reestablishment, ancestors, denied, neurotoxicity, quicker, resemblance,
Nearest to consists: senile, inquire, gpa, presto, sovereign, leucippus, affix, establishes,
Nearest to rise: slice, sills, bombers, coleridge, breda, codons, backups, deut,
Epoch 1/10 Iteration: 4100 Avg. Training loss: 4.6554 0.1156 sec/batch
...
Epoch 1/10 Iteration: 4600 Avg. Training loss: 4.6219 0.1137 sec/batch
Epoch 2/10 Iteration: 4700 Avg. Training loss: 4.6073 0.0832 sec/batch
...
Epoch 2/10 Iteration: 9200 Avg. Training loss: 4.3091 0.1138 sec/batch
Epoch 3/10 Iteration: 9300 Avg. Training loss: 4.3459 0.0482 sec/batch
...
Epoch 3/10 Iteration: 13800 Avg. Training loss: 4.1584 0.1175 sec/batch
Epoch 4/10 Iteration: 13900 Avg. Training loss: 4.1603 0.0152 sec/batch
...
Epoch 4/10 Iteration: 18500 Avg. Training loss: 4.1098 0.1179 sec/batch
Epoch 5/10 Iteration: 18600 Avg. Training loss: 4.0776 0.0983 sec/batch
...
Epoch 5/10 Iteration: 19000 Avg. Training loss: 3.9750 0.1145 sec/batch
Nearest to was: the, cilicia, ethelred, year, bellarmine, pekah, sih, of,
Nearest to to: get, layperson, expectancies, multitasking, foiling, cueball, in, logarithm,
Nearest to time: onerous, eastbourne, sextilis, netted, notary, calculations, ineffable, request,
Nearest to war: against, broke, commander, roosevelt, surrender, sustaining, nbi, detentions,
Nearest to such: digests, figuring, deistic, lemass, byproduct, ere, shoemakers, embed,
Nearest to as: a, australasia, of, exist, the, not, plowing, connection,
Nearest to united: states, conscript, philadelphia, precipitate, cortland, fada, banshees, kinnock,
Nearest to zero: two, five, three, four, one, nine, six, seven,
Nearest to egypt: antigonus, brandished, occupied, pharaoh, standoff, mordecai, hittite, gela,
Nearest to proposed: regulators, jauron, widening, lester, culminated, inari, herman, jackie,
Nearest to engineering: laboratory, corse, faculty, processes, technology, multidisciplinary, newbie, venter,
Nearest to primarily: commercial, mande, earnings, coimbra, chuvash, os, cortez, lessons,
Nearest to bill: mishandled, murrow, congress, serendip, agreements, ketterle, keefe, prog,
Nearest to event: ancestors, insula, neurotoxicity, trainer, refund, consonant, denied, remo,
Nearest to consists: rectangular, affix, cohesive, presto, cytoplasmic, glade, arrangement, abrasax,
Nearest to rise: sills, prevailing, coleridge, berber, nation, higher, attitudes, rinzai,
Epoch 5/10 Iteration: 19100 Avg. Training loss: 4.0097 0.1185 sec/batch
...
Epoch 5/10 Iteration: 23100 Avg. Training loss: 3.9996 0.1186 sec/batch
Epoch 6/10 Iteration: 23200 Avg. Training loss: 4.0396 0.0642 sec/batch
...
Epoch 6/10 Iteration: 27700 Avg. Training loss: 3.9461 0.1188 sec/batch
Epoch 7/10 Iteration: 27800 Avg. Training loss: 4.0112 0.0314 sec/batch
...
Epoch 7/10 Iteration: 32400 Avg. Training loss: 3.9430 0.1180 sec/batch
Epoch 8/10 Iteration: 32500 Avg. Training loss: 3.9184 0.1138 sec/batch
...
Epoch 8/10 Iteration: 34000 Avg. Training loss: 3.9122 0.1179 sec/batch
Nearest to was: the, after, for, year, pekah, in, cilicia, had,
Nearest to to: in, the, of, and, that, as, through, a,
Nearest to time: onerous, limewire, was, halfway, netted, sextilis, at, messagepad,
Nearest to war: against, synonymous, roosevelt, ungrammatical, kills, nbi, chester, potsdam,
Nearest to such: digests, some, of, deistic, other, as, include, which,
Nearest to as: of, and, for, a, the, which, with, in,
Nearest to united: states, conscript, precipitate, gluconeogenesis, registries, authorising, chessgames, eyre,
Nearest to zero: two, five, one, four, three, nine, seven, six,
Nearest to egypt: antigonus, standoff, uprooted, pharaoh, sanusi, tunisia, nasser, overnight,
Nearest to proposed: herman, lester, ffo, regulators, infallible, jauron, culminated, nanobots,
Nearest to engineering: laboratory, engineers, corse, technology, faculty, uncontrolled, electronics, disciplines,
Nearest to primarily: strong, including, biologically, mande, transporting, commercial, lydon, environments,
Nearest to bill: mishandled, bills, serendip, carmack, forbes, congress, hazardous, qwest,
Nearest to event: ancestors, refund, naked, neurotoxicity, amiens, radiocommunications, rsv, time,
Nearest to consists: rectangular, cytoplasmic, cohesive, affix, coconut, fgc, arrangement, bicameral,
Nearest to rise: rinzai, greater, higher, catering, coleridge, prevailing, uncollected, dwelling,
Epoch 8/10 Iteration: 34100 Avg. Training loss: 3.8809 0.1203 sec/batch
...
Epoch 8/10 Iteration: 34900 Avg. Training loss: 3.8782 0.1172 sec/batch
Epoch 8/10 Iteration: 35000 Avg.
Training loss: 3.8998 0.1181 sec/batch Nearest to was: the, for, after, of, year, in, had, early, Nearest to to: the, in, of, but, a, through, transfer, and, Nearest to time: onerous, netted, limewire, was, halfway, messagepad, eastbourne, event, Nearest to war: against, roosevelt, synonymous, ungrammatical, nbi, kills, potsdam, afesd, Nearest to such: digests, some, deistic, byproduct, other, both, as, include, Nearest to as: of, and, for, a, which, the, not, have, Nearest to united: states, conscript, precipitate, gluconeogenesis, authorising, registries, chessgames, eyre, Nearest to zero: two, five, one, three, four, nine, seven, six, Nearest to egypt: standoff, uprooted, antigonus, pharaoh, tunisia, sanusi, nasser, egyptian, Nearest to proposed: herman, lester, ffo, infallible, culminated, jauron, regulators, nanobots, Nearest to engineering: laboratory, engineers, corse, technology, uncontrolled, electronics, newbie, faculty, Nearest to primarily: including, strong, mande, biologically, lydon, counterpart, commercial, coimbra, Nearest to bill: mishandled, congress, bills, forbes, hazardous, rescind, leviticus, carmack, Nearest to event: ancestors, triassic, time, naked, refund, ago, torii, rsv, Nearest to consists: rectangular, cohesive, cytoplasmic, affix, fgc, presto, cast, paschal, Nearest to rise: greater, rinzai, higher, catering, coleridge, uncollected, prevailing, grueling, Epoch 8/10 Iteration: 35100 Avg. Training loss: 3.8935 0.1191 sec/batch Epoch 8/10 Iteration: 35200 Avg. Training loss: 3.8613 0.1178 sec/batch Epoch 8/10 Iteration: 35300 Avg. Training loss: 3.9257 0.1159 sec/batch Epoch 8/10 Iteration: 35400 Avg. Training loss: 3.9090 0.1169 sec/batch Epoch 8/10 Iteration: 35500 Avg. Training loss: 3.8925 0.1170 sec/batch Epoch 8/10 Iteration: 35600 Avg. Training loss: 3.8971 0.1193 sec/batch Epoch 8/10 Iteration: 35700 Avg. Training loss: 3.8834 0.1173 sec/batch Epoch 8/10 Iteration: 35800 Avg. Training loss: 3.8724 0.1182 sec/batch Epoch 8/10 Iteration: 35900 Avg. Training loss: 3.9632 0.1180 sec/batch Epoch 8/10 Iteration: 36000 Avg. Training loss: 3.8648 0.1177 sec/batch Nearest to was: the, after, for, in, year, early, had, of, Nearest to to: the, in, of, and, a, that, through, get, Nearest to time: onerous, halfway, at, netted, was, eastbourne, to, bech, Nearest to war: roosevelt, against, synonymous, kills, chester, ungrammatical, nbi, battle, Nearest to such: digests, some, deistic, byproduct, other, include, krug, spheroidal, Nearest to as: of, for, and, the, a, in, with, which, Nearest to united: states, precipitate, conscript, philadelphia, eyre, authorising, duos, gluconeogenesis, Nearest to zero: two, five, one, three, four, nine, seven, six, Nearest to egypt: standoff, uprooted, antigonus, pharaoh, nasser, sanusi, tunisia, kacl, Nearest to proposed: herman, lester, ffo, infallible, culminated, jauron, chained, jackie, Nearest to engineering: laboratory, engineers, technology, uncontrolled, corse, multidisciplinary, electronics, newbie, Nearest to primarily: strong, including, biologically, mande, transporting, lydon, commercial, nontraditional, Nearest to bill: mishandled, congress, bills, rescind, sedition, severly, hazardous, carmack, Nearest to event: ancestors, time, torii, triassic, rsv, naked, radiocarbon, amiens, Nearest to consists: rectangular, bicameral, cohesive, cytoplasmic, affix, cast, fgc, chamber, Nearest to rise: rinzai, greater, higher, prevailing, coleridge, catering, uncollected, gurmukhi, Epoch 8/10 Iteration: 36100 Avg. 
Training loss: 4.0018 0.1190 sec/batch Epoch 8/10 Iteration: 36200 Avg. Training loss: 3.9467 0.1168 sec/batch Epoch 8/10 Iteration: 36300 Avg. Training loss: 3.9355 0.1168 sec/batch Epoch 8/10 Iteration: 36400 Avg. Training loss: 3.8653 0.1180 sec/batch Epoch 8/10 Iteration: 36500 Avg. Training loss: 3.8525 0.1178 sec/batch Epoch 8/10 Iteration: 36600 Avg. Training loss: 3.9015 0.1166 sec/batch Epoch 8/10 Iteration: 36700 Avg. Training loss: 3.8680 0.1177 sec/batch Epoch 8/10 Iteration: 36800 Avg. Training loss: 3.9182 0.1183 sec/batch Epoch 8/10 Iteration: 36900 Avg. Training loss: 3.8699 0.1174 sec/batch Epoch 8/10 Iteration: 37000 Avg. Training loss: 3.8944 0.1172 sec/batch Nearest to was: the, for, after, in, had, it, until, year, Nearest to to: in, the, of, and, that, a, transfer, as, Nearest to time: onerous, halfway, netted, at, was, maputo, eastbourne, limewire, Nearest to war: against, ungrammatical, synonymous, roosevelt, kills, nbi, afesd, ngos, Nearest to such: some, digests, deistic, other, include, embed, byproduct, as, Nearest to as: of, and, for, a, in, the, which, with, Nearest to united: states, precipitate, eyre, conscript, chessgames, philadelphia, chilliwack, africaine, Nearest to zero: two, five, one, four, three, nine, six, seven, Nearest to egypt: standoff, uprooted, antigonus, nasser, kacl, tunisia, sanusi, pharaoh, Nearest to proposed: lester, herman, ffo, infallible, culminated, chained, jauron, nanobots, Nearest to engineering: laboratory, uncontrolled, technology, engineers, corse, electronics, multidisciplinary, foresight, Nearest to primarily: strong, including, biologically, lydon, transporting, mande, coimbra, ops, Nearest to bill: mishandled, congress, bills, hazardous, severly, rescind, carmack, leviticus, Nearest to event: ancestors, amiens, time, lambeau, triassic, torii, rsv, patagonia, Nearest to consists: rectangular, affix, cohesive, cytoplasmic, bicameral, fgc, cast, counting, Nearest to rise: greater, rinzai, higher, prevailing, catering, coleridge, uncollected, kinsella, Epoch 9/10 Iteration: 37100 Avg. Training loss: 3.9159 0.0812 sec/batch Epoch 9/10 Iteration: 37200 Avg. Training loss: 3.9076 0.1177 sec/batch Epoch 9/10 Iteration: 37300 Avg. Training loss: 3.8428 0.1165 sec/batch Epoch 9/10 Iteration: 37400 Avg. Training loss: 3.8959 0.1159 sec/batch Epoch 9/10 Iteration: 37500 Avg. Training loss: 3.8404 0.1164 sec/batch Epoch 9/10 Iteration: 37600 Avg. Training loss: 3.9069 0.1157 sec/batch Epoch 9/10 Iteration: 37700 Avg. Training loss: 3.8219 0.1166 sec/batch Epoch 9/10 Iteration: 37800 Avg. Training loss: 3.8740 0.1184 sec/batch Epoch 9/10 Iteration: 37900 Avg. Training loss: 3.8861 0.1162 sec/batch Epoch 9/10 Iteration: 38000 Avg. 
Training loss: 3.8837 0.1180 sec/batch Nearest to was: the, after, for, in, had, it, early, until, Nearest to to: the, in, and, of, a, that, through, into, Nearest to time: halfway, was, at, netted, onerous, event, limewire, achieved, Nearest to war: against, roosevelt, battle, kills, ungrammatical, synonymous, chester, potsdam, Nearest to such: some, digests, deistic, other, byproduct, rainstorm, embed, as, Nearest to as: of, and, the, a, for, in, with, which, Nearest to united: states, precipitate, gluconeogenesis, conscript, eyre, chilliwack, baghdad, treatment, Nearest to zero: two, five, one, three, four, nine, six, seven, Nearest to egypt: standoff, uprooted, pharaoh, antigonus, nasser, accordingly, egyptian, tunisia, Nearest to proposed: herman, lester, culminated, infallible, ffo, jauron, jackie, nanobots, Nearest to engineering: laboratory, engineers, uncontrolled, electronics, corse, multidisciplinary, technology, foresight, Nearest to primarily: strong, including, biologically, transporting, lydon, mande, nontraditional, wands, Nearest to bill: mishandled, bills, congress, forbes, hazardous, carmack, rescind, severly, Nearest to event: ancestors, time, amiens, rsv, lambeau, naked, triassic, radiocarbon, Nearest to consists: rectangular, cytoplasmic, affix, cohesive, bicameral, coconut, vogel, campsites, Nearest to rise: greater, rinzai, higher, catering, prevailing, uncollected, pa, kinsella, Epoch 9/10 Iteration: 38100 Avg. Training loss: 3.9172 0.1203 sec/batch Epoch 9/10 Iteration: 38200 Avg. Training loss: 3.7592 0.1187 sec/batch Epoch 9/10 Iteration: 38300 Avg. Training loss: 3.8658 0.1194 sec/batch Epoch 9/10 Iteration: 38400 Avg. Training loss: 3.8075 0.1209 sec/batch Epoch 9/10 Iteration: 38500 Avg. Training loss: 3.8456 0.1197 sec/batch Epoch 9/10 Iteration: 38600 Avg. Training loss: 3.8776 0.1170 sec/batch Epoch 9/10 Iteration: 38700 Avg. Training loss: 3.8966 0.1197 sec/batch Epoch 9/10 Iteration: 38800 Avg. Training loss: 3.8666 0.1183 sec/batch Epoch 9/10 Iteration: 38900 Avg. Training loss: 3.8762 0.1181 sec/batch Epoch 9/10 Iteration: 39000 Avg. Training loss: 3.9527 0.1169 sec/batch Nearest to was: the, after, for, early, in, had, it, of, Nearest to to: in, the, of, and, a, that, through, but, Nearest to time: onerous, halfway, netted, limewire, at, achieved, was, messagepad, Nearest to war: against, ungrammatical, roosevelt, synonymous, ngos, chester, kills, afesd, Nearest to such: digests, some, other, both, deistic, which, as, or, Nearest to as: and, of, a, for, with, the, in, which, Nearest to united: states, precipitate, gluconeogenesis, conscript, treatment, eyre, authorising, registries, Nearest to zero: two, five, one, four, three, nine, six, seven, Nearest to egypt: standoff, uprooted, pharaoh, antigonus, egyptian, nasser, sanusi, commemorations, Nearest to proposed: herman, lester, culminated, jauron, ffo, nanobots, infallible, chained, Nearest to engineering: laboratory, engineers, uncontrolled, technology, electronics, multidisciplinary, corse, faculty, Nearest to primarily: strong, including, biologically, lydon, transporting, mande, environments, coimbra, Nearest to bill: bills, mishandled, congress, forbes, carmack, hazardous, severly, soi, Nearest to event: ancestors, lambeau, rsv, triassic, amiens, radiocarbon, naked, purim, Nearest to consists: cytoplasmic, rectangular, affix, cohesive, coconut, fgc, bicameral, vogel, Nearest to rise: greater, rinzai, higher, uncollected, catering, grueling, prevailing, pa, Epoch 9/10 Iteration: 39100 Avg. 
Training loss: 3.8462 0.1181 sec/batch Epoch 9/10 Iteration: 39200 Avg. Training loss: 3.8346 0.1181 sec/batch Epoch 9/10 Iteration: 39300 Avg. Training loss: 3.8521 0.1184 sec/batch Epoch 9/10 Iteration: 39400 Avg. Training loss: 3.8734 0.1177 sec/batch Epoch 9/10 Iteration: 39500 Avg. Training loss: 3.8779 0.1179 sec/batch Epoch 9/10 Iteration: 39600 Avg. Training loss: 3.8915 0.1178 sec/batch Epoch 9/10 Iteration: 39700 Avg. Training loss: 3.8745 0.1178 sec/batch Epoch 9/10 Iteration: 39800 Avg. Training loss: 3.8496 0.1178 sec/batch Epoch 9/10 Iteration: 39900 Avg. Training loss: 3.8856 0.1169 sec/batch Epoch 9/10 Iteration: 40000 Avg. Training loss: 3.8717 0.1166 sec/batch Nearest to was: the, after, for, in, had, it, however, early, Nearest to to: the, in, of, and, a, into, through, but, Nearest to time: onerous, halfway, was, netted, eastbourne, at, event, until, Nearest to war: against, roosevelt, ungrammatical, synonymous, potsdam, battle, propounded, nbi, Nearest to such: some, digests, other, deistic, as, byproduct, both, which, Nearest to as: and, of, a, have, in, for, the, which, Nearest to united: states, precipitate, chilliwack, perdido, duos, gluconeogenesis, conscript, registries, Nearest to zero: two, five, one, three, four, nine, six, seven, Nearest to egypt: standoff, uprooted, pharaoh, nasser, egyptian, uniforms, digest, kacl, Nearest to proposed: herman, lester, chained, culminated, infallible, ffo, jauron, weighs, Nearest to engineering: laboratory, engineers, uncontrolled, electronics, multidisciplinary, technology, corse, foresight, Nearest to primarily: including, strong, biologically, mande, transporting, lydon, their, chalcedonian, Nearest to bill: mishandled, bills, congress, forbes, carmack, soi, leviticus, severly, Nearest to event: ancestors, amiens, rsv, triassic, ago, torii, time, lambeau, Nearest to consists: cytoplasmic, rectangular, cohesive, bicameral, fgc, affix, vogel, goodies, Nearest to rise: greater, rinzai, higher, catering, uncollected, coleridge, pa, kinsella, Epoch 9/10 Iteration: 40100 Avg. Training loss: 3.8724 0.1189 sec/batch Epoch 9/10 Iteration: 40200 Avg. Training loss: 3.8679 0.1182 sec/batch Epoch 9/10 Iteration: 40300 Avg. Training loss: 3.8372 0.1191 sec/batch Epoch 9/10 Iteration: 40400 Avg. Training loss: 3.8475 0.1190 sec/batch Epoch 9/10 Iteration: 40500 Avg. Training loss: 3.9381 0.1188 sec/batch Epoch 9/10 Iteration: 40600 Avg. Training loss: 3.8439 0.1196 sec/batch Epoch 9/10 Iteration: 40700 Avg. Training loss: 3.9699 0.1169 sec/batch Epoch 9/10 Iteration: 40800 Avg. Training loss: 3.9416 0.1178 sec/batch Epoch 9/10 Iteration: 40900 Avg. Training loss: 3.9067 0.1179 sec/batch Epoch 9/10 Iteration: 41000 Avg. 
Training loss: 3.8667 0.1191 sec/batch Nearest to was: the, in, after, for, of, as, until, first, Nearest to to: in, the, and, of, a, but, was, as, Nearest to time: halfway, onerous, netted, was, urgent, eastbourne, enter, garrison, Nearest to war: against, ungrammatical, roosevelt, potsdam, synonymous, dinh, convince, truman, Nearest to such: some, other, digests, byproduct, as, many, deistic, both, Nearest to as: and, of, a, in, for, the, have, with, Nearest to united: states, precipitate, eyre, perdido, conscript, statehouse, authorising, registries, Nearest to zero: two, five, four, three, one, nine, seven, six, Nearest to egypt: standoff, uprooted, pharaoh, nasser, egyptian, tunisia, digest, antigonus, Nearest to proposed: herman, culminated, lester, ffo, chained, jauron, infallible, jackie, Nearest to engineering: laboratory, engineers, electronics, uncontrolled, multidisciplinary, technology, corse, foresight, Nearest to primarily: strong, including, biologically, transporting, mande, their, lydon, smaller, Nearest to bill: bills, mishandled, hazardous, rescind, congress, leviticus, forbes, severly, Nearest to event: ancestors, amiens, lambeau, rsv, triassic, time, ago, purim, Nearest to consists: rectangular, bicameral, cytoplasmic, cast, fgc, cohesive, affix, counting, Nearest to rise: greater, rinzai, higher, catering, uncollected, pa, prevailing, coleridge, Epoch 9/10 Iteration: 41100 Avg. Training loss: 3.8410 0.1184 sec/batch Epoch 9/10 Iteration: 41200 Avg. Training loss: 3.8838 0.1181 sec/batch Epoch 9/10 Iteration: 41300 Avg. Training loss: 3.8227 0.1180 sec/batch Epoch 9/10 Iteration: 41400 Avg. Training loss: 3.8884 0.1182 sec/batch Epoch 9/10 Iteration: 41500 Avg. Training loss: 3.8699 0.1191 sec/batch Epoch 9/10 Iteration: 41600 Avg. Training loss: 3.8857 0.1164 sec/batch Epoch 10/10 Iteration: 41700 Avg. Training loss: 3.9183 0.0463 sec/batch Epoch 10/10 Iteration: 41800 Avg. Training loss: 3.8294 0.1168 sec/batch Epoch 10/10 Iteration: 41900 Avg. Training loss: 3.8589 0.1171 sec/batch Epoch 10/10 Iteration: 42000 Avg. Training loss: 3.8603 0.1163 sec/batch Nearest to was: the, after, in, for, had, of, until, his, Nearest to to: the, in, of, and, but, was, as, made, Nearest to time: halfway, was, onerous, netted, urgent, enter, eastbourne, rooney, Nearest to war: ungrammatical, against, roosevelt, potsdam, synonymous, convince, battle, chester, Nearest to such: some, digests, deistic, byproduct, other, both, many, as, Nearest to as: of, and, the, in, a, have, for, with, Nearest to united: states, perdido, precipitate, eyre, statehouse, gluconeogenesis, duos, conscript, Nearest to zero: two, five, one, three, four, eight, seven, six, Nearest to egypt: pharaoh, standoff, uprooted, egyptian, nasser, tunisia, antigonus, abou, Nearest to proposed: herman, chained, culminated, ffo, nanobots, weighs, infallible, lester, Nearest to engineering: laboratory, uncontrolled, engineers, multidisciplinary, electronics, technology, corse, sewers, Nearest to primarily: strong, including, transporting, biologically, mande, lydon, commercial, coimbra, Nearest to bill: bills, hazardous, mishandled, rescind, congress, severly, carmack, qwest, Nearest to event: ancestors, amiens, rsv, aerom, triassic, lambeau, purim, neurotoxicity, Nearest to consists: rectangular, cytoplasmic, bicameral, fgc, affix, cast, counting, vogel, Nearest to rise: greater, rinzai, higher, catering, pa, uncollected, prevailing, almohad, Epoch 10/10 Iteration: 42100 Avg. 
Training loss: 3.8618 0.1192 sec/batch Epoch 10/10 Iteration: 42200 Avg. Training loss: 3.8323 0.1176 sec/batch Epoch 10/10 Iteration: 42300 Avg. Training loss: 3.8423 0.1181 sec/batch Epoch 10/10 Iteration: 42400 Avg. Training loss: 3.8105 0.1164 sec/batch Epoch 10/10 Iteration: 42500 Avg. Training loss: 3.8681 0.1172 sec/batch Epoch 10/10 Iteration: 42600 Avg. Training loss: 3.8564 0.1177 sec/batch Epoch 10/10 Iteration: 42700 Avg. Training loss: 3.9090 0.1183 sec/batch Epoch 10/10 Iteration: 42800 Avg. Training loss: 3.7877 0.1188 sec/batch Epoch 10/10 Iteration: 42900 Avg. Training loss: 3.8167 0.1189 sec/batch Epoch 10/10 Iteration: 43000 Avg. Training loss: 3.8342 0.1191 sec/batch Nearest to was: the, after, in, for, early, had, several, however, Nearest to to: the, in, of, and, a, but, is, with, Nearest to time: halfway, was, netted, of, onerous, to, this, enter, Nearest to war: against, potsdam, ungrammatical, roosevelt, battle, wars, convince, truman, Nearest to such: some, other, digests, as, many, which, byproduct, even, Nearest to as: of, and, in, for, a, the, with, have, Nearest to united: states, precipitate, countries, eyre, gluconeogenesis, conscript, registries, duos, Nearest to zero: two, five, one, three, four, nine, six, seven, Nearest to egypt: standoff, pharaoh, uprooted, egyptian, nasser, tunisia, accordingly, antigonus, Nearest to proposed: herman, culminated, chained, nanobots, lester, weighs, siddeley, ffo, Nearest to engineering: laboratory, engineers, uncontrolled, electronics, multidisciplinary, technology, corse, disciplines, Nearest to primarily: strong, including, transporting, biologically, mande, smaller, their, liouville, Nearest to bill: bills, hazardous, mishandled, forbes, rescind, carmack, severly, joanie, Nearest to event: ancestors, amiens, rsv, purim, aerom, triassic, radiocarbon, neurotoxicity, Nearest to consists: rectangular, cytoplasmic, bicameral, affix, of, fgc, cohesive, representatives, Nearest to rise: greater, rinzai, higher, catering, pa, uncollected, coleridge, prevailing, Epoch 10/10 Iteration: 43100 Avg. Training loss: 3.7712 0.1194 sec/batch Epoch 10/10 Iteration: 43200 Avg. Training loss: 3.8433 0.1188 sec/batch Epoch 10/10 Iteration: 43300 Avg. Training loss: 3.8725 0.1192 sec/batch Epoch 10/10 Iteration: 43400 Avg. Training loss: 3.8597 0.1193 sec/batch Epoch 10/10 Iteration: 43500 Avg. Training loss: 3.8487 0.1181 sec/batch Epoch 10/10 Iteration: 43600 Avg. Training loss: 3.8994 0.1170 sec/batch Epoch 10/10 Iteration: 43700 Avg. Training loss: 3.8691 0.1177 sec/batch Epoch 10/10 Iteration: 43800 Avg. Training loss: 3.8220 0.1186 sec/batch Epoch 10/10 Iteration: 43900 Avg. Training loss: 3.8691 0.1184 sec/batch Epoch 10/10 Iteration: 44000 Avg. 
Training loss: 3.8735 0.1180 sec/batch Nearest to was: the, after, for, in, of, made, early, first, Nearest to to: the, in, of, but, and, a, as, is, Nearest to time: was, halfway, this, performer, onerous, netted, messagepad, event, Nearest to war: against, ungrammatical, potsdam, roosevelt, synonymous, wars, chester, battle, Nearest to such: other, some, as, which, digests, byproduct, or, many, Nearest to as: of, and, in, for, a, with, have, the, Nearest to united: states, precipitate, eyre, countries, duos, gluconeogenesis, curse, conscript, Nearest to zero: two, five, one, four, three, nine, seven, six, Nearest to egypt: standoff, egyptian, pharaoh, nasser, uprooted, tunisia, accordingly, sanusi, Nearest to proposed: herman, ffo, culminated, nanobots, chained, lester, lrt, siddeley, Nearest to engineering: engineers, laboratory, electronics, uncontrolled, technology, multidisciplinary, corse, disciplines, Nearest to primarily: including, strong, biologically, transporting, mande, their, smaller, as, Nearest to bill: bills, hazardous, mishandled, forbes, rescind, carmack, soi, severly, Nearest to event: ancestors, triassic, amiens, rsv, ago, holocene, aerom, purim, Nearest to consists: rectangular, cytoplasmic, fgc, bicameral, of, counting, affix, campsites, Nearest to rise: greater, rinzai, higher, catering, uncollected, pa, grueling, coleridge, Epoch 10/10 Iteration: 44100 Avg. Training loss: 3.8849 0.1198 sec/batch Epoch 10/10 Iteration: 44200 Avg. Training loss: 3.8351 0.1183 sec/batch Epoch 10/10 Iteration: 44300 Avg. Training loss: 3.8259 0.1167 sec/batch Epoch 10/10 Iteration: 44400 Avg. Training loss: 3.8688 0.1170 sec/batch Epoch 10/10 Iteration: 44500 Avg. Training loss: 3.9065 0.1159 sec/batch Epoch 10/10 Iteration: 44600 Avg. Training loss: 3.8612 0.1174 sec/batch Epoch 10/10 Iteration: 44700 Avg. Training loss: 3.8250 0.1178 sec/batch Epoch 10/10 Iteration: 44800 Avg. Training loss: 3.8945 0.1163 sec/batch Epoch 10/10 Iteration: 44900 Avg. Training loss: 3.8180 0.1177 sec/batch Epoch 10/10 Iteration: 45000 Avg. Training loss: 3.8613 0.1174 sec/batch Nearest to was: the, after, for, in, early, of, made, it, Nearest to to: the, in, of, and, a, some, but, is, Nearest to time: was, halfway, until, of, onerous, this, netted, to, Nearest to war: against, potsdam, ungrammatical, roosevelt, wars, synonymous, chester, battle, Nearest to such: other, some, many, as, both, of, even, which, Nearest to as: and, of, for, in, a, have, the, with, Nearest to united: states, precipitate, countries, gluconeogenesis, authorising, conscript, duos, registries, Nearest to zero: two, five, one, four, three, nine, eight, six, Nearest to egypt: egyptian, standoff, pharaoh, nasser, tunisia, gamal, uprooted, digest, Nearest to proposed: herman, culminated, ffo, chained, nanobots, weighs, lester, pithecanthropus, Nearest to engineering: uncontrolled, engineers, laboratory, electronics, technology, multidisciplinary, corse, sewers, Nearest to primarily: strong, including, biologically, transporting, their, mande, chalcedonian, smaller, Nearest to bill: bills, hazardous, rescind, mishandled, forbes, eurosceptics, soi, severly, Nearest to event: ancestors, triassic, amiens, time, purim, ago, torii, rsv, Nearest to consists: of, rectangular, cytoplasmic, fgc, counting, bounding, affix, bicameral, Nearest to rise: greater, rinzai, higher, catering, pa, uncollected, grueling, prevailing, Epoch 10/10 Iteration: 45100 Avg. Training loss: 3.9055 0.1196 sec/batch Epoch 10/10 Iteration: 45200 Avg. 
Training loss: 3.8326 0.1169 sec/batch Epoch 10/10 Iteration: 45300 Avg. Training loss: 3.9137 0.1177 sec/batch Epoch 10/10 Iteration: 45400 Avg. Training loss: 3.9561 0.1165 sec/batch Epoch 10/10 Iteration: 45500 Avg. Training loss: 3.9058 0.1172 sec/batch Epoch 10/10 Iteration: 45600 Avg. Training loss: 3.8441 0.1199 sec/batch Epoch 10/10 Iteration: 45700 Avg. Training loss: 3.7859 0.1182 sec/batch Epoch 10/10 Iteration: 45800 Avg. Training loss: 3.8496 0.1182 sec/batch Epoch 10/10 Iteration: 45900 Avg. Training loss: 3.7384 0.1174 sec/batch Epoch 10/10 Iteration: 46000 Avg. Training loss: 3.9066 0.1183 sec/batch Nearest to was: after, the, in, for, had, early, several, as, Nearest to to: the, in, of, a, and, as, when, by, Nearest to time: was, halfway, netted, onerous, enter, during, nwa, until, Nearest to war: potsdam, against, roosevelt, ungrammatical, wars, dinh, synonymous, chester, Nearest to such: other, some, as, many, digests, even, byproduct, both, Nearest to as: and, of, a, in, have, for, with, the, Nearest to united: states, precipitate, eyre, gluconeogenesis, curse, statehouse, perdido, duos, Nearest to zero: two, five, one, three, four, seven, eight, six, Nearest to egypt: standoff, pharaoh, egyptian, nasser, tunisia, gamal, abou, uprooted, Nearest to proposed: culminated, herman, ffo, lrt, chained, abstaining, nanobots, lester, Nearest to engineering: uncontrolled, laboratory, technology, electronics, engineers, multidisciplinary, corse, biograph, Nearest to primarily: strong, including, biologically, transporting, mande, developed, smaller, coimbra, Nearest to bill: bills, hazardous, rescind, forbes, mishandled, severly, bitch, tyler, Nearest to event: ancestors, triassic, amiens, aerom, purim, lambeau, radiocarbon, rsv, Nearest to consists: rectangular, of, counting, cytoplasmic, fgc, affix, bicameral, representatives, Nearest to rise: greater, rinzai, higher, catering, pa, uncollected, grueling, prevailing, Epoch 10/10 Iteration: 46100 Avg. Training loss: 3.8563 0.1189 sec/batch Epoch 10/10 Iteration: 46200 Avg. Training loss: 3.8583 0.1173 sec/batch ###Markdown Restore the trained network if you need to: ###Code with train_graph.as_default(): saver = tf.train.Saver() with tf.Session(graph=train_graph) as sess: saver.restore(sess, tf.train.latest_checkpoint('checkpoints')) embed_mat = sess.run(embedding) ###Output _____no_output_____ ###Markdown Visualizing the word vectorsBelow we'll use T-SNE to visualize how our high-dimensional word vectors cluster together. T-SNE is used to project these vectors into two dimensions while preserving local stucture. Check out [this post from Christopher Olah](http://colah.github.io/posts/2014-10-Visualizing-MNIST/) to learn more about T-SNE and other ways to visualize high-dimensional data. ###Code %matplotlib inline %config InlineBackend.figure_format = 'retina' import matplotlib.pyplot as plt from sklearn.manifold import TSNE viz_words = 500 tsne = TSNE() embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :]) fig, ax = plt.subplots(figsize=(14, 14)) for idx in range(viz_words): plt.scatter(*embed_tsne[idx, :], color='steelblue') plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7) ###Output _____no_output_____
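The "Nearest to ..." blocks in the training log come from a cosine-similarity lookup against a handful of validation words. Once `embed_mat` has been pulled out of the checkpoint, the same kind of query can be run offline. A minimal sketch is shown below; it assumes the `vocab_to_int` / `int_to_vocab` lookups built earlier in the notebook are still in scope, and the query word is only an example.

```python
import numpy as np

def nearest_words(word, embed_mat, vocab_to_int, int_to_vocab, k=8):
    """Return the k vocabulary words closest to `word` by cosine similarity."""
    # Normalise every embedding to unit length so a dot product equals cosine similarity.
    normed = embed_mat / np.linalg.norm(embed_mat, axis=1, keepdims=True)
    query = normed[vocab_to_int[word]]
    sims = normed @ query
    # Highest similarities first; drop the word itself (always rank 0).
    closest = np.argsort(sims)[::-1][1:k + 1]
    return [int_to_vocab[i] for i in closest]

print(nearest_words('egypt', embed_mat, vocab_to_int, int_to_vocab))
```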
Corey_Evitts_DS_BUILD_1.ipynb
###Markdown ###Code #What are the top 7 Crimes in two of the largest cities in the US. import pandas as pd import numpy as np import matplotlib.pyplot as plt #I used Chicago and Los Angeles public data to answer this question chicago = pd.read_csv('https://data.cityofchicago.org/api/views/w98m-zvie/rows.csv?accessType=DOWNLOAD') #Starting to clean and explore the data provided print(chicago.shape) chicago.head() #Removing uneccessary collumns chicago_df_crime = chicago[['Description']] chicago_df_crime = pd.DataFrame(chicago_df_crime['Description'].value_counts()) chicago_df_crime['Crimes'] = chicago_df_crime.index # chicago_df_crime = chicago_df1.rename(columns={"Primary Type": "Crime Count"}, errors="raise") chicago_df_crime chicago_df = chicago[['Primary Type']] chicago_df chicago_df.dtypes #checkin what type of the crime has highest numbers in ascending order chicago_df1 = pd.DataFrame(chicago_df['Primary Type'].value_counts()) chicago_df1 chicago_df1['Crimes'] = chicago_df1.index chicago_df1 = chicago_df1.rename(columns={"Primary Type": "Crime Count"}, errors="raise") chicago_df1 chicago_df1.head(5) #Creating Dataframe based on relevant stats chicago_final = pd.DataFrame(chicago_df1.loc[chicago_df1['Crimes'].isin(['THEFT','BATTERY', 'CRIMINAL DAMAGE', 'ASSAULT', 'DECEPTIVE PRACTICE', 'OTHER OFFENSE', 'NARCOTICS'])]) chicago_final #Created a pie chart visualizing the top 7 crimes in city by percentage labels = chicago_final['Crimes'] sizes = chicago_final['Crime Count'] fig1, ax1 = plt.subplots(figsize=(7, 10)) ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90) ax1.axis('equal') plt.legend(title="Top 7 Crimes by Frequency", bbox_to_anchor=(1.2,0.9), loc="upper left") plt.show() #Loading crime stats from LA and cleaning = pd.read_csv('https://data.lacity.org/api/views/63jg-8b9z/rows.csv?accessType=DOWNLOAD', error_bad_lines=True) list(other) LA_Crime = other[['Crm Cd Desc']] LA_Crime1 = pd.DataFrame(LA_Crime['Crm Cd Desc'].value_counts()) LA_Crime1 LA_Crime1['Crimes'] = LA_Crime1.index LA_Crime1 = LA_Crime1.rename(columns={"Crm Cd Desc": "Crime Count"}, errors="raise") LA_Crime1.head(7) LA_Crime1 = pd.DataFrame(LA_Crime1.loc[LA_Crime1['Crimes'].isin(['BATTERY - SIMPLE ASSAULT','BURGLARY FROM VEHICLE', 'VEHICLE - STOLEN', 'THEFT PLAIN - PETTY ($950 & UNDER)', 'BURGLARY', 'THEFT OF IDENTITY', 'INTIMATE PARTNER - SIMPLE ASSAULT'])]) #Created a pie chart to visualize top 7 crimes by percentage labels = LA_Crime1['Crimes'] sizes = LA_Crime1['Crime Count'] fig1, ax1 = plt.subplots(figsize=(7, 10)) ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90) ax1.axis('equal') plt.legend(title="Top 7 Crimes by Frequency", bbox_to_anchor=(1.2,0.9), loc="upper left") plt.show() ###Output _____no_output_____
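Since the two cities use different category labels, the two pie charts are hard to compare side by side. A small follow-up sketch, reusing the `chicago_final` and `LA_Crime1` frames built above, puts the same top-7 counts on a pair of horizontal bar charts instead:

```python
# Side-by-side bar charts of the same top-7 counts shown in the pies above.
fig, axes = plt.subplots(1, 2, figsize=(14, 5))

chicago_final.sort_values('Crime Count').plot.barh(
    x='Crimes', y='Crime Count', ax=axes[0], legend=False, color='steelblue')
axes[0].set_title('Chicago: top 7 crime types')

LA_Crime1.sort_values('Crime Count').plot.barh(
    x='Crimes', y='Crime Count', ax=axes[1], legend=False, color='darkorange')
axes[1].set_title('Los Angeles: top 7 crime types')

plt.tight_layout()
plt.show()
```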
docs/python/Plots/line-plot.ipynb
###Markdown ---
title: "Line Plot"
author: "Charles"
date: 2020-08-12
description: "-"
type: technical_note
draft: false
--- ###Code import matplotlib.pyplot as plt
import pandas as pd

data1 = {'objects': ['apple', 'banana', 'mango', 'orange', 'tomato', 'potato'],
        'price':[4, 7, 12, 10, 9, 14]}

plt.plot(data1['objects'], data1['price'])
###Output _____no_output_____
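A labelled version of the same plot may also be worth keeping in this note; the snippet simply reuses `data1` from the cell above.

```python
plt.plot(data1['objects'], data1['price'], marker='o')
plt.title('Price per object')
plt.xlabel('object')
plt.ylabel('price')
plt.show()
```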
Week-04/3_Neural networks.ipynb
###Markdown Neural networks During the previous week, when we analyzed the neural activity data, we discussed the main principles behind neurons' communication: if a neuron sends an electrochemical signal which is above a certain threshold, the nearby neuron is activated. Fundamentally, **artificial neural networks** (ANN) are quite similar. When we build an ANN, we use (multiple) building blocks called neurons, each of which can be defined as a mathematical function that takes data as input, performs a transformation and produces an output. To better understand the mathematics behind an ANN, let's look at a single neuron. Neuron ![neuron](https://miro.medium.com/max/875/1*NZc0TcMCzpgVZXvUdEkqvA.png) The diagram above demonstrates the basic structure of a single neuron (also known as a perceptron). When we pass input features through the perceptron, each feature ($x_1, x_2, ...$) is multiplied by its weight ($w_1, w_2, ...$). The sum of the multiplication results is then added to the bias ($b$), which can be imagined as a first term independent of the features (*a starting value*). The result is then passed through a nonlinear function called the **activation** function, which produces the output. The whole perceptron training process can be divided into 3 steps: - Forward propagation - Loss calculation - Back propagation Forward propagation The forward propagation can be described as the series of computations made to produce a prediction (it is the process we have just described in the previous section). The previously described steps can be expressed mathematically as follows: - The output from the neuron can be written as $z = \sum_{i = 1}^n w_i x_i + b$ - This output is passed through the activation function ($A$) to produce a prediction, $\hat{y} = A(z)$ Similar to the previous lectures, this produced output is compared to the expected value to calculate the loss. But before moving to loss calculation, it might be useful to look at some of the activation functions. Activation functions For simplicity's sake, we will not cover all activation functions (at least in this tutorial). Instead, we will focus on the two activation functions we will most likely use in this week's challenge - **ReLU** and **sigmoid**. **ReLU** (or rectified linear unit) is a simple function that compares the value with zero. In other words, if the passed value is greater than zero, it outputs the value that was passed; otherwise, the output is zero. In mathematical terms - $A(z) = max(0, z)$. We have already covered the **sigmoid function** in the logistic regression tutorial. It can be mathematically expressed in the following way, $A(z) = \frac{1}{1+exp(-z)}$. Loss calculation The loss function is a way of mathematically measuring how good our model prediction is (so that we can later adjust the weights and bias). Throughout the series, we are going to introduce a variety of different loss functions; for a start, let's look at just a few of them. Cross-Entropy loss - For classification tasks, we commonly choose the cross-entropy loss. - It can be calculated using the following formula: $loss = -\sum_{i = 1}^{C} y_i log(\hat{y_i})$ - For the binary classification problem ($C = 2$), such a loss function can be written as $loss = -y_1 log(\hat{y_1}) - (1 - y_1) log(1-\hat{y_1})$ Mean Squared Error (MSE) - Can be calculated using the following formula: $loss = \frac{1}{N}\sum_{i = 1}^{N}(y_i - \hat{y_i})^2$ Back propagation Back propagation is basically the process of training a neural network by updating its weights and bias.
In a nutshell, our model computes predictions that are compared to the expected values, which allows us to calculate the loss function. Over a number of epochs, the weights and bias are adjusted in a way that minimizes the loss value, thus ensuring more accurate predictions. Similar to the previous models, the process of updating the coefficients (in this case, the weights and bias) involves calculating the derivatives of the loss with respect to those coefficients, multiplying them by the learning rate and subtracting the result from the previous coefficient values. To better visualize the whole process, let's look at a neuron with 2 inputs and a sigmoid activation function. ![neuron](https://i0.wp.com/neptune.ai/wp-content/uploads/Backpropagation-parameters.png?resize=581%2C361&ssl=1) In such a case, the weights and bias would be updated in the following way: - $w_{1new} = w_1 - lr * \frac{\partial loss}{\partial w_1}$ - $w_{2new} = w_2 - lr * \frac{\partial loss}{\partial w_2}$ - $b_{new} = b - lr * \frac{\partial loss}{\partial b}$ On the other hand, the coefficients are passed through multiple functions before they reach the final loss value, meaning that we will have to use the chain rule. First, let's have a look at how it is written for $w_1$. We know that the loss is calculated from the predicted output ($\hat{y}$), which is obtained by passing the weighted sum ($z$) through the sigmoid activation function. Finally, the weighted sum depends on the weight with respect to which we are trying to find the derivative. Using the chain rule: - $\frac{\partial loss}{\partial w_1} = \frac{\partial loss}{\partial \hat{y}}\frac{\partial \hat{y}}{\partial z}\frac{\partial z}{\partial w_1}$ Similarly, we can find the derivatives for the remaining weights and the bias to get the following update equations: - $w_{1new} = w_1 - lr * \frac{\partial loss}{\partial \hat{y}}\frac{\partial \hat{y}}{\partial z}\frac{\partial z}{\partial w_1}$ - $w_{2new} = w_2 - lr * \frac{\partial loss}{\partial \hat{y}}\frac{\partial \hat{y}}{\partial z}\frac{\partial z}{\partial w_2}$ - $b_{new} = b - lr * \frac{\partial loss}{\partial \hat{y}}\frac{\partial \hat{y}}{\partial z}\frac{\partial z}{\partial b}$ Python implementation Let's say we want to program such a neuron (here with 3 input features, as in the code below), with a sigmoid activation and the MSE loss function. ###Code import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

def init():
    np.random.seed(1)

    # defining weights for 3 features and 1 output (binary classification)
    W = np.random.randn(3, 1)
    b = np.random.rand(1,)

    lr = 0.001

    return W, b, lr

# Defining our activation function
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def mse_loss(y, yhat):
    num_sample = len(y)
    # to avoid working with values of exactly 0, clip with an extremely small value
    yhat = np.maximum(yhat, 0.00000001)
    loss = 1 / num_sample * np.sum((y - yhat) ** 2)
    return loss

def forward(X, y, W, b):
    z = X.dot(W) + b
    yhat = sigmoid(z)
    loss = mse_loss(y, yhat)

    return yhat, loss

def back_propagation(X, y, yhat, W, b, lr):
    # chain rule: dloss/dyhat * dyhat/dz, then dz/dW and dz/db
    dl_wrt_yhat = 2 * (yhat - y) / len(y)
    dl_wrt_z = dl_wrt_yhat * yhat * (1 - yhat)  # sigmoid'(z) = yhat * (1 - yhat)
    dl_wrt_w = X.T.dot(dl_wrt_z)
    dl_wrt_b = dl_wrt_z.sum(axis=0)

    W = W - lr * dl_wrt_w
    b = b - lr * dl_wrt_b

    return W, b

def fit(X, y, epochs):
    W, b, lr = init()
    losses = []

    for i in range(epochs):
        yhat, loss = forward(X, y, W, b)
        W, b = back_propagation(X, y, yhat, W, b, lr)
        losses.append(loss)

    return W, b, losses
###Output _____no_output_____
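To see the three steps (forward pass, loss calculation, backward pass) actually run, the `fit` function above can be exercised on a small synthetic dataset. The data below is made up purely for illustration - any matrix with 3 feature columns and a 0/1 target will do.

```python
# Synthetic data: 200 samples, 3 features, binary target (illustrative only).
rng = np.random.RandomState(42)
X_demo = rng.randn(200, 3)
# The target loosely depends on the first feature so there is something to learn.
y_demo = (X_demo[:, 0] + 0.5 * rng.randn(200) > 0).astype(float).reshape(-1, 1)

W, b, losses = fit(X_demo, y_demo, epochs=5000)
print('final MSE loss:', losses[-1])

plt.plot(losses)
plt.xlabel('epoch')
plt.ylabel('MSE loss')
plt.title('Training loss of the single neuron');
```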
ch04/notebooks/segregation.ipynb
###Markdown ABM: Residential segregation mechanim The segregation mechanism is an adaptation of Schelling's segregation model. Agents live in neighborhoods. At rate *t*, agents decide whether to move or stay in their neighborhood based on the proportion of people within the same quintile of income (e.g., 5 groups of income). Agents have a tolerance threshold (e.g., 20%) of people in the same quintile of income living in the same neighborhood. If the proportion of people of that quintile is lower than the tolerance threshold, agents move to another neighborhood **chosen randomly** from a pool of neighborhood that has not reach its population limit (e.g., more than 30% its original size).Changes in segregation are very sensitive to changes in the values of parameters and number of income groups. In this example, I use: - 20 neighbors with an initial population of 100 agents.- 5 income groups.- Population limit by neighborhood of 1.30 * 100.- Moving rate is 0.1 per year.- 100 replicates for each scenario. - Income distribution comes from CPS data. To measure segregation I use the **neighborhood sorting index or NSI** (Jargowsky's 1996), that compares the income variation across all neighborhoods in a metro area with the income variation across all households in that metro area. If households are segregated across neighborhoods by income, the income variation acrossneighborhoods will be similar to the income variation across households, and the NSI will equal almost 1. If all neighborhoods are perfectly economically integrated (i.e., each neighborhood is a microcosm of the entire metro area) the NSI will be almost 0. Because the NSI is based on relative variances in income, measured income segregation will be influenced by the metro areas’ overall inequality. I also use the **average proportion of similar agents**. ###Code import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import pysdaza as sd %matplotlib inline # examining some data ind = sd.read_files('../output/example01/indiv*.csv') agg = sd.read_files('../output/example01/aggregate_data*.csv') ###Output _____no_output_____ ###Markdown The income distribution looks as expected and the average Gini coefficient of this distribution is 0.36. ###Code # income distribution from all replicates sns.distplot(ind.income); print('Gini', round(agg.gini.mean(),2), 'SD =', round(agg.gini.std(), 4)) # income distribution highest quintile sns.distplot(ind.loc[ind.quintile==5, 'income']); # income distribution lower quintile sns.distplot(ind.loc[ind.quintile==1, 'income']); ###Output _____no_output_____ ###Markdown Segregation measures Most of replicates (73%) reach convergence (all agents satisfy the moving threshold). ###Code (agg.unhappy==0).value_counts() ###Output _____no_output_____ ###Markdown NSI changes dramatically due to small changes in the moving threshold. In other words, segregation is very sensitive to changes in the moving threshold. This is related to the way the segregation model is implemented and the number of groups (income quintiles). Standard deviation is between 0.04 and 0.09. So there is about 7% of the variability of segregation due to the stochasticity of the simulation. 
###Code sns.regplot(agg['threshold'], agg['nsi'], scatter_kws={'alpha':.10}, line_kws={'linestyle':'--', 'linewidth':0.6}); agg_group = agg.groupby('iter') agg_group.nsi.mean() # mean # standard deviation agg_group.nsi.std() ###Output _____no_output_____ ###Markdown I obtain similar results when observing the proportion of neighbors of similar income quintile. This time, variability increases with the moving threshold. That is, when the moving threshold is higher it becomes more difficult to satisfy that threshold and there is a higher chance agents will move, increasing the variability of similarity. This is confirmed by the plot of moving threshold and number of unhappy agent (i.e., who hasn't satisfied that threshold). The variability of **NSI** is more robust to higher thresholds. ###Code sns.regplot(agg['threshold'], agg['similar'], scatter_kws={'alpha':0.1}); agg_group.similar.std() sns.regplot(agg['threshold'], agg['unhappy'], fit_reg=False, scatter_kws={'alpha':0.1}); ###Output _____no_output_____
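For reference, the neighborhood sorting index described above boils down to the ratio between the (household-weighted) standard deviation of neighborhood mean incomes and the standard deviation of household incomes. A minimal pandas sketch is below; the `neighborhood` column name is an assumption about the individual-level files, while `income` matches the column used above.

```python
def nsi(df, group_col='neighborhood', income_col='income'):
    """Neighborhood sorting index: between-neighborhood income SD over household income SD."""
    overall_sd = df[income_col].std()
    # Mean income of each neighborhood, repeated for every household in it,
    # so the between-neighborhood deviation is household-weighted.
    neighborhood_means = df.groupby(group_col)[income_col].transform('mean')
    return neighborhood_means.std() / overall_sd

# Example call (only if `ind` carries a neighborhood identifier column):
# print(nsi(ind, group_col='neighborhood'))
```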
Feature Engineering Techniques/Scaling_to_minimum_and_maximum_values.ipynb
###Markdown **Connect With Me in Linkedin :-** https://www.linkedin.com/in/dheerajkumar1997/ Scaling to minimum and maximum valuesWe saw in previous lectures that the magnitude of the variables affects different machine learning algorithms for different reasons. In this section, I will cover a few standard ways of squeezing the magnitude of the variables.Minimum and maximum scaling squeezes the values between 0 and 1. It subtracts the minimum value from all the observations, and then divides it by the range:X_scaled = (X - X.min / (X.max - X.min)The minimum maximum scaling method has the same disadvantage that standarisation, which is that it compresses the observations in the narrow range if the variable is very skewed or has outliers. For an overview of the different scaling methods check:http://scikit-learn.org/stable/auto_examples/preprocessing/plot_all_scaling.htmlsphx-glr-auto-examples-preprocessing-plot-all-scaling-pyLet's demonstrate the MinMaxScaling method using scikit-learn. ###Code import pandas as pd import numpy as np from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import roc_auc_score from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt % matplotlib inline # load the numerical variables of the Titanic Dataset data = pd.read_csv('titanic.csv', usecols = ['Pclass', 'Age', 'Fare', 'Survived']) data.head() # let's have a look at the values of those variables to get an idea of the magnitudes data.describe() ###Output _____no_output_____ ###Markdown We can see from the above statistics table that the magnitudes of the variables are different. The mean values and medians are different as well as the maximum values and the range over which the values are spread. ###Code # check missing data data.isnull().sum() ###Output _____no_output_____ ###Markdown Age contains missing information, so I will fill those observations with the median in the next cell. ###Code # let's separate into training and testing set X_train, X_test, y_train, y_test = train_test_split(data[['Pclass', 'Age', 'Fare']], data.Survived, test_size=0.3, random_state=0) X_train.shape, X_test.shape # fill missing data with the Age median X_train.Age.fillna(X_train.Age.median(), inplace=True) X_test.Age.fillna(X_train.Age.median(), inplace=True) ###Output _____no_output_____ ###Markdown Min_max_scaling ###Code # this scales the features between 0 and 1. scaler = MinMaxScaler() # create an instance X_train_scaled = scaler.fit_transform(X_train) # fit the scaler to the train set and then transform it X_test_scaled = scaler.transform(X_test) # transform (scale) the test set #let's have a look at the scaled training dataset: mean and standard deviation print('means (Pclass, Age and Fare): ', X_train_scaled.mean(axis=0)) print('std (Pclass, Age and Fare): ', X_train_scaled.std(axis=0)) ###Output means (Pclass, Age and Fare): [ 0.64365971 0.36641321 0.06335433] std (Pclass, Age and Fare): [ 0.41999093 0.16405255 0.09411705] ###Markdown After MinMaxScaling, the distributions are not centered in zero and the standard deviation is not 1 as when normalising the data. ###Code # let's look at the new minimum and maximum values print('Min values (Pclass, Age and Fare): ', X_train_scaled.min(axis=0)) print('Max values (Pclass, Age and Fare): ', X_train_scaled.max(axis=0)) ###Output Min values (Pclass, Age and Fare): [ 0. 0. 0.] Max values (Pclass, Age and Fare): [ 1. 1. 1.] 
###Markdown But the minimum and maximum values are now the same across variables (0 and 1 for each of them), which is different from what occurs with standardisation. ###Code # let's look at the distributions of the transformed variables: Age
plt.hist(X_train_scaled[:,1], bins=20)
# let's look at the distributions of the transformed variables: Fare
plt.hist(X_train_scaled[:,2], bins=20)
###Output _____no_output_____ ###Markdown The variable Age has a somewhat normal distribution after the transformation, reflecting the approximately Gaussian distribution of the original variable. Fare, on the other hand, shows a skewed distribution, which is also evident after the variable transformation in the previous plot. In fact, we can see that the MinMaxScaling of Fare shrinks the majority of the observations towards the lowest values. ###Code import seaborn as sns

# let's look at what the transformed Age looks like compared to the original variable
sns.jointplot(X_train.Age, X_train_scaled[:,1], kind='kde')
# let's look at what the transformed Fare looks like compared to the original variable
sns.jointplot(X_train.Fare, X_train_scaled[:,2], kind='kde', xlim=(0,200), ylim=(-1,3))
###Output _____no_output_____
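As a sanity check on the formula at the top of this section, the same transformation can be reproduced by hand from the training-set minima and maxima and compared with scikit-learn's output (reusing `X_train`, `X_test`, `X_train_scaled` and `X_test_scaled` from above):

```python
# Manual min-max scaling with the train-set statistics:
# X_scaled = (X - X.min) / (X.max - X.min)
train_min = X_train.min()
train_range = X_train.max() - X_train.min()

X_train_manual = (X_train - train_min) / train_range
X_test_manual = (X_test - train_min) / train_range

# Should match the sklearn transform up to floating point error.
print(np.allclose(X_train_manual.values, X_train_scaled))
print(np.allclose(X_test_manual.values, X_test_scaled))
```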
tutorials/legacy/tutorial_legacy_api.ipynb
###Markdown This notebook shows how to use `autodp` to track differential privacy losses. 1. Installation and importSimply type in the terminal`pip install autodp` ###Code # Import package from autodp import rdp_bank, rdp_acct, dp_acct,privacy_calibrator ###Output _____no_output_____ ###Markdown 2. Declare the data structure for tracking privacy loss`autodp` has two privacy loss trackers: `dp_acct.DP_Acct` that tracks a fixed sequence of $(\epsilon,\delta)$ and `rdp_acct.anaRDPacct` that tracks the entire family of $(\epsilon,\delta)$ through the analytical expression of the Renyi Differential Privacy, or (up to scaling) equivalently, the CGF (log-MGF) of the privacy random variable:$$\log\Big(\frac{p(x)}{q(x)}\Big)$$induces ed by $x\sim p$.We will be mostly demostrating the latter for now. ###Code # declare the moment accountants acct = rdp_acct.anaRDPacct() ###Output _____no_output_____ ###Markdown 3. Obtain an analytical RDP for popular privacy mechanisms using `rdp_bank`The input to `anRDPacct` is in fact lambda functions that output the evaluations of the CGF that comes from.To make our lives easy, `rdp_bank` implements the CCF of most popular differentially private algorithms * Laplace mechanism * Gaussian mechanism * Randomized response * pDP of two Multivariate Gaussians * pDP of two Exponential FamilyEach of these would take a set of parameters as input in a dictionary form. For example, the Gaussian mechanism will take the ratio between the noise std and the L2 sensitivity of the function to be released. ###Code # A few example of these lambda functions here sigma = 5.0 b = 2.0 p = 0.7 # get the CGF functions func_gaussian = lambda x: rdp_bank.RDP_gaussian({'sigma': sigma}, x) func_laplace = lambda x: rdp_bank.RDP_laplace({'b': b}, x) func_randresp = lambda x: rdp_bank.RDP_randresponse({'p':p},x) ###Output _____no_output_____ ###Markdown 4. Now mix and match and track their RDP using `anaRDPacct.compose_mechanism` and get the smallest $\epsilon$ for your chosen $\delta$ ###Code acct.compose_mechanism(func_randresp) delta = 0 print('Pure DP of \eps = '+ repr(acct.get_eps(delta)) + ', after the RandResp.') acct.compose_mechanism(func_laplace) print('Pure DP of \eps = '+ repr(acct.get_eps(delta)) + ', after the RandResp + Laplace.') acct.compose_mechanism(func_gaussian) print('Pure DP of \eps = '+ repr(acct.get_eps(delta)) + ', after the RandResp + Laplace + Gaussian.') delta = 1e-6 print('Approx. DP with \eps = '+ repr(acct.get_eps(delta)) + ', \delta =' + repr(delta) + ' after the RandResp + Laplace + Gaussian.') ###Output Pure DP of \eps = 0.8472978603872034, after the RandResp. Pure DP of \eps = 1.3472978603872034, after the RandResp + Laplace. Pure DP of \eps = inf, after the RandResp + Laplace + Gaussian. Approx. DP with \eps = 2.3786291972507234, \delta =1e-06 after the RandResp + Laplace + Gaussian. ###Markdown 5. 
An additional feature that is supported by `autodp` is that we can do **subsampled-mechanisms** for anything that the RDP is available in closed form.This can be achieved by `anaRDPacct.compose_subsampled_mechanism` ###Code # decalre another moment accountant acct2 = rdp_acct.anaRDPacct() eps_seq = [] eps_seq2= [] eps_seq3= [] delta1 = 1e-8 delta2 = 1e-6 delta3 = 1e-4 sigma = 5.0 func = lambda x: rdp_bank.RDP_gaussian({'sigma': sigma}, x) k=1000 prob = 0.01 acct2.compose_subsampled_mechanism(func,prob,coeff=k) for i in range(int(k/100)): acct2.compose_subsampled_mechanism(func,prob,coeff=100) eps_seq.append(acct2.get_eps(delta1)) eps_seq2.append(acct2.get_eps(delta2)) eps_seq3.append(acct2.get_eps(delta3)) #if i%100==0: print("[",i,"]Privacy loss is",(eps_seq[-1],eps_seq2[-1],eps_seq3[-1])) print("Composition of 1000 subsampled Gaussian mechanisms gives ", (acct2.get_eps(delta), delta)) import matplotlib.pyplot as plt %matplotlib inline plt.figure(num=1, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k') plt.plot(eps_seq) plt.plot(eps_seq2) plt.plot(eps_seq3) plt.legend(['\delta = 1e-8', '\delta = 1e-6', '\delta = 1e-4'], loc='best') plt.title('Overall (eps,delta)-DP over composition.') plt.show() ###Output [ 0 ]Privacy loss is (0.8423893832071085, 0.7287500817892067, 0.5946199028279696) [ 1 ]Privacy loss is (0.8795413249514779, 0.760964111103689, 0.6210977220184837) [ 2 ]Privacy loss is (0.9151210096184355, 0.7919169819046258, 0.6465339247741531) [ 3 ]Privacy loss is (0.9494917773131786, 0.821775067123306, 0.671006744864439) [ 4 ]Privacy loss is (0.9825843221990638, 0.8505471827731135, 0.694605329594558) [ 5 ]Privacy loss is (1.0146188148049184, 0.8784065746627052, 0.7174318243986757) [ 6 ]Privacy loss is (1.045704998080816, 0.90545313353747, 0.7396039571497857) [ 7 ]Privacy loss is (1.0759173981100414, 0.9316465830277255, 0.7612582687711673) [ 8 ]Privacy loss is (1.1053390693305394, 0.9571495034169372, 0.7822301624120228) [ 9 ]Privacy loss is (1.1340629103641513, 0.9820768309075569, 0.8026649583953216) Composition of 1000 subsampled Gaussian mechanisms gives (0.9820768309075569, 1e-06)
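Behind `get_eps(delta)` is a conversion from the RDP description of a mechanism to a concrete (ε, δ) pair. The classic conversion is ε(δ) = min over α > 1 of RDP(α) + log(1/δ)/(α − 1); `autodp` implements tighter variants, so the standalone sketch below is only meant to show the shape of that computation for a single Gaussian mechanism with the same σ = 5 used above — it is not the library's internal code.

```python
import numpy as np

def rdp_gaussian(alpha, sigma):
    # RDP of the Gaussian mechanism with L2 sensitivity 1: alpha / (2 * sigma^2)
    return alpha / (2 * sigma ** 2)

def eps_from_rdp(rdp_func, delta, alphas=np.linspace(1.01, 200, 2000)):
    # Classic RDP -> (eps, delta) conversion: minimise over the RDP order alpha.
    eps = rdp_func(alphas) + np.log(1 / delta) / (alphas - 1)
    return eps.min()

sigma = 5.0
delta = 1e-6
print(eps_from_rdp(lambda a: rdp_gaussian(a, sigma), delta))
```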
jupyter_notebooks/Data Visualisation Cheat Sheet.ipynb
###Markdown Project goal For one reason or another, the matplotlib syntax constantly escapes me. So, I have created this notebook to slowly work through the ins and outs of matplotlib and seaborn in hope to 1) cement the knowledge in my head and/or 2) create a personal cheat sheet which I can refer to when I undoubtably forget the syntax again.Whilst there are a range of matplotlib and seaborn resources and cheat sheets already available, the best way for me to learn is to write, explain and teach. Thus, my recreation of the matplotlib basics wheel here. Resources The following is a list of useful matplotlib and seaborn resources:- Real Python: Python Plotting with Matplotlib Guide- Seaborn Tutorial- Python Data Science Handbook Import dependencies ###Code import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns import pandas as pd import numpy as np import warnings warnings.filterwarnings('ignore') ###Output _____no_output_____ ###Markdown Load dummy datasetThe seaborn package comes with some pre-defined datasets to make our life easy, so we will load the titanic dataset ###Code # Display the available seaborn datasets (requires an internet connection) sns.get_dataset_names() # Load tips dataset df = sns.load_dataset('tips') df.head() ###Output _____no_output_____ ###Markdown Basic plots Setting a plot style ###Code plt.style.use('seaborn-white') ###Output _____no_output_____ ###Markdown Creating a single plot There are two basic methods to plotting with matplotlib:1. Using the matlab style syntax2. Using object-oriented style syntax Matlab style ###Code # Group the number of tips received by day and split out into an x and y varible grouped_by_day = df.groupby('day').sum() days = grouped_by_day.index tip_amt = grouped_by_day['tip'] bill_amt = grouped_by_day['total_bill'] # Will be used later # Create a figure plt.figure() # Plot the data plt.bar(days, tip_amt); ###Output _____no_output_____ ###Markdown Object-oriented styleNB: Could plot directly from DF - this is covered later ###Code # Create a stateless (OO) graph using the plt.subplots() method fig, ax = plt.subplots(figsize=(8,4)) # Use ax.type_of_plot and pass in x and y ax.bar(days, tip_amt); ###Output _____no_output_____ ###Markdown For simplicity, the rest of this notebook uses the object-orientated style of plotting, as this is generally more useful as plotting needs get more complicated. Titles ###Code # Add a title ax.set_title("Total Tips by Day of the Week") # Display the plot fig ###Output _____no_output_____ ###Markdown Axis Labels ###Code # Add axis labels ax.set_xlabel('Day of the Week') ax.set_ylabel('Total Tips Received ($)') # Display the plot fig ###Output _____no_output_____ ###Markdown Legends ###Code #Create a legend ax.legend(['Tip']) # Display plot fig ###Output _____no_output_____ ###Markdown Annotating plots with textUse ax.text(x_pos, y_pos, 'text') on object-oriented plots ###Code # Annotate Friday as low ax.text(1, 60, "Low tips on Friday. 
Why?", ha='center') fig ###Output _____no_output_____ ###Markdown Annotating plots with arrows and textUse ax.annotate('text', xy=(coord_of_arrow), xytext=(coord_of_text)) on object-oriented plots ###Code # Annotate Thursday with an arrow ax.annotate('Seem to do well on Thursday', xy=(0, 170), xytext=(-1.5, 240), arrowprops=dict(arrowstyle='->'), ha='center') fig ###Output _____no_output_____ ###Markdown Plotting multiple elements ###Code # Create length of the plot and set the width of the bars ind = np.arange(len(days)) width = 0.35 # Create a figure fig2, ax2 = plt.subplots() # Use ax.type_of_plot and pass in x and y ax2.bar(ind, tip_amt, width=width); ax2.bar(ind + width ,bill_amt, width=width); # Set the xticks width and set xtickslabels to days ax2.set_xticks(ind + width / 2) ax2.set_xticklabels(days) #Create a legend ax2.legend(['Tip', 'Total Bill']); ###Output _____no_output_____ ###Markdown Multiple plots Subplots ###Code # Create 2 subplots, which share the same x axis labels fig, ax = plt.subplots(2, sharex=True) # Plot axis 0 and 1 as tip_amt and bill_amt ax[0].bar(days, tip_amt) ax[1].bar(days, bill_amt, color='orange') ###Output _____no_output_____ ###Markdown Creating a grid ###Code # Create a grid of 2 rows x 3 cols # sharex and sharey removes inner x and y labelling of each plot fig, ax = plt.subplots(2, 3, sharex='col', sharey='row') # plot to each of the subplots (could have done with a for loop, but wanted to be explicit) ax[0,0].text(0.5, 0.5, str((0, 0)), fontsize=18, ha='center') ax[0,1].text(0.5, 0.5, str((0, 1)), fontsize=18, ha='center') ax[0,2].text(0.5, 0.5, str((0, 2)), fontsize=18, ha='center') ax[1,0].text(0.5, 0.5, str((1, 0)), fontsize=18, ha='center') ax[1,1].text(0.5, 0.5, str((1, 1)), fontsize=18, ha='center') ax[1,2].text(0.5, 0.5, str((1, 2)), fontsize=18, ha='center'); ###Output _____no_output_____ ###Markdown GridSpec for custom layoutsBelow example adapted from the Data Science Handbook. ###Code # Create x and y variables x = df['total_bill'] y = df['tip'] # Create a figure which is 6x6 in size fig = plt.figure(figsize=(6, 6)) # Create a GridSpec grid = plt.GridSpec(4, 4, hspace=0.2, wspace=0.4) # Create multiple plots add arrange on the 6x6 grid main_scat = fig.add_subplot(grid[:-1, 1:]) y_hist = fig.add_subplot(grid[:-1, 0], xticklabels = [], sharey=main_scat) x_hist = fig.add_subplot(grid[-1, 1:], yticklabels = [], sharex=main_scat) main_scat.plot(x, y, 'ok', markersize=4) x_hist.hist(x, orientation='vertical') x_hist.invert_yaxis() y_hist.hist(y, orientation='horizontal') y_hist.invert_xaxis() ###Output _____no_output_____ ###Markdown Plotting with pandasRather than splitting out variables from a pandas dataframe into separate variables for plotting, we can directly plot from the dataframe using the pd.plot() method. ###Code # Plotting a scatter plot directly from a pandas DF df.plot.scatter('total_bill', 'tip'); ###Output _____no_output_____ ###Markdown Combing with pandas groupby ###Code # Plot a Combine with pd.groupby to display the sum of tips and bill per day df.groupby('day')[['tip', 'total_bill']].sum().plot(kind='bar'); # Plot separate subplots - pass subplots=True arg df.groupby('day')[['tip', 'total_bill']].sum().plot(kind='bar', subplots=True); ###Output _____no_output_____ ###Markdown SeabornSeaborn is a package built on top of matplotlib which extends its functionality (e.g. 
adds violin plots) and provides some aesthetic improvements to the base matplotlib style of plotting.Many of the below examples are adapted from the seaborn docs Categorical plotsCan either use the sns.catplot() method and pass in the kind arg, or directly create the required plot by calling the appropriate sns plot (e.g. sns.swarmplot() ) Basic plot ###Code # Create a swarmplot sns.catplot(x = 'day', y = 'tip', kind='swarm', data = df); ###Output _____no_output_____ ###Markdown Adding hue ###Code # Display points split by sex sns.catplot(x = 'day', y = 'tip', kind='swarm', hue='sex', data = df); ###Output _____no_output_____ ###Markdown Conditional display with queries ###Code # Only display tips greater than $2 sns.catplot(x = 'day', y = 'tip', hue = 'sex', data = df.query('tip > 2')); ###Output _____no_output_____ ###Markdown Ordering ###Code # Show tips by male or female sns.catplot(x = 'sex', y = 'tip', order=['Male', 'Female'], data = df); ###Output _____no_output_____ ###Markdown Displaying multiple plots ###Code sns.catplot(x='day', y='tip', hue='sex', col='time', kind='bar', data=df); ###Output _____no_output_____ ###Markdown Continious plotsSimilar to categorical plots, we can either use the relplot() "parent" method and pass in the kind arg, or directly choose the type of plot by calling it (e.g. sns.scatterplot() ). Basic plot ###Code # Scatter plot of total_bill vs tips sns.relplot(x='total_bill', y='tip', data=df); ###Output _____no_output_____ ###Markdown Adding hue and styles ###Code # Add hue of sex and style of whether they are a smoker or not sns.relplot(x='total_bill', y='tip', hue='sex', style='smoker', data=df); ###Output _____no_output_____ ###Markdown Multiple plotsSimilar to catplot(), we can display multiple plots for continious variables. ###Code # Display total_bill vs tip, split by sex sns.relplot(x='total_bill', y='tip', col='sex', data=df); # Plot mutliple plots by the size of the group sns.relplot(x='total_bill', y='tip', col='size', col_wrap=3, data=df); ###Output _____no_output_____ ###Markdown Types of plots Matplotlib plots ###Code # Scatter df.plot.scatter('total_bill', 'tip'); # Bar df.groupby('day')[['tip']].sum().plot.bar(); # Horizontal Bar df.groupby('day')[['tip']].sum().plot.barh(); # Pie df.groupby('day')[['tip']].sum().plot.pie(subplots=True); # Box plot df[['size', 'tip']].plot.box(); # Histogram df['tip'].plot.hist(); ###Output _____no_output_____ ###Markdown Other matplotlib plots which aren't shown above include:- area- density- hexbin- kernal density estimation- line Seaborn plotsSeaborn includes all of the same plots as matplotlib, with some additional plots. ###Code # Strip plot sns.stripplot(x = 'day', y = 'tip', data = df, jitter=True); # Swarm plot sns.swarmplot(x = 'day', y = 'tip', data = df, hue='sex'); # Violin plots sns.violinplot(x = 'tip', y = 'day', hue='time', data = df); # Pairplot sns.pairplot(df); # Jointplot sns.jointplot(x = 'total_bill', y = 'tip', data = df); ###Output _____no_output_____
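###Markdown A few of the plot types listed above (hexbin, kernel density estimation) plus a seaborn heatmap are not demonstrated; the quick sketches below reuse the same tips DataFrame `df` already loaded in this notebook. ###Code # Hexbin: a 2D-density alternative to the scatter plot shown earlier
df.plot.hexbin(x='total_bill', y='tip', gridsize=15);

# Kernel density estimate (KDE) of a single column
df['tip'].plot.kde();

# Correlation heatmap with seaborn, restricted to the numeric columns
sns.heatmap(df[['total_bill', 'tip', 'size']].corr(), annot=True, cmap='Blues');
###Output _____no_output_____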
Cap08/DesafioDSA_Solucao/Missao5/missao5.ipynb
###Markdown Data Science Academy - Python Fundamentos - Capítulo 7 Download: http://github.com/dsacademybr Missão: Analisar o Comportamento de Compra de Consumidores. Nível de Dificuldade: Alto Você recebeu a tarefa de analisar os dados de compras de um web site! Os dados estão no formato JSON e disponíveis junto com este notebook.No site, cada usuário efetua login usando sua conta pessoal e pode adquirir produtos à medida que navega pela lista de produtos oferecidos. Cada produto possui um valor de venda. Dados de idade e sexo de cada usuário foram coletados e estão fornecidos no arquivo JSON.Seu trabalho é entregar uma análise de comportamento de compra dos consumidores. Esse é um tipo de atividade comum realizado por Cientistas de Dados e o resultado deste trabalho pode ser usado, por exemplo, para alimentar um modelo de Machine Learning e fazer previsões sobre comportamentos futuros.Mas nesta missão você vai analisar o comportamento de compra dos consumidores usando o pacote Pandas da linguagem Python e seu relatório final deve incluir cada um dos seguintes itens:** Contagem de Consumidores *** Número total de consumidores** Análise Geral de Compras *** Número de itens exclusivos* Preço médio de compra* Número total de compras* Rendimento total** Informações Demográficas Por Gênero *** Porcentagem e contagem de compradores masculinos* Porcentagem e contagem de compradores do sexo feminino* Porcentagem e contagem de outros / não divulgados** Análise de Compras Por Gênero *** Número de compras* Preço médio de compra* Valor Total de Compra* Compras for faixa etária** Identifique os 5 principais compradores pelo valor total de compra e, em seguida, liste (em uma tabela): *** Login* Número de compras* Preço médio de compra* Valor Total de Compra* Itens mais populares** Identifique os 5 itens mais populares por contagem de compras e, em seguida, liste (em uma tabela): *** ID do item* Nome do item* Número de compras* Preço do item* Valor Total de Compra* Itens mais lucrativos** Identifique os 5 itens mais lucrativos pelo valor total de compra e, em seguida, liste (em uma tabela): *** ID do item* Nome do item* Número de compras* Preço do item* Valor Total de Compra** Como considerações finais: *** Seu script deve funcionar para o conjunto de dados fornecido.* Você deve usar a Biblioteca Pandas e o Jupyter Notebook. ###Code # Imports import pandas as pd import numpy as np # Carrega o arquivo load_file = "dados_compras.json" purchase_file = pd.read_json(load_file, orient = "records") purchase_file.head() ###Output _____no_output_____ ###Markdown Informações Sobre os Consumidores ###Code # Implemente aqui sua solução ###Output _____no_output_____ ###Markdown Análise Geral de Compras ###Code # Implemente aqui sua solução ###Output _____no_output_____ ###Markdown Análise Demográfica ###Code # Implemente aqui sua solução ###Output _____no_output_____ ###Markdown Informações Demográficas Por Gênero ###Code # Implemente aqui sua solução ###Output _____no_output_____ ###Markdown Análise de Compras Por Gênero ###Code # Implemente aqui sua solução ###Output _____no_output_____ ###Markdown Consumidores Mais Populares (Top 5) ###Code # Implemente aqui sua solução ###Output _____no_output_____ ###Markdown Itens Mais Populares ###Code # Implemente aqui sua solução ###Output _____no_output_____ ###Markdown Itens Mais Lucrativos ###Code # Implemente aqui sua solução ###Output _____no_output_____
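###Markdown A minimal starting sketch for the first two sections above (consumer count and general purchase analysis). The column names used below ('Login', 'Valor', 'Nome do Item') are assumptions, since the schema of `dados_compras.json` is not shown in this notebook. ###Code # Sketch only -- the column names 'Login', 'Valor' and 'Nome do Item' are assumed, not confirmed by the data
numero_consumidores = purchase_file['Login'].nunique()
itens_exclusivos = purchase_file['Nome do Item'].nunique()
preco_medio = purchase_file['Valor'].mean()
numero_compras = purchase_file.shape[0]
rendimento_total = purchase_file['Valor'].sum()

print(numero_consumidores, itens_exclusivos, preco_medio, numero_compras, rendimento_total)
###Output _____no_output_____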
model_management/classification/resnet50_training.ipynb
###Markdown Get Global Model and Pretrained Snapshot ###Code model = dl.models.get(model_name='ResNet') snapshot = model.snapshots.get('pretrained-resnet18') model.snapshots.list().to_df() ###Output _____no_output_____ ###Markdown Upload Dataset ###Code project = dl.projects.get('Sheeps Face Proj') dataset = project.datasets.create('Sheep Face') dataset.to_df() _ = dataset.items.upload(local_path='../../assets/sample_datasets/SheepFace/items/*', local_annotations_path='../../assets/sample_datasets/SheepFace/json') ###Output 0it [00:00, ?it/s] 0%| | 1/1681 [00:01<40:50, 1.46s/it] 0%|▊ | 7/1681 [00:01<28:37, 1.03s/it] 2%|███▍ | 30/1681 [00:01<19:47, 1.39it/s] 3%|██████▏ | 53/1681 [00:01<13:43, 1.98it/s] 4%|███████▍ | 64/1681 [00:01<09:40, 2.79it/s] 5%|█████████▋ | 84/1681 [00:02<06:43, 3.96it/s] 7%|████████████▊ | 111/1681 [00:02<04:39, 5.61it/s] 8%|██████████████▋ | 127/1681 [00:02<03:19, 7.79it/s] 9%|█████████████████▏ | 149/1681 [00:02<02:19, 10.95it/s] 10%|███████████████████▋ | 171/1681 [00:02<01:38, 15.29it/s] 11%|█████████████████████▋ | 188/1681 [00:02<01:12, 20.63it/s] 12%|███████████████████████▋ | 205/1681 [00:02<00:52, 27.97it/s] 14%|██████████████████████████▉ | 233/1681 [00:03<00:38, 37.87it/s] 15%|████████████████████████████▉ | 251/1681 [00:03<00:30, 47.64it/s] 16%|███████████████████████████████▏ | 270/1681 [00:03<00:23, 60.49it/s] 17%|█████████████████████████████████▋ | 292/1681 [00:03<00:18, 76.51it/s] 18%|███████████████████████████████████▊ | 310/1681 [00:03<00:16, 85.48it/s] 19%|█████████████████████████████████████▌ | 326/1681 [00:03<00:13, 99.14it/s] 21%|███████████████████████████████████████▌ | 345/1681 [00:03<00:11, 115.64it/s] 22%|██████████████████████████████████████████▏ | 367/1681 [00:03<00:09, 132.11it/s] 23%|████████████████████████████████████████████▏ | 385/1681 [00:03<00:09, 130.20it/s] 24%|██████████████████████████████████████████████▏ | 402/1681 [00:04<00:09, 134.13it/s] 25%|█████████████████████████████████████████████████ | 427/1681 [00:04<00:08, 148.63it/s] 26%|██████████████████████████████████████████████████▉ | 444/1681 [00:04<00:08, 145.03it/s] 27%|████████████████████████████████████████████████████▊ | 460/1681 [00:04<00:08, 142.99it/s] 28%|██████████████████████████████████████████████████████▉ | 479/1681 [00:04<00:07, 154.16it/s] 30%|█████████████████████████████████████████████████████████▊ | 503/1681 [00:04<00:06, 169.16it/s] 31%|███████████████████████████████████████████████████████████▉ | 522/1681 [00:04<00:08, 143.77it/s] 32%|██████████████████████████████████████████████████████████████▏ | 542/1681 [00:04<00:07, 156.89it/s] 34%|████████████████████████████████████████████████████████████████▊ | 565/1681 [00:05<00:06, 167.16it/s] 35%|██████████████████████████████████████████████████████████████████▉ | 583/1681 [00:05<00:07, 149.04it/s] 36%|████████████████████████████████████████████████████████████████████▉ | 600/1681 [00:05<00:07, 148.97it/s] 37%|███████████████████████████████████████████████████████████████████████▎ | 621/1681 [00:05<00:06, 161.30it/s] 38%|█████████████████████████████████████████████████████████████████████████▎ | 639/1681 [00:05<00:06, 158.94it/s] 39%|███████████████████████████████████████████████████████████████████████████▎ | 656/1681 [00:05<00:06, 160.10it/s] 40%|█████████████████████████████████████████████████████████████████████████████▎ | 673/1681 [00:05<00:06, 153.86it/s] 41%|███████████████████████████████████████████████████████████████████████████████▉ | 696/1681 [00:05<00:05, 168.11it/s] 
69%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▍ | 1168/1681 [00:08<00:03, 169.83it/s] 71%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▌ | 1187/1681 [00:08<00:02, 173.23it/s] 72%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▋ | 1205/1681 [00:09<00:03, 150.35it/s] 73%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▍ | 1230/1681 [00:09<00:02, 162.12it/s] 74%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ | 1252/1681 [00:09<00:02, 175.76it/s] 76%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▏ | 1271/1681 [00:09<00:02, 151.65it/s] 77%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ | 1288/1681 [00:09<00:02, 155.80it/s] 78%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▋ | 1310/1681 [00:09<00:02, 166.64it/s] ###Markdown Run Pretrained Model ###Code adapter = model.build() ###Output _____no_output_____ ###Markdown Load the pretrained snapshot into the model adapter ###Code adapter.load_from_snapshot(snapshot=snapshot) ###Output I: dtlpy.ml.base_model_adapter 09:44:08 [resnet_adapter.py:51](load):: Loading a model from C:\Users\Shabtay\.dataloop\snapshots\resnset50-imagenet-pretrained I: dtlpy.ml.base_model_adapter 09:44:09 [resnet_adapter.py:56](load):: Loaded model from C:\Users\Shabtay\.dataloop\snapshots\resnset50-imagenet-pretrained\model.pth successfully ###Markdown Get an item and predict with upload ###Code item = dl.items.get(item_id='611e174e4c09acc3c5bb81d3') annotations = adapter.predict_items([item], with_upload=True) image = Image.open(item.download()) plt.imshow(item.annotations.show(np.asarray(image), thickness=5)) print('Classification: {}'.format(annotations[0][0].label)) ###Output predicting: 0%| | 0/1 [00:00<?, ?bt/s] 0%| | 0/1 [00:00<?, ?it/s] 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 3.08it/s] predicting: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:01<00:00, 1.39s/bt] I: dtlpy.ml.base_model_adapter 09:44:19 [base_model_adapter.py:288](predict_items):: Uploading items annotation for snapshot 'resnset50-imagenet-pretrained'. cleanup False 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<?, ?it/s] ###Markdown You can alos open the item in the platform to view and edit annotations easily ###Code item.open_in_web() ###Output _____no_output_____ ###Markdown Train on new datasetHere we will train on the Sheep dataset. 
First we will clone and split the dataset to 2 partitions - train and validation. After that we will clone the pretrained snapshot ###Code dataset = project.datasets.get(dataset_name='Sheep Face') partitions = {dl.SnapshotPartitionType.TRAIN: 0.8, dl.SnapshotPartitionType.VALIDATION: 0.2} cloned_dataset = train_utils.prepare_dataset(dataset, filters=None, partitions=partitions) snapshot_name='sheep-soft-augmentations' # create an Item Bucket to save snapshot in your project bucket = project.buckets.create(bucket_type=dl.BucketType.ITEM, model_name=model.name, snapshot_name=snapshot_name) new_snapshot = snapshot.clone(snapshot_name=snapshot_name, dataset_id=cloned_dataset.id, bucket=bucket, configuration={'batch_size': 16, 'start_epoch': 0, 'num_epochs': 2, 'input_size': 256}) new_snapshot = model.snapshots.get(snapshot_name=snapshot_name) ###Output 2021-08-31 10:18:52.888 [WARNING]-[MainThread]-[v1.35.3]dtlpy.ml.train_utils: Cloned dataset already exist. Using it... 0it [00:00, ?it/s] 0%| | 0.00/97.8M [00:00<?, ?B/s] 0%| | 64.0k/97.8M [00:00<03:16, 520kB/s] 50%|███████████████████████████████████████████████████████████████████████████████████████████████████▌ | 1/2 [00:01<00:01, 1.39s/it] 1%|█▌ | 832k/97.8M [00:00<02:02, 833kB/s] 3%|█████▊ | 2.94M/97.8M [00:00<01:24, 1.17MB/s] 6%|████████████ | 6.12M/97.8M [00:00<00:58, 1.65MB/s] 8%|██████████████▊ | 7.56M/97.8M [00:00<00:42, 2.22MB/s] 9%|█████████████████▌ | 8.94M/97.8M [00:00<00:31, 2.93MB/s] 10%|████████████████████ | 10.2M/97.8M [00:01<00:24, 3.78MB/s] 12%|██████████████████████▌ | 11.5M/97.8M [00:01<00:19, 4.75MB/s] 13%|█████████████████████████ | 12.8M/97.8M [00:01<00:15, 5.78MB/s] 14%|███████████████████████████▎ | 13.9M/97.8M [00:01<00:12, 6.83MB/s] 15%|█████████████████████████████▋ | 15.1M/97.8M [00:01<00:11, 7.80MB/s] 17%|████████████████████████████████ | 16.3M/97.8M [00:01<00:09, 8.67MB/s] 18%|██████████████████████████████████▎ | 17.5M/97.8M [00:01<00:08, 9.39MB/s] 19%|████████████████████████████████████▋ | 18.7M/97.8M [00:01<00:08, 10.0MB/s] 20%|███████████████████████████████████████ | 19.9M/97.8M [00:01<00:07, 10.4MB/s] 21%|█████████████████████████████████████████▏ | 21.0M/97.8M [00:01<00:07, 10.8MB/s] 23%|███████████████████████████████████████████▍ | 22.1M/97.8M [00:02<00:07, 11.1MB/s] 24%|█████████████████████████████████████████████▋ | 23.2M/97.8M [00:02<00:06, 11.2MB/s] 25%|███████████████████████████████████████████████▊ | 24.4M/97.8M [00:02<00:06, 11.4MB/s] 26%|██████████████████████████████████████████████████▏ | 25.6M/97.8M [00:02<00:06, 11.5MB/s] 27%|████████████████████████████████████████████████████▌ | 26.8M/97.8M [00:02<00:06, 11.6MB/s] 28%|██████████████████████████████████████████████████████▋ | 27.9M/97.8M [00:02<00:06, 11.6MB/s] 30%|█████████████████████████████████████████████████████████ | 29.1M/97.8M [00:02<00:06, 11.7MB/s] 31%|███████████████████████████████████████████████████████████▎ | 30.2M/97.8M [00:02<00:06, 11.7MB/s] 32%|█████████████████████████████████████████████████████████████▍ | 31.3M/97.8M [00:02<00:05, 11.7MB/s] 33%|███████████████████████████████████████████████████████████████▋ | 32.4M/97.8M [00:02<00:05, 11.7MB/s] 34%|██████████████████████████████████████████████████████████████████ | 33.6M/97.8M [00:03<00:05, 11.7MB/s] 36%|████████████████████████████████████████████████████████████████████▏ | 34.8M/97.8M [00:03<00:05, 11.8MB/s] 37%|██████████████████████████████████████████████████████████████████████▍ | 35.9M/97.8M [00:03<00:05, 11.7MB/s] 
[... download progress truncated ...] ###Markdown We will load the new cloned, un-trained snapshot into the model adapter and prepare for training. This will download the dataset locally.
###Code adapter.load_from_snapshot(snapshot=new_snapshot) root_path, data_path, output_path = adapter.prepare_training() ###Output 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 1939.11it/s] I: dtlpy.ml.base_model_adapter 10:19:22 [resnet_adapter.py:51](load):: Loading a model from C:\Users\Shabtay\.dataloop\snapshots\sheep-soft-augmentations I: dtlpy.ml.base_model_adapter 10:19:23 [resnet_adapter.py:56](load):: Loaded model from C:\Users\Shabtay\.dataloop\snapshots\sheep-soft-augmentations\model.pth successfully W: dtlpy.ml.base_model_adapter 10:19:24 [base_model_adapter.py:173](prepare_training):: Data path directory (C:\Users\Shabtay\.dataloop\datasets\612dd045ef3be2709836e286) is not empty.. 2021-08-31 10:19:24.499 [WARNING]-[MainThread]-[v1.35.3]dtlpy.ml.base_model_adapter: Data path directory (C:\Users\Shabtay\.dataloop\datasets\612dd045ef3be2709836e286) is not empty.. 17%|███████████████████████████████▊ | 223/1345 [00:00<00:00, 2207.93it/s] 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1345/1345 [00:01<00:00, 1280.31it/s] I: dtlpy.ml.base_model_adapter 10:19:26 [base_model_adapter.py:183](prepare_training):: Downloaded <SnapshotPartitionType.TRAIN: 'train'> SnapshotPartition complete. 1345 total items 88%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▉ | 294/336 [00:00<00:00, 2939.97it/s] 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 336/336 [00:00<00:00, 2799.99it/s] I: dtlpy.ml.base_model_adapter 10:19:26 [base_model_adapter.py:183](prepare_training):: Downloaded <SnapshotPartitionType.VALIDATION: 'validation'> SnapshotPartition complete. 336 total items 2021-08-31 10:19:26.676 [WARNING]-[MainThread]-[v1.35.3]dtlpy.repositories.downloader: No items found! Nothing was downloaded I: dtlpy.ml.base_model_adapter 10:19:26 [base_model_adapter.py:183](prepare_training):: Downloaded <SnapshotPartitionType.TEST: 'test'> SnapshotPartition complete. 0 total items 50%|██████████████████████████████████████████████████████████████████████████████████████████████████ | 50/100 [00:05<00:05, 9.63it/s] 95%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▏ | 95/100 [00:05<00:00, 18.26it/s] ###Markdown Create Dataoader and Visualize ImagesWe can Visualize the data with augmentation for debug and exploration. 
You can skip this part and go straight to training ###Code from imgaug import augmenters as iaa from torchvision import transforms from dtlpy.ml.ml_dataset import get_torch_dataset augmentation = iaa.Sequential([ iaa.Resize({"height": 256, "width": 256}), # iaa.Superpixels(p_replace=(0, 0.5), n_segments=(10, 50)), iaa.flip.Fliplr(p=0.5), iaa.flip.Flipud(p=0.5), iaa.GaussianBlur(sigma=(0.0, 0.8)), ]) tfs = transforms.Compose([ augmentation, np.copy, # transforms.ToTensor() ]) dataloader = get_torch_dataset()(data_path=os.path.join(data_path, 'train'), dataset_entity=cloned_dataset, annotation_type=dl.AnnotationType.CLASSIFICATION, transforms=tfs) dataloader.visualize() ###Output _____no_output_____ ###Markdown Start The TrainFinally we are ready to train! ###Code print("Training {!r} with snapshot {!r} on data {!r}".format(model.name, new_snapshot.id, data_path)) adapter.train(data_path=data_path, output_path=output_path) ###Output Training 'ResNet-torch' with snapshot '612dd7dc3c18f7b46a8ba660' on data 'C:\\Users\\Shabtay\\.dataloop\\datasets\\612dd045ef3be2709836e286' ###Markdown Saving the snapshot - will upload the trained weights to the Item Bucket ###Code adapter.save_to_snapshot(local_path=output_path, replace=True) adapter.snapshot.bucket.list_content() ###Output _____no_output_____ ###Markdown Predict On Out New Trained SnapshotWe will create a load and visualize the prediction of the snapshot ###Code from imgaug import augmenters as iaa from torchvision import transforms from dtlpy.ml.ml_dataset import get_torch_dataset augmentation = iaa.Sequential([ iaa.Resize({"height": 256, "width": 256}), # iaa.Superpixels(p_replace=(0, 0.5), n_segments=(10, 50)), iaa.flip.Fliplr(p=0.5), iaa.flip.Flipud(p=0.5), # iaa.GaussianBlur(sigma=(0.0, 0.8)), ]) tfs = transforms.Compose([ augmentation, np.copy, transforms.ToTensor() ]) dataloader = get_torch_dataset()(data_path=os.path.join(data_path, 'train'), dataset_entity=cloned_dataset, annotation_type=dl.AnnotationType.CLASSIFICATION, transforms=tfs, with_orig=True) adapter = model.build() trained_snapshot = model.snapshots.get(snapshot_name='sheep-soft-augmentations') adapter.load_from_snapshot(snapshot=trained_snapshot, overwrite=True) fig, ax = plt.subplots(1,2) for i in range(2): image, target, orig_image, orig_targets = dataloader[np.random.randint(len(dataloader))] anno = adapter.predict([orig_image]) ax[i].imshow(orig_image) ax[i].set_title('GT: {!r}\n Pred: {!r}:{:.2f}'.format(adapter.label_map[str(int(target))], anno[0][0].label, anno[0][0].metadata['user']['model']['confidence'])) ###Output _____no_output_____ ###Markdown Predict and Upload Annotation to Item in PlatformWe will get an item from the dataset and predict with the new sheeps snapshot ###Code dataset.items.list().to_df() item = dataset.items.get(filepath='/Poll Dorset/000342 P.jpg') annotations = adapter.predict_items([item], with_upload=True) image = Image.open(item.download()) plt.imshow(item.annotations.show(np.asarray(image), with_text=True)) ###Output predicting: 0%| | 0/1 [00:00<?, ?bt/s] 0%| | 0/1 [00:00<?, ?it/s] 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 6.83it/s] predicting: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 
5.37bt/s] I: dtlpy.ml.base_model_adapter 11:45:36 [base_model_adapter.py:288](predict_items):: Uploading items annotation for snapshot 'sheep-soft-augmentations'. cleanup False 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 999.83it/s]
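###Markdown To reuse the trained snapshot in a fresh session, the whole flow condenses to a few lines. This is a sketch that only repeats calls already used in this notebook (the entity names and item filepath are the ones from above) and assumes `dtlpy` is imported as `dl`. ###Code import dtlpy as dl

# Re-fetch the entities used in this notebook
model = dl.models.get(model_name='ResNet')
snapshot = model.snapshots.get(snapshot_name='sheep-soft-augmentations')
project = dl.projects.get('Sheeps Face Proj')
dataset = project.datasets.get(dataset_name='Sheep Face')

# Rebuild the adapter and load the trained weights
adapter = model.build()
adapter.load_from_snapshot(snapshot=snapshot)

# Predict on a single item and upload the classification back to the platform
item = dataset.items.get(filepath='/Poll Dorset/000342 P.jpg')
annotations = adapter.predict_items([item], with_upload=True)
###Output _____no_output_____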
csci3352_PS7_ClaytonSchneider.ipynb
###Markdown CSCI 3352 Biological Networks, Spring 2021, Prof. ClausetSubmit here: https://canvas.colorado.edu/courses/69236 Problem Set 7 : Network epidemiology*****Name**: Clayton Schneider***This assignment is due on Canvas by **11:55pm on Friday, March 12th**. Your solutions to non-programming questions should be done in Markdown directly below the associated question. Your solutions to computational questions should include any specified Python code and results as well as written commentary on your conclusions. Remember that you are encouraged to discuss the problems with your classmates, but **you must write all code and solutions on your own** (see syllabus for detailed guidance). There are 65 points total, and 20 pts extra credit.**NOTES**: - Unless a url is given for a data set, you will find the required data on the course Canvas.- If you're not familiar with typesetting math directly in Markdown, you may do your work on paper first and then typeset it later. This [reference guide](https://math.meta.stackexchange.com/questions/5020/mathjax-basic-tutorial-and-quick-reference) provides helpful guidance for writing math in Markdown. - It is **unhelpful** to make your reader interpret numerical output from your code. If a question asks you to compute some value from the data you should show your code output **AND** write a summary of the results in Markdown directly below your code.**** [Documentation for networkx](https://networkx.github.io/documentation/stable/)[//]: (Documentation for igraph Python https://igraph.org/python/ ) ###Code import networkx as nx import numpy as np import copy import matplotlib import matplotlib.pylab as plt %matplotlib inline import random as rnd rnd.seed() def drawGz(G,z): # DO NOT MODIFY THIS FUNCTION # This function draws G with node labels from partition z # # input : G is a networkx graph # : z is a dictionary of group labels for G's nodes # output : none # # WARNING: function is optimistic: assumes inputs are properly formatted colors = ['#d61111','#11d646','#11c6d6','#d67711','#1b11d6','#d611cc'] # map node labels to colors (for the visualization) node_colors = [] for i in G.nodes(): if z[i]=='S': cid = 0 if z[i]=='I': cid = 1 if z[i]=='R': cid = 2 node_colors.append(colors[int(cid)]) nsize = 600 flabel = True if G.order() > 50: nsize = 100 flabel = False nx.draw_kamada_kawai(G,with_labels=flabel,node_size=nsize,width=2,node_color=node_colors) # draw it prettier #nx.draw_networkx(G,with_labels=flabel,node_size=nsize,width=2,node_color=node_colors) # draw it pretty limits=plt.axis('off') # turn off axes plt.show() return def plot_epidemicDynamics(St,It,Rt): # DO NOT MODIFY THIS FUNCTION # This function plots the S(t),I(t),R(t) time series nicely # WARNING: function is optimistic: assumes inputs are properly formatted fig = plt.figure() ax1 = fig.add_subplot(111) # put multiple plt.plot(range(len(St)), St, 'bo-', alpha=0.5,label='S(t)') # plot the S(t) time series plt.plot(range(len(It)), It, 'rv-', alpha=0.5,label='I(t)') # plot the I(t) time series plt.plot(range(len(Rt)), Rt, 'gs-', alpha=0.5,label='R(t)') # plot the R(t) time series plt.ylabel('number of nodes') plt.xlabel('time, t') plt.legend(loc='upper right'); #ax1.set_xlim([0, 50]) plt.show() return ###Output _____no_output_____ ###Markdown *** Part 1 (10 pts) : Warming up* Using the SIR simulation code from the in-class lab, write a function `run_SIR(G,s,beta,gmma,flag)` that * takes as input a simple `networkx` graph, an integer `s` that specifies the _number_ of randomly chosen infected 
nodes at time $t=0$, choices of $\beta$ and $\gamma$, and a binary variable `flag` (`=0` means run silently; `=1` means display any intermediate outputs, like `drawGz()` or `print()` statements), * runs the simulation to its completion, and then * returns the $S(t)$, $I(t)$, $R(t)$ time series for the progression of the epidemic (for plotting using the `plot_epidemicDynamics()` function) _and_ a dictionary `xt` that stores for each node `i` the corresponding time `t` at which `i` became infected. You'll need to instrument the existing simulation code to track `xt` correctly. * Then, apply your new function to the toy graph from the lecture notes and lab, with $s=1$ and $\beta=\gamma=0.3$, and plot the resulting time series. ###Code def run_SIR(G,s,beta,gmma,flag): # Basic SIR simulation n = G.order() zt = dict.fromkeys(range(n), 'S') xt = dict.fromkeys(range(n), -1) St = [] It = [] Rt = [] Sc, Ic, Rc = n-s, s, 0 St.append(Sc) It.append(Ic) Rt.append(Rc) initial = 0 while initial < s: patientz = np.random.choice(G.nodes()) zt[patientz] = 'I' xt[patientz] = 0 initial += 1 t = 1 if flag == 1: print(f'time step {t}') drawGz(G, zt) while any(xi == 'I' for xi in zt.values()): zu = copy.deepcopy(zt) # nodes states for next time step (synchronous updates) xu = copy.deepcopy(xt) # do S -> I transitions for e in G.edges(): i, j = e[0], e[1] if (zt[i] == 'I') and (zt[j] == 'S') and (zu[j] != 'I'): if rnd.random() < beta: zu[j] = 'I' # i infects j for next round xu[j] = t Sc -= 1 Ic += 1 # update counts if (zt[i] == 'S') and (zt[j] == 'I') and (zu[i] != 'I'): if rnd.random() < beta: zu[i] = 'I' # j infects i for next round xu[i] = t Sc -= 1 Ic += 1 # do I - R transitions for i in G.nodes(): if zt[i] == 'I' and rnd.random() < gmma: zu[i] = 'R' # i recovers (R) Ic,Rc = Ic-1,Rc+1 # update counts # update all states synchronously, update clock zt = copy.deepcopy(zu) xt = copy.deepcopy(xu) t = t+1 if flag == 1: print(f'time step {t}') drawGz(G,zt) St.append(Sc) It.append(Ic) Rt.append(Rc) # append these counts to the time series return St,It,Rt,xt ##### do not modify below here ##### ## run this code once you've written the run_SIR() function G = nx.Graph([(0,1),(0,2),(1,2),(2,3),(3,4),(3,5),(4,5)]) # a simple graph G beta = 0.3 gmma = 0.3 flag = 1 St,It,Rt,xt = run_SIR(G,1,beta,gmma,flag) plot_epidemicDynamics(St,It,Rt) ##### do not modify above here ##### ###Output time step 1 ###Markdown *** Problem 2 : The role of the degree distribution (55 pts total)In this problem, you will investigate the role of the exposure graph's degree distribution $\Pr(k)$ on an epidemic's dynamics $I(t)$. This problem has three parts:* `Problem 2-A`, you'll generate high/medium/low variance degree distributions.* `Problem 2-B`, you'll run an epidemic on each and discuss the differences.* `Problem 2-C`, you'll investigate the role of degree on _when_ a node gets infected. Problem 2-A (20 pts)In `Problem 2-B` we'll use the Chung-Lu model to generate random graphs with a specified degree distribution. However, to see how varying the degree structure changes the epidemic's shape, we'll first need a low-dimensional way to vary the degree structure's shape. There are many ways to do this. Here, we'll use a 2-parameter distribution called a *stretched exponential*, which has the form:$\Pr(x) \propto x^{\beta-1} \textrm{exp}(-\lambda \, x^{\beta})$,where $\beta\geq 0$ and $\lambda\geq 0$. 
When $\beta=1$, this distribution returns a standard (low variance) exponential distribution with parameter $\lambda$; when $\beta=2$, it decays like a standard (lower variance) Gaussian or Normal distribution; and when $\beta1$ means a lower mean.* Use the provided function `drawFrom_SE(n,beta,lmbda)` to generate *three* lists, each containing $n=1000$ values. The three lists should be generated with different combinations of $\beta$ and $\lambda$, and should meet the following criteria: * one distribution should have "low" variance, one should have "high" variance, and one should be in between those, and * they should all have the roughly the same average value (e.g., in my solutions, the mean is about $2.3\pm0.1$).* Modify the `plot_2CCDF()` function from `Problem Set 2` to display all three of these distributions together, i.e., make and apply a function `plot_3CCDF()`.* Briefly describe the effect of increasing or decreasing $\beta$ and $\lambda$ on the shape of the plotted distributions, e.g., on the shape of the "body" (the middle part) and the upper tail of the distribution.Hint: You will likely need to fiddle with different choices of parameters in order to find good settings. Try starting at $\beta=\lambda=1$, and then increasing or decreasing one, then the other parameter, until you get something reasonable when you plot it. For the "high" variance distribution, you'll want the maximum value you generate to be no higher than 100 or so. ###Code def drawFrom_SE(n,beta,lmbda): # DO NOT MODIFY THIS FUNCTION # This function draws n values from a stretched exponential distribution with parameters beta,lambda # and then rounds them to integers for use as degrees. It returns these in a list x. xmin = 0.5 # minimum value x = [] # for i in range(n): xi = int(round( (xmin**beta - (1/lmbda)*np.log(1-rnd.random()))**(1/beta) )) x.append(xi) return x def plot_3CCDF(kis1,kis2,kis3): # input : three lists of degrees # output: one plot showing the CCDFs of the three degree distributions Pr(K>=k) for k>=1 ki1_max = max(kis1) ki2_max = max(kis2) ki3_max = max(kis3) all_kis = [] for i in kis1: all_kis.append(i) for i in kis2: all_kis.append(i) for i in kis3: all_kis.append(i) real_max = max(all_kis) # histograms icounts1, ibins1 = np.histogram(kis1, bins=[i for i in range(real_max+2)], density=True) icounts2, ibins2 = np.histogram(kis2, bins=[i for i in range(real_max+2)], density=True) icounts3, ibins3 = np.histogram(kis3, bins=[i for i in range(real_max+2)], density=True) icumcounts1 = np.cumsum(icounts1) icumcounts2 = np.cumsum(icounts2) icumcounts3 = np.cumsum(icounts3) icumcounts1 = np.insert(icumcounts1,0,0) icumcounts2 = np.insert(icumcounts2,0,0) icumcounts3 = np.insert(icumcounts3,0,0) # plots fig = plt.figure() ax1 = fig.add_subplot(111) # put multiple plt.loglog(ibins1[1:-1], 1-icumcounts1[1:-1], 'rs', alpha=0.5, label='kis1') plt.loglog(ibins2[1:-1], 1-icumcounts2[1:-1], 'bo', alpha=0.5, label='kis2') plt.loglog(ibins3[1:-1], 1-icumcounts3[1:-1], 'y*', alpha=0.5, label='kis3') plt.title('CCDF, 3 degree distributions (loglog)') plt.xlabel('Degree, k') plt.ylabel('Pr(K>=k)') plt.legend(loc='upper right'); plt.show() return n = 1000 beta1 = 1 beta2 = 0.75 beta3 = 0.5 lmbda1 = 0.9 lmbda2 = 0.9 lmbda3 = 0.9 ##### do not modify below here ##### ## run this code once you've written the plot_3CCDF() function x1 = drawFrom_SE(n,beta1,lmbda1) x2 = drawFrom_SE(n,beta2,lmbda2) x3 = drawFrom_SE(n,beta3,lmbda3) plot_3CCDF(x1,x2,x3) print(f'x1, mean = {sum(x1)/n}') print(f'x2, mean = {sum(x2)/n}') 
print(f'x3, mean = {sum(x3)/n}') ###Output _____no_output_____ ###Markdown Dropping beta1 down from 1 seems to scale down the graph of the CCDF on the x-axis, kind of scrunching it in towards x=0. Lambda is similar, but in the reverse direction, and that it doesn't seem to directly scale them. It floats the mean of the CCDF out further as the value of lambda shrinks. If it grows at all, the curve essentially disappears and becomes a sheer cliff. Problem 2-B (15 pts)Now, using $\beta=\gamma=0.4$ (at the critical threshold of $R_0$):* Use the built-in `networkx` Chung-Lu graph generator to make a graph for each of your three degree distributions.* Then, run your SIR simulation on each graph, and plot the corresponding epidemic's dynamics.* Discuss what you see as differences or similarities among or between the three cases, and offer an explanation _in terms of the role that node degree plays in spreading the epidemic_. Remember that the _only_ thing different about these three graphs is the degree structure. (You may find it useful to use `drawGz()` to visualize them.)Hint: You may need to re-run the simulation on the same graphs multiple times in order to get three good runs (since each run will start the epidemic in a different location, and occasionally the epidemic may not take off). Alternatively, you can increase the number of initially infected nodes to a small integer (maybe 5 at most). When comparing epidemics, don't forget to look at how _long_ they last. ###Code beta = 0.4 gmma = 0.4 flag = 0 G1 = nx.expected_degree_graph(x1, seed=None, selfloops=False) G2 = nx.expected_degree_graph(x2, seed=None, selfloops=False) G3 = nx.expected_degree_graph(x3, seed=None, selfloops=False) St1,It1,Rt1,xt1 = run_SIR(G1,1,beta,gmma,flag) plot_epidemicDynamics(St1,It1,Rt1) St2,It2,Rt2,xt2 = run_SIR(G2,1,beta,gmma,flag) plot_epidemicDynamics(St2,It2,Rt2) St3,It3,Rt3,xt3 = run_SIR(G3,1,beta,gmma,flag) plot_epidemicDynamics(St3,It3,Rt3) ###Output _____no_output_____ ###Markdown As we allow for greater variance in the degree distribution, we see that the pandemic has a much greater 'success'... Not the correct usage of that word really -- the infection does its job and infects more and faster. It really exhausts through the availability of S very rapidly. Obviously, as we raise the average degree, the infection will run more quickly and infecting more overall, but here we've shown that simply by varying the variance (haha). Problem 2-C (20 pts)High degree nodes, which are more common under high variance degree distributions, play a special role in epidemics. Let's investigate.* Using the `xt` output variable from your `run_SIR()` function, make three scatter plots of the time of infection $t_i$ as a function of node degree $k_i$, one for each network.* For each plot, calculate and report the correlation coefficient `r = np.corrcoef(x,y)[0,1]` between your $x$ and $y$ variables.* Finally, discuss any patterns you see within and across your high/medium/low variance networks from this perspective, and use the correlation coefficients you calculate to offer an explanation (in terms of the network structure and the rules of the SIR epidemic) as to why that pattern exists.Hint: Don't plot values where $t_i=-1$, which is a special value indicating that node $i$ was never infected. 
###Code # YOUR CODE xt1n = [] x1n = [] xt2n = [] x2n = [] xt3n = [] x3n = [] for timepoint, degree in zip(xt1.values(), x1): if timepoint != -1: xt1n.append(timepoint) x1n.append(degree) for timepoint, degree in zip(xt2.values(), x2): if timepoint != -1: xt2n.append(timepoint) x2n.append(degree) for timepoint, degree in zip(xt3.values(), x3): if timepoint != -1: xt3n.append(timepoint) x3n.append(degree) plt.scatter([xt1[val] for val in xt1], x1, label='low variance') plt.scatter([xt2[val] for val in xt2], x2, label='med variance') plt.scatter([xt3[val] for val in xt3], x3, label='high variance') plt.legend() ###Output _____no_output_____ ###Markdown In the high variance version, the infected timepoints seem to be much lower. As we lower the variance, they stretch out, and we 'flatten the curve'. *** Problem 2-D (*10 pts extra credit*) : Flattening an epidemicComplete this section of the in-class lab.Using your SIR simulation, conduct the following three mini-experiments. For these, use the built-in `.gnp_random_graph(n, p)` network generator in `networkx`. Recall that `p` is the probability that an edge exists, and we choose it by first choosing the _mean degree_ $c$ (see Lecture Notes 3). The idea here is that the first experiment sets up your baseline for an SIR epidemic on a network, and then we'll first vary one parameter, and then vary a different parameter, to investigate the effects these parameters have on the dynamics of the epidemic. To do this, you'll want to plot the $S(t)$, $I(t)$, $R(t)$ functions after each simulation.Experiment 3.1: simulate a basic epidemic:* set $n=200$ and $c=6$* note around what time step $I(t)$ peaks, and how many time steps it mostly spans.Experiment 3.2: simulate the effect of reduced transmission (e.g., hand washing!):* lower $\beta$; keep $\gamma$ the same, $n=200$ and $c=6$Experiment 3.3: simulate the effect of "social distancing" (e.g., self-quarantine, staying at home, etc.):* lower $c$ in $G(n,p)$; keep $\beta=\gamma$, $n=200$As before, you'll want to run each setting several times to get a sense of what the epidemic tends to do. Start by lowering $\beta$ and $c$ only a little, and then keep lowering them until you see something change. Then, discuss with your teammates what you found, and answer the following specific questions:1. What effect does lowering $\beta$ (reducing the likelihood of transmission) have on $I(t)$, compared to experiment 3.1? How much reduced transmission would you need _everyone_ to do to achieve a real "flattening" of the epidemic? Do you think that's realistic?* What effect does lowering $c$ (reducing the likelihood of transmission) have on $I(t)$?, compared to experiment 3.1? How much social distancing would you need_everyone_ to do to achieve the results you found in experiment 3.3? (Consider what a typical $c$ might be for _you_ during a normal day. Is $c=6$ realistic?)* Which has a larger effect on $I(t)$, reduced transmission or social distancing? ###Code # YOUR CODE ###Output _____no_output_____
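###Markdown Since the extra-credit cell above was left empty, here is a minimal sketch of how the three mini-experiments could be set up, reusing `run_SIR()` and `plot_epidemicDynamics()` from above with p = c/(n-1) in G(n,p). The specific beta and c values below are illustrative assumptions, not prescribed answers. ###Code # Sketch of Problem 2-D (illustrative parameter choices, not a full solution)
n = 200

# Experiment 3.1: baseline epidemic, c = 6, beta = gamma = 0.4 (assumed baseline)
G = nx.gnp_random_graph(n, 6 / (n - 1))
St, It, Rt, xt = run_SIR(G, 1, 0.4, 0.4, 0)
plot_epidemicDynamics(St, It, Rt)

# Experiment 3.2: reduced transmission -- lower beta, same gamma and same c
G = nx.gnp_random_graph(n, 6 / (n - 1))
St, It, Rt, xt = run_SIR(G, 1, 0.2, 0.4, 0)
plot_epidemicDynamics(St, It, Rt)

# Experiment 3.3: social distancing -- lower mean degree c, keep beta = gamma
G = nx.gnp_random_graph(n, 3 / (n - 1))
St, It, Rt, xt = run_SIR(G, 1, 0.4, 0.4, 0)
plot_epidemicDynamics(St, It, Rt)
###Output _____no_output_____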
DataSet.ipynb
###Markdown ###Code import pandas as pd import numpy as np from google.colab import drive drive.mount('/content/gdrive') df=pd.read_csv('gdrive/My Drive/Colab Notebooks/Fraud/Fraud/fraud_data.csv') import datetime START_DATE = '2017-12-01' startdate = datetime.datetime.strptime(START_DATE, "%Y-%m-%d") df["Date"] = df['TransactionDT'].apply(lambda x: (startdate + datetime.timedelta(seconds=x))) df['_Weekdays'] = df['Date'].dt.dayofweek df['_Hours'] = df['Date'].dt.hour df['_Days'] = df['Date'].dt.day df['_Month'] = df['Date'].dt.month df.drop('TransactionDT', axis=1, inplace=True) train=pd.read_csv(הקובץ אחרי כל השינויים) train['_Month']=df['_Month'] pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 500) pd.set_option('display.width', 1000) df.head() df['_Hours'] df.to_csv('gdrive/My Drive/fraud_dataset.csv',index=False,compression='gzip') df=pd.read_csv('gdrive/My Drive/fraud_dataset.csv',compression='gzip') df=df1 df.loc[df['id_30'].str.contains('Windows', na=False), 'id_30'] = 'Windows' df.loc[df['id_30'].str.contains('iOS', na=False), 'id_30'] = 'iOS' df.loc[df['id_30'].str.contains('Mac OS', na=False), 'id_30'] = 'Mac' df.loc[df['id_30'].str.contains('Android', na=False), 'id_30'] = 'Android' df.loc[df['id_31'].str.contains('chrome', na=False), 'id_31'] = 'Chrome' df.loc[df['id_31'].str.contains('firefox', na=False), 'id_31'] = 'Firefox' df.loc[df['id_31'].str.contains('safari', na=False), 'id_31'] = 'Safari' df.loc[df['id_31'].str.contains('edge', na=False), 'id_31'] = 'Edge' df.loc[df['id_31'].str.contains('ie', na=False), 'id_31'] = 'IE' df.loc[df['id_31'].str.contains('samsung', na=False), 'id_31'] = 'Samsung' df.loc[df['id_31'].str.contains('opera', na=False), 'id_31'] = 'Opera' emails = {'gmail': 'google', 'att.net': 'att', 'twc.com': 'spectrum', 'scranton.edu': 'other', 'optonline.net': 'other', 'hotmail.co.uk': 'microsoft', 'comcast.net': 'other', 'yahoo.com.mx': 'yahoo', 'yahoo.fr': 'yahoo', 'yahoo.es': 'yahoo', 'charter.net': 'spectrum', 'live.com': 'microsoft', 'aim.com': 'aol', 'hotmail.de': 'microsoft', 'centurylink.net': 'centurylink', 'gmail.com': 'google', 'me.com': 'apple', 'earthlink.net': 'other', 'gmx.de': 'other', 'web.de': 'other', 'cfl.rr.com': 'other', 'hotmail.com': 'microsoft', 'protonmail.com': 'other', 'hotmail.fr': 'microsoft', 'windstream.net': 'other', 'outlook.es': 'microsoft', 'yahoo.co.jp': 'yahoo', 'yahoo.de': 'yahoo', 'servicios-ta.com': 'other', 'netzero.net': 'other', 'suddenlink.net': 'other', 'roadrunner.com': 'other', 'sc.rr.com': 'other', 'live.fr': 'microsoft', 'verizon.net': 'yahoo', 'msn.com': 'microsoft', 'q.com': 'centurylink', 'prodigy.net.mx': 'att', 'frontier.com': 'yahoo', 'anonymous.com': 'other', 'rocketmail.com': 'yahoo', 'sbcglobal.net': 'att', 'frontiernet.net': 'yahoo', 'ymail.com': 'yahoo', 'outlook.com': 'microsoft', 'mail.com': 'other', 'bellsouth.net': 'other', 'embarqmail.com': 'centurylink', 'cableone.net': 'other', 'hotmail.es': 'microsoft', 'mac.com': 'apple', 'yahoo.co.uk': 'yahoo', 'netzero.com': 'other', 'yahoo.com': 'yahoo', 'live.com.mx': 'microsoft', 'ptd.net': 'other', 'cox.net': 'other', 'aol.com': 'aol', 'juno.com': 'other', 'icloud.com': 'apple'} us_emails = ['gmail', 'net', 'edu'] for c in ['P_emaildomain', 'R_emaildomain']: df[c + '_bin'] = df[c].map(emails) df[c + '_suffix'] = df[c].map(lambda x: str(x).split('.')[-1]) df[c + '_suffix'] = df[c + '_suffix'].map(lambda x: x if str(x) not in us_emails else 'us') df.loc[:,"R_emaildomain"].mode() df['P_emaildomain'].mode() import gc def 
setDevice(df): df['device_name'] = df['DeviceInfo'].str.split('/', expand=True)[0] df.loc[df['device_name'].str.contains('SM', na=False), 'device_name'] = 'Samsung' df.loc[df['device_name'].str.contains('SAMSUNG', na=False), 'device_name'] = 'Samsung' df.loc[df['device_name'].str.contains('GT-', na=False), 'device_name'] = 'Samsung' df.loc[df['device_name'].str.contains('Moto G', na=False), 'device_name'] = 'Motorola' df.loc[df['device_name'].str.contains('Moto', na=False), 'device_name'] = 'Motorola' df.loc[df['device_name'].str.contains('MOTO ', na=False), 'device_name'] = 'Motorola' df.loc[df['device_name'].str.contains('LG-', na=False), 'device_name'] = 'LG' df.loc[df['device_name'].str.contains('rv:', na=False), 'device_name'] = 'RV' df.loc[df['device_name'].str.contains('Huawei', na=False), 'device_name'] = 'Huawei' df.loc[df['device_name'].str.contains('ALE-', na=False), 'device_name'] = 'Huawei' df.loc[df['device_name'].str.contains('-L', na=False), 'device_name'] = 'Huawei' df.loc[df['device_name'].str.contains('Blade', na=False), 'device_name'] = 'ZTE' df.loc[df['device_name'].str.contains('BLADE', na=False), 'device_name'] = 'ZTE' df.loc[df['device_name'].str.contains('Linux', na=False), 'device_name'] = 'Linux' df.loc[df['device_name'].str.contains('XT', na=False), 'device_name'] = 'Sony' df.loc[df['device_name'].str.contains('HTC', na=False), 'device_name'] = 'HTC' df.loc[df['device_name'].str.contains('ASUS', na=False), 'device_name'] = 'Asus' # df.loc[df.device_name.isin(df.device_name.value_counts()[df.device_name.value_counts() < 200].index), 'device_name'] = "Others" df['had_id'] = 1 gc.collect() return df df=setDevice(df) df1["device_name"].isnull().sum() print(df["device_name"].unique()) df3.loc[:,"device_name"].mode() df['device_name'].mode() df=df2 df.loc[df.id_31.isin(df.id_31.value_counts()[df.id_31.value_counts() < 200].index), 'id_31'] = "Others" df['id_31'].mode() df.loc[df.C2.isin(df.C2\ .value_counts()[df.C2.value_counts() <= 350 ]\ .index), 'C2'] = "Others" df['C2'].mode() df['C2'].unique() df.loc[df.C1.isin(df.C1\ .value_counts()[df.C1.value_counts() <= 400 ]\ .index), 'C1'] = "Others" df['C1'].mode() df2=df df=df2 df.loc[df.addr1.isin(df.addr1.value_counts()[df.addr1.value_counts() <=4000 ].index), 'addr1'] = "Others" df.loc[df.addr2.isin(df.addr2.value_counts()[df.addr2.value_counts() <= 50 ].index), 'addr2'] = "Others" print(df["addr1"].unique()) df['addr1'].mode() df['addr2'].unique() df.loc[df.card3.isin(df.card3.value_counts()[df.card3.value_counts() <300].index), 'card3'] = "Others" df['card3'].mode() df.loc[df.card5.isin(df.card5.value_counts()[df.card5.value_counts() < 500].index), 'card5'] = "Others" df['card5'].mode() df.to_csv('gdrive/My Drive/dataSET.csv',index=False,compression='gzip') data=pd.read_csv('gdrive/My Drive/dataSET.csv',compression='gzip') int_cols=[] float_cols=data.select_dtypes(include=['floating']).columns pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 500) pd.set_option('display.width', 1000) for col in float_cols: flo=df[col].dropna() float_cols = data.select_dtypes(include=['float']) float_cols = float_cols.fillna(float_cols.median().round()) # median imputation col_should_be_int = float_cols.applymap(float.is_integer).all() float_to_int_cols = col_should_be_int[col_should_be_int].index float_to_int_cols list(float_to_int_cols) df.loc[:, float_to_int_cols] = float_cols[float_to_int_cols].astype(int) ###Output _____no_output_____
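###Markdown The rare-category grouping used above repeats the same `value_counts` pattern for `C1`, `C2`, `addr1`, `addr2`, `card3`, `card5` and `id_31`. As a minimal sketch (the helper name `group_rare` is made up for this illustration, and the thresholds in the comments only mirror the mix of < and <= cutoffs used above approximately), that pattern can be factored into one function: ###Code
def group_rare(frame, col, min_count, other="Others"):
    # Replace categories that occur fewer than min_count times with a single "Others" label.
    counts = frame[col].value_counts()
    rare = counts[counts < min_count].index
    frame.loc[frame[col].isin(rare), col] = other
    return frame

# Roughly equivalent to the per-column cells above, e.g.:
# df = group_rare(df, 'C2', 350)
# df = group_rare(df, 'addr1', 4000)
###Output _____no_output_____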
001-Jupyter/001-Tutorials/001-Basic-Tutorials/001-IPython-Kernel/Beyond Plain Python.ipynb
###Markdown IPython: beyond plain Python When executing code in IPython, all valid Python syntax works as-is, but IPython provides a number of features designed to make the interactive experience more fluid and efficient. First things first: running code, getting help In the notebook, to run a cell of code, hit `Shift-Enter`. This executes the cell and puts the cursor in the next cell below, or makes a new one if you are at the end. Alternately, you can use: - `Alt-Enter` to force the creation of a new cell unconditionally (useful when inserting new content in the middle of an existing notebook).- `Control-Enter` executes the cell and keeps the cursor in the same cell, useful for quick experimentation of snippets that you don't need to keep permanently. ###Code print("Hi") ###Output _____no_output_____ ###Markdown Getting help: ###Code ? ###Output _____no_output_____ ###Markdown Typing `object_name?` will print all sorts of details about any object, including docstrings, function definition lines (for call arguments) and constructor details for classes. ###Code import collections collections.namedtuple? collections.Counter?? *int*? ###Output _____no_output_____ ###Markdown An IPython quick reference card: ###Code %quickref ###Output _____no_output_____ ###Markdown Tab completion Tab completion, especially for attributes, is a convenient way to explore the structure of any object you’re dealing with. Simply type `object_name.` to view the object’s attributes. Besides Python objects and keywords, tab completion also works on file and directory names. ###Code # collections. ###Output _____no_output_____ ###Markdown The interactive workflow: input, output, history ###Code 2+10 _+10 ###Output _____no_output_____ ###Markdown You can suppress the storage and rendering of output if you append `;` to the last cell (this comes in handy when plotting with matplotlib, for example): ###Code 10+20; _ ###Output _____no_output_____ ###Markdown The output is stored in `_N` and `Out[N]` variables: ###Code _9 == Out[9] ###Output _____no_output_____ ###Markdown Previous inputs are available, too: ###Code In[11] _i %history -n 1-5 ###Output _____no_output_____ ###Markdown **Exercise**Write the last 10 lines of history to a file named `log.py`. Accessing the underlying operating system ###Code !pwd files = !ls print("My current directory's files:") print(files) !echo $files !echo {files[0].upper()} ###Output _____no_output_____ ###Markdown Note that all this is available even in multiline blocks: ###Code import os for i,f in enumerate(files): if f.endswith('ipynb'): !echo {"%02d" % i} - "{os.path.splitext(f)[0]}" else: print('--') ###Output _____no_output_____ ###Markdown Beyond Python: magic functions The IPyhton 'magic' functions are a set of commands, invoked by prepending one or two `%` signs to their name, that live in a namespace separate from your normal Python variables and provide a more command-like interface. They take flags with `--` and arguments without quotes, parentheses or commas. The motivation behind this system is two-fold: - To provide an orthogonal namespace for controlling IPython itself and exposing other system-oriented functionality.- To expose a calling mode that requires minimal verbosity and typing while working interactively. Thus the inspiration taken from the classic Unix shell style for commands. 
###Code %magic ###Output _____no_output_____ ###Markdown Line vs cell magics: ###Code %timeit list(range(1000)) %%timeit list(range(10)) list(range(100)) ###Output _____no_output_____ ###Markdown Line magics can be used even inside code blocks: ###Code for i in range(1, 5): size = i*100 print('size:', size, end=' ') %timeit list(range(size)) ###Output _____no_output_____ ###Markdown Magics can do anything they want with their input, so it doesn't have to be valid Python: ###Code %%bash echo "My shell is:" $SHELL echo "My disk usage is:" df -h ###Output _____no_output_____ ###Markdown Another interesting cell magic: create any file you want locally from the notebook: ###Code %%writefile test.txt This is a test file! It can contain anything I want... And more... !cat test.txt ###Output _____no_output_____ ###Markdown Let's see what other magics are currently defined in the system: ###Code %lsmagic ###Output _____no_output_____ ###Markdown Running normal Python code: execution and errors Not only can you input normal Python code, you can even paste straight from a Python or IPython shell session: ###Code >>> # Fibonacci series: ... # the sum of two elements defines the next ... a, b = 0, 1 >>> while b < 10: ... print(b) ... a, b = b, a+b In [1]: for i in range(10): ...: print(i, end=' ') ...: ###Output _____no_output_____ ###Markdown And when your code produces errors, you can control how they are displayed with the `%xmode` magic: ###Code %%writefile mod.py def f(x): return 1.0/(x-1) def g(y): return f(y+1) ###Output _____no_output_____ ###Markdown Now let's call the function `g` with an argument that would produce an error: ###Code import mod #mod.g(0) %xmode plain #mod.g(0) %xmode verbose #mod.g(0) ###Output _____no_output_____ ###Markdown The default `%xmode` is "context", which shows additional context but not all local variables. Let's restore that one for the rest of our session. ###Code %xmode context ###Output _____no_output_____ ###Markdown Running code with %run ###Code %%writefile script.py x = 10 y = 20 z = x+y print('z is: %s' % z) %run script x ###Output _____no_output_____ ###Markdown Running code in other languages with special `%%` magics ###Code %%perl @months = ("July", "August", "September"); print $months[0]; %%ruby name = "world" puts "Hello #{name.capitalize}!" ###Output _____no_output_____ ###Markdown Raw Input in the notebook Since 1.0 the IPython notebook web application support `raw_input` which for example allow us to invoke the `%debug` magic in the notebook: ###Code #mod.g(0) #%debug ###Output _____no_output_____ ###Markdown Don't foget to exit your debugging session. Raw input can of course be use to ask for user input: ###Code #enjoy = input('Are you enjoying this tutorial? ') #print('enjoy is:', enjoy) ###Output _____no_output_____ ###Markdown Plotting in the notebook This magic configures matplotlib to render its figures inline: ###Code %matplotlib inline import numpy as np import matplotlib.pyplot as plt x = np.linspace(0, 2*np.pi, 300) y = np.sin(x**2) plt.plot(x, y) plt.title("A little chirp") fig = plt.gcf() # let's keep the figure object around for later... 
###Output _____no_output_____ ###Markdown The IPython kernel/client model ###Code %connect_info ###Output _____no_output_____ ###Markdown We can connect automatically a Qt Console to the currently running kernel with the `%qtconsole` magic, or by typing `ipython console --existing ` in any terminal: ###Code #%qtconsole ###Output _____no_output_____ ###Markdown Cleanup ###Code !rm -f test.txt !rm -f mod.py !rm -f script.py ###Output _____no_output_____
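###Markdown One thing not covered above is that you can also register your own magics from within a session. A minimal sketch, assuming a reasonably recent IPython (the `%shout` magic below is made up purely for illustration): ###Code
from IPython.core.magic import register_line_magic

@register_line_magic
def shout(line):
    """Toy line magic: returns its argument upper-cased."""
    return line.upper()

# Usage, in a separate cell: %shout beyond plain python
###Output _____no_output_____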
recommend/bx_recommender.ipynb
###Markdown Build a Recommender System for BooksThis notebook demonstrates the use of many actions within the Recommender System (recommend)action set for SAS Cloud Analytic Services (CAS).This example uses explicit ratings. The data set is the Book-Crossing data set[1](data_attribution). The data preparation excludes the implicit ratings and also excludes ratings that do not match an ISBN in the books data set.You must have access to a SAS Viya 3.3 release of CAS. To connect to CAS from Python, you must install the SAS Scripting Wrapper for Analytics Transfer (SWAT).* For information about SWAT, including installation, see [Python-SWAT](https://sassoftware.github.io/python-swat/).* For information about the CAS actions used in this example, see [Recommender System Action Set: Details](http://documentation.sas.com?cdcId=pgmsascdc&cdcVersion=9.4_3.3&docsetId=casanpg&docsetTarget=n05bxq1zhsoxzun1padoh6m829y0.htm) in the *SAS Visual Analytics 8.2: Programming Guide*.Copyright SAS Institute, Inc.---- Notebook contents1. [Initial setup](initial_setup)Import packages, including the SAS Wrapper for Analytic Transfer (SWAT) and open source libraries &cir; Connect to CAS and start a session &cir; Import the books file &cir; Import the ratings file &cir; Upload the data frames to the server 2. [Simple exploration](simple_explore)Calculate the sparsity &cir; View the ratings distribution 3. [Build the recommender system](build_recommender)Partition the ratings table &cir; Calculate average ratings by item and user &cir; Explore the item ratings and user ratings4. [Build a matrix factorization model](build_matrixfactorization)Sample the data for a hold-out group &cir; Build the model using ALS &cir; Make recommendations for one user5. [Build a KNN model](build_knn)Calculate similarity between users &cir; View rating history for similar users &cir; Recommend 10 books for one user6. [Combine search with recommendations](build_searchindex)Build a search index &cir; Create a simple filter table &cir; Make recommendations from the filter table Initial Setup Import Packages: SAS Wrapper for Analytic Transfer (SWAT) and Open Source Libraries ###Code import html import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import re import swat from IPython.core.display import display, HTML, Markdown from io import StringIO %matplotlib inline ###Output _____no_output_____ ###Markdown Connect to CAS and start a session ###Code s = swat.CAS("host.example.com", 5570); s.loadactionset("dataPreprocess") s.loadactionset("fedSql") s.loadactionset("recommend") data_dir = '/path/to/data/' ratings_file = data_dir + 'BX-Book-Ratings.csv' books_file = data_dir + 'BX-Books.csv' ###Output NOTE: Added action set 'dataPreprocess'. NOTE: Added action set 'fedSql'. NOTE: Added action set 'recommend'. ###Markdown Import the books fileThis file include HTML entities such as &amp;amp; that interfere with parsing. Keep thebooks with 10 characters for the ISBN. 
###Code with open(books_file, 'r', encoding='iso-8859-1') as f: content = html.unescape(f.read()) books_df = pd.read_csv(StringIO(content), header=0, error_bad_lines=False, sep=';', encoding='iso-8859-1', names=['ISBN', 'Title', 'Author', 'Year_Of_Publication', 'Publisher'], usecols=['ISBN', 'Title', 'Author', 'Year_Of_Publication', 'Publisher'], dtype={'Year_Of_Publication': str}) books_df = books_df[books_df.ISBN.str.len() == 10] books_df.head() ###Output _____no_output_____ ###Markdown Import the ratings file The regex is used to skip invalid lines and to skip ratings with a 0. This exampleuses explicit ratings only. ###Code pattern = re.compile('"\d+";"[0-9X]{10}";"[1-9][0]?"') buffer = StringIO() with open(ratings_file, 'r', encoding='iso-8859-1') as f: for line in f: if pattern.match(line): buffer.write(line) buffer.seek(0) ratings_df = pd.read_csv(buffer, skiprows=0, sep=';', names=['User_ID', 'ISBN', 'Rating'], dtype={'User_ID': str, 'ISBN': str, 'Rating': int}) buffer.close() ratings_df.drop_duplicates(inplace=True) ratings_df.head() ###Output _____no_output_____ ###Markdown Finally, upload the data frames to the server Because the ISBNs that are used from the data set are all 10 characters long, it is moreefficient to use a fixed-width size for them. The author and title strings vary greatly, soVARCHAR is a better choice for those two columns. ###Code ratings = s.upload_frame(ratings_df, casout=s.CASTable('ratings', replace=True, indexVars=['isbn']), importoptions=dict(filetype="csv", vars=[ dict(name="User_ID", type="double"), dict(name="ISBN", type="CHAR", length=10), dict(name="Rating", type="double") ])) books = s.upload_frame(books_df, casout=s.CASTable('books', replace=True), importoptions=dict(filetype="csv", vars=[dict(name="ISBN", type="CHAR", length=10)])) display(Markdown('### Books')) display(books.table.columninfo()) display(Markdown('### Ratings')) display(ratings.table.columninfo()) ###Output NOTE: Cloud Analytic Services made the uploaded file available as table RATINGS in caslib CASUSERHDFS(mimcki). NOTE: The table RATINGS has been created in caslib CASUSERHDFS(mimcki) from binary data uploaded to Cloud Analytic Services. NOTE: Cloud Analytic Services made the uploaded file available as table BOOKS in caslib CASUSERHDFS(mimcki). NOTE: The table BOOKS has been created in caslib CASUSERHDFS(mimcki) from binary data uploaded to Cloud Analytic Services. ###Markdown Discard any ratings that do not have a corresponding ISBN in the books table. ###Code original_row_count = len(ratings) s.dataStep.runCode(code=''' data ratings; merge ratings(in=ratings) books(in=books keep=isbn); by isbn; if books and ratings then output; run; ''') final_row_count = len(ratings) df = pd.DataFrame([[original_row_count], [final_row_count]], columns=['Ratings Count'], index=['Original', 'Final']) df ###Output _____no_output_____ ###Markdown Confirm there are no missing values for ratingsCheck that the value for the NMiss column in the results is 0. ###Code ratings['rating'].describe(stats=['mean', 'count', 'nmiss']) ###Output _____no_output_____ ###Markdown Simple exploration Calculate the sparsitySparsity of ratings is a common problem with recommender systems. ###Code out = ratings.simple.distinct().Distinct.set_index('Column') out # Store the number of rows in a variable rating_count = len(ratings) result = ratings.simple.distinct().Distinct.set_index('Column') # Store the distinct number of users. user_count = result.loc['User_ID', 'NDistinct'] # Store the distinct number of items. 
item_count = result.loc['ISBN', 'NDistinct'] # Finally, here's the sparsity. sparsity = 1.0 - (rating_count / (user_count * item_count)) df = pd.DataFrame([rating_count, user_count, item_count, sparsity], index=['Ratings', 'Users', 'Items', 'Sparsity'], columns=['Value']) df ###Output _____no_output_____ ###Markdown View the distribution of ratings ###Code results = ratings['rating'].value_counts(sort=False) results ax = results.plot.bar( title='Distribution of Ratings', figsize=(15,5) ) ###Output _____no_output_____ ###Markdown Build the recommender system Partition the tablesSubsequent actions are more efficient if the ratings table is partitioned once by the itemand a second table is partitioned by the user.If you do not perform this step now, then many of the subsequent actions will automaticallymake a copy of the data and group by the item or the user. If that is done once, it isconvenient. However the notebook shows several actions so the data transfer and reorganization is done once so that subsequent actions are more memory and CPU efficient. ###Code ratings_by_item = s.CASTable('ratings_by_item', replace=True) result = ratings.groupby('ISBN').partition(casout=ratings_by_item) ratings_by_user = s.CASTable('ratings_by_user', replace=True) result = ratings.groupby('User_ID').partition(casout=ratings_by_user) ###Output _____no_output_____ ###Markdown Now that we have two instances of the ratings table, partitioned by different variables,we can drop the original ratings table. ###Code ratings.table.droptable() book_recommend = s.CASTable('bookRecommend', replace=True) results = s.recommend.recomCreate( system=book_recommend, user='User_ID', item='ISBN', rate='Rating') ###Output _____no_output_____ ###Markdown Determine average ratings by user and itemFor the average ratings by user, specify the ratings table that is partitioned by user. If the table is not already partitioned by user, then the action will temporarily group the data for you. However, it slows the action. ###Code avg_user = s.CASTable('avg_user', replace=True) results = ratings_by_user.recommend.recomRateinfo( label='avg_user_model', system=book_recommend, id='User_ID', sparseid='ISBN', sparseval='rating', casout=avg_user) result = avg_user.head() result ###Output _____no_output_____ ###Markdown You can view the ratings to confirm that the average is shown. If the first userhas a single rating, you can rerun the preceding cell with `avg_user.query("_nrating_ = 2").head()` or a related query. ###Code firstUser = result.loc[0,'User_ID'] count = result.loc[0,'_NRatings_'] ratings_by_user[ratings_by_user.user_id == firstUser].head(count) ###Output _____no_output_____ ###Markdown Create an average ratings by item table. ###Code avg_item = s.CASTable('avg_item', replace=True) results = ratings_by_item.recommend.recomRateinfo( label='avg_item_model', system=book_recommend, id='isbn', sparseid='user_id', sparseval='rating', casOut=avg_item) avg_item.head() ###Output _____no_output_____ ###Markdown Explore item ratings and user ratingsThe tables that are created with the `recomrateinfo` action can beused for simple data exploration. 
Discerning reviewers ###Code avg_user.query('_nratings_ > 3').sort_values('_stat_').head(10) ###Output _____no_output_____ ###Markdown Generous reviewers ###Code avg_user.sort_values(['_stat_', '_nratings_'], ascending=False).head(10) ###Output _____no_output_____ ###Markdown Ten most frequently reviewed books ###Code s.fedSql.execDirect(query=''' select t1.isbn, t1._stat_ as "Average Rating", t1._nratings_ as "Number of Ratings", t2.author, t2.title from avg_item as t1 join books as t2 on (t1.isbn = t2.isbn) order by 3 desc limit 10 ''') ###Output _____no_output_____ ###Markdown Frequently reviewed books with low ratings ###Code result = avg_item.query('_nratings_ > 10').sort_values('_stat_').head(10) result #Store the ISBN for the first row. first_isbn = result.loc[0, 'ISBN'] result = ratings_by_item['rating'].query("isbn eq '%s'" % first_isbn).dataPreprocess.histogram() display(Markdown('#### Ratings Distribution for ISBN %s' % first_isbn)) display(result.BinDetails.loc[:, ['BinLowerBnd', 'NInBin', 'Percent']]) ###Output _____no_output_____ ###Markdown Build a matrix factorization modelFirst, create a hold-out group. From a random selection of 20% of users, hold out 1 rating.After that, create the model. ###Code holdout_users = s.CASTable('holdout_users', replace=True) ratings_by_user.recommend.recomSample( system=book_recommend, label='holdout_users', withhold=.2, hold=1, seed=1234, id='user_id', sparseid='isbn', casout=holdout_users ) holdout_users.head(10) als_u = s.CASTable('als_u', replace=True) als_i = s.CASTable('als_i', replace=True) result = s.recommend.recomAls( system=book_recommend, tableu=ratings_by_user, tablei=ratings_by_item, label='als1', casoutu=als_u, casouti=als_i, rateinfo=avg_user, maxiter=20, hold=holdout_users, seed=1234, details=True, k=20, stagnation=10, threshold=.1 ) result.ModelInfo.set_index('Descr') ax = result.IterHistory.plot( x='Iteration', y='Objective', title='Objective Function', figsize=(9,6) ) result.IterHistory.set_index('Iteration') ###Output _____no_output_____ ###Markdown Make recommendations for one user ###Code users= '104437' recommendations = s.CASTable('recommendations', replace=True) s.recommend.recomMfScore( system=book_recommend, label='als1', userlist=users, n=5, casout=recommendations ) s.fedSql.execDirect(query=''' select t1.*, t2.author, t2.title, t3._stat_ as "Average Rating", t3._nratings_ as "Number of Ratings" from recommendations as t1 left outer join books as t2 on (t1.isbn = t2.isbn) left outer join avg_item as t3 on (t1.isbn = t3.isbn) order by user_id, _rank_ ''') ###Output _____no_output_____ ###Markdown Make recommendations for hold-out users ###Code recommend_heldout = s.CASTable('recommend_heldout', replace=True) s.recommend.recomMfScore( system=book_recommend, label='als1', usertable=holdout_users, n=5, casout=recommend_heldout ) result = s.fedsql.execdirect(query=''' select t1.*, t2.author, t2.title, t3._stat_ as "Average Rating", t3._nratings_ as "Number of Ratings" from recommend_heldout as t1 left outer join books as t2 on (t1.isbn = t2.isbn) left outer join avg_item as t3 on (t1.isbn = t3.isbn) order by user_id, _rank_ ''') # There are many rows in the results. Print results for the first three users only. 
three = result['Result Set'].loc[[0,5,10],:'User_ID'].values for user in np.nditer(three): display(Markdown('#### Recommendations for user %s ' % user)) display(result['Result Set'].query('User_ID == %s' % user)) ###Output _____no_output_____ ###Markdown Build a KNN model Calculate the similarity between users ###Code similar_users = s.CASTable("similar_users", replace=True) ratings_by_user.recommend.recomSim( label="similar_users", system=book_recommend, id="user_id", sparseId="isbn", sparseVal="rating", measure="cos", casout=similar_users, threshold=.2) ###Output _____no_output_____ ###Markdown View the similarity for one pair of usersIn this case, these two users read three of the same books. They rated the threeat 7 and above. ###Code result = similar_users.query("user_id_1 = 104437 and user_id_2 = 199981").head(1) display(result) def one_users_ratings(user_id): result = s.fedSql.execDirect(query=''' select t1.*, t2.author, t2.title from ratings_by_user as t1 left outer join books as t2 on (t1.isbn = t2.isbn) where t1.user_id = {} order by author, isbn; '''.format(user_id)) display(Markdown('#### Ratings by user %s' % user_id)) display(result) one_users_ratings(104437) one_users_ratings(199981) ###Output _____no_output_____ ###Markdown Calculate KNN based on user's similar ratings ###Code ratings_by_item.recommend.recomKnnTrain( label='knn1', system=book_recommend, similarity=similar_users, k=20, hold=holdout_users, rateinfo=avg_user, user=True # need to tell if similarity is for the user or the item ) users = ['104437'] knn_recommended = s.CASTable("knn_recommended", replace=True) s.recommend.recomKnnScore( system="bookRecommend", label="knn1", userList=users, n=10, casout=knn_recommended ) s.fedSql.execDirect( query=''' select t1.*, t2.author, t2.title, t3._stat_ as "Average Rating", t3._nratings_ as "Number of Ratings" from knn_recommended as t1 left outer join books as t2 on (t1.isbn = t2.isbn) left outer join avg_item as t3 on (t1.isbn = t3.isbn) order by user_id, _rank_; ''') ###Output _____no_output_____ ###Markdown Combine search with recommendations Search can be included with recommendationsFirst, build the search index. * The recomSearchIndex action generates a global-scope table that is named the same as the label parameter.* The generated index table is always appended when recomSearchIndex is run again with the same label. This can generate duplicate documents in the index table.* To avoid duplicates, the table.dropTable action is run first. The quiet=True parameter is used to ignore whether the table exists or not.Afterward, run search queries for terms. 
###Code book_search = s.CASTable("book_search", replace=True) book_search.table.droptable(quiet=True) books.recommend.recomSearchIndex( system=book_recommend, label='book_search', id='isbn') yoga_query = 'yoga fitness' query_filter = s.CASTable("query_filter", replace=True) result = book_search.recommend.recomSearchQuery( system=book_recommend, label='book_search', casout=query_filter, query=yoga_query, n=100) query_filter.columnInfo() yoga_reader = '99955' filtered_results = s.CASTable('filtered_results', replace=True) filtered_results = s.recommend.recomMfScore( system=book_recommend, label='als1', filter=query_filter, userlist=yoga_reader, n=5, casout=filtered_results ) s.fedSql.execDirect(query=''' select t1.*, t2.author, t2.title, t3._stat_ as "Average Rating", t3._nratings_ as "Number of Ratings" from filtered_results as t1 left outer join books as t2 on (t1.isbn = t2.isbn) left outer join avg_item as t3 on (t1.isbn = t3.isbn) order by user_id, _rank_; ''') #s.close() ###Output _____no_output_____
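###Markdown For intuition about the `measure="cos"` option passed to `recomSim` above, the similarity reported is (up to how unrated items are handled) the cosine between two users' rating vectors. The NumPy sketch below is an illustration only, using a dense toy example rather than the CAS action itself: ###Code
import numpy as np

def cosine_similarity(u, v):
    # Cosine of the angle between two rating vectors.
    return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

# Two users who rated the same three books highly:
user_a = np.array([7.0, 8.0, 9.0])
user_b = np.array([7.0, 9.0, 10.0])
print(cosine_similarity(user_a, user_b))
###Output _____no_output_____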
IPython-parallel-tutorial/Overview.ipynb
###Markdown Overview and getting started IntroductionLet's start with an overview of IPython's architecture for paralleland distributed computing. This architecture abstracts out parallelismin a very general way, which enables IPython to support many differentstyles of parallelism including:- Single program, multiple data (SPMD) parallelism- Multiple program, multiple data (MPMD) parallelism- Message passing using MPI or ØMQ- Task farming- Data parallel- Coordination of distributed processes- Combinations of these approaches- Custom user defined approachesMost importantly, IPython enables all types of parallel applications tobe developed, executed, debugged and monitored *interactively*. Hence,the `I` in `IPython`. Some example use cases for`IPython.parallel`:- Quickly parallelize algorithms that are embarrassingly parallel using a number of simple approaches. Many simple things can be parallelized interactively in one or two lines of code.- Steer traditional MPI applications on a supercomputer from an IPython session on your laptop.- Analyze and visualize large datasets (that could be remote and/or distributed) interactively using IPython and tools like matplotlib/TVTK.- Develop, test and debug new parallel algorithms (that may use MPI or PyZMQ) interactively.- Tie together multiple MPI jobs running on different systems into one giant distributed and parallel system.- Start a parallel job on your cluster and then have a remote collaborator connect to it and pull back data into their local IPython session for plotting and analysis.- Run a set of tasks on a set of CPUs using dynamic load balancing. Architecture overviewThe IPython architecture consists of four components:- The IPython engine- The IPython hub- The IPython schedulers- The cluster clientThese components live in the `IPython.parallel` package and areinstalled with IPython. IPython engineThe IPython engine is a Python instance that accepts Python commands overa network connection. When multiple engines are started, paralleland distributed computing becomes possible. An important property of anIPython engine is that it blocks while user code is being executed. Readon for how the IPython controller solves this problem to expose a cleanasynchronous API to the user. IPython controllerThe IPython controller processes provide an interface for working with aset of engines. At a general level, the controller is a collection ofprocesses to which IPython engines and clients can connect. Thecontroller is composed of a `Hub` and a collection of`Schedulers`, which may be in processes or threads.The controller provides a single point of contact for users whowish to utilize the engines in the cluster. There is a variety ofdifferent ways of working with a controller, but all of thesemodels are implemented via the `View.apply` method, afterconstructing `View` objects to represent different collections engines.The two primary models for interacting with engines are:- A **Direct** interface, where engines are addressed explicitly.- A **LoadBalanced** interface, where the Scheduler is trusted with assigning work to appropriate engines.Advanced users can readily extend the View models to enable other stylesof parallelism. The HubThe center of an IPython cluster is the Hub. The Hub can be viewed as an über-logger, which keeps track of engine connections, schedulers, clients, as well as persist alltask requests and results in a database for later use. 
SchedulersAll actions that can be performed on the engine go through a Scheduler.While the engines themselves block when user code is run, the schedulershide that from the user to provide a fully asynchronous interface to aset of engines. Each Scheduler is a small GIL-less function in C providedby pyzmq (the Python load-balanced scheduler being an exception). ØMQ and PyZMQAll of this is implemented with the lovely ØMQ messaging library,and pyzmq, the lightweight Python bindings, which allows very fastzero-copy communication of objects like numpy arrays. IPython client and viewsThere is one primary object, the `Client`, forconnecting to a cluster. For each execution model, there is acorresponding `View`. These views allow users tointeract with a set of engines through the interface. Here are the twodefault views:- The `DirectView` class for explicit addressing.- The `LoadBalancedView` class for destination-agnostic scheduling. Getting Started Starting the IPython controller and enginesTo follow along with this tutorial, you will need to start the IPythoncontroller and four IPython engines. The simplest way of doing this iswith the [clusters tab](/clusters),or you can use the `ipcluster` command in a terminal: $ ipcluster start -n 4There isn't time to go into it here, but ipcluster can be used to start enginesand the controller with various batch systems including:* SGE* PBS* LSF* MPI* SSH* WinHPCMore information on starting and configuring the IPython cluster in [the IPython.parallel docs](http://ipython.org/ipython-doc/stable/parallel/parallel_process.html).Once you have started the IPython controller and one or more engines,you are ready to use the engines to do something useful. To make sure everything is working correctly, let's do a very simple demo: ###Code from IPython import parallel rc = parallel.Client() rc.block = True rc.ids def mul(a,b): return a*b def summary(): """summarize some info about this process""" import os import socket import sys return { 'cwd': os.getcwd(), 'Python': sys.version, 'hostname': socket.gethostname(), 'pid': os.getpid(), } mul(5,6) summary() ###Output _____no_output_____ ###Markdown What does it look like to call this function remotely?Just turn `f(*args, **kwargs)` into `view.apply(f, *args, **kwargs)`! ###Code rc[0].apply(mul, 5, 6) rc[0].apply(summary) ###Output _____no_output_____ ###Markdown And the same thing in parallel? ###Code rc[:].apply(mul, 5, 6) rc[:].apply(summary) ###Output _____no_output_____ ###Markdown Python has a builtin map for calling a function with a variety of arguments ###Code map(mul, range(1,10), range(2,11)) ###Output _____no_output_____ ###Markdown So how do we do this in parallel? ###Code view = rc.load_balanced_view() view.map(mul, range(1,10), range(2,11)) ###Output _____no_output_____ ###Markdown And a preview of parallel magics: ###Code %%px import os, socket print os.getpid() print socket.gethostname() ###Output _____no_output_____
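###Markdown In addition to `apply` and `map`, a DirectView supports moving data to and from the engines. A minimal sketch, assuming the `rc` client created above with `rc.block = True`: ###Code
dview = rc[:]                      # DirectView on all engines

# Dictionary-style push/pull of variables
dview['a'] = 10
print(dview['a'])                  # one value per engine

# Partition a sequence across the engines, then reassemble it
dview.scatter('chunk', range(16))
print(dview.gather('chunk'))
###Output _____no_output_____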
Neural Networks/Time Series/Week_1_Exercise_Question.ipynb
###Markdown Now that we have the time series, let's split it so we can start forecasting ###Code split_time = 1100 time_train = time[:split_time] x_train = series[:split_time] time_valid = time[split_time:] x_valid = series[split_time:] plt.figure(figsize=(10, 6)) plot_series(time_train, x_train) plt.show() plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid) plt.show() # EXPECTED OUTPUT # Chart WITH 4 PEAKS between 50 and 65 and 3 troughs between -12 and 0 # Chart with 2 Peaks, first at slightly above 60, last at a little more than that, should also have a single trough at about 0 ###Output _____no_output_____ ###Markdown Naive Forecast ###Code naive_forecast = series[split_time - 1:-1] plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid) plot_series(time_valid, naive_forecast) # Expected output: Chart similar to above, but with forecast overlay ###Output _____no_output_____ ###Markdown Let's zoom in on the start of the validation period: ###Code plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid, start=100, end=250) plot_series(time_valid, naive_forecast, start=101, end=251) # EXPECTED - Chart with X-Axis from 1100-1250 and Y Axes with series value and projections. Projections should be time stepped 1 unit 'after' series ###Output _____no_output_____ ###Markdown Now let's compute the mean squared error and the mean absolute error between the forecasts and the predictions in the validation period: ###Code print(keras.metrics.mean_squared_error(x_valid, naive_forecast).numpy()) print(keras.metrics.mean_absolute_error(x_valid, naive_forecast).numpy()) # Expected Output # 19.578304 # 2.6011968 ###Output 19.578304 2.6011972 ###Markdown That's our baseline, now let's try a moving average: ###Code def moving_average_forecast(series, window_size): """Forecasts the mean of the last few values. If window_size=1, then this is equivalent to naive forecast""" # YOUR CODE HERE forecast = [] for time in range(len(series) - window_size): forecast.append(series[time:time + window_size].mean()) return np.array(forecast) moving_avg = moving_average_forecast(series, 30)[split_time - 30:] plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid) plot_series(time_valid, moving_avg) # EXPECTED OUTPUT # CHart with time series from 1100->1450+ on X # Time series plotted # Moving average plotted over it print(keras.metrics.mean_squared_error(x_valid, moving_avg).numpy()) print(keras.metrics.mean_absolute_error(x_valid, moving_avg).numpy()) # EXPECTED OUTPUT # 65.786224 # 4.3040023 diff_series = (series[365:] - series[:-365]) diff_time = time[365:] plt.figure(figsize=(10, 6)) plot_series(diff_time, diff_series) plt.show() # EXPECETED OUTPUT: CHart with diffs ###Output _____no_output_____ ###Markdown Great, the trend and seasonality seem to be gone, so now we can use the moving average: ###Code diff_moving_avg = moving_average_forecast(diff_series, 50)[split_time - 365 - 50:] plt.figure(figsize=(10, 6)) plot_series(time_valid, diff_series[split_time - 365:]) plot_series(time_valid, diff_moving_avg) plt.show() # Expected output. Diff chart from 1100->1450 + # Overlaid with moving average ###Output _____no_output_____ ###Markdown Now let's bring back the trend and seasonality by adding the past values from t – 365: ###Code diff_moving_avg_plus_past = series[split_time - 365:-365] + diff_moving_avg plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid) plot_series(time_valid, diff_moving_avg_plus_past) plt.show() # Expected output: Chart from 1100->1450+ on X. 
Same chart as earlier for time series, but projection overlaid looks close in value to it print(keras.metrics.mean_squared_error(x_valid, diff_moving_avg_plus_past).numpy()) print(keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_past).numpy()) # EXPECTED OUTPUT # 8.498155 # 2.327179 ###Output 8.498155 2.327179 ###Markdown Better than naive forecast, good. However the forecasts look a bit too random, because we're just adding past values, which were noisy. Let's use a moving averaging on past values to remove some of the noise: ###Code diff_moving_avg_plus_smooth_past = moving_average_forecast(series[split_time - 370:-360], 10) + diff_moving_avg plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid) plot_series(time_valid, diff_moving_avg_plus_smooth_past) plt.show() # EXPECTED OUTPUT: # Similar chart to above, but the overlaid projections are much smoother print(keras.metrics.mean_squared_error(x_valid, diff_moving_avg_plus_smooth_past).numpy()) print(keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_smooth_past).numpy()) # EXPECTED OUTPUT # 12.527958 # 2.2034433 ###Output 12.527956 2.2034435
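###Markdown The `moving_average_forecast` above uses a Python loop, which is slow for long series. A vectorised sketch that produces the same values via a cumulative sum is shown below (the name `moving_average_forecast_fast` is just for this illustration): ###Code
def moving_average_forecast_fast(series, window_size):
    # Same output as moving_average_forecast, computed without a Python loop.
    csum = np.cumsum(np.insert(series, 0, 0.0))
    means = (csum[window_size:] - csum[:-window_size]) / window_size
    return means[:-1]  # drop the last window so the length matches the loop version

# Sanity check (should be ~0):
# np.max(np.abs(moving_average_forecast_fast(series, 30) - moving_average_forecast(series, 30)))
###Output _____no_output_____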
Create ClimateWNA Plot List from FIA Locations.ipynb
###Markdown Prepare the input file for ClimateWNA ###Code pg_engine='postgresql://postgres@localhost:5432/PNWFIADB_FVSIn' SQL = ''' SELECT plot.cn AS id1, NULL as id2, plot.lat as lat, plot.lon as lon, plot.elev*0.3048 as el FROM plot, subplot, tree WHERE plot.cn = tree.plt_cn AND subplot.plt_cn = plot.cn AND tree.subp = subplot.subp AND tree.spcd = 202 AND tree.dia IS NOT NULL AND tree.cr IS NOT NULL AND tree.inc10yr_pnwrs IS NOT NULL AND subplot.slope IS NOT NULL AND subplot.aspect IS NOT NULL GROUP BY id1, id2, lat, lon, el ''' # read in the stands from the FVSIn database locations = pd.read_sql(sql=SQL, con=pg_engine) locations.head() locations.info() locations.to_csv('PNWFIA_DF_plots.csv', index=False) SQL = ''' SELECT MIN(inv_year), MAX(inv_year) FROM fvs_standinit ''' # read in the stands from the FVSIn database year_ranges = pd.read_sql(sql=SQL, con=pg_engine) year_ranges ###Output _____no_output_____ ###Markdown Visualize a map of our plot locations ###Code # read in the FVS variants fvs_variants = gpd.read_file('FVS_Variants_and_Locations.shp') # now lets turn those FIA location points into a geodataframe to get the # correspondong FVS regional variants and location codes geometry = [Point(xy) for xy in zip(locations.lon, locations.lat)] locations = locations.drop(['lon', 'lat'], axis=1) FIA_locs = gpd.GeoDataFrame(locations, crs={'init': 'epsg:4326'}, geometry=geometry) FIA_locs = FIA_locs.to_crs(fvs_variants.crs) # let's gather the major west coast variants (OR, WA, and CA) variants = ['WC', 'PN', 'EC', 'BM', 'SO', 'CA', 'NC', 'CA', 'WS'] west_coast = fvs_variants.loc[fvs_variants.FVSVariant.isin(variants)] # grab the row index values for the additional locations we want # from the other variants more_locs = [1, 200, 240, 46, 92, 110] # concatenate the extra locations and the major west coast variants my_locations = pd.concat([fvs_variants.iloc[more_locs],west_coast]) my_variants = my_locations.dissolve(by='FVSVariant').reset_index() fig, ax = plt.subplots(figsize=(10, 10)) # color by FVS Regional Varian my_variants.plot(ax = ax, column='FVSVariant', linewidth=1, cmap='Pastel1', edgecolor='black') FIA_locs.plot(ax = ax, marker='o', color='red', markersize=0.25) my_locations.plot(ax=ax, linewidth=0.4, facecolor='none', edgecolor='black') ax.set_aspect('equal') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False); fig.savefig('DF_plots.png') ###Output _____no_output_____
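###Markdown As a quick sanity check on the ClimateWNA input written above (it should contain the id1, id2, lat, lon and el columns constructed earlier, with elevation already converted to metres), the file can be read back and summarised. This cell is just an illustrative check: ###Code
check = pd.read_csv('PNWFIA_DF_plots.csv')
print(check.columns.tolist())
check[['lat', 'lon', 'el']].describe()
###Output _____no_output_____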
simulations/notebooks_sim_bin/1.4_sim_toe_compLasso_binary_update.ipynb
###Markdown summarize compositional lasso results on Toeplitz Simulation Scenarios for binary outcome ###Code dir = '/panfs/panfs1.ucsd.edu/panscratch/lij014/Stability_2020/sim_data' dim.list = list() size = c(50, 100, 500, 1000) idx = 0 for (P in size){ for (N in size){ idx = idx + 1 dim.list[[idx]] = c(P=P, N=N) } } rou.list = seq(0.1, 0.9, 0.2) files = NULL for (rou in rou.list){ for (dim in dim.list){ p = dim[1] n = dim[2] files = cbind(files, paste0(dir, '/sim_toeplitz_corr', rou, paste('P', p, 'N', n, sep='_'), '.RData', sep='')) } } length(files) avg_FDR = NULL table_toe = NULL tmp_num_select = rep(0, length(files)) for (i in 1:length(files)){ print(paste0('indx: ', i)) load(paste0(dir, '/binary_update/toe_GenCompLasso_binary_', i, '.RData')) table_toe = rbind(table_toe, results_toe_GenCompLasso[c('n', 'p', 'rou', 'FP', 'FN', 'ROC', 'Stab')]) tmp_num_select[i] = mean(rowSums(results_toe_GenCompLasso$Stab.table)) # calculate FDR load(file_name, dat <- new.env()) sub = dat$sim_array[[i]] p = sub$p # take true values from 1st replicate of each simulated data coef = sub$beta coef.true = which(coef != 0) tt = results_toe_GenCompLasso$Stab.table FDR = NULL # false positive rate for (r in 1:nrow(tt)){ FDR = c(FDR, length(setdiff(which(tt[r, ] !=0), coef.true))/sum(tt[r, ])) } avg_FDR = c(avg_FDR, mean(FDR, na.rm=T)) } table_toe = as.data.frame(table_toe) table_toe$num_select = tmp_num_select table_toe$FDR = round(avg_FDR,2) head(table_toe) tail(table_toe) # export result result.table_toe <- apply(table_toe,2,as.character) rownames(result.table_toe) = rownames(table_toe) result.table_toe = as.data.frame(result.table_toe) # extract numbers only for 'n' & 'p' result.table_toe$n = tidyr::extract_numeric(result.table_toe$n) result.table_toe$p = tidyr::extract_numeric(result.table_toe$p) result.table_toe$ratio = result.table_toe$p / result.table_toe$n result.table_toe = result.table_toe[c('n', 'p', 'rou', 'ratio', 'Stab', 'ROC', 'FP', 'FN', 'num_select', 'FDR')] colnames(result.table_toe)[1:4] = c('N', 'P', 'Corr', 'Ratio') # convert interested measurements to be numeric result.table_toe$Stab = as.numeric(as.character(result.table_toe$Stab)) result.table_toe$num_select = as.numeric(as.character(result.table_toe$num_select)) result.table_toe$ROC_mean = as.numeric(sub("\\(.*", "", result.table_toe$ROC)) result.table_toe$FP_mean = as.numeric(sub("\\(.*", "", result.table_toe$FP)) result.table_toe$FN_mean = as.numeric(sub("\\(.*", "", result.table_toe$FN)) # check whether missing values exists result.table_toe[rowSums(is.na(result.table_toe)) > 0,] head(result.table_toe) tail(result.table_toe) result.table_toe ## export write.table(result.table_toe, '../results_summary_bin/sim_toe_GencompLasso_binary.txt', sep='\t', row.names=F) ###Output _____no_output_____
lag-energy.ipynb
###Markdown This notebook computes the lag-energy spectrum from the saved cross spectrum. ###Code import numpy as np from astropy.table import Table, Column from astropy.io import fits from scipy.stats import binned_statistic import os import subprocess import matplotlib.pyplot as plt import matplotlib.font_manager as font_manager from matplotlib.ticker import MultipleLocator from matplotlib.ticker import ScalarFormatter, NullFormatter import matplotlib.colors as colors from matplotlib._color_data import XKCD_COLORS as xkcdcolor %matplotlib inline from xcor_tools import find_nearest, pairwise, Energy_lags font_prop = font_manager.FontProperties(size=20) homedir = os.path.expanduser("~") maxi_dir = homedir+"/Dropbox/Research/MAXIJ1535_B-QPO" cs_file = maxi_dir+"/out/MAXIJ1535_64sec_256dt_ratecut_cs.fits" assert os.path.isfile(cs_file) cs_tab = Table.read(cs_file, format='fits') print(cs_tab.info) print(cs_tab.meta) cs2_file = maxi_dir+"/out/MAXIJ1535_64sec_256dt_window4_cs.fits" assert os.path.isfile(cs2_file) cs2_tab = Table.read(cs2_file, format='fits') rsp_matrix_file = maxi_dir+"/nicer_v1.02rbn.rsp" assert os.path.isfile(rsp_matrix_file) rsp = Table.read(rsp_matrix_file, format='fits', hdu='EBOUNDS') energy_list = np.asarray([np.mean([x, y]) for x,y in zip(rsp['E_MIN'], rsp['E_MAX'])]) e_chans = rsp['CHANNEL'] energy_step = energy_list - rsp['E_MIN'] # lf = 0.1 # hf = 2.0 # lag_string="bbn" # bbn_lags = Energy_lags(cs_tab, low_freq_bound=lf, high_freq_bound=hf, debug=False) # print(bbn_lags.energy_tab.info) # bbn_lags.energy_tab.write(maxi_dir+"/out/lags_%s.fits" % (lag_string), format='fits', overwrite=True) lf = 4.29 hf = 7.15 lag_string = "qpo" qpo_lags = Energy_lags(cs_tab, low_freq_bound=lf, high_freq_bound=hf, debug=False) print(qpo_lags.energy_tab.info) qpo_lags.energy_tab.write(maxi_dir+"/out/lags_%s.fits" % (lag_string), format='fits', overwrite=True) lag_string = "both" # lf = 4.29 # hf = 7.15 # lag_string = "win4-qpo" # qpo2_lags = Energy_lags(cs2_tab, low_freq_bound=lf, high_freq_bound=hf, debug=False) # print(qpo2_lags.energy_tab.info) # qpo2_lags.energy_tab.write(maxi_dir+"/out/lags_%s.fits" % (lag_string), format='fits', overwrite=True) # lag_string="win_compare" lf = 9.6 hf = 12.6 lag_string = "harmonic" harmonic_lags = Energy_lags(cs_tab, low_freq_bound=lf, high_freq_bound=hf, debug=False) # print(harmonic_lags.energy_tab.info) harmonic_lags.energy_tab.write(maxi_dir+"/out/lags_%s.fits" % (lag_string), format='fits', overwrite=True) lag_string = "all" # ## Putting the time lags in units of milliseconds # tlag = lags.energy_tab['TIME_LAG'] / 1e-3 # tlag_err = lags.energy_tab['TIME_ERR'] / 1e-3 # fig, ax = plt.subplots(1, 1, figsize=(9,6.75), dpi=300, tight_layout=True) # ax.hlines(0.0, 1, 10.2, linestyle='dashed', lw=2, color='black') # ax.errorbar(energy_list[:-2], tlag[:-2], xerr=energy_step[:-2], # yerr=tlag_err[:-2], lw=3, drawstyle='steps-mid', ls='none', # ms=10, mew=2, mec="black", mfc='black', # color="black", ecolor="black", elinewidth=3, capsize=0) # ## BBN: # ax.set_ylim(-70, 130) # ax.text(1.06, 116, 'a', fontsize=28, color='blue') # ax.text(6.66, 116, r'%.1f$-$%.0f$\,$Hz' % (lf,hf), fontsize=20) # yLocator = MultipleLocator(10) ## loc of minor ticks on y-axis # ax.yaxis.set_major_locator(MultipleLocator(20)) # ## QPO: # # ax.set_ylim(-28, 80) # # ax.text(1.08, 72, 'b', fontsize=28, color='blue') # # ax.text(5.44, 72, r'%.2f$-$%.2f$\,$Hz' % (lf,hf), fontsize=20) # # yLocator = MultipleLocator(5) ## loc of minor ticks on y-axis # # ## Harmonic # # 
ax.set_ylim(-45, 55) # # ax.text(1.05, 48, 'c', fontsize=28, color='blue') # # ax.text(5.8, 48, r'%.1f$-$%.1f$\,$Hz' % (lf,hf), fontsize=20) # # yLocator = MultipleLocator(5) ## loc of minor ticks on y-axis # ax.yaxis.set_minor_locator(yLocator) # ax.set_ylabel(r'Time lag (ms)', fontproperties=font_prop) # ax.set_xlabel('Energy (keV)', fontproperties=font_prop) # ax.set_xlim(1, 10.2) # ax.set_xscale('log') # x_maj_loc = [1,2,3,4,5,6,8,10] # ax.set_xticks(x_maj_loc) # xLocator = MultipleLocator(1) ## loc of minor ticks on x-axis # ax.xaxis.set_minor_locator(xLocator) # ax.xaxis.set_major_formatter(ScalarFormatter()) # ax.xaxis.set_minor_formatter(NullFormatter()) # ax.tick_params(axis='x', labelsize=20, bottom=True, top=True, # labelbottom=True, labeltop=False, direction="in") # ax.tick_params(axis='y', labelsize=20, left=True, right=True, # labelleft=True, labelright=False, direction="in") # ax.tick_params(which='major', width=1.5, length=9, direction="in") # ax.tick_params(which='minor', width=1.5, length=6, direction="in") # for axis in ['top', 'bottom', 'left', 'right']: # ax.spines[axis].set_linewidth(1.5) # # plotfile = "lags_%s.eps" % (lag_string) # # plotfile = "lags_HARDER_%s.eps" % (lag_string) # # plt.savefig(maxi_dir+"/out/"+plotfile, dpi=300) # plt.show() # bbn_plag = bbn_lags.energy_tab['PHASE_LAG'] / (2*np.pi) # bbn_plag_err = bbn_lags.energy_tab['PHASE_ERR'] / (2*np.pi) qpo_plag = qpo_lags.energy_tab['PHASE_LAG'] / (2*np.pi) qpo_plag_err = qpo_lags.energy_tab['PHASE_ERR'] / (2*np.pi) harmonic_plag = harmonic_lags.energy_tab['PHASE_LAG'] / (2*np.pi) harmonic_plag_err = harmonic_lags.energy_tab['PHASE_ERR'] / (2*np.pi) # qpo2_plag = qpo2_lags.energy_tab['PHASE_LAG'] / (2*np.pi) # qpo2_plag_err = qpo2_lags.energy_tab['PHASE_ERR'] / (2*np.pi) fig, ax = plt.subplots(1, 1, figsize=(9,6.75), dpi=300, tight_layout=True) ax.hlines(0.0, 1, 10.2, linestyle='dashed', lw=2, color='black') ax.errorbar(energy_list[10:-2], harmonic_plag[10:-2], xerr=energy_step[10:-2], yerr=harmonic_plag_err[10:-2], lw=3, drawstyle='steps-mid', ms=10, mew=2, mec="black", mfc='black', color="black", ecolor="black", elinewidth=3, capsize=0) # ax.errorbar(energy_list[10:-2], qpo2_plag[10:-2], xerr=energy_step[10:-2], # yerr=qpo2_plag_err[10:-2], lw=3, drawstyle='steps-mid', # ms=10, mew=2, mec="green", mfc='green', # color="green", ecolor="green", elinewidth=3, capsize=0) ax.errorbar(energy_list[10:-2], qpo_plag[10:-2], xerr=energy_step[10:-2], yerr=qpo_plag_err[10:-2], lw=3, drawstyle='steps-mid', ms=10, mew=2, mec=xkcdcolor['xkcd:azure'], mfc=xkcdcolor['xkcd:azure'], color=xkcdcolor['xkcd:azure'], ecolor=xkcdcolor['xkcd:azure'], elinewidth=3, capsize=0) # ax.errorbar(energy_list[10:-2], bbn_plag[10:-2], xerr=energy_step[10:-2], # yerr=bbn_plag_err[10:-2], lw=3, drawstyle='steps-mid', # ms=10, mew=2, mec="red", mfc='red', # color="red", ecolor="red", elinewidth=3, capsize=0) ## BBN: # ax.set_ylim(-0.1, 0.2) # ax.text(6.66, 0.18, r'%.1f$-$%.0f$\,$Hz' % (lf,hf), fontsize=20) # yLocator = MultipleLocator(0.01) ## loc of minor ticks on y-axis # ax.yaxis.set_major_locator(MultipleLocator(0.05)) ## QPO: # ax.set_ylim(-0.15, 0.45) # ax.text(5.4, 0.405, r'0.1$-$2$\,$Hz', fontsize=20, color='red') # ax.text(5.4, 0.375, r'4.29$-$7.15$\,$Hz', fontsize=20, color=xkcdcolor['xkcd:azure']) # ax.text(5.4, 0.375, r'Normal', fontsize=20, color=xkcdcolor['xkcd:azure']) # ax.text(5.4, 0.405, r'Window 4', fontsize=20, color='green') # yLocator = MultipleLocator(0.05) ## loc of minor ticks on y-axis # ## Harmonic 
ax.set_ylim(-0.55, 0.6) ax.text(5.4, 0.45, r'9.6$-$12.6$\,$Hz', fontsize=20, color='black') ax.text(5.4, 0.51, r'4.29$-$7.15$\,$Hz', fontsize=20, color=xkcdcolor['xkcd:azure']) yLocator = MultipleLocator(0.1) ## loc of minor ticks on y-axis ax.yaxis.set_minor_locator(yLocator) ax.set_ylabel(r'Phase lag (cycles)', fontproperties=font_prop) ax.set_xlabel('Energy (keV)', fontproperties=font_prop) ax.set_xlim(1, 10.2) ax.set_xscale('log') x_maj_loc = [1,2,3,4,5,6,8,10] ax.set_xticks(x_maj_loc) xLocator = MultipleLocator(1) ## loc of minor ticks on x-axis ax.xaxis.set_minor_locator(xLocator) ax.xaxis.set_major_formatter(ScalarFormatter()) ax.xaxis.set_minor_formatter(NullFormatter()) ax.tick_params(axis='x', labelsize=20, bottom=True, top=True, labelbottom=True, labeltop=False, direction="in") ax.tick_params(axis='y', labelsize=20, left=True, right=True, labelleft=True, labelright=False, direction="in") ax.tick_params(which='major', width=1.5, length=9, direction="in") ax.tick_params(which='minor', width=1.5, length=6, direction="in") for axis in ['top', 'bottom', 'left', 'right']: ax.spines[axis].set_linewidth(1.5) plotfile = "lags_phase_%s.eps" % (lag_string) print(plotfile) plt.savefig(maxi_dir+"/out/"+plotfile, dpi=300) plt.show() ###Output lags_phase_all.eps
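###Markdown For reference, a phase lag can be turned back into a time lag by dividing by 2π times a representative frequency of the band. The rough sketch below does this for the QPO band; it assumes `PHASE_LAG` is stored in radians (as the division by 2π above suggests) and simply uses the band midpoint as the representative frequency, which is only an approximation: ###Code
nu_rep = 0.5 * (4.29 + 7.15)                        # representative frequency (Hz), band midpoint
phase_lag_rad = qpo_lags.energy_tab['PHASE_LAG']    # radians (assumed)
time_lag_ms = phase_lag_rad / (2 * np.pi * nu_rep) / 1e-3
###Output _____no_output_____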
codebase/notebooks/05_transition_analyses/Upskilling_02_Analysis.ipynb
###Markdown Simulating upskillingUpskilling is a costly investment of time and resources andit is therefore important to target this investment in theright direction. We identified a number of skills that canhelp all at-risk workers to increase their number of safe anddesirable transitions.For this purpose, we leveraged the ESCO framework ofskills and our job transition recommendation algorithm tomeasure the effect of adding a single skill to a worker’srange of transition options. First, we pre-selected 100 coreskills that reflect the central competencies for a largenumber of occupations and which, therefore, would beexpected to have a positive impact on workers’ careerprospects (see `Upskilling_01_Core_skills.iypnb`) We tested each of these skills by adding themto the at-risk workers’ skills sets and measuring the changein the number of transition options. While the present analysis is restricted tothese 100 core skills, the same approach could be usedto evaluate any of the 13,485 ESCO skills and, as we areusing NLP, potentially even skills that are not part ofthe ESCO framework. 0. Import dependencies and inputs ###Code %run ../notebook_preamble_Transitions.ipy import os data = load_data.Data() sims = load_data.Similarities() ###Output _____no_output_____ ###Markdown 1. Examples of upskilling specific occupations with one skillWe show examples of new safe and desirable transitions that emerge when we add the skill to 'manage staff' to a hotel concierge's or shop assistant's skills set. ###Code # Check the effect of training the skill to "manage staff" skill_id = 2778 data.skills.loc[skill_id] ###Output _____no_output_____ ###Markdown Hotel concierge ###Code # Try upskilling a hotel concierge with the skill to 'manage staff' # NB: Testing many destination_ids will take a minute or so; # one can slightly speed it up by only checking lower risk occupations, or occupations closer to similarity threshold... destination_ids = data.occ_report[data.occ_report.risk_category!='High risk'].id.to_list() upskilling = trans_utils.Upskilling(origin_ids=[329], new_skillsets=[2778], destination_ids=destination_ids) upskilling.effectiveness(safe_definition='strict') ###Output _____no_output_____ ###Markdown The table shows the mean and median new transitions per occupation in origin_ids list (here, we have only one occupation) ###Code # Check the new safe and desirable transitions df = upskilling.upskilling_effects[0]['transition_table'] df[df.is_strictly_safe_desirable].sort_values('new_similarity') ###Output _____no_output_____ ###Markdown Shop assistant ###Code # Try upskilling a shop assistant with the skill to 'manage staff' upskilling = trans_utils.Upskilling(origin_ids=[139], new_skillsets=[2778], destination_ids=destination_ids) upskilling.effectiveness(safe_definition='strict') # Check the new transitions df = upskilling.upskilling_effects[0]['transition_table'] df[df.is_strictly_safe_desirable].sort_values('destination_label') ###Output _____no_output_____ ###Markdown Note: One can also add a combination of skills ###Code # Try upskilling a shop assistant with the skill to 'manage staff' AND 'manage budgets' upskilling = trans_utils.Upskilling(origin_ids=[139], new_skillsets=[2778, [2778, 1776]], destination_ids=destination_ids) upskilling.effectiveness(safe_definition='strict') ###Output _____no_output_____ ###Markdown Note that if you use combinations of skills, the output table won't feature skills categories. 2. 
Analysis of 100 core skills for high risk occupationsWe ran the analysis for 100 different skills, across approximately 1700 potential origin and destination occupations (all top level ESCO occupations). This was optimised and done separately on several AWS cloud instances. Here, we load the pre-computed results and analyse most effective skills across all 'high risk' occupations.Note: You'll need to download [this very large file](https://ojd-mapping-career-causeways.s3.eu-west-2.amazonaws.com/data/interim/upskilling_analysis/top_occupations_100_core_skills.pickle) (approx. 2GB) that contains all of the recalculated similarity matrices. ###Code # Import pre-computed results fpath = f'{useful_paths.data_dir}interim/upskilling_analysis/top_occupations_100_core_skills.pickle' upskilling = trans_utils.Upskilling(load_data_path=fpath, verbose=False) core_skills_effect = upskilling.effectiveness( select_origin_ids = data.occ_report[data.occ_report.risk_category=='High risk'].id.to_list(), safe_definition='strict' ).round(2) core_skills_effect.head(25) ###Output _____no_output_____ ###Markdown 2.1 Specific origin occupation groupsThe effect of upskilling might vary depending on the specific occupation or occupational group in question. ###Code def check_specific_origin_sectors(sector): ids = data.occ_report[(data.occ_report.risk_category=='High risk') & (data.occ_report.skills_based_sector==sector) ].id.to_list() return upskilling.effectiveness( select_origin_ids = ids, safe_definition='strict', ).round(2) check_specific_origin_sectors('business & administration workers').head(6) check_specific_origin_sectors('sales & services workers').head(6) check_specific_origin_sectors('arts & media workers').head(6) check_specific_origin_sectors('ict workers').head(6) ###Output _____no_output_____
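###Markdown To compare sectors side by side rather than one call at a time, the helper defined above can be looped over every skills-based sector that contains high-risk occupations. This is a small illustrative extension that uses only objects already defined in this notebook: ###Code
high_risk = data.occ_report[data.occ_report.risk_category == 'High risk']
sector_top_skills = {}
for sector in high_risk.skills_based_sector.unique():
    sector_top_skills[sector] = check_specific_origin_sectors(sector).head(6)

# e.g. sector_top_skills['sales & services workers']
###Output _____no_output_____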
chapter5/chapter5_lab_intro_to_inference_knowledge.ipynb
###Markdown Chapter 5 Laboratory - Is Knowledge Power? Foundations for statistical inference - Sampling distributionsIn this lab, you will investigate the ways in which the statistics from a random sample of data can serve as point estimates for population parameters. We’re interested in formulating a _sampling distribution_ of our estimate in order to learn about the properties of the estimate, such as its distribution.__Setting a seed:__ We will take some random samples and build sampling distributions in this lab, which means we should set a seed at the start of the lab. Getting Started Load packagesIn this lab, we will explore and visualize the data using the tidyverse suite of packages. We will also use the infer package for resampling.Let’s load the packages. ###Code import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # Setting the seed np.random.seed(0) # Setting seaborn theme sns.set_theme() ###Output _____no_output_____ ###Markdown The dataA 2019 Gallup report states the following:> The premise that scientific progress benefits people has been embodied in discoveries throughout the ages – from the development of vaccinations to the explosion of technology in the past few decades, resulting in billions of supercomputers now resting in the hands and pockets of people worldwide. Still, not everyone around the world feels science benefits them personally.Source: [World Science Day: Is Knowledge Power?](https://news.gallup.com/opinion/gallup/268121/world-science-day-knowledge-power.aspx)The Wellcome Global Monitor finds that 20% of people globally do not believe that the work scientists do benefits people like them. In this lab, you will assume this 20% is a true population proportion and learn about how sample proportions can vary from sample to sample by taking smaller samples from the population. We will first create our population assuming a population size of 100,000. This means 20,000 (20%) of the population think the work scientists do does not benefit them personally and the remaining 80,000 think it does. ###Code global_monitor = pd.DataFrame({ "scientist_work": np.array(["Benefits"] * 80000 + ["Doesn't benefit"] * 20000) }) ###Output _____no_output_____ ###Markdown The name of the data frame is `global_monitor` and the name of the variable that contains responses to the question _“Do you believe that the work scientists do benefit people like you?”_ is `scientist_work`.We can quickly visualize the distribution of these responses using a bar plot. ###Code ax = sns.histplot(y="scientist_work", data=global_monitor) plt.title("Do you believe that the work scientists do benefit people like you?") plt.xlabel("") plt.ylabel("") plt.show() ###Output _____no_output_____ ###Markdown We can also obtain summary statistics to confirm we constructed the data frame correctly. ###Code stats = pd.concat([global_monitor.value_counts(), global_monitor.value_counts() / global_monitor.shape[0]], axis=1, ignore_index=True) stats.columns = ["n", "p"] stats ###Output _____no_output_____ ###Markdown The unknown sampling distributionIn this lab, you have access to the entire population, but this is rarely the case in real life. Gathering information on an entire population is often extremely costly or impossible. Because of this, we often take a sample of the population and use that to understand the properties of the population. 
###Code
samp1 = global_monitor.sample(50)
###Output _____no_output_____
###Markdown This command collects a simple random sample of size 50 from the global_monitor dataset, and assigns the result to samp1. This is similar to randomly drawing names from a hat that contains the names of everyone in the population. Working with these 50 names is considerably simpler than working with all 100,000 people in the population. Exercise 1 - Describe the distribution of responses in this sample. How does it compare to the distribution of responses in the population? __Hint__: Although the `sample` method takes a random sample of observations (i.e. rows) from the dataset, you can still refer to the variables in the dataset with the same names. Code you presented earlier for visualizing and summarising the population data will still be useful for the sample; however, be careful not to label your proportion `p`, since you're now calculating a sample statistic, not a population parameter. You can customize the label of the statistics to indicate that it comes from the sample.We can just use `value_counts()` to get some statistics about this new sample.
###Code
stats_hat = pd.concat([samp1.value_counts(), samp1.value_counts() / samp1.shape[0]],
                      axis=1,
                      ignore_index=True)
stats_hat.columns = ["n", "p"]
stats_hat
###Output _____no_output_____
###Markdown Depending on which 50 people you selected, your estimate could be a bit above or a bit below the true population proportion of 0.20. In general, though, the sample proportion turns out to be a pretty good estimate of the true population proportion, and you were able to get it by sampling less than 1% of the population. Exercise 2 - Would you expect the sample proportion to match the sample proportion of another student's sample? Why, or why not? If the answer is no, would you expect the proportions to be somewhat different or very different? Ask a student team to confirm your answer.I would expect the sample proportions to differ slightly: not by a huge amount, but varying around the true population proportion. Exercise 3 - Take a second sample, also of size 50, and call it `samp2`. How does the sample proportion of `samp2` compare with that of `samp1`? Suppose we took two more samples, one of size 100 and one of size 1000. Which would you think would provide a more accurate estimate of the population proportion?Let's make `samp2`.
###Code
samp2 = global_monitor.sample(50)
samp2.value_counts() / samp2.shape[0]
###Output _____no_output_____
###Markdown We see that the proportions change slightly here. Let's now take two more samples, one of size 100 and another of size 1000.
###Code
samp100 = global_monitor.sample(100)
samp1000 = global_monitor.sample(1000)

print(samp100.value_counts() / samp100.shape[0])
print(samp1000.value_counts() / samp1000.shape[0])
###Output scientist_work
Benefits           0.81
Doesn't benefit    0.19
dtype: float64
scientist_work
Benefits           0.806
Doesn't benefit    0.194
dtype: float64
###Markdown We still see sampling variability, but as the sample gets bigger, the sample proportions get closer to the true population proportions. Not surprisingly, every time you take another random sample, you might get a different sample proportion. It's useful to get a sense of just how much variability you should expect when estimating the population proportion this way. The distribution of sample proportions, called the _sampling distribution (of the proportion)_, can help you understand this variability.
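For a rough sense of the scale of this variability: the standard error of a sample proportion is $\sqrt{p(1-p)/n}$, and with the true proportion $p = 0.2$ used to build `global_monitor` and $n = 50$ this gives $\sqrt{0.2 \times 0.8 / 50} \approx 0.057$, so sample proportions several percentage points above or below 0.2 are exactly what we should expect to see.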
In this lab, because you have access to the population, you can build up the sampling distribution for the sample proportion by repeating the above steps many times. Here, we use Python to take 15,000 different samples of size 50 from the population, calculate the proportion of responses in each sample, filter for only the _Doesn't benefit_ responses, and store each result in a vector called `sample_props50`. Note that each sample here is drawn without replacement using `.sample(50)`; when drawing 50 people from a population of 100,000, this makes no practical difference compared to sampling with replacement.
###Code
sample_props50 = [global_monitor.sample(50) for _ in range(15000)]

sample_props50_df = pd.concat(sample_props50, axis=0, ignore_index=True)
sample_props50_df = pd.concat([sample_props50_df.value_counts(), sample_props50_df.value_counts() / sample_props50_df.shape[0]],
                              axis=1,
                              ignore_index=True)
sample_props50_df.columns = ["n", "p"]
sample_props50_df
###Output _____no_output_____
###Markdown And we can visualize the distribution of these proportions with a histogram.
###Code
p_hats_db = np.array([df[df["scientist_work"] == "Doesn't benefit"].shape[0] / df.shape[0] for df in sample_props50])

sns.histplot(p_hats_db, binwidth=0.02)
plt.xlabel("p_hat (Doesn't benefit)")
plt.title("Sampling distribution of p_hat")
plt.show()
###Output _____no_output_____
###Markdown Exercise 4 - How many elements are there in sample_props50? Describe the sampling distribution, and be sure to specifically note its center. Make sure to include a plot of the distribution in your answer.There are 15,000 samples of 50 elements each, so in total there are 750,000 observations. You can see above the distribution of the proportion of those labelled as _Doesn't benefit_. If we want to see who _Benefits_, we can use the same logic as above. In any case, we see that the sample proportion of _Doesn't benefit_ is centered around 0.2, while the sample proportion of those labelled as _Benefits_ is centered around 0.8.
###Code
p_hats_b = np.array([df[df["scientist_work"] == "Benefits"].shape[0] / df.shape[0] for df in sample_props50])

sns.histplot(p_hats_b, binwidth=0.02)
plt.xlabel("p_hat (Benefits)")
plt.title("Sampling distribution of p_hat")
plt.show()
###Output _____no_output_____
###Markdown Interlude: Sampling distributionsThe idea behind the previous section is _repetition_. Earlier, you took a single sample of size `n` (50) from the population of all 100,000 people. By repeating this sampling procedure many times, you can build up a distribution of a series of sample statistics, which is called the __sampling distribution__.Note that in practice one rarely gets to build true sampling distributions, because one rarely has access to data from the entire population. Exercise 5 - To make sure you understand how sampling distributions are built, and exactly what the `rep_sample_n` (in R) function does, try modifying the code to create a sampling distribution of 25 sample proportions from samples of size 10, and put them in a data frame named sample_props_small. Print the output. How many observations are there in this object called sample_props_small? What does each observation represent?In Python, we just make a list of 25 dataframes, each containing 10 observations sampled from the population data. It's easy to predict that there will be a total of $25 * 10 = 250$ observations in total, split over our samples. Each observation is randomly sampled from the true population data.
###Code
sample_props_small = [global_monitor.sample(10) for _ in range(25)]
sample_props_small_df = pd.concat(sample_props_small, axis=0)

p_hat_db_small = np.array([df[df["scientist_work"] == "Doesn't benefit"].shape[0] / df.shape[0] for df in sample_props_small])
p_hat_b_small = np.array([df[df["scientist_work"] == "Benefits"].shape[0] / df.shape[0] for df in sample_props_small])

sns.histplot(p_hat_b_small)
plt.title("Benefits")
plt.show()

sns.histplot(p_hat_db_small)
plt.title("Doesn't benefit")
plt.show()
###Output _____no_output_____
###Markdown Sample size and the sampling distributionMechanics aside, let's return to the reason we used the above approach: to compute a sampling distribution, specifically, the sampling distribution of the proportions from samples of 50 people.The sampling distribution that you computed tells you much about estimating the true proportion of people who think that the work scientists do doesn't benefit them. Because the sample proportion is an unbiased estimator, the sampling distribution is centered at the true population proportion, and the spread of the distribution indicates how much variability is incurred by sampling only 50 people at a time from the population.In the remainder of this section, you will work on getting a sense of the effect that sample size has on your sampling distribution. Exercise 6 - Use the app below to create sampling distributions of proportions of Doesn't benefit from samples of size 10, 50, and 100. Use 5,000 simulations. What does each observation in the sampling distribution represent? How does the mean, standard error, and shape of the sampling distribution change as the sample size increases? How (if at all) do these values change if you increase the number of simulations? (You do not need to include plots in your answer.)The app can be found [here](https://openintro.shinyapps.io/sampling_distributions/), scrolling down to exercise 6. Each observation in the sampling distribution is the sample proportion computed from one simulated sample of the given size. As the sample size increases, the _mean_ gets closer to the true population proportion, the _standard error_ gets smaller, and thus the _shape_ gets narrower and more symmetric. Increasing the number of simulations does not change the mean or the standard error; it simply gives a smoother, more precise picture of the same sampling distribution. More PracticeSo far, you have only focused on estimating the proportion of those who think the work scientists do doesn't benefit them. Now, you'll try to estimate the proportion of those who think it does.Note that while you might be able to answer some of these questions using the app, you are expected to write the required code and produce the necessary plots and summary statistics. You are welcome to use the app for exploration. Exercise 7 - Take a sample of size 15 from the population and calculate the proportion of people in this sample who think the work scientists do enhances their lives. Using this sample, what is your best point estimate of the population proportion of people who think the work scientists do enhances their lives?
###Code sample_15 = global_monitor.sample(15) mean_15 = sample_15[sample_15["scientist_work"] == "Benefits"].shape[0] / sample_15.shape[0] print(f"Sample mean (n = 15) is {mean_15}") ###Output Sample mean (n = 15) is 0.7333333333333333 ###Markdown Exercise 8 - Since you have access to the population, simulate the sampling distribution of proportion of those who think the work scientists do enchances their lives for samples of size 15 by taking 2000 samples from the population of size 15 and computing 2000 sample proportions. Store these proportions in as `sample_props15`. Plot the data, then describe the shape of this sampling distribution. Based on this sampling distribution, what would you guess the true proportion of those who think the work scientists do enchances their lives to be? Finally, calculate and report the population proportion. ###Code sample_props15 = [global_monitor.sample(15) for _ in range(2000)] sample_props15_df = pd.concat(sample_props15) sample_props15_p = np.array([df[df["scientist_work"] == "Benefits"].shape[0] / df.shape[0] for df in sample_props15]) sns.histplot(sample_props15_p, binwidth=0.02) plt.show() ###Output _____no_output_____ ###Markdown The shape is approximately normal, centered roughly on 0.80/0.85, which is reasonably close to the true population mean, but still very inaccurate. Exercise 9 - Change your sample size from 15 to 150, then compute the sampling distribution using the same method as above, and store these proportions in a new object called sample_props150. Describe the shape of this sampling distribution and compare it to the sampling distribution for a sample size of 15. Based on this sampling distribution, what would you guess to be the true proportion of those who think the work scientists do enchances their lives? ###Code sample_props150 = [global_monitor.sample(150) for _ in range(2000)] sample_props150_df = pd.concat(sample_props150) sample_props150_p = np.array([df[df["scientist_work"] == "Benefits"].shape[0] / df.shape[0] for df in sample_props150]) sns.histplot(sample_props150_p, binwidth=0.02) plt.show() ###Output _____no_output_____
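###Markdown To back up the visual comparison in Exercise 9 with numbers, the short cell below contrasts the centre and spread of the two simulated sampling distributions (`sample_props15_p` and `sample_props150_p` from the cells above). Since the standard error of a proportion scales as $1/\sqrt{n}$, the spread for n = 150 should be roughly $\sqrt{10} \approx 3.2$ times smaller than for n = 15.

###Code
# Compare centre and spread of the two simulated sampling distributions
print(f"n = 15:  mean = {sample_props15_p.mean():.3f}, sd = {sample_props15_p.std(ddof=1):.3f}")
print(f"n = 150: mean = {sample_props150_p.mean():.3f}, sd = {sample_props150_p.std(ddof=1):.3f}")
###Output _____no_output_____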
text_featurization/lm_finetune/1A-Toxic_BoW_vs_LM.ipynb
###Markdown Featurizing Toxic Comments Using Pre-Trained Word Vectors and a Language Model's Encoder OverviewThis notebook provides an analysis of featurization methods for text. The central idea we examine is that we can represent text (words, phrases, and even entire sentences or paragraphs) as vectors. However, we'll see that some vector representations may provide more semantic information than others. Pre-Trained Word VectorsAs a first foray we featurize our data using publically available pre-trained word vectors. There are numerous word vectors available, the most common being [Word2Vec](https://code.google.com/archive/p/word2vec/), [GloVe](https://nlp.stanford.edu/projects/glove/), and [fasttext](https://github.com/facebookresearch/fastText/). We'll use the **GloVe** vectors trained on Wikipedia and Gigaword 5. Our input dataset contains a variable sequence of tokens (words), which we vectorize into a list of real-valued vectors. In order to use a machine learning model with such a representation we need to transform it into a fixed-vector representation. We can do this by many different aggregation schemes: sum/mean, max, min, etc. For this notebook we simply utilize unweighted averages of all the tokens, but you'll likely find that for some applications it may be more useful to consider max/min in addition, and concatenate multiple representations.![](https://image.slidesharecdn.com/starsem-170916142844/95/yejin-choi-2017-from-naive-physics-to-connotation-modeling-commonsense-in-frame-semantics-83-638.jpg?cb=1505572199)_image credit: Yejin Choi - 2017 - From Naive Physics to Connotation: Modeling Commonsense in Frame Semantics__quote credit: Ray Mooney_ Language Model EncodersWe'll then examine a more advanced method of featurizing our sequence of tokens. In particular, we'll use the encoder from a pre-trained language model. The encoder is a fixed-length vector representation that is typically the last hidden vector in a recurrent neural network trained for machine translation or language modeling.![](http://ruder.io/content/images/2018/07/lm_objective.png)_image credit: Seabstain Ruder and TheGradient: NLP's ImageNet moment has arrived_Our hope is that rather than naively aggregating our word vectors by their average representation, the last hidden layer will contain contextual information from the entire sequence of tokens. Imports:We import our dataset of comments to Wikipedia page-edits from our helper `load_data`. We'll also import a dictionary of GloVe vectors and a helper function for using it to lookup word vectors for our tokenized comments: ###Code import sys !{sys.executable} -m spacy download en NUM_WORKERS=4 from load_data import load_wiki_attacks, load_attack_encoded from load_data import tokenize, create_glove_lookup, download_glove import pathlib import pandas as pd import numpy as np import csv import tensorflow as tf import tensorflow_hub as hub data_dir = pathlib.Path("/data") / "active-learning-data" / "active-learning-workshop" / "text_featurization" / "data" if not data_dir.exists(): data_dir.mkdir(parents=True, exist_ok=True) ###Output _____no_output_____ ###Markdown We use the [spaCy](https://spacy.io/) library to tokenize our text, but aside from some prior data cleanup there's nothing fancy happening in preprocessing. 
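###Markdown For intuition, the tokenization step simply splits the raw comment text into word-level tokens. The helper's exact implementation lives in `load_data`, but the idea can be illustrated with a minimal spaCy sketch; the example sentence and the lowercasing step below are illustrative assumptions, not necessarily what the helper does.

###Code
# Minimal illustration of spaCy word tokenization (not the project's helper itself)
import spacy

nlp = spacy.blank("en")  # lightweight tokenizer-only pipeline
doc = nlp("This comment is NOT helpful at all!")
print([token.text.lower() for token in doc])
###Output _____no_output_____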
###Code glove_src = str(data_dir / "glove.6B.300d.txt") if not pathlib.Path(glove_src).exists(): download_glove(data_dir) glove_lookup = create_glove_lookup(glove_src) toxic_df = load_wiki_attacks(data_dir) toxic_df = tokenize(toxic_df, "comment_text") toxic_df.loc[:5, ['comment_text', "tokens"]] ###Output 100% | 61 MB | 7.78 MB/s | 7 sec elapsed ###Markdown Vectorize with GloVe:We can use our `glove_lookup` dictionary to vectorize all the tokens in our text. We apply the function to every token in our comment, and then take the average over all word vectors. Again, you should definitely consider other aggregation methods such as max/min. ###Code toxic_df['glove_aggregate'] = toxic_df.tokens.apply(lambda x: np.mean([glove_lookup[v] for v in x], axis=0)) toxic_df.loc[:5, ["comment_text", "tokens", "glove_aggregate"]] ###Output _____no_output_____ ###Markdown Language Model Our language model encoder utilizes pre-trained language models hosted on [TensorFlow Hub](https://www.tensorflow.org/hub/modules/text). Our helper script `encoder.py` provides a simple class entitled `encoder` with methods for encoding text using three different encoder models: [ELMO](http://www.aclweb.org/anthology/N18-1202), [USE](https://arxiv.org/pdf/1803.11175.pdf), and [NNLM](http://www.jmlr.org/papers/volume3/bengio03a/bengio03a.pdf). Encoder Imports:The class is a bit verbose for readability, but it's conceptually very simple. We load the pre-trained module, which defines a static computational graph with the learned weights from the language model on it's dataset. We initialize this computational graph into a Keras session, which we can then use for fine-tuning or for featurizing an input sequence by computing a forward pass of the computational graph. Note, we could have also just used `tensorflow` directly to do the model building and training, but Keras has some helpful utilities for data min-batching and pre-fetching that makes this very easy (at the cost of some incompatibilities: [issues with fine-tuning may arise](https://groups.google.com/a/tensorflow.org/forum/!topic/hub/Y4AdAM7HpX0). ###Code from encoder import encoder ??encoder ###Output _____no_output_____ ###Markdown Featurized DatasetHere's an example usage of converting the `comment_text` into a fixed sequence using our encoder and the **Universal Sentence Encoder**:```pythonuse_encoder = encoder(model="use")featurizer = use_encoder.transform_model()featurizer.summary()with tf.Session() as session: K.set_session(session) session.run(tf.global_variables_initializer()) session.run(tf.tables_initializer()) transformed_review = featurizer.predict(toxic_df.comment_text.values, batch_size=64)```This operation will take some time, ~1.5 hours on a machine with 16 cores. We have a pre-featurized version of this dataset already saved for you, which you can download using our helper functions: ###Code encoded_attacks = load_attack_encoded(data_dir) toxic_df['encoded_comment'] = encoded_attacks.values.tolist() toxic_df.loc[:5, ['comment_text', 'encoded_comment']] ###Output _____no_output_____ ###Markdown Model EvaluationHow do these features compare on discrimaniting between toxic / non-toxic comments? Let's put them in a raceoff. 
Sklearn Imports: ###Code from sklearn.model_selection import learning_curve from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import LabelBinarizer from sklearn.metrics import roc_auc_score, make_scorer lb = LabelBinarizer() train_sizes = np.arange(0.1, 1.1, 0.1) estimator = LogisticRegression() # estimator = GaussianNB() # estimator = RandomForestClassifier() def featurize(df=toxic_df): labels = np.concatenate(lb.fit_transform(df.is_attack.values)) glove_features = np.vstack(df.glove_aggregate.values) use_features = np.vstack(df.encoded_comment.values) return labels, glove_features, use_features labels, glove_features, use_features = featurize() ###Output _____no_output_____ ###Markdown Learning Curves:Learning curves allows us to visualize the performance of the system as a function of the amount of examples it's seen. We first create learning curves using the `glove_features`, and then we create learning curves of the `encoded_features`. We plot them together so we can compare: ###Code %%time g_train_sizes, g_train_scores, g_test_scores = learning_curve(estimator=estimator, X=glove_features, y=labels, scoring=make_scorer(roc_auc_score), n_jobs=NUM_WORKERS, train_sizes=train_sizes) %%time e_train_sizes, e_train_scores, e_test_scores = learning_curve(estimator=estimator, X=use_features, y=labels, scoring=make_scorer(roc_auc_score), n_jobs=NUM_WORKERS, train_sizes=train_sizes) ###Output CPU times: user 29.6 s, sys: 0 ns, total: 29.6 s Wall time: 1min 21s ###Markdown ResultsWe used AUC as our scoring criteria, but you could also use accuracy. ###Code import seaborn as sns %matplotlib inline results_df = pd.DataFrame({"train_perc": train_sizes, "bow_auc": np.mean(g_test_scores, axis=1), "encoder_auc": np.mean(e_test_scores, axis=1)}) sns.lineplot(x="train_perc", y="AUC", hue="features", data=results_df.melt("train_perc", var_name="features", value_name="AUC"), ) ###Output _____no_output_____ ###Markdown Discussion: The encoder features outperform the bag-of-words (BoW) glove vectors at every level of training data experience. While a more careful aggregation procedure of the word vectors would have done much better (in fact, there's good evidence that a thoughtful weighted-average can be [very hard to beat](https://openreview.net/forum?id=SyK00v5xx) on many discriminative tasks), the main point of this analysis is that using pre-trained encoders can basically be a drop-in replacement for word vectors for many applications and give significant gains, _modulo_ additional computation time to featurize the dataset (the BoW approach uses a lookup to compute features, which is very fast, whereas the encoder approach requires a full forward pass through a complicated recurrent neural network, which are inherently sequential (this is why there is a greater push towards [feed-forward architectures for language modeling](https://blog.openai.com/language-unsupervised/), which can be much faster during training and evaluation time). Error Analysis __WARNING: FOUL LANGUAGE AHEAD__:Let's see some examples of where our model using language model features outperformed our BoW model. 
We'll be a bit more fair to both models this time and do a quick grid search to find a well-optimized model: ###Code %%time model_df = toxic_df.loc[:,['is_attack', 'comment_text', 'glove_aggregate', 'encoded_comment']] from sklearn.model_selection import train_test_split, GridSearchCV train_df, test_df = train_test_split(model_df, train_size=0.75, random_state=12) def cv_predict_eval(): # cv = GridSearchCV(RandomForestClassifier(), # param_grid={ # 'n_estimators': [10, 100], # 'max_features': ['sqrt', 'log2'], # 'max_depth': [3, 5, None] # }, # refit=True, # n_jobs=NUM_WORKERS) cv = LogisticRegression() labels, glove_features, use_features = featurize(train_df) labels_test, glove_test, use_test = featurize(test_df) glove_fit = cv.fit(glove_features, labels) glove_hat = glove_fit.predict(glove_test) use_fit = cv.fit(use_features, labels) use_hat = use_fit.predict(use_test) results_df = test_df results_df['use_pred'] = use_hat results_df['glove_pred'] = glove_hat return results_df results_df = cv_predict_eval() ###Output /anaconda/envs/embeddings/lib/python3.6/site-packages/sklearn/model_selection/_split.py:2026: FutureWarning: From version 0.21, test_size will always complement train_size unless both are specified. FutureWarning) /anaconda/envs/embeddings/lib/python3.6/site-packages/ipykernel_launcher.py:27: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy ###Markdown Where BoW-GloVe Fails and the Encoder Succeeds: ###Code results_df.loc[(results_df["is_attack"] == results_df["use_pred"]) & (results_df["is_attack"] != results_df["glove_pred"]) & (results_df["is_attack"] == False), ["comment_text", "is_attack"]] results_df.loc[[59456], "comment_text"].values results_df.loc[[105512], "comment_text"].values ###Output _____no_output_____
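###Markdown For completeness, the mirror-image query is also worth a look: comments that the BoW-GloVe model classifies correctly while the encoder-based model gets them wrong. The cell below reuses the same `results_df` columns as above, just with the roles of the two prediction columns swapped.

###Code
# Cases where the BoW-GloVe model is right and the encoder-based model is wrong
results_df.loc[(results_df["is_attack"] == results_df["glove_pred"])
               & (results_df["is_attack"] != results_df["use_pred"]),
               ["comment_text", "is_attack"]]
###Output _____no_output_____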
Unsupervised Learning/Unsupervised Machine Learning.ipynb
###Markdown Author: Mahmoud EL OMAR : M1 - CORO IMARO.Artificial Intelligence Report : Lab2 - Clustering DataIn this lab, we will consider two types of data : \begin{enumerate}\item Synthetic 2-dimensional data split into 10 clusters along the $y=x$ axis\item A realistic N-dimesional data set, read from a csv file\end{enumerate}The synthetic data serves as a proof of concept since it's more easy to visualizeThe function below is the one we're gonna use to generate the synthetic data ###Code def getSynData(synNbClasses = 10,overlapping_coeff = 1,noise_coeff = 1): syn = np.empty([2, 0]) synLabels = [] for k in range(synNbClasses): syn = np.concatenate((syn, noise_coeff*np.random.rand(2, 100)+overlapping_coeff*k), axis=1) synLabels = np.concatenate((synLabels, np.ones(100)*k)) return syn.T,synLabels syn,synLabels = getSynData(overlapping_coeff=2,noise_coeff=1.5) print(syn.shape) plt.figure(figsize=(7,7)) plt.scatter(syn[:,0], syn[:,1], c = 10 - synLabels, cmap = 'rainbow', edgecolors = 'black') plt.show() ###Output (1000, 2) ###Markdown In the cell below, we see the snippet of code in which we import the real dataset ###Code rea1 = np.loadtxt('./data/data1.csv') rea2 = np.loadtxt('./data/data2.csv') rea = np.concatenate((rea1, rea2)) reaLabels = rea[:, 0] rea = rea[:, 1:] ###Output _____no_output_____ ###Markdown Evaluation MetricThe metric we'll use to evaluate the performance of our clustering models is the \textit{Adjusted Mutual Information} : \begin{equation} AMI(X,Y) = \frac{I(X;Y) - E\{ I(X;Y)\}}{max\{ H(X),H(Y)\} - E\{ I(X;Y)\}}\end{equation}With $I(X;Y)$ being the mutual information between the two random variables $X$ and $Y$. And $H(X), H(Y)$ being the respective entropies of the random variables $X$ and $Y$$E\{ I(X;Y)\} $ is the expectation of the mutual informationSee [\textit{Information Theory}](https://en.wikipedia.org/wiki/Information_theory) Euclidean Space ClusteringIn the next cell, we will implement the KMeans Algorithm on the synthetic data, taking the euclidean norm as our distance (Minkowski distance with $p = 2$) ###Code synModel = sklearn.cluster.KMeans(n_clusters = 10) synOutput = synModel.fit(syn) synScore = sklearn.metrics.adjusted_mutual_info_score(synLabels,synOutput.labels_) print("The score of our KMeans model for the synthetic data is the following : "+str(synScore)) ###Output The score of our KMeans model for the synthetic data is the following : 1.0 ###Markdown Now, we will repeat the same thing, but on our more realistic N-dimensional dataset ###Code reaModel = sklearn.cluster.KMeans(n_clusters=len(set(reaLabels))) reaOutput = reaModel.fit(rea) reaScore = sklearn.metrics.adjusted_mutual_info_score(reaLabels, reaOutput.labels_) print("The score of our KMeans model for the realistic data is the following : "+str(reaScore)) ###Output The score of our KMeans model for the realistic data is the following : 0.8999213285069089 ###Markdown Model SelectionNow, we will implement the Gap Statistics Criterion, to the see the robustness of our model, and how the loss evolves with respect to the number of clusters we look for.The inertia of the KMeans model is the within-cluster sum-of-squares : \begin{equation} \sum_{i = 0}^{n}\min_{\mu_j \in C}(||x_i - \mu_j||^2)\end{equation}The KMeans algorithm aims to minimize this measure.The Gap Statistics is the ratio of the Inertia of KMeans performed on random data, and the Inertia of KMeans performed on our target data. 
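Written out, for a given number of clusters $k$, this ratio is:\begin{equation} Gap(k) = \frac{W_{random}(k)}{W_{data}(k)}\end{equation}where $W_{random}(k)$ and $W_{data}(k)$ denote the KMeans inertia obtained with $k$ clusters on uniformly random data and on the dataset of interest, respectively.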
We will see how this criterion evoles as a function of the number of the clusters we chose to look for.This can also serve to find the optimum number of clusters $k$ when we don't know it a priori Gap Statistic Criterion for Synthetic DataWe will begin to do that for the synthetic data ###Code synGapVector = [] randomSyn = np.random.rand(syn.shape[0],syn.shape[1]) for k in range(40): # the number of clusters would belong the interval [1,40]. randomModel = sklearn.cluster.KMeans(n_clusters= k + 1).fit(syn) synModel = sklearn.cluster.KMeans(n_clusters= k + 1).fit(randomSyn) current_gap = randomModel.inertia_/reaModel.inertia_ synGapVector.append(current_gap) plt.figure(figsize=(7,7)) plt.plot(range(len(synGapVector)),synGapVector) plt.axvline(x = 10, linewidth = 0.5,color = 'r') # plots vertical line plt.axhline(y = synGapVector[10],linewidth = 0.5,color = 'r') # plots horizontal line plt.xlabel("k Number Of Clusters") plt.ylabel("Statistical Gap") plt.title("Gap Criterion For The Synthetic Dataset") plt.show() ###Output _____no_output_____ ###Markdown In the plot above, we see the plot of the Gap Statistics as a function of the number of clusters. We can evidently see it decreasing monotonically as the number of clusters increases. But after a specific number of clusters, we can see that the gap flattens and the decrease is no longer noticeable. That location at the "elbow" of the plot indicates the appropiate number of clusters we should adopt, and above which we would be increasing the computational costs without improving the performance of the model. So it's not wise at all to chose a number of clusters that lies beyond the "elbow".From the plot above, we can see that the appropriate number of clusters is indeed $10$.Now, let's increase the overlapping of the clusters in the synthetic dataset and see what happens. ###Code syn_1,synLabels_1 = getSynData(overlapping_coeff=1.75,noise_coeff=3) plt.figure(figsize=(7,7)) plt.scatter(syn_1[:,0], syn_1[:,1], c = 10 - synLabels_1, cmap = 'rainbow', edgecolors = 'black') plt.show() synGapVector_1 = [] randomSyn = np.random.rand(syn.shape[0],syn.shape[1]) for k in range(40): # the number of clusters would belong the interval [1,40]. randomModel = sklearn.cluster.KMeans(n_clusters= k + 1).fit(syn_1) synModel = sklearn.cluster.KMeans(n_clusters= k + 1).fit(randomSyn) current_gap = randomModel.inertia_/reaModel.inertia_ synGapVector_1.append(current_gap) plt.figure(figsize=(7,7)) plt.plot(range(len(synGapVector_1)),synGapVector_1) plt.axvline(x = 10, linewidth = 0.5,color = 'r') # plots vertical line plt.axhline(y = synGapVector_1[10],linewidth = 0.5,color = 'r') # plots horizontal line plt.xlabel("k Number Of Clusters") plt.ylabel("Statistical Gap") plt.title("Gap Criterion For The Synthetic Dataset with increased overlap") plt.show() ###Output _____no_output_____ ###Markdown After increasing the overlap as shown in the previous scatter plot, we can still see that the "elbow" is still located at k = 10, the decrease is more noticeable than before, but still ultimately not worth the extra computation.In the following cell, we'll increase the overlapping by much more than before. We'll discover in the scatter plot below that the clusters structure is pretty much gone. 
###Code syn_2,synLabels_2 = getSynData(overlapping_coeff=0.1,noise_coeff=1) plt.figure(figsize=(7,7)) plt.scatter(syn_2[:,0], syn_2[:,1], c = 10 - synLabels_2, cmap = 'rainbow', edgecolors = 'black') plt.show() synGapVector_2 = [] randomSyn = np.random.rand(syn.shape[0],syn.shape[1]) for k in range(40): # the number of clusters would belong the interval [1,40]. randomModel = sklearn.cluster.KMeans(n_clusters= k + 1).fit(syn_2) synModel = sklearn.cluster.KMeans(n_clusters= k + 1).fit(randomSyn) current_gap = randomModel.inertia_/reaModel.inertia_ synGapVector_2.append(current_gap) plt.figure(figsize=(7,7)) plt.plot(range(len(synGapVector_2)),synGapVector_2) plt.axvline(x = 10, linewidth = 0.5,color = 'r') # plots vertical line plt.axhline(y = synGapVector_2[10],linewidth = 0.5,color = 'r') # plots horizontal line plt.xlabel("k Number Of Clusters") plt.ylabel("Statistical Gap") plt.title("Gap Criterion For The Synthetic Dataset with way too much overlap") plt.show() ###Output _____no_output_____ ###Markdown After increasing the overlapping of the clusters way too much, we're starting to see the statistical gap still decreases considerably after what used to be the "elbow" in previous cases. One would be quick to judge that a higher number of clusters automatically means a better model. However that would be wrong, because if we increase the number of clusters to the limit, we'll end up assigning a unique cluster to each point, which is definitely NOT what we're looking for. In the case of too much overlapping, the data is just too noisy. So the statistic is not robust in the case of overlapping clustersThis is what we call a trivial solution that minimises the loss function. In this case, it sets the loss function to zero. We should avoid trivial solutions when implementing any optimisation algorithm. Gap Statistic Criterion for realistic dataWe will now implement the Gap Statistics Criterion on the realistic N-dimensional dataset ###Code reaGapVector = [] randomRea = np.random.rand(rea.shape[0],rea.shape[1]) for k in range(40): randomModel = sklearn.cluster.KMeans(n_clusters= k + 1).fit(rea) reaModel = sklearn.cluster.KMeans(n_clusters= k + 1).fit(randomRea) current_gap = randomModel.inertia_/reaModel.inertia_ reaGapVector.append(current_gap) plt.figure(figsize=(7,7)) plt.plot(range(len(reaGapVector)),reaGapVector) plt.axvline(x = 8, linewidth = 0.5,color = 'r') plt.axhline(y = reaGapVector[8],linewidth = 0.5,color = 'r') plt.xlabel("k Number Of Clusters") plt.ylabel("Statistical Gap") plt.title("Gap Criterion For The Real Dataset") plt.show() ###Output _____no_output_____ ###Markdown We can see, by inspection, from the plot above that "elbow" lies at $k = 8$ clusters. We could say the gap criterion allowed us to pick the correct number of clusters in the case of real data Non Euclidean Clustering (Spectral Clustering)The spectral clustering algorithm is implemented step by step as indicated in the given.We begin by writing the RBF kernel function in the cell below. 
\begin{equation} RBF(x,y) = -e^{\frac{||x-y||^2}{2\sigma^2}}\end{equation} The way I implemented the RBF kernel below, allows me to skip the step where we compute the matrix of pairwise euclidean distance, and it allows me to directly compute the matrix of weights $W$ ###Code def rbf(x,y,sigma = 1): n = np.linalg.norm(x-y)**2 n /= (2*sigma*sigma) return np.exp(-n) def getAdjancencyMatrix(X,sigma = 1.0,kernel = 'rbf'): W = np.zeros([X.shape[0],X.shape[0]]) for p in range(X.shape[0]): x = X[p,:] for j in range(p,X.shape[0]): y = X[j,:] if kernel == 'rbf': W[p,j] = rbf(x,y,sigma = sigma) elif kernel == 'normal': W[p,j] = np.linalg.norm(x-y)**2 W[j,p] = W[p,j] return W def spectralClustering(X,nbClusters,sigma = 1): assert nbClusters >= 2 ," Really ? You're looking for only one cluster ??" W = getAdjancencyMatrix(X,sigma = sigma) W -= np.eye(W.shape[0]) # making the diagonals zero, because there is no edge connecting # a point to itself in the graph, therefore according to graph theory, # the diagonals in the weighted adjancency matrix should be zero D = np.diag(W.sum(axis=1)) L = D - W s, Vh = np.linalg.eig(L) Vh = Vh[:,np.argsort(s)] # sorting the eigenvectors s = s[np.argsort(s)] # sorting the eigenvalues in increasing order eigenvector = Vh.real[:,:nbClusters].copy() # taking the first K eigenvectors into account, # because we are interested in finding 10 clusters #eigenvector /= np.linalg.norm(eigenvector,axis = 0) # normalize #Performing the KMeans on the eigenvectors spectralModel = sklearn.cluster.KMeans(n_clusters=nbClusters) spectralOuput = spectralModel.fit(eigenvector) return spectralOuput, s, Vh spectralOuput,s, Vh = spectralClustering(syn,10,sigma = 1) plt.figure(figsize=(7,7)) plt.scatter(syn[:,0], syn[:,1], c = spectralOuput.labels_, cmap='Spectral', edgecolors='black') plt.title("Visualisation of Spectral Output labels") ###Output _____no_output_____ ###Markdown We'll display a staircase plot of the eigenvalues in the cell below ###Code plt.figure(figsize=(7,7)) plt.step(range(len(s[...,:15])),s[...,:15]) plt.xlabel('eigenvalue index') plt.ylabel('value') plt.title("Staircase plot of the 10 first eigenvalues") ###Output _____no_output_____ ###Markdown We can see from the plot above that the eigenvector decomposition of the laplacian of the synthetic dataset is indeed step wise. We now shall try the packaged spectral clustering algorithm that comes with scikit-learn library, and see how it performs on the syn dataset ###Code adjacency_matrix = getAdjancencyMatrix(syn) sc = sklearn.cluster.SpectralClustering(10, affinity='precomputed', n_init=100, assign_labels='kmeans') sk_spectral_labels_out = sc.fit_predict(adjacency_matrix) print("The AMI score of the scikit-learn's own spectral clustering implementation, in the case of synthetic data, is "+str(adjusted_mutual_info_score(synLabels,sk_spectral_labels_out))) rea_adjacency_matrix = getAdjancencyMatrix(rea) sc = sklearn.cluster.SpectralClustering(8, affinity='precomputed', n_init=100, assign_labels='kmeans') sk_spectral_labels_out = sc.fit_predict(rea_adjacency_matrix) print("The AMI score of the scikit-learn's own spectral clustering implementation, in the case of the realistic data, is " + str(adjusted_mutual_info_score(reaLabels,sk_spectral_labels_out))) ###Output The AMI score of the scikit-learn's own spectral clustering implementation, in the case of the realistic data, is 0.5016533909555771 ###Markdown The SpectralClustering class in the scikit-learn library performs just as well as our own implementation. 
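###Markdown Before moving on, it is worth quantifying that comparison rather than judging it only from the scatter plot: the cell below scores the labels produced by our own `spectralClustering` implementation on the synthetic data with the same AMI metric used above (it reuses only objects already defined in this notebook).

###Code
# AMI score of our own spectral clustering implementation on the synthetic data
custom_spectral_score = adjusted_mutual_info_score(synLabels, spectralOuput.labels_)
print("The AMI score of our own spectral clustering implementation on the synthetic data is " + str(custom_spectral_score))
###Output _____no_output_____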
###Code sigmas = np.arange(0.01,10,0.5) print("Number of Sigmas : "+str(len(sigmas))) scores = [] for i in range(len(sigmas)): spectralOuput,_,_ = spectralClustering(syn,10,sigma = sigmas[i]) current_score = adjusted_mutual_info_score(synLabels,spectralOuput.labels_) scores.append(current_score) print(str(i) + "th Iteration. Score : " + str(current_score) + " | Sigma : "+str(sigmas[i])) ###Output Number of Sigmas : 20 0th Iteration. Score : 0.018769406548532507 | Sigma : 0.01 1th Iteration. Score : 1.0 | Sigma : 0.51 2th Iteration. Score : 1.0 | Sigma : 1.01 3th Iteration. Score : 1.0 | Sigma : 1.51 4th Iteration. Score : 0.7920039398621844 | Sigma : 2.01 5th Iteration. Score : 0.755773638791977 | Sigma : 2.51 6th Iteration. Score : 0.6343401227660639 | Sigma : 3.01 7th Iteration. Score : 0.5897686017128981 | Sigma : 3.51 8th Iteration. Score : 0.5437652838713587 | Sigma : 4.01 9th Iteration. Score : 0.5125163173635389 | Sigma : 4.51 10th Iteration. Score : 0.4441106744957 | Sigma : 5.01 11th Iteration. Score : 0.40502683002741546 | Sigma : 5.51 12th Iteration. Score : 0.40835059783908456 | Sigma : 6.01 13th Iteration. Score : 0.4083505978390845 | Sigma : 6.51 14th Iteration. Score : 0.40835059783908456 | Sigma : 7.01 15th Iteration. Score : 0.34159956581509315 | Sigma : 7.51 16th Iteration. Score : 0.34159956581509315 | Sigma : 8.01 17th Iteration. Score : 0.311265072253379 | Sigma : 8.51 18th Iteration. Score : 0.3055178140569142 | Sigma : 9.01 19th Iteration. Score : 0.3018971757631512 | Sigma : 9.51 ###Markdown Below, lies the plot depicting how well spectral clustering performs as $\sigma$ varies from $0.01$ until $10$By inspection, we see that, in the case of our synthetic data, we have several optimal values for $\sigma$, one of those is $\sigma = 1.0$ ###Code plt.figure(figsize=(7,7)) plt.plot(sigmas,scores) plt.axhline(y = 1.0, color = 'r', linewidth = 0.5) plt.axvline(x = 1.0, color = 'r', linewidth = 0.5) plt.xlabel("$σ$") plt.ylabel('AMI') plt.title("AMI wrt values of sigma : Synthetic Data") plt.grid(True) sigmas = np.arange(0.01,5,0.5) print(len(sigmas)) rea_scores = [] for sigma in sigmas: spectralOuput,_,_ = spectralClustering(rea,8,sigma = sigma) current_score = adjusted_mutual_info_score(reaLabels,spectralOuput.labels_) print(current_score) rea_scores.append(current_score) index = 4 plt.figure(figsize=(7,7)) plt.plot(sigmas,rea_scores) plt.axhline(y = rea_scores[index], color = 'r', linewidth = 0.5) plt.axvline(x = sigmas[index], color = 'r', linewidth = 0.5) plt.xlabel("$σ$") plt.ylabel('AMI') plt.title("AMI wrt values of sigma : Real Data") plt.grid(True) ###Output _____no_output_____
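###Markdown To close the sigma study, the cell below simply extracts the best-performing value of $\sigma$ for the real dataset from the sweep above.

###Code
# Report the sigma value that maximises AMI on the real dataset
best_idx = int(np.argmax(rea_scores))
print("Best sigma for the real data : " + str(sigmas[best_idx]) + " | AMI : " + str(rea_scores[best_idx]))
###Output _____no_output_____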
Sequence Modelling/Sequential_Modelling.ipynb
###Markdown ###Code import tensorflow as tf print(tf.__version__) ###Output 2.3.0 ###Markdown Sequence modelling Coding tutorials [1. The IMDb dataset](coding_tutorial_1) [2. Padding and masking sequence data](coding_tutorial_2) [3. The Embedding layer](coding_tutorial_3) [4. The Embedding Projector](coding_tutorial_4) [5. Recurrent neural network layers](coding_tutorial_5) [6. Stacked RNNs and the Bidirectional wrapper](coding_tutorial_6) *** The IMDb Dataset Load the IMDB review sentiment dataset ###Code # Import imdb import tensorflow.keras.datasets.imdb as imdb # Download and assign the data set using load_data() (x_train, y_train), (x_test, y_test) = imdb.load_data() ###Output Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz 17465344/17464789 [==============================] - 0s 0us/step ###Markdown Inspect the dataset ###Code # Inspect the type of the data type(x_train) # Display the first dataset element input # Notice encoding x_train[0] # Display the first dataset element output y_train[0] ###Output _____no_output_____ ###Markdown Load dataset with different options ###Code # Load the dataset with defaults imdb.load_data(path='imdb.npz', index_from=3) # ~/.keras/dataset/ # Limit the vocabulary to the top 500 words using num_words imdb.load_data(num_words=1000) # Ignore the top 10 most frequent words using skip_top imdb.load_data(skip_top=10, num_words=1000, oov_char=2) # Limit the sequence lengths to 500 using maxlen imdb.load_data(maxlen=500) # Use '1' as the character that indicates the start of a sequence imdb.load_data(start_char=1) ###Output _____no_output_____ ###Markdown Explore the dataset word index ###Code # Load the imdb word index using get_word_index() imbd_word_index = imdb.get_word_index() # View the word index as a dictionary, # accounting for index_from. 
index_from = 3 imbd_word_index = {key:value + index_from for key, value in imbd_word_index.items()} # Retrieve a specific word's index imbd_word_index['simpsonian'] # View an input sentence inv_imdb_word_index = {value: key for key, value in imbd_word_index.items()} [inv_imdb_word_index[index] for index in x_train[0] if index > index_from] # Get the sentiment value y_train[0] ###Output _____no_output_____ ###Markdown --- Padding and Masking Sequence Data ###Code # Load the imdb data set import tensorflow.keras.datasets.imdb as imdb (x_train, y_train), (x_test, y_test) = imdb.load_data() ###Output _____no_output_____ ###Markdown Preprocess the data with padding ###Code # Inspect the input data shape x_train.shape # Pad the inputs to the maximum length using maxlen padded_x_train = tf.keras.preprocessing.sequence.pad_sequences( x_train, maxlen=300, padding='post', truncating='pre' ) # Inspect the output data shape padded_x_train.shape ###Output _____no_output_____ ###Markdown Create a Masking layer ###Code # Import numpy import numpy as np # Masking expects to see (batch, sequence, features) # Create a dummy feature dimension using expand_dims padded_x_train = np.expand_dims(padded_x_train, -1) # Create a Masking layer tf_x_train = tf.convert_to_tensor(padded_x_train, dtype='float32') masking_layer = tf.keras.layers.Masking(mask_value=0.0) # Pass tf_x_train to it masked_x_train = masking_layer(tf_x_train) # Look at the dataset tf_x_train[0] # Look at the ._keras_mask for the dataset masked_x_train._keras_mask ###Output _____no_output_____ ###Markdown *** The Embedding layer Create and apply an `Embedding` layer ###Code # Create an embedding layer using layers.Embedding # Specify input_dim, output_dim, input_length embedding_layer = tf.keras.layers.Embedding(input_dim=501, output_dim=16) # Inspect an Embedding layer output for a fixed input # Expects an input of shape (batch, sequence, feature) sequence_of_indices = tf.constant([[0], [1], [5], [500]]) sequence_of_embeddings = embedding_layer(sequence_of_indices) sequence_of_embeddings # Inspect the Embedding layer weights using get_weights() embedding_layer.get_weights()[0] # Get the embedding for the 14th index embedding_layer.get_weights()[0][14,:] ###Output _____no_output_____ ###Markdown Create and apply an `Embedding` layer that uses `mask_zero=True` ###Code # Create a layer that uses the mask_zero kwarg masking_embedding_layer = tf.keras.layers.Embedding(input_dim=501, output_dim=16, mask_zero=True) # Apply this layer to the sequence and see the _keras_mask property masked_sequence_of_embeddings = masking_embedding_layer(sequence_of_indices) masked_sequence_of_embeddings._keras_mask ###Output _____no_output_____ ###Markdown --- The Embedding Projector Load and preprocess the IMDb data ###Code # A function to load and preprocess the IMDB dataset def get_and_pad_imdb_dataset(num_words=10000, maxlen=None, index_from=2): from tensorflow.keras.datasets import imdb # Load the reviews (x_train, y_train), (x_test, y_test) = imdb.load_data(path='imdb.npz', num_words=num_words, skip_top=0, maxlen=maxlen, start_char=1, oov_char=2, index_from=index_from) x_train = tf.keras.preprocessing.sequence.pad_sequences(x_train, maxlen=None, padding='pre', truncating='pre', value=0) x_test = tf.keras.preprocessing.sequence.pad_sequences(x_test, maxlen=None, padding='pre', truncating='pre', value=0) return (x_train, y_train), (x_test, y_test) # Load the dataset (x_train, y_train), (x_test, y_test) = get_and_pad_imdb_dataset() # A function to get the dataset 
word index def get_imdb_word_index(num_words=10000, index_from=2): imdb_word_index = tf.keras.datasets.imdb.get_word_index( path='imdb_word_index.json') imdb_word_index = {key: value + index_from for key, value in imdb_word_index.items() if value <= num_words-index_from} return imdb_word_index # Get the word index imdb_word_index = get_imdb_word_index() # Swap the keys and values of the word index inv_imdb_word_index = {value:key for key, value in imbd_word_index.items()} # View the first dataset example sentence [inv_imdb_word_index[index] for index in x_train[100] if index > 3] ###Output _____no_output_____ ###Markdown Build an Embedding layer into a model ###Code # Get the maximum token value max_index_value = max(imbd_word_index.values()) # Specify an embedding dimension embedding_dim = 16 # Build a model using Sequential: # 1. Embedding layer # 2. GlobalAveragePooling1D # 3. Dense model = tf.keras.Sequential([ tf.keras.layers.Embedding(input_dim=max_index_value+1, output_dim=embedding_dim, mask_zero=False), tf.keras.layers.GlobalAveragePooling1D(), tf.keras.layers.Dense(units=1, activation='sigmoid') ]) # Functional API refresher: use the Model to build the same model review_sequence = tf.keras.Input((None, )) embedding_sequece = tf.keras.layers.Embedding(input_dim=max_index_value+1, output_dim=embedding_dim)(review_sequence) average_embedding = tf.keras.layers.GlobalAveragePooling1D()(embedding_sequece) positive_probability = tf.keras.layers.Dense(units=1, activation='sigmoid')(average_embedding) model = tf.keras.Model(inputs=review_sequence, outputs=positive_probability) model.summary() ###Output Model: "functional_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, None)] 0 _________________________________________________________________ embedding_3 (Embedding) (None, None, 16) 1417408 _________________________________________________________________ global_average_pooling1d_1 ( (None, 16) 0 _________________________________________________________________ dense_1 (Dense) (None, 1) 17 ================================================================= Total params: 1,417,425 Trainable params: 1,417,425 Non-trainable params: 0 _________________________________________________________________ ###Markdown Compile, train, and evaluate the model ###Code # Compile the model with a binary cross-entropy loss model.compile(loss='binary_crossentropy', metrics=['accuracy'], optimizer='adam') # Train the model using .fit(), savng its history history = model.fit(x_train, y_train, epochs=5, batch_size=32, validation_data=(x_test, y_test), validation_steps=20) # Plot the training and validation accuracy import matplotlib.pyplot as plt %matplotlib inline plt.style.use('ggplot') history_dict = history.history acc = history_dict['accuracy'] val_acc = history_dict['val_accuracy'] loss = history_dict['loss'] val_loss = history_dict['val_loss'] epochs = range(1, len(acc) + 1) plt.figure(figsize=(14,5)) plt.plot(epochs, acc, marker='.', label='Training acc') plt.plot(epochs, val_acc, marker='.', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epoch') plt.ylabel('Classification accuracy') plt.legend(loc='lower right') plt.ylim(0, 1); ###Output _____no_output_____ ###Markdown The TensorFlow embedding projectorThe Tensorflow embedding projector can be found [here](https://projector.tensorflow.org/). 
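###Markdown Before exporting anything, a quick sanity check of the learned embedding space can be done directly in the notebook: pick a word and list the words whose embedding vectors have the highest cosine similarity to it, which is essentially what the projector's nearest-neighbours panel shows. This is only a sketch; the query word is an arbitrary choice, and the cell assumes nothing beyond the trained `model` and `imdb_word_index` defined above.

###Code
# Nearest neighbours of a word in the learned embedding space (cosine similarity)
embedding_weights = model.layers[1].get_weights()[0]
normed = embedding_weights / (np.linalg.norm(embedding_weights, axis=1, keepdims=True) + 1e-9)

query_word = 'brilliant'  # arbitrary example word, assumed to be in the vocabulary
query_vec = normed[imdb_word_index[query_word]]
cosine_sims = normed @ query_vec

inv_index = {token: word for word, token in imdb_word_index.items()}
nearest = np.argsort(cosine_sims)[::-1][1:11]  # skip the word itself
[(inv_index.get(int(token), '<pad/start/oov>'), round(float(cosine_sims[token]), 3)) for token in nearest]
###Output _____no_output_____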
###Code # Retrieve the embedding layer's weights from the trained model weights = model.layers[1].get_weights()[0] # Save the word Embeddings to tsv files # Two files: # one contains the embedding labels (meta.tsv), # one contains the embeddings (vecs.tsv) import io from os import path out_v = io.open(path.join('data', 'vecs.tsv'), 'w', encoding='utf-8') out_m = io.open(path.join('data', 'meta.tsv'), 'w', encoding='utf-8') k = 0 for word, token in word_index.items(): if k != 0: out_m.write('\n') out_v.write('\n') out_v.write('\t'.join([str(x) for x in weights[token]])) out_m.write(word) k += 1 out_v.close() out_m.close() # beware large collections of embeddings! ###Output _____no_output_____ ###Markdown --- Recurrent neural network layers Initialize and pass an input to a SimpleRNN layer ###Code # Create a SimpleRNN layer and test it simplernn_layer = tf.keras.layers.SimpleRNN(units=16) # Note that only the final cell output is returned sequence = tf.constant([[[1.0, 1.0], [2., 2.], [56., -100.]]]) layer_output = simplernn_layer(sequence) layer_output ###Output _____no_output_____ ###Markdown Load and transform the IMDB review sentiment dataset ###Code # A function to load and preprocess the IMDB dataset def get_and_pad_imdb_dataset(num_words=10000, maxlen=None, index_from=2): from tensorflow.keras.datasets import imdb # Load the reviews (x_train, y_train), (x_test, y_test) = imdb.load_data(path='imdb.npz', num_words=num_words, skip_top=0, maxlen=maxlen, start_char=1, oov_char=2, index_from=index_from) x_train = tf.keras.preprocessing.sequence.pad_sequences(x_train, maxlen=None, padding='pre', truncating='pre', value=0) x_test = tf.keras.preprocessing.sequence.pad_sequences(x_test, maxlen=None, padding='pre', truncating='pre', value=0) return (x_train, y_train), (x_test, y_test) # Load the dataset (x_train, y_train), (x_test, y_test) = get_and_pad_imdb_dataset(maxlen=250) # A function to get the dataset word index def get_imdb_word_index(num_words=10000, index_from=2): imdb_word_index = tf.keras.datasets.imdb.get_word_index( path='imdb_word_index.json') imdb_word_index = {key: value + index_from for key, value in imdb_word_index.items() if value <= num_words-index_from} return imdb_word_index # Get the word index using get_imdb_word_index() imbd_word_index = get_imdb_word_index() ###Output _____no_output_____ ###Markdown Create a recurrent neural network model ###Code # Get the maximum index value max_index_value = max(imdb_word_index.values()) # Using Sequential, build the model: # 1. Embedding. # 2. LSTM. # 3. Dense. 
model = tf.keras.Sequential([ tf.keras.layers.Embedding(input_dim=max_index_value+1, output_dim=embedding_dim, mask_zero=True), tf.keras.layers.LSTM(units=16), tf.keras.layers.Dense(units=1, activation='sigmoid') ]) ###Output _____no_output_____ ###Markdown Compile and fit the model ###Code # Compile the model with binary cross-entropy loss model.compile(loss='binary_crossentropy', metrics=['accuracy'], optimizer='adam') # Fit the model and save its training history model.fit(x_train, y_train, epochs=3, batch_size=3) ###Output Epoch 1/3 5707/5707 [==============================] - 374s 66ms/step - loss: 0.4668 - accuracy: 0.7851 Epoch 2/3 5707/5707 [==============================] - 381s 67ms/step - loss: 0.3105 - accuracy: 0.8764 Epoch 3/3 5707/5707 [==============================] - 369s 65ms/step - loss: 0.2129 - accuracy: 0.9190 ###Markdown Plot learning curves ###Code # Plot the training and validation accuracy import matplotlib.pyplot as plt %matplotlib inline plt.style.use('ggplot') history_dict = history.history acc = history_dict['accuracy'] val_acc = history_dict['val_accuracy'] loss = history_dict['loss'] val_loss = history_dict['val_loss'] epochs = range(1, len(acc) + 1) plt.figure(figsize=(14,5)) plt.plot(epochs, acc, marker='.', label='Training acc') plt.plot(epochs, val_acc, marker='.', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epoch') plt.ylabel('Classification accuracy') plt.legend(loc='lower right') plt.ylim(0, 1); ###Output _____no_output_____ ###Markdown Make predictions with the model ###Code # View the first test data example sentence # (invert the word index) inv_imdb_word_index = {value:key for key, value in imdb_word_index.items()} [inv_imdb_word_index[index] for index in x_test[0] if index > 2] # Get the model prediction using model.predict() model.predict(x_test[None, 0, :]) # Get the corresponding label y_test[0] ###Output _____no_output_____ ###Markdown --- Stacked RNNs and the Bidirectional wrapper Load and transform the IMDB review sentiment dataset ###Code # A function to load and preprocess the IMDB dataset def get_and_pad_imdb_dataset(num_words=10000, maxlen=None, index_from=2): from tensorflow.keras.datasets import imdb # Load the reviews (x_train, y_train), (x_test, y_test) = imdb.load_data(path='imdb.npz', num_words=num_words, skip_top=0, maxlen=maxlen, start_char=1, oov_char=2, index_from=index_from) x_train = tf.keras.preprocessing.sequence.pad_sequences(x_train, maxlen=None, padding='pre', truncating='pre', value=0) x_test = tf.keras.preprocessing.sequence.pad_sequences(x_test, maxlen=None, padding='pre', truncating='pre', value=0) return (x_train, y_train), (x_test, y_test) # Load the dataset (x_train, y_train), (x_test, y_test) = get_and_pad_imdb_dataset(maxlen=250) # A function to get the dataset word index def get_imdb_word_index(num_words=10000, index_from=2): imdb_word_index = tf.keras.datasets.imdb.get_word_index( path='imdb_word_index.json') imdb_word_index = {key: value + index_from for key, value in imdb_word_index.items() if value <= num_words-index_from} return imdb_word_index # Get the word index using get_imdb_word_index() imdb_word_index = get_imdb_word_index(num_words=5000) ###Output _____no_output_____ ###Markdown Build stacked and bidirectional recurrent models ###Code # Get the maximum index value and specify an embedding dimension max_index_value = max(imbd_word_index.values()) embedding_dim = 16 # Using Sequential, build a stacked LSTM model via return_sequences=True model = 
tf.keras.Sequential([ tf.keras.layers.Embedding(input_dim=max_index_value+1, output_dim=embedding_dim, mask_zero=True), tf.keras.layers.LSTM(units=32, return_sequences=True), tf.keras.layers.LSTM(units=32, return_sequences=False), tf.keras.layers.Dense(units=1, activation='sigmoid'), ]) # Using Sequential, build a bidirectional RNN with merge_mode='sum' model = tf.keras.Sequential([ tf.keras.layers.Embedding(input_dim=max_index_value+1, output_dim=embedding_dim, mask_zero=True), tf.keras.layers.Bidirectional( tf.keras.layers.LSTM(units=8), merge_mode='sum', backward_layer=tf.keras.layers.LSTM(units=8, go_backwards=True) ), tf.keras.layers.Dense(units=1, activation='sigmoid') ]) # Create a model featuring both stacked recurrent layers and a bidirectional layer model = tf.keras.Sequential([ tf.keras.layers.Embedding(input_dim=max_index_value+1, output_dim=embedding_dim, mask_zero=True), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=8, return_sequences=True), merge_mode='concat'), tf.keras.layers.GRU(units=8, return_sequences=False), tf.keras.layers.Dense(units=1, activation='sigmoid') ]) ###Output _____no_output_____ ###Markdown Compile and fit the model ###Code # Compile the model model.compile(loss='binary_crossentropy', metrics=['accuracy'], optimizer='adam') # Train the model, saving its history model.fit(x_train, y_train, epochs=3, batch_size=32) # Plot the training and validation accuracy import matplotlib.pyplot as plt %matplotlib inline plt.style.use('ggplot') history_dict = history.history acc = history_dict['accuracy'] val_acc = history_dict['val_accuracy'] loss = history_dict['loss'] val_loss = history_dict['val_loss'] epochs = range(1, len(acc) + 1) plt.figure(figsize=(14,5)) plt.plot(epochs, acc, marker='.', label='Training acc') plt.plot(epochs, val_acc, marker='.', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epoch') plt.ylabel('Classification accuracy') plt.legend(loc='lower right') plt.ylim(0, 1); ###Output _____no_output_____
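###Markdown
 A quick way to see what `return_sequences` and `merge_mode` actually change is to pass a dummy batch of "embedded" sequences through the layers and inspect the output shapes. The sketch below reuses the layer sizes from the models above; the input tensor is random and purely illustrative.

###Code
import tensorflow as tf

# Dummy batch: 4 sequences, 10 timesteps, embedding size 16
dummy_inputs = tf.random.uniform((4, 10, 16))

lstm_last = tf.keras.layers.LSTM(units=32)                         # final output only
lstm_seq = tf.keras.layers.LSTM(units=32, return_sequences=True)   # one output per timestep
bidir_sum = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=8), merge_mode='sum')
bidir_concat = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=8), merge_mode='concat')

print(lstm_last(dummy_inputs).shape)     # (4, 32)
print(lstm_seq(dummy_inputs).shape)      # (4, 10, 32) -> valid input for a stacked recurrent layer
print(bidir_sum(dummy_inputs).shape)     # (4, 8)  forward and backward outputs summed
print(bidir_concat(dummy_inputs).shape)  # (4, 16) forward and backward outputs concatenated
###Output
_____no_output_____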
notebooks/lab1/stage2/pdc_2_5.ipynb
###Markdown Imports ###Code from configuration.paths import * import numpy as np import pandas as pd import tensorflow as tf from tensorflow.keras import models, layers from src.datasets.pneumonia_detection_challenge import PneumoniaDetectionChallenge from src.utils.schemes import Scheme from src.utils.image import Image ###Output _____no_output_____ ###Markdown Dataset loading for training ###Code IMAGE_SIZE = (256, 256) pdc = PneumoniaDetectionChallenge(DATASET_PNEUMONIA_DETECTION_CHALLENGE_PATH, IMAGE_SIZE) Scheme.dataset_info(pdc) x_train, y_train, _ = pdc.load_train_data() x_train = x_train / 255.0 x_val, y_val, _ = pdc.load_val_data() x_val = x_val / 255.0 Scheme.labeled_images(x_train, y_train) ###Output _____no_output_____ ###Markdown Mdel definition ###Code model = models.Sequential() model.add(layers.Conv2D(filters=8, activation='relu', kernel_size=3, padding='same', input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3))) model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid')) model.add(layers.Conv2D(filters=16, activation="relu", kernel_size=3, padding="same")) model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid')) model.add(layers.Conv2D(filters=32, activation="relu", kernel_size=3, padding="same")) model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid')) model.add(layers.Conv2D(filters=128, activation="relu", kernel_size=3, padding="same")) model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid')) model.add(layers.Conv2D(filters=128, activation="relu", kernel_size=3, padding="same")) model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid')) model.summary() model.add(layers.Flatten()) model.add(layers.Dense(256, activation='relu')) model.add(layers.Dropout(0.5)) model.add(layers.Dropout(0.5)) model.add(layers.Dense(1, activation='sigmoid')) model.summary() model.compile(optimizer='adam', loss="binary_crossentropy", metrics=['binary_accuracy', tf.keras.metrics.Precision(name='precision'), tf.keras.metrics.Recall(name='recall')]) ###Output _____no_output_____ ###Markdown Model training ###Code history = model.fit(x=x_train, y=y_train, validation_data=(x_val, y_val), epochs=35) Scheme.training_graphs(history) del x_train del y_train ###Output _____no_output_____ ###Markdown Evaluating the model ###Code x_test, y_test, images = pdc.load_test_data() x_test = x_test / 255.0 test_loss, test_accuracy, _, _ = model.evaluate(x_test, y_test) images = [x_test[6], x_test[15], x_test[18], x_test[20], x_test[25], x_test[9], x_test[10], x_test[11], x_test[12], x_test[13]] labels = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0] images = Image.explainer(images, model, IMAGE_SIZE) Scheme.labeled_images(images, labels) predictions = model.predict(x_test) predictions_rounded = np.round(predictions).astype(int) Scheme.confusion_matrix(predictions_rounded, pdc.get_test_df().diagnosis.to_numpy()) ###Output _____no_output_____
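###Markdown
 The confusion matrix can be summarised with per-class precision, recall and F1 scores. A minimal sketch using scikit-learn, assuming `y_test` and `predictions_rounded` from the cells above are aligned with each other:

###Code
from sklearn.metrics import classification_report

# Per-class precision / recall / F1 for the test-set predictions
# (predictions_rounded is flattened from shape (n, 1) to (n,))
print(classification_report(y_test, predictions_rounded.ravel()))
###Output
_____no_output_____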
reports/c12-deploy_2021-10-13_19-00-01.ipynb
###Markdown 0.0. Imports ###Code import re import sqlite3 import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import plotly.express as px from sqlalchemy import create_engine from umap.umap_ import UMAP from scipy.cluster import hierarchy as hc from sklearn import cluster from sklearn import metrics from sklearn import preprocessing as pp from sklearn.decomposition import PCA from sklearn.manifold import TSNE from sklearn import ensemble as en from sklearn.mixture import GaussianMixture as gm ###Output _____no_output_____ ###Markdown 0.1. Helper Functions ###Code def descriptive_statistics(num_attr): # Central Tendency: mean, median c1 = pd.DataFrame(num_attr.apply(np.mean)) c2 = pd.DataFrame(num_attr.apply(np.median)) # Dispension: min, max, range, std, skew, kurtosis d1 = pd.DataFrame(num_attr.apply(min)) d2 = pd.DataFrame(num_attr.apply(max)) d3 = pd.DataFrame(num_attr.apply(lambda x: x.max() - x.min())) d4 = pd.DataFrame(num_attr.apply(lambda x: x.std())) # Measures of Shape s1 = pd.DataFrame(num_attr.apply(lambda x: x.skew())) s2 = pd.DataFrame(num_attr.apply(lambda x: x.kurtosis())) # concat m = pd.concat([d1,d2,d3,c1,c2,d4,s1,s2], axis=1).reset_index() m.columns = ['attributes', 'min', 'max', 'range', 'mean', 'median', 'std', 'skew', 'kurtosis'] return m ###Output _____no_output_____ ###Markdown 0.2. Load Data ###Code path = '/home/cid/repos/clustering-high-value-customers-identification/' df_raw = pd.read_csv(path + '/data/raw/Ecommerce.csv', encoding='latin1') # drop extra column df_raw = df_raw.drop('Unnamed: 8', axis=1) ###Output _____no_output_____ ###Markdown 1.0. Data Description ###Code df1 = df_raw.copy() ###Output _____no_output_____ ###Markdown 1.1. Rename Columns ###Code cols_new = ['invoice_no', 'stock_code', 'description', 'quantity', 'invoice_date', 'unit_price', 'customer_id', 'country'] df1.columns = cols_new ###Output _____no_output_____ ###Markdown 1.2. Data Dimnesions ###Code print('Number of Rows: {}'.format(df1.shape[0])) print('Number of Columns: {}'.format(df1.shape[1])) ###Output Number of Rows: 541909 Number of Columns: 8 ###Markdown 1.3. Data Types ###Code df1.dtypes ###Output _____no_output_____ ###Markdown 1.4. Check NA ###Code df1.isna().sum() ###Output _____no_output_____ ###Markdown 1.5. Replace NA ###Code df_missing = df1.loc[df1['customer_id'].isna(), :] df_not_missing = df1.loc[~df1['customer_id'].isna(), :] df_backup = pd.DataFrame(df_missing['invoice_no'].drop_duplicates()) df_backup['customer_id'] = np.arange(19000, 19000+len(df_backup), 1) # merge df1 = pd.merge(df1, df_backup, how='left', on='invoice_no' ) # coalesce df1['customer_id'] = df1['customer_id_x'].combine_first(df1['customer_id_y']) df1 = df1.drop(['customer_id_x', 'customer_id_y'], axis=1) df1.isna().sum() ###Output _____no_output_____ ###Markdown 1.6. Change dtypes ###Code df1.dtypes df1['invoice_date'] = pd.to_datetime(df1['invoice_date']) df1['customer_id'] = df1['customer_id'].astype(int) ###Output _____no_output_____ ###Markdown 1.7. Descriptive Statistics ###Code num_att = df1.select_dtypes(include=['int64', 'float64']) cat_att = df1.select_dtypes(include=['object']) ###Output _____no_output_____ ###Markdown 1.7.1. Numerical Attributes ###Code descriptive_statistics(num_att) ###Output _____no_output_____ ###Markdown 1.7.2. Categorical Attributes ###Code cat_att.describe(include=['O']) ###Output _____no_output_____ ###Markdown 2.0. Data Filtering ###Code df2 = df1.copy() ###Output _____no_output_____ ###Markdown 2.1. 
Filter Columns ###Code cols_drop = ['description'] df2 = df2.drop(cols_drop, axis=1) ###Output _____no_output_____ ###Markdown 2.2. Filter Rows ###Code # Numerical Attributes df2 = df2.loc[df2['unit_price'] >= 0.4, :] # Categorical Attributes df2 = df2.loc[~df2['stock_code'].isin(['POST', 'D', 'DOT', 'M', 'S', 'AMAZONFEE', 'm', 'DCGSSBOY', 'DCGSSGIRL', 'PADS', 'B', 'CRUK'] ), :] # map df2 = df2.loc[~df2['country'].isin(['European Community', 'Unspecified' ]), :] # bad user df2 = df2[~df2['customer_id'].isin( [16446] )] # quantity df2_returns = df2.loc[df2['quantity'] < 0, :] df2_purchases = df2.loc[df2['quantity'] >= 0, :] ###Output _____no_output_____ ###Markdown 3.0. Feature Engineering ###Code df3 = df2.copy() ###Output _____no_output_____ ###Markdown 3.1. Feature Creation ###Code drop_cols = ['invoice_no', 'stock_code', 'quantity', 'invoice_date', 'unit_price', 'country'] df_ref = df3.drop(drop_cols, axis=1).drop_duplicates(ignore_index=True) df2_purchases.loc[:, ['gross_revenue']] = (df2_purchases.loc[:, 'quantity'] * df2_purchases.loc[:, 'unit_price']) ###Output /home/cid/.pyenv/versions/3.8.0/envs/clustering-high-value-customers-identification/lib/python3.8/site-packages/pandas/core/indexing.py:1773: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy self._setitem_single_column(ilocs[0], value, pi) ###Markdown 3.1.1. Gross Revenue ###Code df_monetary = df2_purchases.loc[:, ['customer_id', 'gross_revenue']].groupby('customer_id').sum().reset_index() # .rename(columns={'gross_revenue': 'monetary'}) df_ref = pd.merge(df_ref, df_monetary, how='left', on='customer_id') df_ref.isna().sum() ###Output _____no_output_____ ###Markdown 3.1.2. Recency ###Code df_recency = df2_purchases.loc[:, ['customer_id', 'invoice_date']].groupby('customer_id').max().reset_index() df_recency['recency_days'] = (df_recency['invoice_date'].max() - df_recency['invoice_date']).dt.days df_ref = pd.merge(df_ref, df_recency[['customer_id', 'recency_days']], how='left', on='customer_id') df_ref.isna().sum() ###Output _____no_output_____ ###Markdown 3.1.3. Quatily of purchased ###Code df_freq = df2_purchases[['customer_id', 'invoice_no']].drop_duplicates().groupby('customer_id').count().reset_index().\ rename(columns={'invoice_no': 'qtde_invoices'}) df_ref = pd.merge(df_ref, df_freq, how='left', on='customer_id') df_ref.isna().sum() ###Output _____no_output_____ ###Markdown 3.1.4. Quantity total of items purchased ###Code df_freq = (df2_purchases.loc[:, ['customer_id', 'quantity']].groupby('customer_id') .sum() .reset_index() .rename(columns={'quantity': 'qtde_items'})) df_ref = pd.merge(df_ref, df_freq, how='left', on='customer_id') df_ref.isna().sum() ###Output _____no_output_____ ###Markdown 3.1.5. Quantity of products purchased ###Code df_freq = ( df2_purchases.loc[:, ['customer_id', 'stock_code']].groupby('customer_id') .count() .reset_index() .rename(columns={'stock_code': 'qtde_products'})) df_ref = pd.merge(df_ref, df_freq, how='left', on='customer_id') df_ref.isna().sum() ###Output _____no_output_____ ###Markdown 3.1.6. 
Average Ticket ###Code df_avg_ticket = (df2_purchases.loc[:, ['customer_id','gross_revenue']].groupby('customer_id') .mean() .reset_index() .rename(columns={'gross_revenue': 'avg_ticket'})) df_ref = pd.merge(df_ref, df_avg_ticket, how='left', on='customer_id') df_ref.isna().sum() ###Output _____no_output_____ ###Markdown 3.1.7. Average Recency Days ###Code # df_aux = df2[['customer_id', 'invoice_date']].drop_duplicates().sort_values(['customer_id', 'invoice_date'], ascending=[False, False]) # df_aux['next_customer_id'] = df_aux['customer_id'].shift() # df_aux['previus_date'] = df_aux['invoice_date'].shift() # df_aux['avg_recency_days'] = df_aux.apply( lambda x: (x['invoice_date'] - x['previus_date']).days if x['customer_id'] == x['next_customer_id'] else np.nan, axis=1) # df_aux['avg_recency_days'] = df_aux['avg_recency_days'] * -1 # df_aux = df_aux.drop(columns=['invoice_date', 'next_customer_id', 'previus_date'], axis=1).dropna() # df_avg_recency_days = df_aux.groupby( 'customer_id' ).mean().reset_index() # df_ref = pd.merge(df_ref, df_avg_recency_days, on='customer_id', how='left') # df_ref.isna().sum() ###Output _____no_output_____ ###Markdown 3.1.8. Frequency Purchase ###Code df_aux = (df2_purchases[['customer_id', 'invoice_no', 'invoice_date']].drop_duplicates() .groupby('customer_id') .agg( max_ = ('invoice_date', 'max'), min_ = ('invoice_date', 'min'), days = ('invoice_date', lambda x: ((x.max() - x.min()).days) + 1 ), buy_ = ( 'invoice_no', 'count' ))).reset_index() df_aux['frequency'] = df_aux[['buy_', 'days']].apply( lambda x: x['buy_'] / x['days'] if x['days'] != 0 else 0, axis=1 ) df_ref = pd.merge(df_ref, df_aux[['customer_id', 'frequency']], on='customer_id', how='left') df_ref.isna().sum() ###Output _____no_output_____ ###Markdown 3.1.9. Number Or Returns ###Code df_returns = df2_returns[['quantity', 'customer_id']].groupby('customer_id').sum().reset_index().rename(columns={'quantity': 'qtde_returns'}) df_returns['qtde_returns'] = df_returns['qtde_returns'] * -1 df_ref = pd.merge(df_ref, df_returns, on='customer_id', how='left') df_ref['qtde_returns'].fillna(0, inplace=True) df_ref.isna().sum() ###Output _____no_output_____ ###Markdown 3.1.10. Basket Size ###Code df_aux = (df2_purchases[['customer_id', 'invoice_no', 'quantity']].groupby('customer_id') .agg( n_purchase=('invoice_no', 'nunique'), n_products=('quantity', 'sum'))).reset_index() df_aux['avg_basket_size'] = df_aux['n_products'] / df_aux['n_purchase'] df_ref = pd.merge(df_ref, df_aux[['avg_basket_size', 'customer_id']], on='customer_id', how='left') df_ref.isna().sum() ###Output _____no_output_____ ###Markdown 3.1.11. Unique Basket Size ###Code df_aux = (df2_purchases.loc[:, ['customer_id', 'invoice_no', 'stock_code']].groupby( 'customer_id' ) .agg(n_purchase=('invoice_no', 'nunique'), n_products=('stock_code', 'nunique'))).reset_index() df_aux['avg_unique_basket_size'] = df_aux['n_products'] / df_aux['n_purchase'] df_ref = pd.merge(df_ref, df_aux[['avg_unique_basket_size', 'customer_id']], on='customer_id', how='left') df_ref.isna().sum() ###Output _____no_output_____ ###Markdown 4.0. EDA ###Code df_ref = df_ref.dropna() df4 = df_ref.copy() ###Output _____no_output_____ ###Markdown 4.3. 
Space Study ###Code # select dataser cols_selected = ['customer_id', 'gross_revenue', 'recency_days', 'qtde_products', 'frequency', 'qtde_returns'] df43 = df4[cols_selected].copy() mms = pp.MinMaxScaler() df43['gross_revenue'] = mms.fit_transform( df43[['gross_revenue']].values ) df43['recency_days'] = mms.fit_transform( df43[['recency_days']].values ) df43['qtde_products'] = mms.fit_transform( df43[['qtde_products']].values ) df43['frequency'] = mms.fit_transform( df43[['frequency']].values ) df43['qtde_returns'] = mms.fit_transform( df43[['qtde_returns']].values ) ###Output _____no_output_____ ###Markdown 4.3.4. Tree-Based Embbedding ###Code X = df43.drop(columns=['customer_id', 'gross_revenue']) y = df43['gross_revenue'] # model training rf_model = en.RandomForestRegressor( n_estimators=100, random_state=42 ) # model training definition rf_model.fit( X, y) # leaf df_leaf = pd.DataFrame( rf_model.apply( X ) ) # create dataframe tree df_tree = pd.DataFrame() # reduzer dimensionality reducer = UMAP(random_state=42) embedding = reducer.fit_transform( df_leaf ) # embedding df_tree['embedding_x'] = embedding[:, 0] df_tree['embedding_y'] = embedding[:, 1] ###Output /home/cid/.pyenv/versions/3.8.0/envs/clustering-high-value-customers-identification/lib/python3.8/site-packages/sklearn/manifold/_spectral_embedding.py:245: UserWarning: Graph is not fully connected, spectral embedding may not work as expected. warnings.warn("Graph is not fully connected, spectral embedding" ###Markdown 5.0. Data Preparation ###Code df5 = df_tree.copy() # df5.to_csv( '../src/data/tree_based_embedding.csv', index=False ) ###Output _____no_output_____ ###Markdown 7.0. Hyperpameter Fine Tuning ###Code X = df5.copy() X.head() ###Output _____no_output_____ ###Markdown 8.0. Model Training ###Code # model definition k = 8 gmm_model = gm(n_components=k, n_init=100, random_state=42) # model training gmm_model.fit(X) # model predict labels = gmm_model.predict(X) ###Output _____no_output_____ ###Markdown 8.2. Cluster Validation ###Code print("SS value: {}".format(metrics.silhouette_score( X, labels, metric='euclidean' ))) ###Output SS value: 0.37607449293136597 ###Markdown 9.0. Cluster Analysis ###Code df92 = df4[cols_selected].copy() df92['cluster'] = labels # change dtypes df92['recency_days'] = df92['recency_days'].astype(int) df92['qtde_products'] = df92['qtde_products'].astype(int) df92['qtde_returns'] = df92['qtde_returns'].astype(int) ###Output _____no_output_____ ###Markdown 9.2. 
Cluster Profile ###Code # cluster - qt_users - per_user df_cluster = df92[['customer_id', 'cluster']].groupby('cluster').count().reset_index().rename(columns={'customer_id': 'qt_users'}) df_cluster['per_user'] = 100 * (df_cluster['qt_users'] / df_cluster['qt_users'].sum()) # gross_revenue monetary = df92[['gross_revenue', 'cluster']].groupby('cluster').mean().reset_index() df_cluster = pd.merge(df_cluster, monetary, how='left', on='cluster') # recency_days recency_days = df92[['recency_days', 'cluster']].groupby('cluster').mean().reset_index() df_cluster = pd.merge(df_cluster, recency_days, how='left', on='cluster') # qtde_products qtde_products = df92[['qtde_products', 'cluster']].groupby('cluster').mean().reset_index() df_cluster = pd.merge(df_cluster, qtde_products, how='left', on='cluster') # frequency frequency = df92[['frequency', 'cluster']].groupby('cluster').mean().reset_index() df_cluster = pd.merge(df_cluster, frequency, how='left', on='cluster') # qtde_returns qtde_returns = df92[['qtde_returns', 'cluster']].groupby('cluster').mean().reset_index() df_cluster = pd.merge(df_cluster, qtde_returns, how='left', on='cluster') df_cluster.sort_values('gross_revenue', ascending=False).style.highlight_max( color='lightgreen', axis=0 ) # 1 Cluster Insiders # 5 Cluster More Products # 4 Cluster Spend Money # 2 Cluster Even More Products # 6 Cluster Less Days # 0 Cluster Less 1k # 7 Cluster Stop Returners # 3 Cluster More Buy ###Output _____no_output_____ ###Markdown **Cluster 01: ( Candidato à Insider )**- Número de customers: 468 (16% do customers )- Faturamento médio: 8836- Recência média: 21 dias- Média de Produtos comprados: 424 produtos- Frequência de Produtos comprados: 0.09 produtos/dia- Receita em média: $8836.13,00 dólares 10.0. EDA ###Code df10 = df92.copy() ###Output _____no_output_____ ###Markdown 11.0. Deploy to Product ###Code df11 = df10.copy() ###Output _____no_output_____ ###Markdown 11.1. Insert into SQLITE ###Code # create table query_create_table_insiders = """ CREATE TABLE insiders ( customer_id INTEGER, gross_revenue REAL, recency_days INTEGER, qtde_products INTEGER, frequency REAL, qtde_returns INTEGER, cluster INTEGER ) """ # conn = sqlite3.connect( 'insiders_db.sqlite' ) # conn.execute( query_create_table_insiders ) # conn.commit() # conn.close() # Drop Table query_drop_table = """ DROP TABLE insiders """ # insert data conn = create_engine( 'sqlite:///insiders_db.sqlite' ) df92.to_sql( 'insiders', con=conn, if_exists='append', index=False ) # consulting database query = """ SELECT * FROM insiders """ conn = create_engine( 'sqlite:///insiders_db.sqlite' ) df = pd.read_sql_query( query, conn ) df.head() ###Output _____no_output_____
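###Markdown
 As a side note, the cluster profile table from section 9.2 can also be produced in a single `groupby`/`agg` pass instead of one merge per metric. A sketch of the equivalent computation over `df92` (same columns, same averages; named aggregation requires pandas >= 0.25):

###Code
# Equivalent cluster profile in one pass
df_profile = (df92.groupby('cluster')
                  .agg(qt_users=('customer_id', 'count'),
                       gross_revenue=('gross_revenue', 'mean'),
                       recency_days=('recency_days', 'mean'),
                       qtde_products=('qtde_products', 'mean'),
                       frequency=('frequency', 'mean'),
                       qtde_returns=('qtde_returns', 'mean'))
                  .reset_index())

df_profile['per_user'] = 100 * df_profile['qt_users'] / df_profile['qt_users'].sum()
df_profile.sort_values('gross_revenue', ascending=False)
###Output
_____no_output_____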
Examples/ReasoningEngineBasicsDotNetInteractive.ipynb
###Markdown Reasoning Engine BasicsThis notebook provides a basic introduction to the reasoning engine framework and the Reasoning Engine Intermediate Language (REIL). * [Introduction](Introduction)* [Model Definition and Model Checking](ModelChecking): Basic model definition using REIL and SMT-based verification/synthesis * ['Hybrid' Models](HybridModels): Defining models that combine Boolean and numerical variables* [Synchronous vs Asynchronous Updates](SyncVsAsync): Examples of defining synchronous and asynchronous dynamical systems* [Model Enumeration](Enumeration): When synthesizing a single satisfying model is not enough* [System and Path Variables](VariableScope): Examples of different variable scopes* [Higher-level DSLs](DSLs): Examples of using REIL to define richer, domain-specific languages* [Notes](Notes): Some additional notes ###Code #load @"../REInteractiveAPI/ReLoad.fsx" open ReasoningEngine ###Output _____no_output_____ ###Markdown IntroductionA Reasoning Engine ``model`` is described by a number of discrete state variables, which could be of ``int`` (int), ``nat`` (non-negative integer) or ``bool`` (Boolean) type. Each variable can be either a ``system``, ``path`` or ``state`` variable. Path variables are replicated for each trajectory of the system that is considered as part of the analysis. State variables are replicated for each experiment and at every discrete time step and system variable are not replicated. Constraints are defined over the different variables of the system. Model Definition and Model CheckingFor our first model, we consider a simple system with only a single state variable ``x``, which is of type ``int``. We specify the model using the REIL language, load it and check if solutions exist using the SMT solver Z3. ###Code """ state int x; """ |> ReilAPI.Load |> ReilAPI.CheckAndPrint ###Output _____no_output_____ ###Markdown Using the SMT solver we find that solutions exist, meaning that a valid assignment of all system variables can be found. However, this is not very interesting because our system does not specify any constraints or even system dynamics yet. Let's fix that next by specifying that our system is a simple counter, where the value of ``x`` increases at every step. We will also specify some constraints about the executions of our counter. We consider an execution (also reffered to as a trajectory or path) of the system called ``test`` and specify that in this execution the value of the counter was initially ``0`` and is above ``0`` at step ``10``. ###Code """ state int x; update p[k].x := p[k-1].x + 1; #test[0].x = 0; #test[10].x > 0; """ |> ReilAPI.Load |> ReilAPI.Check |> TrajVis.PlotSolutionTrajectories ###Output _____no_output_____ ###Markdown Next, we relax the constraints on our counter a bit. Instead of specifying the initial state, we simply require that the value of ``x`` is above ``20`` at step ``10``. We synthesize a solution, where a suitable initial state has been selected by the solver to ensure that the constraint is satisfied. ###Code """ state int x; update p[k].x := p[k-1].x + 1; #test[10].x > 20; """ |> ReilAPI.Load |> ReilAPI.Check |> TrajVis.PlotSolutionTrajectories ###Output _____no_output_____ ###Markdown Multiple executions of the same system but with different constraints can be specified and considered as part of the synthesis problem. Here, we require that for the same counter system one trajectory ``test1`` is above ``20`` and another is below ``5`` at step ``10``. 
Once again, suitable and different initial states are synthesized for the two trajectories to satisfy the constraints. ###Code """ state int x; update p[k].x := p[k-1].x + 1; #test1[10].x > 20; #test2[10].x < 5; """ |> ReilAPI.Load |> ReilAPI.Check |> TrajVis.PlotSolutionTrajectories ###Output _____no_output_____ ###Markdown 'Hybrid' ModelsAlthough the Reasoning Engine currently supports only discrete models, variables can be of types ``int`` (or ``nat``) and ``bool``. This allows for the construction of 'hybrid' models that combine logical and numerical dynamics. In the following, we develop a basic temperature control system model. We assume that the temperature increases by one degree per time step whenever the heating is on but decreases when the heating is off. To capture this, we use two ``state`` variables: an ``int`` variable ``temperature`` (since no continuous quantities are currently supported) and a ``bool`` ``heasIsOn``. We define the system dynamics such that the heating is switch on whenever the temperature drops below ``18`` degrees. We are interested in checking whether it is possible to reach temperature below ``18`` despite our temperature controller, which turns out to be possible as illustrated by the trajectories synthesized by the Reasoning Engine. ###Code """ unique state int temperature; unique state bool heatIsOn; update p[k].temperature := if (p[k-1].heatIsOn) then (p[k-1].temperature + 1) else (p[k-1].temperature - 1), p[k].heatIsOn := p[k-1].temperature < 18; #test[0].temperature = 20; #test[10].temperature < 18; """ |> ReilAPI.Load |> ReilAPI.Check |> TrajVis.PlotSolutionTrajectories ###Output _____no_output_____ ###Markdown Synchronous vs Asynchronous UpdatesThe temperature controller example above introduced update rules for two separate variables. As defined for the temperature controller this specifies that both variables are updated synchronously, which generally results in deterministic updates (unless there are other sources of non-determinism). The same update rules are used to define the example below, where state variables ``x`` and ``y`` are updated synchronously at each time step. No solutions are found in this case because the value reached at step ``10`` (for both ``x`` and ``y``) cannot be ``5``. ###Code """ unique state int x; unique state int y; update p[k].x := p[k-1].x + 1, p[k].y := p[k-1].y + 1; #test[0].x = 0; #test[0].y = 0; #test[10].x = 5; """ |> ReilAPI.Load |> ReilAPI.Enumerate 10 |> TrajVis.PlotSolutionTrajectories ###Output _____no_output_____ ###Markdown The Reasoning Engine also supports asynchronous update rule definitions. The example below is similar to the one above but now either of the two update rules can be triggered asynchronously at each time step. Here, we find solutions for the same constraints because different interleavings of the two update rules can result in trajectories, where ``x`` reaches the value of ``5``. ###Code """ unique state int x; unique state int y; update p[k].x := p[k-1].x + 1; update p[k].y := p[k-1].y + 1; #test[0].x = 0; #test[0].y = 0; #test[10].x = 5; """ |> ReilAPI.Load |> ReilAPI.Enumerate 10 |> TrajVis.PlotSolutionTrajectories ###Output _____no_output_____ ###Markdown Model EnumerationIn the examples so far, we were only interested in synthesizing a single model of the dynamical system satisfying all constraints. If we assume that the constraints represent some behavior of the system we have observer, in general there might be many possible models capable of reproducing this behavior. 
Selecting only a single solution is a common problem in the modeling of physical system, which can bias the results and conclusions by introducing hidden assumptions (e.g. why is the first model better). To address this problem, the Reasoning Engine provides functionality for enumerating multiple models consistent with the constraints. To illustrate this, we again consider the counter system. We modify the system definition by specifying that the variable ``x`` is ``unique``. Defining variables as unique provides a mechanisms for specifying when is one solution different from another (e.g. at least one unique variable must be different). Similarly, non-unique variables are not considered as part of the enumeration, thus reducing the number of possible solutions. Enumerating 5 different solutions for our counter system reveals different ways of achieving the specification and reaching a value above ``20`` at step ``10``. ###Code """ unique state int x; update p[k].x := p[k-1].x + 1; #test[10].x > 20; """ |> ReilAPI.Load |> ReilAPI.Enumerate 5 |> TrajVis.PlotSolutionTrajectories ###Output _____no_output_____ ###Markdown System and path variablesIn the examples so far we only considered ``state`` variables (i.e. variables that change along the executions of the system). Besides ``state`` variables, the Reasoning Engine also handles variables with ``system`` or ``path`` scope. In the example below, we extend the counter system, so that it can count by an arbitrary ``increment``. In this model, the increment is a system properties, so all trajectories of the system would increase by the same amount at each step. We also tighten the constraints again, by specifying that our counter starts at ``0`` and enumerate solutions. While the state variable ``x`` is unique, trajectories of this model are in fact deterministic (and here start from ``0``) once the ``increment`` is selected. Therefore, the different solutions represent different possible choices of the unique ``increment`` variable. While we are enumerating up to ``100`` different solutions, only three models are synthesized, indicating that no other models are consistent with the constraints of achieving a value between ``10`` and ``50`` at step ``10``. ###Code """ unique state int x; unique system nat increment; update p[k].x := p[k-1].x + increment; #test[0].x = 0; #test[10].x > 10; #test[10].x < 50; """ |> ReilAPI.Load |> ReilAPI.Enumerate 100 |> TrajVis.PlotSolutionTrajectories ###Output _____no_output_____ ###Markdown To illustrate the difference between ``system`` and ``path`` variables, we consider the problem of synthesizing a counter that can reach the value of either ``10`` or ``20`` in two different executions (``test1`` and ``test1``), both of which start at ``0``. ###Code """ unique state int x; unique system nat increment; update p[k].x := p[k-1].x + increment; #test1[0].x = 0; #test2[0].x = 0; #test1[10].x = 10; #test2[10].x = 20; """ |> ReilAPI.Load |> ReilAPI.Enumerate 100 |> TrajVis.PlotSolutionTrajectories ###Output _____no_output_____ ###Markdown We verify that this problem is unsatisfiable and no solutions exist. This is expected, since the same ``system`` increment cannot produce two different trajectories. In the example below we modify the model so that ``increment`` is now a ``path`` variable. This allows for a distinct value of ``increment`` to be synthesized for each of the two executions and the problem becomes satisfiable. Notice that only a single solution is found despite the enumeration. 
Indeed, ``test1.increment = 1`` and ``test2.increment = 2`` is the only possible variable assignment that is consistent with the specification. ###Code """
unique state int x;
unique path nat increment;

update p[k].x := p[k-1].x + p.increment;

#test1[0].x = 0;
#test2[0].x = 0;

#test1[10].x = 10;
#test2[10].x = 20;
"""
|> ReilAPI.Load
|> ReilAPI.Enumerate 100
|> TrajVis.PlotSolutionTrajectories

"""
unique state int x;
unique state int y;

update p[k].x := p[k-1].x + p[k].y;

#test[0].x = 0;
#test[0].y = 1;

#test[10].x > 0;
"""
|> ReilAPI.Load
|> ReilAPI.Enumerate 100
|> TrajVis.PlotSolutionTrajectories
###Output
 _____no_output_____
###Markdown
 Higher-level DSLsThe purpose of REIL is to provide a basic intermediate language that could be a compilation target for a variety of DSLs. With this approach, the low-level details of encoding problems into SMT, calling the solver (in this case Z3) and processing the solutions are handled by the Reasoning Engine. Furthermore, this approach allows for additional tactics, problem transformations, simplifications and reasoning strategies to be implemented as part of the Reasoning Engine and reused when reasoning about models defined using higher-level DSLs. The approach has been used to define several biological DSLs compiling to REIL in other projects.In the following we illustrate this idea by creating a simple 'embedded DSL' for Boolean networks. These networks are simple models of genetic interaction. At each state, each gene is either active (true) or inactive (false). Interactions between the genes define the next state of the system. For simplicity, here we consider only synchronous networks with AND-type regulation. We also implement only very limited constraints (e.g. to capture experimental biological observations). Instead of producing REIL text, the example below directly constructs the F# Reasoning Engine model as an illustration of possible programmatic modeling. 
###Code open Microsoft.Research.ReasoningEngine.Model open Microsoft.Research.ReasoningEngine.Var open Microsoft.Research.ReasoningEngine.Dynamics open Microsoft.Research.ReasoningEngine.Constraint type Gene = Gene of string type Interaction = | Activates of (Gene * Gene) //Source * Target | Represses of (Gene * Gene) type Observation = | Active of (string * int * Gene) //experiment * gene * step | Inactive of (string * int * Gene) type Network = { genes : Gene[] interactions : Interaction[] observations : Observation[] } static member Encode (network:Network) = //define a state variable for each gene let sys = Array.fold (fun (acc:DSystem) (Gene g) -> acc.DeclareStateVar(g,Type.Bool)) DSystem.EmptySystem network.genes //add update rules based on the unteractions let updateRules = network.interactions |> Array.map (fun interaction -> match interaction with | Activates (source, target) -> (true, source, target) | Represses (source, target) -> (false, source, target) ) |> Array.groupBy (fun (_,_,target) -> target) |> Array.map(fun (Gene target, regulators) -> regulators |> Seq.map(fun (flag, Gene source, _) -> let v = AbsStateVar(-1, source) //for all paths, for all steps k, consider k-1 |> BVar |> BTerm if flag then v else Not v ) |> LAnd |> BExpr |> fun expr -> AssignmentRule.Create(target, expr) |> Assignment ) let sys = {sys with updates = [Update.Create(None, updateRules)]} let model = Model.NewModel sys //add constraints network.observations |> Array.map(fun obs -> match obs with | Active (exp, t, g) -> (true, exp, t, g) | Inactive (exp, t, g) -> (false, exp, t, g) ) |> Array.map(fun (flag, exp, t, Gene g) -> let v = StateVar(exp, t, g) //for all paths, for all steps k, consider k-1 |> BVar |> BTerm if flag then v else Not v ) |> Array.fold (fun acc cst -> {acc with constraints = acc.constraints.AddObservation(cst, "Constraint")} ) model ###Output _____no_output_____ ###Markdown Next, we use our 'embedded DSL' to define a simple oscillating gene network based on the [Repressilator design](https://en.wikipedia.org/wiki/Repressilator). ###Code let A, B, C = Gene "A", Gene "B", Gene "C" { genes = [|A; B; C|] interactions = [| Represses (A, B) Represses (B, C) Represses (C, A) |] observations = [| Active("Experiment1", 0, A) Inactive("Experiment1", 0, B) Inactive("Experiment1", 0, C) Inactive("Experiment1", 10, A) |] } |> Network.Encode |> ReilAPI.Enumerate 100 |> TrajVis.PlotSolutionTrajectories ###Output _____no_output_____
notebooks/Monopartite Weight Distribution.ipynb
###Markdown Monopartite projection edge weight distribution ###Code # Some path wizardry to make python acknowledge relative paths, just ignore this part... import sys; sys.path.append('..') import csv import networkx as nx import matplotlib.pyplot as plt from pelote import ( table_to_bipartite_graph, graph_to_edges_dataframe, monopartite_projection ) with open('../data/bipartite2.csv') as f: bi = table_to_bipartite_graph(csv.DictReader(f), 'account', 'post') for metric in [None, 'jaccard', 'dice', 'overlap', 'binary_cosine']: mono = monopartite_projection(bi, 'account', metric=metric) ax = graph_to_edges_dataframe(mono).weight.plot.hist(bins=25) ax.set_title(metric or 'raw') plt.show() ###Output _____no_output_____
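###Markdown
 For reference, the metrics above all compare the sets of shared neighbours of two nodes in the bipartite graph. Assuming the standard definitions of these similarity measures (which is what the metric names suggest), here is a tiny worked example on two hypothetical neighbour sets:

###Code
from math import sqrt

# Two hypothetical neighbour sets (e.g. posts shared by two accounts)
A = {'post1', 'post2', 'post3'}
B = {'post2', 'post3', 'post4', 'post5'}

inter = len(A & B)

jaccard = inter / len(A | B)                   # |A ∩ B| / |A ∪ B|
dice = 2 * inter / (len(A) + len(B))           # 2|A ∩ B| / (|A| + |B|)
overlap = inter / min(len(A), len(B))          # |A ∩ B| / min(|A|, |B|)
binary_cosine = inter / sqrt(len(A) * len(B))  # |A ∩ B| / sqrt(|A| * |B|)

print(jaccard, dice, overlap, binary_cosine)   # 0.4, 0.571..., 0.667..., 0.577...
###Output
_____no_output_____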
02_lab/Solution_lab2_classification_seminar.ipynb
###Markdown Logistic regressionIn this seminar you will implement a logistic regression and train it using stochastic gradient descent modiffications, numpy and your brain. ###Code !wget https://github.com/yandexdataschool/MLatImperial2020/raw/master/02_lab/dataset_scaled.pkl . !wget https://github.com/yandexdataschool/MLatImperial2020/raw/master/02_lab/dataset_not_scaled.pkl . import numpy as np import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown Two-dimensional classification problemTo make things more intuitive, let's solve a 2D classification problem with syntetic data. ###Code import pickle with open("dataset_scaled.pkl", "rb") as f: X, y = pickle.load(f) plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) plt.show() print("X:\n{}\ny:\n{}".format(X[:3],y[:3])) ###Output X: [[-0.9698787 -1.31431435] [ 0.84830537 -0.43881443] [-1.52651763 -2.13810144]] y: [0 1 0] ###Markdown **Your task starts here**Since the problem above isn't linearly separable, we add quadratic features to the classifier.Implement this transformation in the __expand__ function. ###Code def expand(X): """ Adds quadratic features. This function allows your linear model to make non-linear separation. For each sample (row in matrix), compute an expanded row: [feature0, feature1, feature0^2, feature1^2, feature1*feature2, 1] :param X: matrix of features, shape [n_samples,2] :returns: expanded features of shape [n_samples,6] """ X_0 = X[:,0] X_1 = X[:,1] X_expanded = np.stack([X_0, X_1, X_0**2, X_1**2, X_0 * X_1, np.ones_like(X_0)], axis=1) return X_expanded #simple test on random numbers #[all 8 random numbers are 100% random :P] dummy_X = np.array([ [0,0], [1,0], [2.61,-1.28], [-0.59,2.1] ]) #call your expand function dummy_expanded = expand(dummy_X) #what it should have returned: x0 x1 x0^2 x1^2 x0*x1 1 dummy_expanded_ans = np.array([[ 0. , 0. , 0. , 0. , 0. , 1. ], [ 1. , 0. , 1. , 0. , 0. , 1. ], [ 2.61 , -1.28 , 6.8121, 1.6384, -3.3408, 1. ], [-0.59 , 2.1 , 0.3481, 4.41 , -1.239 , 1. ]]) #tests assert isinstance(dummy_expanded,np.ndarray), "please make sure you return numpy array" assert dummy_expanded.shape==dummy_expanded_ans.shape, "please make sure your shape is correct" assert np.allclose(dummy_expanded,dummy_expanded_ans,1e-3), "Something's out of order with features" print("Seems legit!") ###Output Seems legit! ###Markdown Logistic regressionNow, let's write function that predicts class given X as in logistic regression.The math should look like this:$$ P(y| \vec x, \vec w) = \sigma(\vec x \cdot \vec w )$$where x represents features, w are weights and $$\sigma(a) = {1 \over {1+e^{-a}}}$$We shall omit $ \vec {arrows} $ in further formulae for simplicity. ###Code def sigmoid(x): return 1 / (1 + np.exp(-x)) logits = np.linspace(-10, 10, 101) plt.plot(logits, sigmoid(logits)); def classify(X, w): """ Given input features and weights return predicted probabilities of y==1 given x, P(y=1|x), see description above __don't forget to expand X inside classify and other functions__ :param X: feature matrix X of shape [n_samples,2] (non-exanded) :param w: weight vector w of shape [6] for each of the expanded features :returns: an array of predicted probabilities in [0,1] interval. 
""" a = np.dot(expand(X),w) return sigmoid(a) #sample usage / test just as the previous one dummy_weights = np.linspace(-1,1,6) dummy_probs = classify(dummy_X,dummy_weights) dummy_answers = np.array([ 0.73105858, 0.450166 , 0.02020883, 0.59844257]) assert isinstance(dummy_probs,np.ndarray), "please return np.array" assert dummy_probs.shape == dummy_answers.shape, "please return an 1-d vector with answers for each object" assert np.allclose(dummy_probs,dummy_answers,1e-3), "There's something non-canonic about how probabilties are computed" ###Output _____no_output_____ ###Markdown The loss you should try to minimize is the Logistic Loss aka crossentropy aka negative log-likelihood:$$ L = - {1 \over N} \sum_i {y \cdot log P(y|x,w) + (1-y) \cdot log (1-P(y|x,w))}$$ ###Code def compute_loss(X, y, w): """ Given feature matrix X [n_samples,2], target vector [n_samples] of +1/0, and weight vector w [6], compute scalar loss function using formula above. """ return -np.mean(y*np.log(classify(X,w))+(1-y)*np.log(1-classify(X,w))) dummy_y = np.array([0,1,0,1]) dummy_loss = compute_loss(dummy_X,dummy_y,dummy_weights) assert np.allclose(dummy_loss,0.66131), "something wrong with loss" ###Output _____no_output_____ ###Markdown Since we train our model with gradient descent, we gotta compute gradients.To be specific, we need a derivative of loss function over each weight [6 of them].$$ \nabla L = {\partial L \over \partial w} = ...$$No, we won't be giving you the exact formula this time. Instead, try figuring out a derivative with pen and paper. As usual, we've made a small test for you, but if you need more, feel free to check your math against finite differences (estimate how L changes if you shift w by $10^-5$ or so). ###Code def compute_grad(X, y, w): """ Given feature matrix X [n_samples,2], target vector [n_samples] of +1/0, and weight vector w [6], compute vector [6] of derivatives of L over each weights. 
""" return ((classify(X, w) - y)[:,np.newaxis] * expand(X)).mean(axis=0) #tests dummy_grads = compute_grad(dummy_X,dummy_y,dummy_weights) #correct answers in canonic form dummy_grads_ans = np.array([-0.06504252, -0.21728448, -0.1379879 , -0.43443953, 0.107504 , -0.05003101]) assert isinstance(dummy_grads,np.ndarray) assert dummy_grads.shape == (6,), "must return a vector of gradients for each weight" assert len(set(np.round(dummy_grads/dummy_grads_ans,3))), "gradients are wrong" assert np.allclose(dummy_grads,dummy_grads_ans,1e-3), "gradients are off by a coefficient" ###Output _____no_output_____ ###Markdown Here's an auxiliary function that visualizes the predictions ###Code from IPython import display h = 0.01 x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) def visualize(X, y, w, history): plt.figure(figsize=(12,6)) """draws classifier prediction with matplotlib magic""" Z = classify(np.c_[xx.ravel(), yy.ravel()], w) Z = Z.reshape(xx.shape) plt.subplot(1,2,1) plt.contourf(xx, yy, Z, alpha=0.8) plt.colorbar() plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.subplot(1,2,2) plt.plot(history) plt.grid() plt.ylabel("Loss") plt.xlabel("Iteration") ymin, ymax = plt.ylim() plt.ylim(0, ymax) display.clear_output(wait=True) plt.show() visualize(X, y, dummy_weights, [1, 0.5, 0.25],) ###Output _____no_output_____ ###Markdown TrainingIn this section, we'll use the functions you wrote to train our classifier using stochastic gradient descent.Try to find an optimal learning rate for gradient descent for the given batch size. **Don't change the batch size!** ###Code w = np.array([0,0,0,0,0,1]) alpha = 0.1 n_iter = 50 batch_size = 4 loss = np.zeros(n_iter) plt.figure(figsize=(12,5)) for i in range(n_iter): ind = np.random.choice(X.shape[0], batch_size) loss[i] = compute_loss(X, y, w) visualize(X[ind,:], y[ind], w, loss) w = w - alpha * compute_grad(X[ind,:], y[ind], w) visualize(X, y, w, loss) plt.clf() ###Output _____no_output_____ ###Markdown Now, let's see what is happening, when we do not normalise features first. What do you think will happen? ###Code with open("dataset_not_scaled.pkl", "rb") as f: X, y = pickle.load(f) plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) plt.show() # Set parameters to show plots nicely h = 0.01 x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, 50), np.arange(y_min, y_max, h)) w = np.array([0,0,0,0,0,1]) alpha = 1.e-12 n_iter = 50 batch_size = 4 loss = np.zeros(n_iter) plt.figure(figsize=(12,5)) for i in range(n_iter): ind = np.random.choice(X.shape[0], batch_size) loss[i] = compute_loss(X, y, w) visualize(X[ind,:], y[ind], w, loss) w = w - alpha * compute_grad(X[ind,:], y[ind], w) visualize(X, y, w, loss) plt.clf() loss ###Output _____no_output_____ ###Markdown `````````````````````````````````````````````````````````````````````````````` Bonus quest If you're done and there's still time left, try implementing __momentum SGD__ as described [here](https://distill.pub/2017/momentum/).Find alpha & beta that results in fastest convergence rate. ###Code w = np.array([0,0,0,0,0,1]) z = np.array([0,0,0,0,0,0]) alpha = ??? beta = ??? <YOUR CODE> ###Output _____no_output_____
GAN_Examples/StyleGAN_Encoder_Notebook.ipynb
###Markdown Notebook I: Encoding images into StyleGAN's latent space ![alt text](https://miro.medium.com/max/1280/0*eeFaGLx96mlbQcrK.gif) Start: To run this demo StyleGAN code, make a local copy of this notebook in your drive! Before you move on, make sure you have GPU acceleration enabled:> Click 'Runtime' in the menu tab at the top> Click 'Change runtime type'> Make sure the hardware accelerator is set to 'GPU' This is a hosted IPython notebook For those not familiar, all you really need to know is:* There are text cells (like this one) * And Python code cells (like the one below)* If you want the full tutorial, you can find it here: https://colab.research.google.com/notebooks/welcome.ipynb To run a code cell, simply click inside it and hit "shift+enter"(hitting shift + enter repeatedly will run through all the cells sequentially) Don't worry if you're new to this: > If things don't work, you can always click "Runtime-->Reset all runtimes" and restart the whole notebook if you mess up! ###Code # This is a cell with Python code # Execute the cell by clicking inside it and hitting shift+enter, or by clicking the 'run' button on the left of this cell a = 20 b = 30 c = a+b print("The sum of %d and %d is %d." %(a,b,c)) ###Output The sum of 20 and 30 is 50. ###Markdown --EDIT-- Colab tqdm version fix: 1. Upgrade tqdm: ###Code !pip install --upgrade tqdm ###Output Collecting tqdm [?25l Downloading https://files.pythonhosted.org/packages/4a/1c/6359be64e8301b84160f6f6f7936bbfaaa5e9a4eab6cbc681db07600b949/tqdm-4.45.0-py2.py3-none-any.whl (60kB)  |█████▍ | 10kB 17.0MB/s eta 0:00:01  |██████████▊ | 20kB 5.9MB/s eta 0:00:01  |████████████████▏ | 30kB 7.4MB/s eta 0:00:01  |█████████████████████▌ | 40kB 5.5MB/s eta 0:00:01  |███████████████████████████ | 51kB 6.1MB/s eta 0:00:01  |████████████████████████████████| 61kB 3.6MB/s [?25hInstalling collected packages: tqdm Found existing installation: tqdm 4.38.0 Uninstalling tqdm-4.38.0: Successfully uninstalled tqdm-4.38.0 Successfully installed tqdm-4.45.0 ###Markdown 2. Restart the Python kernel to load the updated version: ###Code import os os.kill(os.getpid(), 9) ###Output _____no_output_____ ###Markdown Now we can start for real: Let's first clone the Github repo we'll use: https://github.com/pbaylies/stylegan-encoder ###Code !rm -rf sample_data !git clone https://github.com/pbaylies/stylegan-encoder ###Output Cloning into 'stylegan-encoder'... remote: Enumerating objects: 483, done. remote: Total 483 (delta 0), reused 0 (delta 0), pack-reused 483 Receiving objects: 100% (483/483), 12.79 MiB | 15.02 MiB/s, done. Resolving deltas: 100% (266/266), done. ###Markdown cd into the repo folder: (only run this cell once or things might get buggy) ###Code cd stylegan-encoder ###Output /content/stylegan-encoder ###Markdown Let's see the files inside the repo we just cloned: ###Code ls ###Output adaptive.py Play_with_latent_directions.ipynb align_images.py pretrained_example.py* config.py* README.md* dataset_tool.py* requirements.txt dnnlib/ robust_loss/ encode_images.py run_metrics.py* encoder/ StyleGAN_Encoder_Tutorial.ipynb ffhq_dataset/ swa.py generate_figures.py* teaser.png Learn_direction_in_latent_space.ipynb train_effnet.py LICENSE.txt* training/ metrics/ train.py* mona_example.jpg train_resnet.py ###Markdown Some housekeeping: setting up folder structure for our images: ###Code rm -rf aligned_images raw_images mkdir aligned_images raw_images ###Output _____no_output_____ ###Markdown I. 
Get Images: Some tips for the images:* Use HD images (preferably > 1000x1000 pixels)* Make sure your face is not too small* Neutral expressions & front facing faces will give better results* Clear, uniform lighting conditions are also recommened Option 1: Upload Images manually (usually gives the best results) * Click the '>' icon in the panel on the top left * Go to the 'Files' tab* Unfold the stylegan-encoder folder (left-click)* Right click the 'stylegan-encoder/raw_images' folder and click "upload"* I'd recommend starting with 3 - 6 different images containing faces Option 2: Take images using your webcam ###Code from IPython.display import HTML, Audio from google.colab.output import eval_js from base64 import b64decode import numpy as np import io from PIL import Image from datetime import datetime VIDEO_HTML = """ <video autoplay width=%d height=%d style='cursor: pointer;'></video> <script> var video = document.querySelector('video') navigator.mediaDevices.getUserMedia({ video: true }) .then(stream=> video.srcObject = stream) var data = new Promise(resolve=>{ video.onclick = ()=>{ var canvas = document.createElement('canvas') var [w,h] = [video.offsetWidth, video.offsetHeight] canvas.width = w canvas.height = h canvas.getContext('2d') .drawImage(video, 0, 0, w, h) video.srcObject.getVideoTracks()[0].stop() video.replaceWith(canvas) resolve(canvas.toDataURL('image/jpeg', %f)) } }) </script> """ def take_photo(quality=1.0, size=(800,600)): display(HTML(VIDEO_HTML % (size[0],size[1],quality))) data = eval_js("data") binary = b64decode(data.split(',')[1]) f = io.BytesIO(binary) img = np.asarray(Image.open(f)) timestampStr = datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)") filename = 'raw_images/photo_%s.jpeg' %timestampStr Image.fromarray(img).save(filename) print('Image captured and saved to %s' %filename) img = take_photo() # click the image to capture a frame! ###Output _____no_output_____ ###Markdown Let's check the contents of our image folder before we start: (You can always manually delete images by right clicking on them in the file tab) ###Code from PIL import Image import os imgs = sorted(os.listdir('raw_images')) print("Found %d images in %s" %(len(imgs), 'raw_images')) if len(imgs) == 0: print("Upload images to the \"raw_images\" folder!") else: print(imgs) for img_path in imgs: img = Image.open('raw_images/' + img_path) w,h = img.size rescale_ratio = 256 / min(w,h) img = img.resize((int(rescale_ratio*w),int(rescale_ratio*h)), Image.LANCZOS) display(img) ###Output _____no_output_____ ###Markdown Make sure we're using the right TensorFlow version (1.15): ###Code %tensorflow_version 1.x import tensorflow as tf print(tf.__version__) ###Output TensorFlow 1.x selected. 1.15.2 ###Markdown II. Auto-Align faces: This script wil:1. Look for faces in the images2. Crop out the faces from the images3. Align the faces (center the nose and make the eyes horizontal)4. Rescale the resulting images and save them in "aligned_images" folder The cell below takes about a minute to run ###Code !python align_images.py raw_images/ aligned_images/ --output_size=1024 ###Output Using TensorFlow backend. Downloading data from http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 64045056/64040097 [==============================] - 5s 0us/step Aligning photo_13-Apr-2020 (12:57:53.712933).jpeg ... Getting landmarks... Starting face alignment... 
Wrote result aligned_images/photo_13-Apr-2020 (12:57:53.712933)_01.png ###Markdown Let's take a look at our aligned images: ###Code def display_folder_content(folder, res = 256): if folder[-1] != '/': folder += '/' for i, img_path in enumerate(sorted(os.listdir(folder))): if '.png' in img_path: display(Image.open(folder+img_path).resize((res,res)), 'img %d: %s' %(i, img_path)) print('\n') display_folder_content('aligned_images') ###Output _____no_output_____ ###Markdown Important, before moving on: Manually clean the 'aligned_images' directory> 1. Manually remove all 'bad' images that are not faces / don't look sharp / clear > (Use the image names from the plots above to guide you)> 2. Make sure you don't have too many faces in this folder (8 at most preferably) Encoding faces into StyleGAN latent space: ![title](https://raw.githubusercontent.com/pbaylies/stylegan-encoder/master/mona_example.jpg) We'll be using pbaylies' awesome encoder repo (building on original work from Puzer): https://github.com/pbaylies/stylegan-encoder First, let's download a pretrained resnet encoder: (see video for what this does) --> This model takes an image as input and estimates the corresponding latent code ###Code !gdown https://drive.google.com/uc?id=1aT59NFy9-bNyXjDuZOTMl0qX0jmZc6Zb !mkdir data !mv finetuned_resnet.h5 data !rm -rf generated_images latent_representations ###Output Downloading... From: https://drive.google.com/uc?id=1aT59NFy9-bNyXjDuZOTMl0qX0jmZc6Zb To: /content/stylegan-encoder/finetuned_resnet.h5 330MB [00:08, 40.5MB/s] ###Markdown III. The actual encoding process:> Highly recommended: play with the encoding params: they have a huge effect on the latent representations & images!> Extra encoding options: https://github.com/pbaylies/stylegan-encoder/blob/master/encode_images.py Note: This script will also download:* The pretrained StyleGAN network from NVIDIA trained on faces* A pretrained VGG-16 network, trained on ImageNet After guessing the initial latent codes using the pretrained ResNet, it will run gradient descent to optimize the latent faces! Note that by default, we're optimizing w vectors, not z-vectors! ###Code print("aligned_images contains %d images ready for encoding!" %len(os.listdir('aligned_images/'))) print("Recommended batch_size for the encode_images process: %d" %min(len(os.listdir('aligned_images/')), 8)) ###Output aligned_images contains 1 images ready for encoding! Recommended batch_size for the encode_images process: 1 ###Markdown Important: to avoid issues, set the batch_size argument lower than or equal to the number of aligned_images (see previous cell)> Keep batch_size<8 or the GPU might run out of memory Depending on the settings, the encoding process might take a few minutes... Fast version: ###Code !python encode_images.py --optimizer=lbfgs --face_mask=True --iterations=6 --use_lpips_loss=0 --use_discriminator_loss=0 --output_video=True aligned_images/ generated_images/ latent_representations/ print("\n************ Latent code optimization finished! ***************") ###Output _____no_output_____ ###Markdown Slow version: ###Code !python encode_images.py --optimizer=adam --lr=0.02 --decay_rate=0.95 --decay_steps=6 --use_l1_penalty=0.3 --face_mask=True --iterations=400 --early_stopping=True --early_stopping_threshold=0.05 --average_best_loss=0.5 --use_lpips_loss=0 --use_discriminator_loss=0 --output_video=True aligned_images/ generated_images/ latent_representations/ print("\n************ Latent code optimization finished! 
***************") ###Output _____no_output_____ ###Markdown Showtime! Let's load the StyleGAN network into memory: ###Code import dnnlib, pickle import dnnlib.tflib as tflib tflib.init_tf() synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True), minibatch_size=1) model_dir = 'cache/' model_path = [model_dir+f for f in os.listdir(model_dir) if 'stylegan-ffhq' in f][0] print("Loading StyleGAN model from %s..." %model_path) with dnnlib.util.open_url(model_path) as f: generator_network, discriminator_network, averaged_generator_network = pickle.load(f) print("StyleGAN loaded & ready for sampling!") def generate_images(generator, latent_vector, z = True): batch_size = latent_vector.shape[0] if z: #Start from z: run the full generator network return generator.run(latent_vector.reshape((batch_size, 512)), None, randomize_noise=False, **synthesis_kwargs) else: #Start from w: skip the mapping network return generator.components.synthesis.run(latent_vector.reshape((batch_size, 18, 512)), randomize_noise=False, **synthesis_kwargs) import matplotlib.pyplot as plt %matplotlib inline import numpy as np def plot_imgs(model, rows, columns): for i in range(rows): f, axarr = plt.subplots(1,columns, figsize = (20,8)) for j in range(columns): img = generate_images(model, np.random.randn(1,512), z = True)[0] axarr[j].imshow(img) axarr[j].axis('off') axarr[j].set_title('Resolution: %s' %str(img.shape)) plt.show() ###Output _____no_output_____ ###Markdown Let's plot some random StyleGAN samples: ###Code plot_imgs(averaged_generator_network, 3, 3) ###Output _____no_output_____ ###Markdown Let's take a look at the results of our encoding: If the results don't look great: Play with the encoding arguments!!!> 1. Run the optimization for more iterations (eg 500)> 2. Decrease the L1 penalty (to eg 0.15)> 3. Try a lower initial learning rate (eg 0.02) or play with the decay_rate> 4. Find out about the other encoding options here: https://github.com/pbaylies/stylegan-encoder/blob/master/encode_images.py> 5. You can find a bunch of good presets on the repo documentation: https://github.com/pbaylies/stylegan-encoder ###Code import numpy as np for f in sorted(os.listdir('latent_representations')): w = np.load('latent_representations/' + f).reshape((1,18,-1)) img = generate_images(averaged_generator_network, w, z = False)[0] plt.imshow(img) plt.axis('off') plt.title("Generated image from %s" %f) plt.show() ###Output _____no_output_____ ###Markdown Let's compare our encoded samples with the original ones:**Note: when you optimized with the setting --face_mask=True, the hair will be copied from the source images. If you don't want this, optimize without that setting!** ###Code import matplotlib.pyplot as plt def plot_two_images(img1,img2, img_id, fs = 12): f, axarr = plt.subplots(1,2, figsize=(fs,fs)) axarr[0].imshow(img1) axarr[0].title.set_text('Encoded img %d' %img_id) axarr[1].imshow(img2) axarr[1].title.set_text('Original img %d' %img_id) plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[]) plt.show() def display_sbs(folder1, folder2, res = 256): if folder1[-1] != '/': folder1 += '/' if folder2[-1] != '/': folder2 += '/' imgs1 = sorted([f for f in os.listdir(folder1) if '.png' in f]) imgs2 = sorted([f for f in os.listdir(folder2) if '.png' in f]) if len(imgs1)!=len(imgs2): print("Found different amount of images in aligned vs raw image directories. 
That's not supposed to happen...") for i in range(len(imgs1)): img1 = Image.open(folder1+imgs1[i]).resize((res,res)) img2 = Image.open(folder2+imgs2[i]).resize((res,res)) plot_two_images(img1,img2, i) print("") display_sbs('generated_images/', 'aligned_images/', res = 512) ###Output _____no_output_____ ###Markdown Note: If you want to watch the whole thing unfold for yourself, you can **download the optimization videos** from the "videos" folder IV. Cherry pick images & dump their latent vectors to disk Manipulating latent vectors (Notebook II) is tricky and will only work well if the face encoding looks 'good' Cherry pick a few images where the optimization worked well> (Use the image indices from the plot titles above) ###Code good_images = [0,1] #Change these numbers to pick out latents that worked well (see the image plots) ###Output _____no_output_____ ###Markdown Save these latent vectors to disk: ###Code import numpy as np latents = sorted(os.listdir('latent_representations')) out_file = '/content/output_vectors.npy' final_w_vectors = [] for img_id in good_images: w = np.load('latent_representations/' + latents[img_id]) final_w_vectors.append(w) final_w_vectors = np.array(final_w_vectors) np.save(out_file, final_w_vectors) print("%d latent vectors of shape %s saved to %s!" %(len(good_images), str(w.shape), out_file)) ###Output 2 latent vectors of shape (18, 512) saved to /content/output_vectors.npy!
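###Markdown Optional sanity check: reload the saved latent vectors and re-render them. This is a minimal sketch that reuses the `generate_images` helper and `averaged_generator_network` loaded earlier in this notebook; adjust the names if you changed them. ###Code
# Reload the w vectors we just saved and regenerate the faces from them.
# Assumes generate_images() and averaged_generator_network are still in memory.
reloaded_w = np.load(out_file)
print("Reloaded array shape:", reloaded_w.shape)  # expected: (n_images, 18, 512)

for w in reloaded_w:
    img = generate_images(averaged_generator_network, w.reshape((1, 18, 512)), z=False)[0]
    plt.imshow(img)
    plt.axis('off')
    plt.show()
###Output _____no_output_____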
alphalens/examples/pyfolio_integration.ipynb
###Markdown Alphalens and Pyfolio integration Alphalens can simulate the performance of a portfolio where the factor values are use to weight stocks. Once the portfolio is built, it can be analyzed by Pyfolio. For details on how this portfolio is built see:- alphalens.performance.factor_returns- alphalens.performance.cumulative_returns - alphalens.performance.create_pyfolio_input ###Code %pylab inline --no-import-all import alphalens import pyfolio import pandas as pd import numpy as np import datetime ###Output Populating the interactive namespace from numpy and matplotlib ###Markdown First load some stocks data ###Code tickers = [ 'ACN', 'ATVI', 'ADBE', 'AMD', 'AKAM', 'ADS', 'GOOGL', 'GOOG', 'APH', 'ADI', 'ANSS', 'AAPL', 'AVGO', 'CA', 'CDNS', 'CSCO', 'CTXS', 'CTSH', 'GLW', 'CSRA', 'DXC', 'EBAY', 'EA', 'FFIV', 'FB', 'FLIR', 'IT', 'GPN', 'HRS', 'HPE', 'HPQ', 'INTC', 'IBM', 'INTU', 'JNPR', 'KLAC', 'LRCX', 'MA', 'MCHP', 'MSFT', 'MSI', 'NTAP', 'NFLX', 'NVDA', 'ORCL', 'PAYX', 'PYPL', 'QRVO', 'QCOM', 'RHT', 'CRM', 'STX', 'AMG', 'AFL', 'ALL', 'AXP', 'AIG', 'AMP', 'AON', 'AJG', 'AIZ', 'BAC', 'BK', 'BBT', 'BRK.B', 'BLK', 'HRB', 'BHF', 'COF', 'CBOE', 'SCHW', 'CB', 'CINF', 'C', 'CFG', 'CME', 'CMA', 'DFS', 'ETFC', 'RE', 'FITB', 'BEN', 'GS', 'HIG', 'HBAN', 'ICE', 'IVZ', 'JPM', 'KEY', 'LUK', 'LNC', 'L', 'MTB', 'MMC', 'MET', 'MCO', 'MS', 'NDAQ', 'NAVI', 'NTRS', 'PBCT', 'PNC', 'PFG', 'PGR', 'PRU', 'RJF', 'RF', 'SPGI', 'STT', 'STI', 'SYF', 'TROW', 'ABT', 'ABBV', 'AET', 'A', 'ALXN', 'ALGN', 'AGN', 'ABC', 'AMGN', 'ANTM', 'BCR', 'BAX', 'BDX', 'BIIB', 'BSX', 'BMY', 'CAH', 'CELG', 'CNC', 'CERN', 'CI', 'COO', 'DHR', 'DVA', 'XRAY', 'EW', 'EVHC', 'ESRX', 'GILD', 'HCA', 'HSIC', 'HOLX', 'HUM', 'IDXX', 'ILMN', 'INCY', 'ISRG', 'IQV', 'JNJ', 'LH', 'LLY', 'MCK', 'MDT', 'MRK', 'MTD', 'MYL', 'PDCO', 'PKI', 'PRGO', 'PFE', 'DGX', 'REGN', 'RMD', 'SYK', 'TMO', 'UNH', 'UHS', 'VAR', 'VRTX', 'WAT', 'MMM', 'AYI', 'ALK', 'ALLE', 'AAL', 'AME', 'AOS', 'ARNC', 'BA', 'CHRW', 'CAT', 'CTAS', 'CSX', 'CMI', 'DE', 'DAL', 'DOV', 'ETN', 'EMR', 'EFX', 'EXPD', 'FAST', 'FDX', 'FLS', 'FLR', 'FTV', 'FBHS', 'GD', 'GE', 'GWW', 'HON', 'INFO', 'ITW', 'IR', 'JEC', 'JBHT', 'JCI', 'KSU', 'LLL', 'LMT', 'MAS', 'NLSN', 'NSC', 'NOC', 'PCAR', 'PH', 'PNR', 'PWR', 'RTN', 'RSG', 'RHI', 'ROK', 'COL', 'ROP', 'LUV', 'SRCL', 'TXT', 'TDG', 'UNP', 'UAL', 'AES', 'LNT', 'AEE', 'AEP', 'AWK', 'CNP', 'CMS', 'ED', 'D', 'DTE', 'DUK', 'EIX', 'ETR', 'ES', 'EXC'] import pandas_datareader.data as web pan = web.DataReader(tickers, "yahoo", datetime.datetime(2015, 1, 1), datetime.datetime(2017, 1, 1)) pan = pan.transpose(2,1,0) ###Output _____no_output_____ ###Markdown We'll compute a simple mean reversion factor looking at recent stocks performance: stocks that performed well in the last 5 days will have high rank and vice versa. ###Code factor = pan.loc[:,:,'Open'] factor = -factor.pct_change(5) factor = factor.stack() factor.index = factor.index.set_names(['date', 'asset']) ###Output _____no_output_____ ###Markdown The pricing data passed to alphalens should contain the entry price for the assets so it must reflect the next available price after a factor value was observed at a given timestamp. Those prices must not be used in the calculation of the factor values for that time. 
Always double check to ensure you are not introducing lookahead bias into your study. The pricing data must also contain the exit price for the assets: for period 1 the price at the next timestamp will be used, for period 2 the price after 2 timestamps will be used, and so on. There are no restrictions/assumptions on the time frequency a factor should be computed at, nor on the specific time a factor should be traded (trading at the open vs trading at the close vs intraday trading); it is only required that the factor and price DataFrames are properly aligned given the rules above. In our example, before trading starts every day, we observe yesterday's factor values. The price we pass to Alphalens is the next available price after that factor observation: the daily open price, which will be used as the assets' entry price. Also, since we are not adding additional prices, the assets' exit price will be the following days' open prices (how many days depends on the 'periods' argument). The returns computed by Alphalens will therefore be based on the assets' open prices. ###Code pricing = pan.loc[:,:,'Open'].iloc[1:] ###Output _____no_output_____ ###Markdown Prepare data and run Alphalens Pyfolio wants the timezone set to UTC ###Code pricing.index = pricing.index.tz_localize('UTC') factor = factor.unstack() factor.index = factor.index.tz_localize('UTC') factor = factor.stack() factor_data = alphalens.utils.get_clean_factor_and_forward_returns(factor, pricing, periods=(1, 3), quantiles=5, bins=None) alphalens.tears.create_summary_tear_sheet(factor_data) ###Output Quantiles Statistics ###Markdown Prepare data for Pyfolio We can see in the Alphalens analysis that quantiles 1 and 5 are the most predictive, so we'll build the portfolio data using only those quantiles. ###Code pf_returns, pf_benchmark = alphalens.performance.create_pyfolio_input(factor_data, period='3D', long_short=True, group_neutral=False, quantiles=[1,5], groups=None) ###Output _____no_output_____ ###Markdown Now that we have prepared the data we can run the Pyfolio functions ###Code pyfolio.tears.create_full_tear_sheet(pf_returns, benchmark_rets=pf_benchmark) ###Output _____no_output_____
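###Markdown As a quick cross-check of the tear sheet above, the simulated portfolio returns can also be summarized directly with plain pandas/numpy. This is a minimal sketch, assuming `pf_returns` is a pandas Series of periodic simple returns as produced by `create_pyfolio_input`. ###Code
# Minimal sanity check on the simulated factor portfolio (pandas/numpy only).
# Assumes pf_returns is a Series of periodic simple returns indexed by date.
cum_returns = (1 + pf_returns).cumprod() - 1
print("Number of periods:", len(pf_returns))
print("Total return: %.2f%%" % (100 * cum_returns.iloc[-1]))
print("Annualized volatility (assuming ~252 trading days): %.2f%%" % (100 * pf_returns.std() * np.sqrt(252)))
cum_returns.plot(title='Cumulative returns of the simulated factor portfolio')
###Output _____no_output_____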
terra-notebooks-playground/R - How to save images and tables to files.ipynb
###Markdown How to save images and tables to filesResearchers need to include images and tables of data in manuscripts.* One can copy images from a notebook, but researchers also need to control the dpi and image size. Here we instead emit these images to files with a specific dpi and size.* Copying/pasting the table data from the notebook to your document is prone to error. Here we instead emit these tables as CSV files with can then be imported to Microsoft Word documents.See also [Notebooks 101 - How not to lose data output files or collaborator edits](https://broadinstitute.zendesk.com/hc/en-us/articles/360027300571-Notebooks-101-How-not-to-lose-data-output-files-or-collaborator-edits). Setup ###Code lapply(c('gapminder'), function(pkg) { if(! pkg %in% installed.packages()) { install.packages(pkg)} } ) library(gapminder) library(lubridate) library(tidyverse) ###Output Attaching package: ‘lubridate’ The following objects are masked from ‘package:base’: date, intersect, setdiff, union ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.1 ── ✔ ggplot2 3.3.3 ✔ purrr  0.3.4 ✔ tibble  3.1.1 ✔ dplyr  1.0.6 ✔ tidyr  1.1.3 ✔ stringr 1.4.0 ✔ readr  1.4.0 ✔ forcats 0.5.1 ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ── ✖ lubridate::as.difftime() masks base::as.difftime() ✖ lubridate::date() masks base::date() ✖ dplyr::filter() masks stats::filter() ✖ lubridate::intersect() masks base::intersect() ✖ dplyr::lag() masks stats::lag() ✖ lubridate::setdiff() masks base::setdiff() ✖ lubridate::union() masks base::union() ###Markdown Image examples ggplot example ###Code theme_set(theme_gray(base_size = 18)) options(repr.plot.height = 30, repr.plot.width = 16) gapminder %>% ggplot(aes(continent, lifeExp, color = continent)) + geom_boxplot() + facet_wrap(~ year, ncol = 1) ggtitle("Full data = ") ###Output _____no_output_____ ###Markdown Save the boxplots to an external file. ###Code ggsave('myboxplot-highres-6x4.png', dpi = 300, width = 16, height = 30) options(repr.plot.height = 16, repr.plot.width = 16) gapminder %>% ggplot(aes(year, lifeExp, group = country)) + geom_line(alpha = 1/3) + facet_wrap(~ continent, ncol = 1) ###Output _____no_output_____ ###Markdown Save the line plots to an external file. ###Code ggsave('mylineplot-highres-6x4.png', dpi = 300, width = 16, height = 16) ###Output _____no_output_____ ###Markdown Base R exampleUnlike the nice useability of ggplot and ggsave, I'm not sure how best to view a base R plot in the notebook and then save it to a file. If you know a better way, please feel free to update this section about sending base R plots to files! ###Code options(repr.plot.height = 10, repr.plot.width = 10) # Just view the plot. hist(islands) ###Output _____no_output_____ ###Markdown Plot directly to a file, then view the contents of that file. ###Code png('print_histogram_direct_file.png', width = 6, height = 6, units = 'in', res = 300) hist(islands) dev.off() IRdisplay::display_png(file = 'print_histogram_direct_file.png') ###Output _____no_output_____ ###Markdown Model exampleFrom http://r4ds.had.co.nz/many-models.html ###Code gm_mod <- lm(lifeExp ~ year + continent, data = gapminder) gm_mod broom::glance(gm_mod) broom::tidy(gm_mod) ###Output _____no_output_____ ###Markdown Write the model results to a CSV file. 
###Code write_csv(broom::tidy(gm_mod), 'gapminder-model.csv') ###Output _____no_output_____ ###Markdown [Optional] Download the files to your local machine If you just need to download a few files, its easy to open the Jupyter console and download them. Tip: You can open the Jupyter console by right clicking on the Jupyter logo at the top left of the Terra UI to open it in a new tab. But if you want to download a **batch of files** to your local machine, you can transfer the batch to the workspace bucket, and then download the batch to your local machine. ###Code list.files(pattern = '*png') (DESTINATION <- paste(Sys.getenv('WORKSPACE_BUCKET'), 'images', Sys.getenv('OWNER_EMAIL'), strftime(now(), '%Y%m%d/%H%M%S'), sep = '/')) system(str_glue('gsutil -m cp *png {DESTINATION} 2>&1'), intern = TRUE) system(str_glue('gsutil ls {DESTINATION} 2>&1'), intern = TRUE) ###Output _____no_output_____ ###Markdown Now in the terminal **on your local machine**, you can run the following command to download the batch of files from the workspace bucket: ###Code print(str_glue('# Run this gsutil command in your local terminal.\ngsutil -m cp {DESTINATION}/* . ')) ###Output # Run this gsutil command in your local terminal. gsutil -m cp gs://fc-ce0a87d7-b388-489d-9169-79ad4aa2e58d/images/[email protected]/20210520/172928/* . ###Markdown Provenance ###Code devtools::session_info() ###Output _____no_output_____
PYTHON_EXERCISE_1.ipynb
###Markdown Average of three no.. ###Code a=10 b=20 c=30 average=(a+b+c)/3 print(average) print(type(average)) ###Output 20.0 <class 'float'> ###Markdown Multiplication of 3 no.. ###Code a=10 b=20 c=30 d=a*b*c print(d) print(type(d)) ###Output 6000 <class 'int'> ###Markdown Dynamic addition of 2 no... ###Code a=int(input("enter a no: ")) b=int(input("enter a no: ")) addition=(a+b) print(addition) print(type(addition)) ###Output enter a no: 10 enter a no: 20 30 <class 'int'> ###Markdown Dynamic multipication of 2 no.. ###Code a=int(input("enter a no: ")) b=int(input("enter a no: ")) multiplication=(a*b) print(multiplication) print(type(multiplication)) ###Output enter a no: 20 enter a no: 30 600 <class 'int'> ###Markdown Dynamic add 3 no.. ###Code a=int(input("enter a no: ")) b=int(input("enter a no: ")) c=int(input("enter a no: ")) addition=(a+b+c) print(addition) print(type(addition)) ###Output enter a no: 20 enter a no: 10 enter a no: 20 50 <class 'int'> ###Markdown Dynamic multi 3 no.. ###Code a=int(input("enter a no: ")) b=int(input("enter a no: ")) c=int(input("enter a no: ")) multi=(a*b*c) print(multi) print(type(multi)) ###Output enter a no: 10 enter a no: 10 enter a no: 10 1000 <class 'int'> ###Markdown Dynamic avg of 3 no.. ###Code a=int(input("enter a no: ")) b=int(input("enter a no: ")) c=int(input("enter a no: ")) avg=(a+b+c)/3 print(avg) print(type(avg)) ###Output enter a no: 10 enter a no: 10 enter a no: 0 6.666666666666667 <class 'float'> ###Markdown TYPE CONVERSIONS 1.INT TO FLOAT ###Code a=10 print(type(a)) b=float(a) print(type(b)) ###Output <class 'int'> <class 'float'> ###Markdown 2.float to int ###Code a=10.32 print(type(a)) b=int(a) print(type(b)) print(b) ###Output <class 'float'> <class 'int'> 10 ###Markdown 3.int to string ###Code a=10 print(type(a)) b=str(a) print(type(b)) print(b) ###Output <class 'int'> <class 'str'> 10 ###Markdown 4.float to string ###Code a=10.33 print(type(a)) b=str(a) print(type(b)) print(b) ###Output <class 'float'> <class 'str'> 10.33 ###Markdown 5.string to int ###Code a="10" print(type(a)) b=int(a) print(type(b)) print(b) ###Output <class 'str'> <class 'int'> 10 ###Markdown Boolean ###Code a=30 b=20 c=a>b print(bool(c)) ###Output True ###Markdown Boolean to int ###Code a=bool(30) print(type(a)) b=int(a) print(type(b)) ###Output <class 'bool'> <class 'int'> ###Markdown Boolean to string ###Code a=bool(30) print(type(a)) b=str(a) print(type(b)) ###Output <class 'bool'> <class 'str'> ###Markdown Complex ###Code a=5+2j print(type(a)) b=str(a) print(type(b)) # checking whether the triangle is equilateral, isosceles or scalene triangle a=input("enter the value of 1st triangle") b=input("enter the value of 2nd triangle") c=input("enter the value of 3rd triangle") if a==b==c: print("Equilateral triangle") elif a==b or b==c or c==a: print("isosceles triangle") else: print("Scalene triangle") # checking weather entered year is a leap year or not a=int(input("Enter year:")) if (a%4==0): print("leap year") else: print("not leap year") # find minimum among the numers a=input("enter the value of 1st number") b=input("enter the value of 2nd number") c=input("enter the value of 3rd number") if a<b and a<c: print("1st number is minimum") elif a==b or b==c or c==a: print("2nd number is minimum") else: print("3rd number is minimum") # check whether a number is positive or negative or zero num = float(input("Enter a number: ")) if num > 0: print("Positive number") elif num == 0: print("Zero") else: print("Negative number") ###Output Enter a number: 0 Zero
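###Markdown Note on two of the conditional snippets above: the minimum-of-three example compares the raw `input()` strings (so "9" compares as greater than "10") and its `elif a==b or b==c or c==a` branch does not actually test which value is smallest, while the leap-year check ignores century years such as 1900. A corrected sketch is shown below; it is an alternative version for reference, not part of the original exercise. ###Code
# Minimum of three numbers, comparing integers rather than strings
a = int(input("enter the value of 1st number: "))
b = int(input("enter the value of 2nd number: "))
c = int(input("enter the value of 3rd number: "))
print("minimum is", min(a, b, c))

# Full Gregorian leap-year rule (handles century years such as 1900 and 2000)
year = int(input("Enter year: "))
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
    print("leap year")
else:
    print("not leap year")
###Output _____no_output_____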
Reproducible research.ipynb
###Markdown Tools for reproducible data analysis (a.k.a. Reproducible Research) Dariusz Brzeziński Agenda========================================================- **Definitions** - Repeatable vs Reproducible research - Literate (Statistical) Programming- **Tools** - Git and GitHub - Jupyter (and knitr) - Zenodo- **Practical examples** - Running a script - Figures and visualizations - Querying the CSD database - Data analysis Definitions Repeatability: An experiment is repeatable if another team is able to repeat the same experiment (under conditions as close as possible to the original) and obtain the same results. ------------------------------------------- Reproducibility: The results of an experiment are reproducible if the authors of the experiment share their data and make it possible to reproduce all of the published results.------------------------------------------- Observations: **An experiment can be repeatable even though its results are not reproducible.** Example: the authors did not share the data they collected about members of the Yanomami tribe and presented only the results of their analysis. The experiment can be repeated (travel to the Amazon and collect new data), but the analysis cannot be reproduced on the same data. **An experiment can be unrepeatable while its results are reproducible.** Example: the authors studied the customs of the Atakapa tribe. The experiment cannot be repeated because the tribe has died out, but since the authors shared the collected data and wrote down every step of the computations, the analysis can be fully reproduced. ![images/reproducibility-spectrum.png](images/reproducibility-spectrum.png) Why reproducible data analysis is worth it========================================- We hide nothing from the rest of the scientific community (**transparency**) - Other scientists will be more willing to build on our results (**citations**)- Other people can find errors in our work more easily (**accuracy**)- Team members will not have to ask us how something was done (**time**)- Re-running the analysis on new data becomes much simpler (**productivity**)- We are not afraid to repeat our own analysis (**confidence**)*Similar benefits also apply to [analyses performed in industry](https://medium.com/airbnb-engineering/scaling-knowledge-at-airbnb-875d73eff091.rf1fniree)* Foundations of reproducible data analysis====================================- **Decide that the analysis will be reproducible**- Use a repository- Use software in which the individual steps can be scripted- Work on the raw data (**do not edit anything by hand**)- Do not save final results- Use open data formats Literate Programming==================- An idea due to [Donald Knuth](https://pl.wikipedia.org/wiki/Donald_Knuth)- An article as a stream of text and code- Requires a programming language and a documentation language - Code split into fragments- A narrative explaining what happens step by step- From the article you can extract only the code or keep only the narrative- Examples: [LaTeX](https://en.wikipedia.org/wiki/LaTeX), [Sweave](https://en.wikipedia.org/wiki/Sweave), [knitr](http://yihui.name/knitr/), [Jupyter](https://jupyter.org/) Git====- Git is a **version control system**- It helps track the history of changes in files- After a file is edited it can show **who** changed **what** and **why**- It makes it easier for many people to collaborate on one project - Typically used for code, but it can also be used to work on: - graphic designs, - architectural designs, - data analyses, - documents, - teaching materials...
Git====- Traditionally used from the command line - Graphical tools also exist (e.g. [TortoiseGit](https://tortoisegit.org/) or [GitHub Desktop](https://desktop.github.com/))- Basic operations: - **commit** make a checkpoint locally on disk - **push** save your changes to an external server - **pull** fetch changes from an external server - **revert** roll changes back to a chosen checkpoint - **diff** compare changes Demo===== ![demo](images/examples.png) GitHub======- The most popular open code repository - An external server for Git repositories- Free for public projects- The standard for collaboration on open-source software projects - Issues can be reported - Fixes can be submitted - Versioning Github======[![github-example.png](images/github-example.png)](https://github.com/dabrze/CheckMyBlob) Jupyter======= - One implementation of the Literate Programming idea- The most popular tool for reproducible analyses in Python- It also supports other languages (**Ju**=Julia, **pyt**=Python, **er**=R)- Text and code are combined in **notebooks**- Text is written in **Markdown**- Runs in the browser (**remote work is possible**) Jupyter (dashboard)=================![demo](images/jupyter-dashboard.png) Jupyter (notebook)================![demo](images/jupyter-notebook.png) Demo=====![demo](images/examples.png) Zenodo======- A free repository for research data - Servers hosted by CERN- Every uploaded item receives its own DOI- Files uploaded to Zenodo will remain available and easy to find for years- GitHub integration Examples========- The following slides present examples of how Jupyter notebooks can be used- The examples were chosen to be related to crystallography- Notebooks can of course be used for many other tasks! Running a script==================With a Jupyter notebook you can show other authors the exact steps performed during an analysis. Because notebooks can be executed, other scientists can easily repeat those steps themselves. Ideas for use: refmac, phenix, XDS, pymol, ...
###Code # cat ./Reproducible research.ipynb | wc -l !findstr /R /N "^" "Reproducible research.ipynb" | find /c /v "" ###Output 2532 ###Markdown Uruchamianie skryptu================== ###Code PHENIX_BIN = "C:/Tools/Crystallography/Phenix/phenix-installer-1.14-3260-intel-windows-x86_64/build/bin/" PDB = "data/ISO2-1_6A_refine_96.pdb" MTZ = "data/ISO2-1_6A_refine_96.mtz" CIF = "data/IC-IG-QM-paired-lib.cif" OPTIONS = "main.random_seed=7 main.number_of_macro_cycles=0" cmd = "{0}phenix.refine {1} {2} {3} {4}".format(PHENIX_BIN, MTZ, PDB, CIF, OPTIONS) !{cmd} ###Output # Date 2019-06-07 Time 09:48:01 Środkowoeuropejski czas letni +0200 (1559893681.56 s) #phil __OFF__ Command line arguments: "data/ISO2-1_6A_refine_96.mtz" "data/ISO2-1_6A_refine_96.pdb" "data/IC-IG-QM-paired-lib.cif" "main.random_seed=7" "main.number_of_macro_cycles=0" COMPUTERNAME = KERMIT PROCESSOR_ARCHITECTURE = AMD64 USERNAME = Lenovo PID = 14512 ------------------------------------------------------------------------------- PHENIX: Python-based Hierarchical ENvironment for Integrated Xtallography Version: 1.14 Release tag: 3260 Platform: intel-windows-x86_64 User: Lenovo ------------------------------------------------------------------------------- phenix.refine: Macromolecular Structure Refinement ------------------------------------------------------------------------------- Phenix developers include: Paul Adams, Pavel Afonine, Gabor Bunkoczi, Tom Burnley, Vincent Chen, Youval Dar, Ian Davis, Eli Draizen, Nathaniel Echols, Richard Gildea, Piet Gros, Ralf Grosse-Kunstleve, Jeffrey Headd, Bradley Hintze, Li-Wei Hung, Tom Ioerger, Dorothee Liebschner, Airlie McCoy, Erik McKee, Nigel Moriarty, Robert Oeffner, Billy Poon, Randy Read, Jane Richardson, David Richardson, Jim Sacchettini, Nicholas Sauter, Oleg Sobolev, Laurent Storoni, Tom Terwilliger, Christopher Williams, Peter Zwart Phenix home page: http://www.phenix-online.org/ ------------------------------------------------------------------------------- ------------------------------------------------------------------------------- Phenix components are copyrighted by: - Lawrence Berkeley National Laboratory - Los Alamos National Laboratory - University of Cambridge - Duke University - Texas Agricultural Experiment Station & Texas Engineering Experiment Station ------------------------------------------------------------------------------- ------------------------------------------------------------------------------- Major third-party components of Phenix include: Python, wxWidgets, wxPython, Boost, SCons, Clipper, CCP4 Monomer Library, CCP4 I/O libraries, PyCifRW, FFTPACK, L-BFGS, MUSCLE, KSDSSP, PULCHRA, ANTLRv3 Enter phenix.acknowledgments for details. ------------------------------------------------------------------------------- Processing inputs. This may take a minute or two. Command line parameter definitions: refinement.main.random_seed = 7 refinement.main.number_of_macro_cycles = 0 No user-defined map coefficients or files defined; will use default map outputs instead. Working crystal symmetry after inspecting all inputs: Unit cell: (42.251, 38.802, 84.18, 90, 93.479, 90) Space group: C 1 2 1 (No. 
5) ================================== X-ray data ================================= I-obs: data/ISO2-1_6A_refine_96.mtz:I-obs,SIGI-obs Miller array info: data/ISO2-1_6A_refine_96.mtz:I-obs,SIGI-obs Observation type: xray.intensity Type of data: double, size=17941 Type of sigmas: double, size=17941 Number of Miller indices: 17941 Anomalous flag: False Unit cell: (42.251, 38.802, 84.18, 90, 93.479, 90) Space group: C 1 2 1 (No. 5) Systematic absences: 0 Centric reflections: 994 Resolution range: 28.5547 1.60043 Completeness in resolution range: 0.989302 Completeness with d_max=infinity: 0.989193 Wavelength: 1.0000 ----------Scaling input intensities via French-Wilson Method---------- Trying 60 bins... Number of bins = 60 ** Calculating bin mean intensity values for each intensity ** ** Total # rejected intensities: 0 ** Intensities converted to amplitudes for use in refinement. Number of F-obs in resolution range: 17941 Number of F-obs<0 (these reflections will be rejected): 0 Number of F-obs=0 (these reflections will be used in refinement): 0 Refinement resolution range: d_max = 28.5547 d_min = 1.6004 R-free flags: data/ISO2-1_6A_refine_96.mtz:R-free-flags Miller array info: data/ISO2-1_6A_refine_96.mtz:R-free-flags Observation type: None Type of data: int, size=17941 Type of sigmas: None Number of Miller indices: 17941 Anomalous flag: False Unit cell: (42.251, 38.802, 84.18, 90, 93.479, 90) Space group: C 1 2 1 (No. 5) Systematic absences: 0 Centric reflections: 994 Resolution range: 28.5547 1.60043 Completeness in resolution range: 0.989302 Completeness with d_max=infinity: 0.989193 Wavelength: 1.0000 Test (R-free flags) flag value: 0 Number of work/free reflections by resolution: work free %free bin 1: 28.5592 - 3.4462 [1864/1881] 1760 104 5.6% bin 2: 3.4462 - 2.7361 [1819/1828] 1717 102 5.6% bin 3: 2.7361 - 2.3904 [1788/1804] 1689 99 5.5% bin 4: 2.3904 - 2.1719 [1818/1829] 1716 102 5.6% bin 5: 2.1719 - 2.0163 [1796/1809] 1696 100 5.6% bin 6: 2.0163 - 1.8974 [1789/1801] 1690 99 5.5% bin 7: 1.8974 - 1.8024 [1765/1791] 1666 99 5.6% bin 8: 1.8024 - 1.7240 [1804/1823] 1704 100 5.5% bin 9: 1.7240 - 1.6576 [1755/1785] 1657 98 5.6% bin 10: 1.6576 - 1.6004 [1743/1784] 1646 97 5.6% overall 16941 1000 5.6% Monomer Library directory: "c:\tools\crystallography\phenix\phenix-installer-1.14-3260-intel-windows-x86_64\modules\chem_data\mon_lib" Total number of atoms: 1850 Number of models: 1 Model: "" Number of chains: 5 Chain: "A" Number of atoms: 326 Number of conformers: 1 Conformer: "" Number of residues, atoms: 10, 326 Classifications: {'RNA': 10} Modifications used: {'5*END': 1, '3*END': 1, 'rna3p': 2, 'rna3p_pur': 5, 'rna3p_pyr': 3} Link IDs: {'rna3p': 9} Unresolved non-hydrogen bonds: 2 Unresolved non-hydrogen angles: 6 Unresolved non-hydrogen dihedrals: 2 Chain: "B" Number of atoms: 391 Number of conformers: 2 Conformer: "A" Number of residues, atoms: 10, 326 Classifications: {'RNA': 10} Modifications used: {'5*END': 1, '3*END': 1, 'rna3p': 2, 'rna3p_pur': 5, 'rna3p_pyr': 3} Link IDs: {'rna3p': 9} Unresolved non-hydrogen bonds: 2 Unresolved non-hydrogen angles: 6 Unresolved non-hydrogen dihedrals: 2 Conformer: "B" Number of residues, atoms: 10, 326 Classifications: {'RNA': 10} Modifications used: {'5*END': 1, '3*END': 1, 'rna3p': 2, 'rna3p_pur': 5, 'rna3p_pyr': 3} Link IDs: {'rna3p': 9} Unresolved non-hydrogen bonds: 2 Unresolved non-hydrogen angles: 6 Unresolved non-hydrogen dihedrals: 2 bond proxies already assigned to first conformer: 280 Chain: "C" Number of atoms: 652 Number of conformers: 
2 Conformer: "A" Number of residues, atoms: 10, 326 Classifications: {'RNA': 10} Modifications used: {'5*END': 1, '3*END': 1, 'rna3p': 2, 'rna3p_pur': 5, 'rna3p_pyr': 3} Link IDs: {'rna3p': 9} Unresolved non-hydrogen bonds: 2 Unresolved non-hydrogen angles: 6 Unresolved non-hydrogen dihedrals: 2 Conformer: "B" Number of residues, atoms: 10, 326 Classifications: {'RNA': 10} Modifications used: {'5*END': 1, '3*END': 1, 'rna3p': 2, 'rna3p_pur': 5, 'rna3p_pyr': 3} Link IDs: {'rna3p': 9} Unresolved non-hydrogen bonds: 2 Unresolved non-hydrogen angles: 6 Unresolved non-hydrogen dihedrals: 2 Chain: "D" Number of atoms: 377 Number of conformers: 2 Conformer: "A" Number of residues, atoms: 10, 326 Classifications: {'RNA': 10} Modifications used: {'5*END': 1, '3*END': 1, 'rna3p': 2, 'rna3p_pur': 5, 'rna3p_pyr': 3} Link IDs: {'rna3p': 9} Unresolved non-hydrogen bonds: 2 Unresolved non-hydrogen angles: 6 Unresolved non-hydrogen dihedrals: 2 Conformer: "B" Number of residues, atoms: 10, 326 Classifications: {'RNA': 10} Modifications used: {'5*END': 1, '3*END': 1, 'rna3p': 2, 'rna3p_pur': 5, 'rna3p_pyr': 3} Link IDs: {'rna3p': 9} Unresolved non-hydrogen bonds: 2 Unresolved non-hydrogen angles: 6 Unresolved non-hydrogen dihedrals: 2 bond proxies already assigned to first conformer: 296 Chain: "S" Number of atoms: 104 Number of conformers: 2 Conformer: "A" Number of residues, atoms: 100, 100 Classifications: {'water': 100} Link IDs: {None: 99} Conformer: "B" Number of residues, atoms: 100, 100 Classifications: {'water': 100} Link IDs: {None: 99} Residues with excluded nonbonded symmetry interactions: 17 residue: pdb=" C1'A IC B 14 " occ=0.38 ... (60 atoms not shown) pdb="HO2'B IC B 14 " occ=0.62 residue: pdb=" P A G B 15 " occ=0.48 ... (66 atoms not shown) pdb=" H22B G B 15 " occ=0.52 residue: pdb=" O5'A G C 1 " occ=0.52 ... (62 atoms not shown) pdb="HO5'B G C 1 " occ=0.48 residue: pdb=" P A G C 2 " occ=0.52 ... (66 atoms not shown) pdb=" H22B G C 2 " occ=0.48 residue: pdb=" P A C C 3 " occ=0.52 ... (60 atoms not shown) pdb=" H6 B C C 3 " occ=0.48 residue: pdb=" C1'A IC C 4 " occ=0.60 ... (60 atoms not shown) pdb="HO2'B IC C 4 " occ=0.40 residue: pdb=" P A G C 5 " occ=0.48 ... (66 atoms not shown) pdb=" H22B G C 5 " occ=0.52 residue: pdb=" P A A C 6 " occ=0.48 ... (64 atoms not shown) pdb=" H2 B A C 6 " occ=0.52 residue: pdb=" C1'A IG C 7 " occ=0.61 ... (66 atoms not shown) pdb="HO2'B IG C 7 " occ=0.39 residue: pdb=" P A G C 8 " occ=0.61 ... (66 atoms not shown) pdb=" H22B G C 8 " occ=0.39 residue: pdb=" P A C C 9 " occ=0.61 ... (60 atoms not shown) pdb=" H6 B C C 9 " occ=0.39 residue: pdb=" P A C C 10 " occ=0.61 ... (62 atoms not shown) pdb=" H6 B C C 10 " occ=0.39 ... (remaining 5 not shown) Time building chain proxies: 0.39, per 1000 atoms: 0.21 Number of scatterers: 1850 At special positions: 1 Unit cell: (42.251, 38.802, 84.18, 90, 93.479, 90) Space group: C 1 2 1 (No. 5) Number of sites at special positions: 1 Minimum distance between symmetrically equivalent sites: 0.5 Label Mult Shift Fractional coordinates pdb=" O HOH S 66 " 2 0.000 ( 0.0000 -0.1744 0.0000) original site sym 2 ( 0.0000 -0.1744 0.0000) exact 0,y,0 Number of scattering types: 5 Type Number sf(0) P 49 15.00 O 466 8.00 N 221 7.00 C 513 6.00 H 601 1.00 sf(0) = scattering factor at diffraction angle 0. No array of experimental phases found. 
============================== Scattering factors ============================= ----------X-ray scattering dictionary---------- Number of scattering types: 5 Type Number sf(0) Gaussians P 49 14.95 2 O 466 7.97 2 N 221 6.97 2 C 513 5.97 2 H 601 1.00 2 sf(0) = scattering factor at diffraction angle 0. ==================== Fixing bad ADP in input model (if any) =================== ========================== Anomalous scatterer groups ========================= All atoms refined with f_prime=0 and f_double_prime=0. Number of disulfides: simple=0, symmetry=0 Automatic linking Parameters for automatic linking Linking & cutoffs Metal : False - 3.50 Amimo acid : False - 1.90 Carbohydrate : True - 1.99 Ligands : True - 1.99 Small molecules : False - 1.98 Amino acid - RNA/DNA : False Number of custom bonds: simple=0, symmetry=0 Time building additional restraints: 0.43 Conformation dependent library (CDL) restraints added in 8.0 milliseconds Adding C-beta torsion restraints... Number of C-beta restraints generated: 0 Time building geometry restraints manager: 0.47 seconds NOTE: a complete listing of the restraints can be obtained by requesting output of .geo file. Histogram of bond lengths: 0.84 - 1.00: 493 1.00 - 1.15: 108 1.15 - 1.31: 63 1.31 - 1.47: 762 1.47 - 1.62: 455 Bond restraints: 1881 Sorted by residual: bond pdb=" O5'A C C 3 " pdb=" C5'A C C 3 " ideal model delta sigma weight residual 1.420 1.474 -0.054 1.50e-02 4.44e+03 1.30e+01 bond pdb=" N1 A IG C 7 " pdb=" H1 A IG C 7 " ideal model delta sigma weight residual 1.040 0.969 0.071 2.00e-02 2.50e+03 1.26e+01 bond pdb=" O3' C B 19 " pdb=" P C B 20 " ideal model delta sigma weight residual 1.607 1.563 0.044 1.50e-02 4.44e+03 8.66e+00 bond pdb=" C1' IG D 17 " pdb=" N9 IG D 17 " ideal model delta sigma weight residual 1.475 1.434 0.041 1.50e-02 4.44e+03 7.62e+00 bond pdb=" N1 IG D 17 " pdb=" H1 IG D 17 " ideal model delta sigma weight residual 1.040 0.990 0.050 2.00e-02 2.50e+03 6.27e+00 ... (remaining 1876 not shown) Histogram of bond angle deviations from ideal: 98.71 - 105.88: 329 105.88 - 113.04: 1659 113.04 - 120.21: 796 120.21 - 127.38: 456 127.38 - 134.55: 111 Bond angle restraints: 3351 Sorted by residual: angle pdb=" C4' G D 15 " pdb=" C3' G D 15 " pdb=" O3' G D 15 " ideal model delta sigma weight residual 113.00 107.13 5.87 1.50e+00 4.44e-01 1.53e+01 angle pdb=" C3' IC A 4 " pdb=" O3' IC A 4 " pdb=" P G A 5 " ideal model delta sigma weight residual 120.20 115.31 4.89 1.50e+00 4.44e-01 1.06e+01 angle pdb=" O4'B C C 3 " pdb=" C4'B C C 3 " pdb=" C3'B C C 3 " ideal model delta sigma weight residual 104.00 100.86 3.14 1.00e+00 1.00e+00 9.85e+00 angle pdb=" O3'A IG C 7 " pdb=" P A G C 8 " pdb=" OP1A G C 8 " ideal model delta sigma weight residual 108.00 98.71 9.29 3.00e+00 1.11e-01 9.60e+00 angle pdb=" OP2 G D 15 " pdb=" P G D 15 " pdb=" O5' G D 15 " ideal model delta sigma weight residual 108.00 98.73 9.27 3.00e+00 1.11e-01 9.55e+00 ... 
(remaining 3346 not shown) Histogram of dihedral angle deviations from ideal: 0.00 - 14.75: 608 14.75 - 29.50: 37 29.50 - 44.24: 33 44.24 - 58.99: 14 58.99 - 73.74: 9 Dihedral angle restraints: 701 sinusoidal: 701 harmonic: 0 Sorted by residual: dihedral pdb=" C3' IG B 17 " pdb=" C2' IG B 17 " pdb=" O2' IG B 17 " pdb="HO2' IG B 17 " ideal model delta sinusoidal sigma weight residual -61.35 -135.09 73.74 1 2.00e+01 2.50e-03 1.73e+01 dihedral pdb=" C3'B IG C 7 " pdb=" C2'B IG C 7 " pdb=" O2'B IG C 7 " pdb="HO2'B IG C 7 " ideal model delta sinusoidal sigma weight residual -61.35 -133.22 71.87 1 2.00e+01 2.50e-03 1.65e+01 dihedral pdb=" C3'A IC C 4 " pdb=" C2'A IC C 4 " pdb=" O2'A IC C 4 " pdb="HO2'A IC C 4 " ideal model delta sinusoidal sigma weight residual -61.44 -128.22 66.77 1 2.00e+01 2.50e-03 1.45e+01 ... (remaining 698 not shown) Histogram of chiral volume deviations from ideal: 0.001 - 0.069: 198 0.069 - 0.137: 44 0.137 - 0.205: 7 0.205 - 0.273: 3 0.273 - 0.342: 2 Chirality restraints: 254 Sorted by residual: chirality pdb=" P G A 2 " pdb=" OP1 G A 2 " pdb=" OP2 G A 2 " pdb=" O5' G A 2 " both_signs ideal model delta sigma weight residual True 2.41 -2.07 0.34 2.00e-01 2.50e+01 2.92e+00 chirality pdb=" P B C D 19 " pdb=" OP1B C D 19 " pdb=" OP2B C D 19 " pdb=" O5'B C D 19 " both_signs ideal model delta sigma weight residual True 2.41 -2.70 -0.29 2.00e-01 2.50e+01 2.03e+00 chirality pdb=" P B G C 5 " pdb=" OP1B G C 5 " pdb=" OP2B G C 5 " pdb=" O5'B G C 5 " both_signs ideal model delta sigma weight residual True 2.41 -2.18 0.23 2.00e-01 2.50e+01 1.35e+00 ... (remaining 251 not shown) Planarity restraints: 107 Sorted by residual: delta sigma weight rms_deltas residual plane pdb=" C1' G A 2 " -0.062 2.00e-02 2.50e+03 3.48e-02 4.24e+01 pdb=" N9 G A 2 " -0.001 2.00e-02 2.50e+03 pdb=" C8 G A 2 " 0.028 2.00e-02 2.50e+03 pdb=" N7 G A 2 " -0.022 2.00e-02 2.50e+03 pdb=" C5 G A 2 " -0.001 2.00e-02 2.50e+03 pdb=" C6 G A 2 " -0.032 2.00e-02 2.50e+03 pdb=" O6 G A 2 " -0.038 2.00e-02 2.50e+03 pdb=" N1 G A 2 " 0.010 2.00e-02 2.50e+03 pdb=" C2 G A 2 " 0.027 2.00e-02 2.50e+03 pdb=" N2 G A 2 " 0.033 2.00e-02 2.50e+03 pdb=" N3 G A 2 " -0.027 2.00e-02 2.50e+03 pdb=" C4 G A 2 " -0.016 2.00e-02 2.50e+03 pdb=" H8 G A 2 " 0.076 2.00e-02 2.50e+03 pdb=" H1 G A 2 " 0.025 2.00e-02 2.50e+03 delta sigma weight rms_deltas residual plane pdb=" C1'B G C 8 " 0.038 2.00e-02 2.50e+03 3.30e-02 3.81e+01 pdb=" N9 B G C 8 " 0.023 2.00e-02 2.50e+03 pdb=" C8 B G C 8 " -0.021 2.00e-02 2.50e+03 pdb=" N7 B G C 8 " 0.005 2.00e-02 2.50e+03 pdb=" C5 B G C 8 " 0.005 2.00e-02 2.50e+03 pdb=" C6 B G C 8 " 0.019 2.00e-02 2.50e+03 pdb=" O6 B G C 8 " 0.057 2.00e-02 2.50e+03 pdb=" N1 B G C 8 " -0.016 2.00e-02 2.50e+03 pdb=" C2 B G C 8 " 0.013 2.00e-02 2.50e+03 pdb=" N2 B G C 8 " -0.009 2.00e-02 2.50e+03 pdb=" N3 B G C 8 " -0.013 2.00e-02 2.50e+03 pdb=" C4 B G C 8 " 0.025 2.00e-02 2.50e+03 pdb=" H8 B G C 8 " -0.065 2.00e-02 2.50e+03 pdb=" H1 B G C 8 " -0.061 2.00e-02 2.50e+03 delta sigma weight rms_deltas residual plane pdb=" C1' IC D 14 " 0.040 2.00e-02 2.50e+03 3.21e-02 3.35e+01 pdb=" C2 IC D 14 " -0.023 2.00e-02 2.50e+03 pdb=" C4 IC D 14 " -0.006 2.00e-02 2.50e+03 pdb=" C5 IC D 14 " -0.026 2.00e-02 2.50e+03 pdb=" C6 IC D 14 " 0.001 2.00e-02 2.50e+03 pdb=" N1 IC D 14 " -0.010 2.00e-02 2.50e+03 pdb=" N2 IC D 14 " -0.017 2.00e-02 2.50e+03 pdb=" N3 IC D 14 " 0.047 2.00e-02 2.50e+03 pdb=" O4 IC D 14 " 0.051 2.00e-02 2.50e+03 pdb=" H21 IC D 14 " -0.018 2.00e-02 2.50e+03 pdb=" H22 IC D 14 " -0.011 2.00e-02 2.50e+03 pdb=" H5 IC D 14 " -0.061 
2.00e-02 2.50e+03 pdb=" H6 IC D 14 " 0.034 2.00e-02 2.50e+03 ... (remaining 104 not shown) Histogram of nonbonded interaction distances: 1.55 - 2.16: 171 2.16 - 2.77: 2362 2.77 - 3.38: 5294 3.38 - 3.99: 9208 3.99 - 4.60: 11978 Nonbonded interactions: 29013 Sorted by model distance: nonbonded pdb=" OP2A C D 20 " pdb=" O HOH S 58 " model vdw sym.op. 1.547 2.200 x+1/2,y-1/2,z nonbonded pdb=" O HOH S 58 " pdb=" OP2A C D 20 " model vdw sym.op. 1.547 2.200 x-1/2,y+1/2,z nonbonded pdb=" H42B C C 10 " pdb=" O6 G D 11 " model vdw 1.607 1.850 nonbonded pdb=" O HOH S 91 " pdb=" O HOH S 96 " model vdw 1.718 2.200 nonbonded pdb="HO2' C D 13 " pdb=" O2'A C D 20 " model vdw sym.op. 1.722 1.850 x-1/2,y+1/2,z ... (remaining 29008 not shown) NOTE: a complete listing of the restraints can be obtained by requesting output of .geo file. ================== TLS group selections from PDB file header ================== TLS group selections: selection string: (chain 'C' and resid 1 through 10) selects 652 atoms selection string: (chain 'D' and resid 11 through 20) selects 377 atoms selection string: (chain 'A' and resid 1 through 10) selects 326 atoms selection string: (chain 'B' and resid 11 through 20) selects 391 atoms ======================== Summary of geometry restraints ======================= ====================== Modifying start model if requested ===================== n_use = 1850 n_use_u_iso = 705 n_use_u_aniso = 1145 n_grad_site = 0 n_grad_u_iso = 0 n_grad_u_aniso = 0 n_grad_occupancy = 0 n_grad_fp = 0 n_grad_fdp = 0 total number of scatterers = 1850 ================== Extract refinement strategy and selections ================= Refinement flags and selection counts: individual_sites = True (1850 atoms) torsion_angles = False (0 atoms) rigid_body = False (0 atoms in 0 groups) individual_adp = True (iso = 705 aniso = 1145) group_adp = False (0 atoms in 0 groups) tls = False (0 atoms in 0 groups) occupancies = True (892 atoms) group_anomalous = False ******************************************************************************* Automatic adjustment: hydrogens.refine=riding ******************************************************************************* ==================== Process input NCS or/and find new NCS ==================== Using existing and finding new NCS is disabled. Use ncs_search.enabled=true to activate it. Look at refinement.ncs for more NCS related parameters. =================== Write initial parameters into .eff file =================== Writing effective parameters to file: C:\Praca\Uczelnia\Praca naukowa\Seminaria\2019 Seminarium CBB - Reproducible research\repo\ISO2-1_6A_refine_96_refine_001.eff Writing geometry restraints to file: C:\Praca\Uczelnia\Praca naukowa\Seminaria\2019 Seminarium CBB - Reproducible research\repo\ISO2-1_6A_refine_96_refine_001.geo CPU time processing inputs: 5.25 ============================ Non-default parameters =========================== A complete record of all parameters was written to the .eff file above. Below are only the non-defaults. 
#phil __ON__ refinement { crystal_symmetry { unit_cell = 42.25099945 38.80199814 84.18000031 90 93.47899628 90 space_group = "C 1 2 1" } input { pdb { file_name = "data/ISO2-1_6A_refine_96.pdb" } xray_data { file_name = "data/ISO2-1_6A_refine_96.mtz" labels = "I-obs,SIGI-obs" r_free_flags { file_name = "data/ISO2-1_6A_refine_96.mtz" label = "R-free-flags" test_flag_value = 0 } } monomers { file_name = "C:\\Praca\\Uczelnia\\Praca naukowa\\Seminaria\\2019 Seminarium CBB - Reproducible research\\repo\\data\\IC-IG-QM-paired-lib.cif" } } output { prefix = "ISO2-1_6A_refine_96_refine" serial = 1 } electron_density_maps { map_coefficients { map_type = "2mFo-DFc" mtz_label_amplitudes = "2FOFCWT" mtz_label_phases = "PH2FOFCWT" fill_missing_f_obs = True } map_coefficients { map_type = "2mFo-DFc" mtz_label_amplitudes = "2FOFCWT_no_fill" mtz_label_phases = "PH2FOFCWT_no_fill" } map_coefficients { map_type = "mFo-DFc" mtz_label_amplitudes = "FOFCWT" mtz_label_phases = "PHFOFCWT" } map_coefficients { map_type = "anomalous" mtz_label_amplitudes = "ANOM" mtz_label_phases = "PANOM" } map { map_type = "2mFo-DFc" fill_missing_f_obs = True } map { map_type = "2mFo-DFc" } map { map_type = "mFo-DFc" } } refine { adp { tls = (chain 'C' and resid 1 through 10) tls = (chain 'D' and resid 11 through 20) tls = (chain 'A' and resid 1 through 10) tls = (chain 'B' and resid 11 through 20) } } main { number_of_macro_cycles = 0 random_seed = 7 nproc = Auto } hydrogens { refine = individual *riding Auto } } #phil __OFF__ =============================== refinement start ============================== ================================== Hydrogens ================================== Total: count: 601 occupancy sum: 448.00 (% of total atoms 31.91) Rotatable: count: 65 occupancy sum: 48.00 (% of total atoms 3.42) ----------structure factors based statistics (before refinement)---------- ----------X-ray data---------- |--(resolution: 1.60 - 28.55 A, n_refl.=17941 (all), 5.57 % free)------------| | | | r_work= 0.4178 r_free= 0.7112 coordinate error (max.-lik. estimate): 0.13 A | | | | normalized target function (ml) (work): 3.294322 | | target function (ml) not normalized (work): 55809.113519 | | target function (ml) not normalized (free): 3323.845711 | |-----------------------------------------------------------------------------| |-----------------------------------------------------------------------------| | Bin Resolution Compl. No. Refl. R-factors Targets | |number range work test work test work test| | 1: 28.5592 - 3.0604 0.99 2507 148 0.4098 0.7110 5.2382 5.2373| | 2: 3.0604 - 2.4296 0.99 2423 143 0.4099 0.6927 4.5863 4.6338| | 3: 2.4296 - 2.1226 0.99 2437 144 0.4173 0.7121 3.2801 3.2735| | 4: 2.1226 - 1.9285 0.99 2394 141 0.4483 0.7244 2.9638 3.0351| | 5: 1.9285 - 1.7903 0.99 2393 142 0.4580 0.7317 2.6524 2.6289| | 6: 1.7903 - 1.6848 0.99 2400 141 0.4410 0.7442 2.2914 2.4308| | 7: 1.6848 - 1.6004 0.98 2387 141 0.4496 0.7192 1.9391 1.92| |-----------------------------------------------------------------------------| |-----------------------------------------------------------------------------| |R-free likelihood based estimates for figures of merit, absolute phase error,| |and distribution parameters alpha and beta (Acta Cryst. (1995). A51, 880-887)| | | | Bin Resolution No. Refl. 
FOM Phase Scale Alpha Beta | | # range work test error factor | | 1: 28.5592 - 3.0604 2507 148 0.22 73.16 0.95 0.08 8580.15| | 2: 3.0604 - 2.4296 2423 143 0.21 74.04 1.55 0.21 2988.28| | 3: 2.4296 - 2.1226 2437 144 0.68 37.54 1.49 0.29 148.30| | 4: 2.1226 - 1.9285 2394 141 0.66 39.13 1.51 0.29 66.93| | 5: 1.9285 - 1.7903 2393 142 0.66 39.59 1.53 0.31 36.86| | 6: 1.7903 - 1.6848 2400 141 0.67 38.88 1.51 0.30 19.80| | 7: 1.6848 - 1.6004 2387 141 0.69 37.06 1.58 0.27 9.03| |alpha: min = 0.08 max = 0.31 mean = 0.25| |beta: min = 9.03 max = 8580.15 mean = 1737.20| |figures of merit: min = 0.00 max = 1.00 mean = 0.54| |phase err.(work): min = 0.00 max = 89.99 mean = 48.67| |phase err.(test): min = 0.00 max = 89.95 mean = 48.96| |-----------------------------------------------------------------------------| ----------Initial model statistics (before refinement)---------- min max mean <Bi,j> iso aniso Overall: 17.99 137.63 42.48 6.20 1249 0 RNA/DNA: 17.99 137.63 41.37 6.34 910 0 Water: 26.74 100.33 44.57 N/A 104 0 Other: 20.62 106.68 45.84 N/A 235 0 Chain A: 25.33 137.63 63.05 N/A 214 0 Chain C: 17.99 49.13 26.56 N/A 428 0 Chain B: 27.87 129.12 64.47 N/A 257 0 Chain S: 26.74 100.33 44.57 N/A 104 0 Chain D: 22.13 59.49 28.41 N/A 246 0 Histogram: Values Number of atoms 17.99 - 29.95 535 29.95 - 41.92 249 41.92 - 53.88 116 53.88 - 65.84 111 65.84 - 77.81 141 77.81 - 89.77 62 89.77 - 101.74 23 101.74 - 113.70 7 113.70 - 125.67 2 125.67 - 137.63 3 |-Occupancies statistics------------------------------------------------------| | occupancies: max = 1.00 min = 0.24 number of occupancies < 0.1 = 0 | |-----------------------------------------------------------------------------| ----------X-ray data---------- |--(resolution: 1.60 - 28.55 A, n_refl.=17941 (all), 5.57 % free)------------| | | | r_work= 0.4178 r_free= 0.7112 coordinate error (max.-lik. estimate): 0.13 A | | | | normalized target function (ml) (work): 3.294322 | | target function (ml) not normalized (work): 55809.113519 | | target function (ml) not normalized (free): 3323.845711 | |-----------------------------------------------------------------------------| |-----------------------------------------------------------------------------| | Bin Resolution Compl. No. Refl. R-factors Targets | |number range work test work test work test| | 1: 28.5592 - 3.0604 0.99 2507 148 0.4098 0.7110 5.2382 5.2373| | 2: 3.0604 - 2.4296 0.99 2423 143 0.4099 0.6927 4.5863 4.6338| | 3: 2.4296 - 2.1226 0.99 2437 144 0.4173 0.7121 3.2801 3.2735| | 4: 2.1226 - 1.9285 0.99 2394 141 0.4483 0.7244 2.9638 3.0351| | 5: 1.9285 - 1.7903 0.99 2393 142 0.4580 0.7317 2.6524 2.6289| | 6: 1.7903 - 1.6848 0.99 2400 141 0.4410 0.7442 2.2914 2.4308| | 7: 1.6848 - 1.6004 0.98 2387 141 0.4496 0.7192 1.9391 1.92| |-----------------------------------------------------------------------------| |-----------------------------------------------------------------------------| |R-free likelihood based estimates for figures of merit, absolute phase error,| |and distribution parameters alpha and beta (Acta Cryst. (1995). A51, 880-887)| | | | Bin Resolution No. Refl. 
FOM Phase Scale Alpha Beta | | # range work test error factor | | 1: 28.5592 - 3.0604 2507 148 0.22 73.16 0.95 0.08 8580.15| | 2: 3.0604 - 2.4296 2423 143 0.21 74.04 1.55 0.21 2988.28| | 3: 2.4296 - 2.1226 2437 144 0.68 37.54 1.49 0.29 148.30| | 4: 2.1226 - 1.9285 2394 141 0.66 39.13 1.51 0.29 66.93| | 5: 1.9285 - 1.7903 2393 142 0.66 39.59 1.53 0.31 36.86| | 6: 1.7903 - 1.6848 2400 141 0.67 38.88 1.51 0.30 19.80| | 7: 1.6848 - 1.6004 2387 141 0.69 37.06 1.58 0.27 9.03| |alpha: min = 0.08 max = 0.31 mean = 0.25| |beta: min = 9.03 max = 8580.15 mean = 1737.20| |figures of merit: min = 0.00 max = 1.00 mean = 0.54| |phase err.(work): min = 0.00 max = 89.99 mean = 48.67| |phase err.(test): min = 0.00 max = 89.95 mean = 48.96| |-----------------------------------------------------------------------------| ==================================== Final ==================================== ============================= updating all scales ============================= start: r(all,work,free)=0.4505 0.4178 0.7112 n_refl.: 17941 re-set all scales: r(all,work,free)=0.4505 0.4178 0.7112 n_refl.: 17941 remove outliers: r(all,work,free)=0.4611 0.4280 0.7112 n_refl.: 17894 overall B=0.00 to atoms: r(all,work,free)=0.4611 0.4280 0.7112 n_refl.: 17894 bulk-solvent and scaling: r(all,work,free)=0.2014 0.1989 0.2470 n_refl.: 17894 remove outliers: r(all,work,free)=0.2014 0.1989 0.2470 n_refl.: 17894 ======================== Statistics in resolution bins ======================== Total model structure factor: F_model = k_total * (F_calc + k_mask * F_mask) k_total = k_isotropic * k_anisotropic Resolution Compl Nwork Nfree R_work <Fobs> <Fmodel> kiso kani kmask 28.555-9.352 100.00 94 6 0.1561 144.260 137.119 0.803 0.995 0.328 9.349-7.384 100.00 94 6 0.1095 96.244 94.867 0.841 0.991 0.297 7.375-5.820 100.00 192 11 0.1431 75.960 74.160 0.902 0.983 0.276 5.799-4.591 97.34 381 22 0.1459 91.651 89.311 1.017 0.977 0.270 4.589-3.621 98.88 751 45 0.1623 89.915 86.882 1.161 0.963 0.220 3.619-2.856 98.11 1517 91 0.2013 56.649 54.946 1.170 0.945 0.040 2.855-2.253 99.31 3117 184 0.2196 23.593 22.416 1.046 0.929 0.010 2.252-1.777 99.06 6280 370 0.2339 10.554 9.781 1.065 0.917 0.010 1.777-1.600 97.87 4470 263 0.3080 5.190 4.328 0.995 0.915 0.010 Approximation of k_total with k_overall*exp(-b_overall*s**2/4) k_overall=1.1000 b_overall=3.5320 ================= overall refinement statistics: step by step ================= ****************** REFINEMENT STATISTICS STEP BY STEP ****************** leading digit, like 1_, means number of macro-cycle 0 : statistics at the very beginning when nothing is done yet 1_bss: bulk solvent correction and/or (anisotropic) scaling 1_xyz: refinement of coordinates 1_adp: refinement of ADPs (Atomic Displacement Parameters) 1_occ: refinement of occupancies ------------------------------------------------------------------------ stage r-work r-free bonds angles b_min b_max b_ave n_water shift 0 : 0.4178 0.7112 0.009 1.79 18.0 137.6 42.5 104 0.000 end: 0.1989 0.2470 0.009 1.79 18.0 137.6 42.5 104 0.000 ------------------------------------------------------------------------ CPU time actual refinement: 18.38 ============================== Exporting results ============================== Writing refined structure to PDB file: C:\Praca\Uczelnia\Praca naukowa\Seminaria\2019 Seminarium CBB - Reproducible research\repo\ISO2-1_6A_refine_96_refine_001.pdb n_use = 1850 n_use_u_iso = 1850 n_use_u_aniso = 0 n_grad_site = 0 n_grad_u_iso = 0 n_grad_u_aniso = 0 n_grad_occupancy = 0 n_grad_fp = 0 n_grad_fdp = 0 total 
number of scatterers = 1850 Writing default parameters for subsequent refinement: C:\Praca\Uczelnia\Praca naukowa\Seminaria\2019 Seminarium CBB - Reproducible research\repo\ISO2-1_6A_refine_96_refine_002.def =============================== Detailed timings ============================== Micro-tasks: mask = 0.00 f_calc = 0.00 alpha_beta = 0.02 target = 0.00 gradients_wrt_atomic_parameters = 0.00 fmodel = 0.00 r_factors = 0.02 phase_errors = 0.23 foms = 0.00 TOTAL for micro-tasks = 0.27 NUMBER OF MASK CALCS= 0 Total CPU time: 35.48 seconds from_scatterers_fft: 7 calls, 0.22 s =========================== phenix.refine: finished =========================== # Date 2019-06-07 Time 09:48:36 Środkowoeuropejski czas letni +0200 (1559893716.47 s) wall clock time: 36.97 s Start R-work = 0.4178, R-free = 0.7112 Final R-work = 0.1989, R-free = 0.2470 ###Markdown Rysunki i wizualizacje==================Notatniki pozwalają w łatwy sposób pokazać jak uzyskano daną wizualizację. Jest to sposób na udowodnienie, że wykresy są oparte na danych eksperymentalnych i nie były w żaden sposób ręcznie modyfikowane. ###Code import nglview as nv w = nv.show_pdbid("3pqr") w ###Output _____no_output_____ ###Markdown Odpytywanie baz danych======================W notatniku można pokazać jak działając na danych bezpośrednio z bazy (np. CSD albo PDB) uzyskano wyniki przedstawiane w pracy naukowej. ###Code %matplotlib inline from ccdc import io from ccdc.diagram import DiagramGenerator from ccdc.search import TextNumericSearch, SubstructureSearch, ConnserSubstructure csd_reader = io.EntryReader('CSD') # można podać własną (pod)bazę CSD first_entry = csd_reader[0] # pierwszy wpis w bazie CSD print(first_entry.identifier) ###Output AABHTZ ###Markdown Odpytywanie baz danych (diagramy)==============================Znalezioną strukturę można zwizualizować... ###Code diagram_generator = DiagramGenerator() img = diagram_generator.image(first_entry) img ###Output _____no_output_____ ###Markdown Odpytywanie baz danych (przeszukiwanie tekstowe)===========================================CSD można oczywiście przeszukiwać tekstowo (również po synonimach)... 
###Code text_numeric_search = TextNumericSearch() text_numeric_search.add_compound_name('aspirin') identifiers = [h.identifier for h in text_numeric_search.search()] text_numeric_search.clear() text_numeric_search.add_synonym('aspirin') identifiers.extend([h.identifier for h in text_numeric_search.search()]) print(identifiers) ###Output ['ACMEBZ', 'ACSALA', 'ACSALA01', 'ACSALA02', 'ACSALA03', 'ACSALA04', 'ACSALA05', 'ACSALA06', 'ACSALA07', 'ACSALA08', 'ACSALA09', 'ACSALA10', 'ACSALA11', 'ACSALA12', 'ACSALA13', 'ACSALA14', 'ACSALA15', 'ACSALA16', 'ACSALA17', 'ACSALA18', 'ACSALA19', 'ACSALA20', 'ACSALA21', 'ACSALA22', 'ACSALA23', 'ACSALA24', 'ACSALA25', 'ARIFOX', 'ASPRIN', 'BEHWOA', 'CUASPR', 'CUASPR01', 'CUASPR02', 'DIFHOP', 'DIFQAK', 'DISXOU', 'ECONOA', 'EYOMEL', 'EYOMIP', 'EYOMOV', 'EYOMUB', 'EYONAI', 'HUNJEH', 'HUPPOX', 'HUPPOX01', 'IBOBUY', 'IBOBUY01', 'IBOCEJ', 'IBOCEJ01', 'IBOCOT', 'IBOCOT01', 'JIRNEE', 'KEWNOQ', 'KEWNOQ01', 'KICVUP', 'LAJVUO01', 'NINFUN', 'NUKXOH', 'NUWTIJ01', 'NUWTOP01', 'NUWTOP02', 'OKEZUZ', 'PETZAQ', 'PETZIY', 'PETZOE', 'PETZUK', 'PIKYOA', 'PIKYUG', 'PIKZAN', 'SIBYUA', 'SIBYUA01', 'TAZRAO', 'TORQUM02', 'UTUCIW', 'VUGMIT', 'XOJMOZ', 'YIRPEW', 'YOSMOI', 'DIPJAQ', 'TAZRAO01', 'TAZRAO02', 'ACMEBZ', 'ACSALA', 'ACSALA01', 'ACSALA02', 'ACSALA03', 'ACSALA04', 'ACSALA05', 'ACSALA06', 'ACSALA07', 'ACSALA08', 'ACSALA09', 'ACSALA10', 'ACSALA11', 'ACSALA12', 'ACSALA13', 'ACSALA14', 'ACSALA15', 'ACSALA16', 'ACSALA17', 'ACSALA18', 'ACSALA19', 'ACSALA20', 'ACSALA21', 'ACSALA22', 'ACSALA23', 'ACSALA24', 'ACSALA25', 'ARIFOX', 'BEHWOA', 'DIFHOP', 'DIFQAK', 'DISXOU', 'EYOMEL', 'EYOMIP', 'EYOMOV', 'EYOMUB', 'EYONAI', 'HUNJEH', 'HUPPOX', 'HUPPOX01', 'IBOBUY', 'IBOBUY01', 'IBOCEJ', 'IBOCEJ01', 'IBOCOT', 'IBOCOT01', 'JIRNEE', 'KEWNOQ', 'KEWNOQ01', 'LAJVUO01', 'NINFUN', 'NUKXOH', 'NUWTIJ01', 'NUWTOP01', 'NUWTOP02', 'PIKYOA', 'PIKYUG', 'PIKZAN', 'SIBYUA', 'SIBYUA01', 'TORQUM02', 'UTUCIW', 'VUGMIT', 'XOJMOZ', 'YIRPEW', 'YOSMOI', 'DIPJAQ', 'TAZRAO01', 'TAZRAO02'] ###Markdown Odpytywanie baz danych (przeszukiwanie tekstowe)================================================ ###Code aspiryna = csd_reader.entry('ACMEBZ') img = diagram_generator.image(aspiryna) img ###Output _____no_output_____ ###Markdown Odpytywanie baz danych (zapytania graficzne)======================================Można również zadawać zapytania dotyczące geometrii cząsteczek. W tym celu najłatwiej "wyklikać" zapytanie w programie CONQUEST. 
An example query is shown below. Querying databases (graphical queries)
====================================== ###Code
con_substructure = ConnserSubstructure("data/adenine.con")
substructure_search = SubstructureSearch()
substructure_search.add_substructure(con_substructure)
hits = substructure_search.search()
print("Znaleziono {0} struktur pasujących do zapytania".format(len(hits)))
###Output Znaleziono 437 struktur pasujących do zapytania ###Markdown Querying databases (analysing the results)
====================================== ###Code
import pandas as pd
import numpy as np

df = pd.DataFrame([h.measurements for h in hits], index=list([h.identifier + "_" + str(idx) for idx, h in enumerate(hits)]))
df.head()
###Output _____no_output_____ ###Markdown Querying databases (analysing the results)
====================================== ###Code
bonds = ['N1_C2', 'C2_N3', 'N3_C4', 'C4_C5', 'C5_C6', 'N1_C6', 'C5_N7', 'N7_C8', 'C8_N9', 'N9_C', 'C6_NH']
df.loc[:, bonds].hist(bins=20, figsize=(16, 8), grid=False, layout=(3,4));
###Output _____no_output_____ ###Markdown Data analysis
=============
As an example, we will try to analyse the quality of the structures in the PDB, broken down by resolution, year and journal. *Data courtesy of Zbyszek Dauter* ###Code
# read the data from the files
pdb_df = pd.read_fwf("data/percentiles_all.diffr", names=["PDB code", "Resolution", "Date", "Rfree_percentile_abs", "Clash_percentile_abs", "Ramachandran_percentile_abs", "Rota_percentile_abs", "RSRZ_percentile_abs", "Rfree_percentile_rel", "Clash_percentile_rel", "Ramachandran_percentile_rel", "Rota_percentile_rel", "RSRZ_percentile_rel", "Rfree_raw", "Clash_raw", "Ramachandran_raw", "Rota_raw", "RSRZ_raw"], na_values="NotAv", infer_nrows=10000)
journal_df = pd.read_fwf("data/journ.all_xray", names=["PDB code", "Journal", "Volume", "Page", "Publication year"], infer_nrows=10000)
pdb_df.head()
###Output _____no_output_____ ###Markdown Data analysis
============= ###Code
# merge the data sets
merged_df = pd.merge(pdb_df, journal_df, on="PDB code")

# keep only the selected columns and drop rows with missing values
merged_df = merged_df.loc[:, ["PDB code", "Resolution", "Date", "RSRZ_raw", "Journal", "Publication year"]]
merged_df = merged_df.dropna()

# clean up the data and the column names
merged_df.loc[:, "Date"] = merged_df.loc[:, "Date"].apply(str).str.slice(stop=4).apply(int)
merged_df.loc[:, "Publication year"] = merged_df.loc[:, "Publication year"].astype(int)
merged_df.loc[:, "Resolution"] = merged_df.loc[:, "Resolution"].round(decimals=1)
merged_df = merged_df.rename(index=str, columns={"RSRZ_raw": "RSR Z-score", "Date": "Year"})
merged_df.head()
###Output _____no_output_____ ###Markdown Data analysis
============= ###Code
from plotnine import *
import plotnine.options

plotnine.options.figure_size = (10, 5)
resolution_df = merged_df.groupby(['Resolution'], as_index=False).mean()
(ggplot(resolution_df, aes(x="Resolution", y="RSR Z-score")) + geom_point() + theme_bw()).draw();
###Output _____no_output_____ ###Markdown Data analysis
============= ###Code
year_df = merged_df.groupby(['Year'], as_index=False).mean()
(ggplot(year_df, aes(x="Year", y="RSR Z-score")) + geom_point() + theme_bw()).draw();
###Output _____no_output_____ ###Markdown Data analysis
============= ###Code
venue_count_df = merged_df.groupby(['Journal'], as_index=False).size()
popular_venues = venue_count_df[venue_count_df >= 20].index
venue_df = merged_df.loc[merged_df.Journal.isin(popular_venues), :].groupby(['Journal'], as_index=False).mean()
top_ten = venue_df.sort_values(by="RSR Z-score", ascending=True).head(20)
top_ten['Journal'] = pd.Categorical(top_ten['Journal'], categories=top_ten.Journal)
(ggplot(top_ten, aes(x="Journal", y="RSR Z-score")) + geom_bar(stat="identity") + theme_bw() + theme(axis_text_x = element_text(angle = 90))).draw();
###Output _____no_output_____ ###Markdown Data analysis
============= ###Code
bottom_ten = venue_df.sort_values(by="RSR Z-score", ascending=False).head(20)
bottom_ten['Journal'] = pd.Categorical(bottom_ten['Journal'], categories=bottom_ten.Journal)
(ggplot(bottom_ten, aes(x="Journal", y="RSR Z-score")) + geom_bar(stat="identity") + theme_bw() + theme(axis_text_x = element_text(angle = 90))).draw();
###Output _____no_output_____ ###Markdown Data analysis
============= ###Code
year_venue_df = merged_df.loc[merged_df.Journal.isin(['FEBS J.', 'SCIENCE', 'NATURE', 'ACTA CRYSTALLOGR.,SECT.D']), :]\
    .groupby(['Year', 'Journal'], as_index=False).median()
(ggplot(year_venue_df, aes(x="Year", y="RSR Z-score", color="Journal")) + geom_point() + facet_grid('.~Journal') + geom_smooth(method="glm") + theme_bw()).draw();
###Output _____no_output_____
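###Markdown Data analysis
=============
To put a number on the per-journal trends that the fitted lines above only show visually, one can regress the RSR Z-score on the publication year for each journal. This is a minimal sketch that only assumes the `merged_df` dataframe built above; the journal selection mirrors the previous cell. ###Code
# slope of RSR Z-score vs. year for a few journals (degree-1 polynomial fit)
selected = ['FEBS J.', 'SCIENCE', 'NATURE', 'ACTA CRYSTALLOGR.,SECT.D']
for journal, group in merged_df[merged_df.Journal.isin(selected)].groupby('Journal'):
    slope, intercept = np.polyfit(group['Year'], group['RSR Z-score'], deg=1)
    print(f"{journal}: {slope:+.4f} RSR Z-score units per year")
###Output _____no_output_____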
deepchembed/notebook_scripts/auxiliary_distribution_exploration.ipynb
###Markdown Visualize the hardening function ###Code fig, axes = plt.subplots(1,3,figsize=(21,7)) x = np.arange(0,1,0.01) strength = [1.0, 1.5, 2.0] for i in range(len(strength)): for j in DCE.HARDENING_FUNCS.keys(): y = DCE.HARDENING_FUNCS[j](x) ** strength[i] axes[i].plot(x, y, label='order ' + str(j)) axes[i].set_title('Strength: ' + str(strength[i])) axes[i].legend() axes[i].set_xlabel('x') axes[i].set_ylabel('y') ###Output _____no_output_____ ###Markdown Creat Random data ###Code a_0 = np.random.random(size=100) a_1 = np.random.random(size=100) a_2 = np.random.random(size=100) a_01 = a_0 + a_1 q_2 = np.array([a_0/a_01, a_1/a_01]).T a_012 = a_01 + a_2 q_3 = np.array([a_0/a_012, a_1/a_012, a_2/a_012]).T ###Output _____no_output_____ ###Markdown Cluster = 2 ###Code strength = [1.0, 1.5, 2.0] fig, axes = plt.subplots(2,3,figsize=(21,14)) for i in range(len(strength)): for order, h_func in DCE.HARDENING_FUNCS.items(): p = DCE.hardening(q_2, h_func, strength[i]) for j in range(2): axes[j][i].plot(q_2.T[j],p.T[j], 'o', label='order ' + str(order)) axes[j][i].axhline(0.5, c='k') axes[j][i].plot([0,1],[0,1],c='k') axes[j][i].set_title('Strength: ' + str(strength[i])) axes[j][i].legend() axes[j][i].set_xlabel('q') axes[j][i].set_ylabel('p') ###Output _____no_output_____ ###Markdown Cluster = 3 ###Code strength = [1.0, 1.5, 2.0] fig, axes = plt.subplots(3,3,figsize=(21,21)) for i in range(len(strength)): for order, h_func in DCE.HARDENING_FUNCS.items(): p = DCE.hardening(q_3, h_func, strength[i]) for j in range(3): axes[j][i].plot(q_3.T[j],p.T[j], 'o', label='order ' + str(order)) axes[j][i].axhline(0.333333, c='k') axes[j][i].axhline(0.666667, c='k') axes[j][i].plot([0,1],[0,1],c='k') axes[j][i].set_title('Strength: ' + str(strength[i])) axes[j][i].legend() axes[j][i].set_xlabel('q') axes[j][i].set_ylabel('p') ###Output _____no_output_____ ###Markdown Comparisons in cluster = 3 ###Code fig, axes = plt.subplots(1,3,figsize=(21,7)) o_s = [[1, 1.5], [1, 2.0], [3, 1.0], [3, 1.5]] for i in range(len(o_s)): order = o_s[i][0] strength = o_s[i][1] h_func = DCE.HARDENING_FUNCS[order] p = DCE.hardening(q_3, h_func, strength) for j in range(3): axes[j].plot(q_3.T[j],p.T[j], 'o', label='Order ' + str(order) + '; Strength: ' + str(strength)) axes[j].axhline(0.333333, c='k') axes[j].axhline(0.666667, c='k') axes[j].plot([0,1],[0,1],c='k') axes[j].legend(fontsize=15) axes[j].set_xlabel('q', fontsize=15) axes[j].set_ylabel('p', fontsize=15) a .adead fig, axes = plt.subplots(1,3,figsize=(21,7)) o_s = [[1, 1.5], [1, 2.0], [3, 1.0], [3, 1.5]] for i in range(len(o_s)): order = o_s[i][0] strength = o_s[i][1] h_func = DCE.HARDENING_FUNCS[order] p = DCE.hardening(q_3, h_func, strength) for j in range(3): axes[j].plot(q_3.T[j],p.T[j], 'o', label='Order ' + str(order) + '; Strength: ' + str(strength)) axes[j].axhline(0.333333, c='k') axes[j].axhline(0.666667, c='k') axes[j].plot([0,1],[0,1],c='k') axes[j].legend(fontsize=15) axes[j].set_xlabel('q', fontsize=15) axes[j].set_ylabel('p', fontsize=15) ###Output _____no_output_____
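###Markdown The cells above rely on the project's `DCE` module for `HARDENING_FUNCS` and `hardening`. As a self-contained illustration of the same idea — sharpening a soft cluster-assignment distribution and renormalising it row by row — here is a minimal sketch; `cubic_hardening` and `harden` below are hypothetical stand-ins for illustration only, not the actual DeepChEmbed implementation. ###Code
import numpy as np

def cubic_hardening(q):
    # order-3 smoothstep-style curve: pushes probabilities towards 0 or 1
    return -2 * q**3 + 3 * q**2

def harden(q, h_func, strength=1.0):
    # apply the hardening curve element-wise, then renormalise each row to sum to 1
    p = h_func(q) ** strength
    return p / p.sum(axis=1, keepdims=True)

q = np.random.dirichlet(np.ones(3), size=5)   # five soft assignments over 3 clusters
p = harden(q, cubic_hardening, strength=1.5)
print(np.round(q, 3))
print(np.round(p, 3))
###Output _____no_output_____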
get_Git_repo_example.ipynb
###Markdown ###Code #On IBM Q-exprince run this in the background #using the white tringle in the black circle to the left of the plus above #then take a look at the file 'get_Git_repo-example-background-results.ipynb' %matplotlib inline # Importing standard Qiskit libraries and configuring account from qiskit import QuantumCircuit, execute, Aer, IBMQ from qiskit.compiler import transpile, assemble from qiskit.tools.jupyter import * from qiskit.visualization import * # Loading your IBM Q account(s) provider = IBMQ.load_account() #if you need to pull from a private repo ask me for the details to do so RepoName='Entanglement-Revisited' #https://github.com/VGGatGitHub/Entanglement-Revisited RepoURL='https://github.com/VGGatGitHub/'+RepoName+'.git' #you can set up with if file and so on but let's do it quickly by hand! !git clone https://github.com/VGGatGitHub/Entanglement-Revisited.git #!git pull https://github.com/VGGatGitHub/Entanglement-Revisited.git !ls -al !ls -al ./Entanglement-Revisited #look at where is your browser point now ... #https://quantum-computing.ibm.com/jupyter/user/get_Git_repo-example.ipynb #in a different window change the nb name for various nbs #to see an html file #https://quantum-computing.ibm.com/jupyter/user/Entanglement-Revisited/Entanglement_Revisited-VGG.html #to open a notebook see below but don't try to run it - the Qisket version is outdated ! #https://quantum-computing.ibm.com/jupyter/user/Entanglement-Revisited/Entanglement_Revisited-VGG.ipynb #if you have fixed the dependencies and got it running let me know! #uncoment %load line in the next cell at the first run to get the "hello world" loaded from first.py %load ./Entanglement-Revisited/first.py # %load ./Entanglement-Revisited/first.py print("hello world") str="*" for i in range(5): print(str) str+="*" #rmove the repo! !rm -f -r ./Entanglement-Revisited !ls -al ./Entanglement-Revisited !ls -al ###Output _____no_output_____
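###Markdown The same clone/inspect/remove cycle can also be done from Python instead of shell magics, which can be handy when the `!` commands are not available. This is only a sketch: it assumes the `gitpython` package is installed (e.g. via `!pip install gitpython`). ###Code
import shutil
from pathlib import Path

import git  # provided by the gitpython package

repo_name = 'Entanglement-Revisited'
repo_url = 'https://github.com/VGGatGitHub/' + repo_name + '.git'

# clone only if the folder is not already there
if not Path(repo_name).exists():
    git.Repo.clone_from(repo_url, repo_name)

# list the files that were pulled down
print(sorted(p.name for p in Path(repo_name).iterdir()))

# clean up, mirroring the `!rm -f -r` cell above
shutil.rmtree(repo_name, ignore_errors=True)
###Output _____no_output_____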
Logistic_regression_self.ipynb
###Markdown Inference : From these rankings we can conclude that 0th column is not at all important so we can drop that ###Code # Dropping the casenum column because its not necessary claimants.drop(['CASENUM'],inplace = True,axis = 1) claimants.shape # Dropping the NA values claimants = claimants.dropna() X = claimants.iloc[:,[1,2,3,4,5]] Y = claimants.iloc[:,0] classifier = LogisticRegression() classifier.fit(X,Y) y_pred = classifier.predict_proba(X) y_pred y_pred = classifier.predict(X) y_pred from sklearn.metrics import confusion_matrix confusion_matrix = confusion_matrix(y_pred,Y) confusion_matrix from sklearn.metrics import classification_report print(classification_report(Y,y_pred)) from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score fpr, tpr, thresholds = roc_curve(Y, classifier.predict_proba (X)[:,1]) auc = roc_auc_score(Y, y_pred) import matplotlib.pyplot as plt plt.plot(fpr, tpr, color='red', label='logit model ( area = %0.2f)'%auc) plt.plot([0,1],[0,1],'k--') plt.xlabel('False Positive Rate or [1 - True Negative Rate]') plt.ylabel('True Positive Rate') auc ###Output _____no_output_____
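###Markdown One detail worth noting: the ROC curve above is computed from the predicted probabilities, while `auc` is computed from the hard 0/1 predictions, so the printed value need not match the area under the plotted curve. A small follow-up, reusing `classifier`, `X` and `Y` from above, computes both for comparison: ###Code
from sklearn.metrics import roc_auc_score

# AUC from class probabilities (matches the plotted curve) vs. from hard labels
proba_auc = roc_auc_score(Y, classifier.predict_proba(X)[:, 1])
label_auc = roc_auc_score(Y, classifier.predict(X))
print('AUC from probabilities:', round(proba_auc, 3))
print('AUC from hard predictions:', round(label_auc, 3))
###Output _____no_output_____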
include/edit_plugin_expectation_suite.ipynb
###Markdown Edit Your Expectation SuiteUse this notebook to recreate and modify your expectation suite:**Expectation Suite Name**: `plugin_expectation_suite` ###Code import datetime import pandas as pd import great_expectations as ge import great_expectations.jupyter_ux from great_expectations.core.batch import RuntimeBatchRequest from great_expectations.checkpoint import SimpleCheckpoint from great_expectations.exceptions import DataContextError from expectations.expect_column_to_exist_again import ExpectColumnToExistAgain context = ge.data_context.DataContext() # Note that if you modify this batch request, you may save the new version as a .json file # to pass in later via the --batch-request option df = pd.DataFrame({ "fruits": ["apple","banana","cherry", "date"], "animals": ["zebra", "yak", "xylo", "walrus"], "places": ["house", "school", "park", "store"] }) batch_request = { "datasource_name": "my_datasource", "data_connector_name": "default_runtime_data_connector_name", "data_asset_name": "my_alphabetical_dataframe", "runtime_parameters": { "batch_data": df }, "batch_identifiers": {"default_identifier_name": "default_identifier"}, } # Feel free to change the name of your suite here. Renaming this will not remove the other one. expectation_suite_name = "plugin_expectation_suite_2" try: suite = context.get_expectation_suite(expectation_suite_name=expectation_suite_name) print( f'Loaded ExpectationSuite "{suite.expectation_suite_name}" containing {len(suite.expectations)} expectations.' ) except DataContextError: suite = context.create_expectation_suite( expectation_suite_name=expectation_suite_name ) print(f'Created ExpectationSuite "{suite.expectation_suite_name}".') validator = context.get_validator( batch_request=RuntimeBatchRequest(**batch_request), expectation_suite_name=expectation_suite_name, ) column_names = [f'"{column_name}"' for column_name in validator.columns()] print(f"Columns: {', '.join(column_names)}.") validator.head(n_rows=5, fetch_all=False) validator.expect_column_values_to_be_alphabetical(column="fruits") validator.expect_column_values_to_be_alphabetical(column="animals", reverse=True) ###Output /usr/local/anaconda3/envs/ge_airflow/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above. and should_run_async(code) ###Markdown Create & Edit ExpectationsAdd expectations by calling specific expectation methods on the `validator` object. They all begin with `.expect_` which makes autocompleting easy using tab.Because you selected interactive mode, you are now creating or editing an Expectation Suite with validator feedback from the sample batch of data that you specified (see `batch_request`).Note that if you select manual mode you may still create or edit an Expectation Suite directly, without feedback from the `validator`. See our documentation for more info and examples: [How to create a new Expectation Suite without a sample batch](https://docs.greatexpectations.io/en/latest/guides/how_to_guides/creating_and_editing_expectations/how_to_create_a_new_expectation_suite_without_a_sample_batch.html).You can see all the available expectations in the **[expectation gallery](https://greatexpectations.io/expectations)**. Table Expectation(s) No table level expectations are in this suite. 
Feel free to add some here.They all begin with `validator.expect_table_...`. Column Expectation(s) No column level expectations are in this suite. Feel free to add some here.They all begin with`validator.expect_column_...`. Save & Review Your ExpectationsLet's save the expectation suite as a JSON file in the `great_expectations/expectations` directory of your project.Let's now rebuild your Data Docs, which helps you communicate about your data with both machines and humans. ###Code print(validator.get_expectation_suite(discard_failed_expectations=True)) validator.save_expectation_suite(discard_failed_expectations=True) checkpoint_config = { "class_name": "SimpleCheckpoint" } checkpoint = SimpleCheckpoint( f"_tmp_checkpoint_{expectation_suite_name}", context, **checkpoint_config ) checkpoint_result = checkpoint.run(validations=[ { "batch_request": batch_request, "expectation_suite_name": expectation_suite_name } ]) context.build_data_docs() validation_result_identifier = checkpoint_result.list_validation_result_identifiers()[0] context.open_data_docs(resource_identifier=validation_result_identifier) ###Output 2022-02-23T12:20:10-0500 - INFO - 2 expectation(s) included in expectation_suite. { "meta": { "citations": [ { "batch_request": { "data_asset_name": "yellow_tripdata_sample_2019-01.csv", "data_connector_name": "default_inferred_data_connector_name", "datasource_name": "my_datasource", "limit": 1000 }, "citation_date": "2022-02-23T17:08:53.686634Z", "comment": "Created suite added via CLI" } ], "great_expectations_version": "0.14.6" }, "data_asset_type": null, "ge_cloud_id": null, "expectations": [ { "kwargs": { "column": "animals", "reverse": true }, "expectation_type": "expect_column_values_to_be_alphabetical", "meta": {} }, { "kwargs": { "column": "fruits" }, "expectation_type": "expect_column_values_to_be_alphabetical", "meta": {} } ], "expectation_suite_name": "plugin_expectation_suite" } 2022-02-23T12:20:10-0500 - INFO - 2 expectation(s) included in expectation_suite. 2022-02-23T12:20:10-0500 - INFO - 2 expectation(s) included in expectation_suite.
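###Markdown As the section above notes, no column-level expectations were added before saving. For reference, a couple of built-in column expectations that fit the small `fruits`/`animals`/`places` dataframe are sketched below; in a real run these calls belong in the "Create & Edit Expectations" section, before `validator.save_expectation_suite(...)`, so that they are persisted with the suite. ###Code
# example column-level expectations for the sample dataframe
validator.expect_column_values_to_not_be_null(column="places")
validator.expect_column_values_to_be_in_set(
    column="fruits", value_set=["apple", "banana", "cherry", "date"]
)
###Output _____no_output_____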
examples/5_token_classification_task.ipynb
###Markdown Token Classification Task (POS Tagging) In this tutorial we will see how we can use PyTorchWrapper to tackle the task of pos tagging in the Penn Treebankdataset. Additional librariesFirst of all we need to install the `nltk` library in order to download the data. ###Code ! pip install nltk ###Output _____no_output_____ ###Markdown Downloading DataNext we download the data. ###Code import nltk nltk.download('treebank') nltk.download('universal_tagset') ###Output _____no_output_____ ###Markdown Import Statements ###Code import numpy as np import torch import random import math import os from tqdm.auto import tqdm from nltk.corpus import treebank from torch import nn from torch.utils.data.dataset import Dataset from torch.utils.data.dataloader import DataLoader from torch.utils.data.sampler import SubsetRandomSampler from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence from pytorch_wrapper import modules, System from pytorch_wrapper import evaluators as evaluators from pytorch_wrapper.samplers import SubsetSequentialSampler from pytorch_wrapper.loss_wrappers import TokenLabelingGenericPointWiseLossWrapper from pytorch_wrapper.training_callbacks import EarlyStoppingCriterionCallback ###Output _____no_output_____ ###Markdown Dataset Definition ###Code class TreeBankDataset(Dataset): def __init__(self, sentences, w2i, l2i): self.ids = [] self.texts = [] self.texts_len = [] self.targets = [] for i, ex in enumerate(tqdm(sentences)): self.ids.append(i) tokens, labels = list(zip(*ex)) self.texts.append(TreeBankDataset.convert_tokens_to_indices(tokens, w2i)) self.texts_len.append(len(tokens)) self.targets.append(TreeBankDataset.convert_tokens_to_indices(labels, l2i)) self._shuffle_examples() def __getitem__(self, index): return ( self.ids[index], ( self.texts[index], self.texts_len[index] ), self.targets[index] ) def __len__(self): return len(self.ids) def _shuffle_examples(self, seed=12345): """ Shuffles the examples with the given seed. :param seed: The seed used for shuffling. """ random.seed(seed) l = list(zip(self.ids, self.texts, self.texts_len, self.targets)) random.shuffle(l) self.ids, self.texts, self.texts_len, self.targets = zip(*l) @staticmethod def collate_fn(batch): """ Function that combines a list of examples in order to a batch. Called internally by dataloaders. """ batch_zipped = list(zip(*batch)) input_zipped = list(zip(*batch_zipped[1])) ids = batch_zipped[0] texts = torch.tensor(TreeBankDataset.pad_to_max(input_zipped[0]), dtype=torch.long) texts_len = torch.tensor(input_zipped[1], dtype=torch.int) targets = torch.tensor(TreeBankDataset.pad_to_max(batch_zipped[2]), dtype=torch.long) return { 'id': ids, 'input': [texts, texts_len], 'target': targets } @staticmethod def convert_tokens_to_indices(token_list, t2i, unk_token_index=1): return [t2i[t] if t in t2i else unk_token_index for t in token_list] @staticmethod def pad_to_max(lst, pad_int=0): pad = len(max(lst, key=len)) return [i + [pad_int] * (pad - len(i)) if len(i) <= pad else i[:pad] for i in lst] @staticmethod def create_vocab(sentences): vocab = set() labels = set() for s in tqdm(sentences): s_tokens, s_labels = list(zip(*s)) vocab.update(s_tokens) labels.update(s_labels) i2w = ['!!PAD!!', '!!UNK!!'] + [x for x in vocab] w2i = {i2w[i]: i for i in range(len(i2w))} i2l = [x for x in labels] l2i = {i2l[i]: i for i in range(len(i2l))} return w2i, i2w, l2i, i2l ###Output _____no_output_____ ###Markdown Model DefinitionIn this example we will use a bidirectional GRU. 
###Code class Model(nn.Module): def __init__(self, vocab_size, output_size): super(Model, self).__init__() embeddings_size = 128 self.embedding_layer = modules.EmbeddingLayer( vocab_size, embeddings_size, trainable=True, padding_idx=0 ) self.text_rnn = nn.GRU( input_size=embeddings_size, hidden_size=128, num_layers=2, bidirectional=True, batch_first=True ) self.output_mlp = modules.MLP( input_size=256, num_hidden_layers=1, hidden_layer_size=128, hidden_activation=nn.ReLU, output_size=output_size, output_activation=None ) def forward(self, text, text_len): text = self.embedding_layer(text) text = pack_padded_sequence(text, text_len, batch_first=True, enforce_sorted=False) text_rnn_out = self.text_rnn(text)[0] text_rnn_out = pad_packed_sequence(text_rnn_out, batch_first=True)[0] out = self.output_mlp(text_rnn_out) return out ###Output _____no_output_____ ###Markdown TrainingNext we create the dataset object along with three data loaders (for training, validation, and testing). ###Code sentences = treebank.tagged_sents(tagset='universal') w2i, i2w, l2i, i2l = TreeBankDataset.create_vocab(sentences) dataset = TreeBankDataset(sentences, w2i, l2i) eval_size = math.floor(0.1 * len(dataset)) dataset_indicis = list(range(len(dataset))) train_split_indicis = dataset_indicis[2 * eval_size:] val_split_indicis = dataset_indicis[eval_size:2 * eval_size] test_split_indicis = dataset_indicis[:eval_size] train_dataloader = DataLoader( dataset, sampler=SubsetRandomSampler(train_split_indicis), batch_size=128, collate_fn=TreeBankDataset.collate_fn ) val_dataloader = DataLoader( dataset, sampler=SubsetSequentialSampler(val_split_indicis), batch_size=128, collate_fn=TreeBankDataset.collate_fn ) test_dataloader = DataLoader( dataset, sampler=SubsetSequentialSampler(test_split_indicis), batch_size=128, collate_fn=TreeBankDataset.collate_fn ) ###Output _____no_output_____ ###Markdown Then we create the model and we wrap it with a System object. ###Code model = Model(len(i2w), len(i2l)) last_activation = nn.Softmax(dim=-1) if torch.cuda.is_available(): system = System(model, last_activation=last_activation, device=torch.device('cuda')) else: system = System(model, last_activation=last_activation, device=torch.device('cpu')) ###Output _____no_output_____ ###Markdown Next we train the model on the training set, using the validation set for early stopping. PyTorchWrapper provides`pytorch_wrapper.loss_wrappers.SequenceLabelingGenericPointWiseLossWrapper` that wraps a native pointwise loss and `pytorch_wrapper.evaluators.SequenceLabelingEvaluatorWrapper` which wraps an evaluator. These two classes make sure that labelsthat correspond to padding tokens are ignored. For this reason they need the `batch_input_sequence_length_idx` argument that points to the position of the input list where the length of each example of the batch resides. 
###Code loss_wrapper = TokenLabelingGenericPointWiseLossWrapper( loss=nn.CrossEntropyLoss(), batch_input_sequence_length_idx=1 ) evals = { 'prec': evaluators.TokenLabelingEvaluatorWrapper( evaluators.MultiClassPrecisionEvaluator(average='macro'), batch_input_sequence_length_idx=1 ), 'rec': evaluators.TokenLabelingEvaluatorWrapper( evaluators.MultiClassRecallEvaluator(average='macro'), batch_input_sequence_length_idx=1 ), 'f1': evaluators.TokenLabelingEvaluatorWrapper( evaluators.MultiClassF1Evaluator(average='macro'), batch_input_sequence_length_idx=1 ) } optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, system.model.parameters())) os.makedirs('tmp', exist_ok=True) _ = system.train( loss_wrapper, optimizer, train_data_loader=train_dataloader, evaluators=evals, evaluation_data_loaders={ 'val': val_dataloader }, callbacks=[ EarlyStoppingCriterionCallback( patience=3, evaluation_data_loader_key='val', evaluator_key='f1', tmp_best_state_filepath='tmp/pos_tagging_cur_best.weights' ) ] ) ###Output _____no_output_____ ###Markdown Next we evaluate the model. ###Code results = system.evaluate(test_dataloader, evals) for r in results: print(results[r]) ###Output _____no_output_____ ###Markdown We can also use the `predict` method in order to predict for all the examples returned by a `Dataloder`. ###Code predictions = system.predict(test_dataloader, perform_last_activation=True) example_id = 50 input_loc = 1 text_loc = 0 tokens = [i2w[x] for x in dataset[test_split_indicis[example_id]][input_loc][text_loc]] predicted_labes = [i2l[np.argmax(scores)] for scores in predictions['outputs'][example_id][:len(tokens)]] print(list(zip(tokens, predicted_labes))) ###Output _____no_output_____ ###Markdown Finally we save the model's weights. ###Code # Then we save the model's state. system.save_model_state('data/pos_tagging_final.weights') ###Output _____no_output_____
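###Markdown To round things off, here is a small sketch of tagging a new, hand-made sentence with the trained model. It only uses objects already defined above (`model`, `w2i`, `i2l` and the dataset's token-to-index helper); the example sentence itself is arbitrary, and unknown words fall back to the `!!UNK!!` index. ###Code
sentence = ['The', 'cat', 'sat', 'on', 'the', 'mat', '.']
indices = TreeBankDataset.convert_tokens_to_indices(sentence, w2i)

# put the token ids on the same device as the model; the lengths tensor stays on the CPU
device = next(model.parameters()).device
tokens_tensor = torch.tensor([indices], dtype=torch.long, device=device)
lengths = torch.tensor([len(indices)], dtype=torch.int)

model.eval()
with torch.no_grad():
    logits = model(tokens_tensor, lengths)

tags = [i2l[i] for i in logits.argmax(dim=-1)[0].tolist()]
print(list(zip(sentence, tags)))
###Output _____no_output_____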
Introduzione_alla_reti_neurali_convoluzionali.ipynb
###Markdown ###Code %tensorflow_version 2.x import tensorflow as tf ###Output _____no_output_____ ###Markdown PremessaUtilizzeremo lo stesso training loop e struttura del codice definita nel notebook precedente (https://bit.ly/2lno7O5), ma metteremo a confronto le performance della rete di classificazione implementata mediante layer completamente connnessi e quella implementata con layer convoluzionali. Fully Connected cv CNN per la classificazione di immaginiRiprendiamo la struttura della pipeline di ML definita nel precedente notebook:- Ottenere ed Analizzare i dati- Definire la pipeline di input- Definire il modello- Definire le metriche- Definire il training loop- Allenare il modello e misurare le metriche durante ed alla fine di ogni epoca- Selezionare il modello migliore (basandosi sulla metrica di validation)- Misurare le performance sullo split di testAbbiamo già implementato tutti i punti qui descritti, ma basandoci sull'idea che il modello da definire ed utilizzare fosse fully connected.Per cui abbiamo:- definito la pipeline di input per produrre immagini "flat" (`32*32*3`)- definito il modello di classificazione usando solo layer FCPer poter utilizzare un modello basato su layer CNN, dobbiamo modificare:- la pipeline: per produrre immagini, quindi tensori `(32,32,3)`- il modello: deve essere in grado di accettare immagini grandi almeno `(32,32,3)`Riprendiamo **tutto** il codice definito nel precedente notebook, e lo utilizziamo come base di partenza per **mettere a confronto** le due soluzioni (FC vs CNN). La pipeline di inputUtilizziamo nuovamente il dataset Cifar10, quindi installiamo TensorFlow Datasets e riutilizziamo il codice già scritto. ###Code !pip install --upgrade tensorflow_datasets ###Output _____no_output_____ ###Markdown Ora, senza dilungarci oltre, otteniamo il dataset e definitiamo la funzione `transform` da mappare agli elementi del dataset, in modo tale da creare **l'input per il modello fc**. ###Code import tensorflow_datasets as tfds data, info = tfds.load("cifar10", with_info=True, split=tfds.Split.ALL) def transform(row): # trasformare i dati da uint a float row["image"] = tf.image.convert_image_dtype(row["image"], dtype=tf.float32) # 1-hot row["label"] = tf.one_hot(row["label"], depth=10, on_value=1, off_value=0) # [-1,1] range row["image"] = (row["image"] - 0.5) * 2. # flatten row["image"] = tf.reshape(row["image"], (-1,)) return row # Input for the fully connected model dataset_fc = data.map(transform) # split, batch, prefetch (FC) train_fc = dataset_fc.take(50000).batch(32).prefetch(1) validation_fc = dataset_fc.skip(50000).take(5000).batch(32).prefetch(1) test_fc = dataset_fc.skip(50000 + 5000).take(5000).batch(32).prefetch(1) ###Output _____no_output_____ ###Markdown Definiamo ora **l'input per il modello conovluzionale**.Anche in questo caso, vogliamo effettuare la stessa trasformazione sulle label (codifica one-hot) e lo stesso scaling nel range [-1,1] per i valori di input dell'immagine.Possiamo quindi riutilizzare l'oggetto `dataset_fc`, cambiando semplicemente la forma da `(32*32*3)` all'originaria `(32,32,3`). 
###Code def undo_flattening(row): row["image"] = tf.reshape(row["image"], (32,32,3)) return row # Input for the convolutional model dataset_cnn = dataset_fc.map(undo_flattening) # split, batch, prefetch (FC) train_cnn = dataset_cnn.take(50000).batch(32).prefetch(1) validation_cnn = dataset_cnn.skip(50000).take(5000).batch(32).prefetch(1) test_cnn = dataset_cnn.skip(50000 + 5000).take(5000).batch(32).prefetch(1) ###Output _____no_output_____ ###Markdown Definizione modelliRi-utilizziamo il modello completamente connesso precedentemente definito, e definiamo un modello convoluzionale in grado di accettare immagini `32x32x3` in input. Modello Completamente Connesso ###Code inputs = tf.keras.layers.Input(shape=(32*32*3)) net = tf.keras.layers.Dense(512, activation=tf.nn.relu)(inputs) net = tf.keras.layers.Dense(256, activation=tf.nn.relu)(net) net = tf.keras.layers.Dense(128, activation=tf.nn.relu)(net) out = tf.keras.layers.Dense(10)(net) model_fc = tf.keras.Model(inputs=inputs, outputs=out) model_fc.summary() ###Output _____no_output_____ ###Markdown Definizione Rete Neurale ConvoluzionaleLa struttura della rete, esattamente come per il caso FC, è arbitraria.Dato che il nostro obiettivo è quello di confrontare le performance (in termini di numero di parametri e metriche misurate) dei due modelli, cerchiamo di definire la CNN in modo "simile" alla rete FC. ###Code # Begin definition: feature extractor inputs = tf.keras.layers.Input(shape=(32,32,3)) net = tf.keras.layers.Conv2D(32, (5,5), strides=(2,2), padding='same', activation=tf.nn.relu)(inputs) # padding = same -> output side = input_side / stride = 16 # output shape = (16,16,32) net = tf.keras.layers.Conv2D(64, (5,5), strides=(2,2), padding='same', activation=tf.nn.relu)(net) # output shape = (8,8,64) net = tf.keras.layers.Conv2D(128, (5,5), strides=(2,2), padding='same', activation=tf.nn.relu)(net) # output size = (4, 4, 128) # Classification layer: flatten the (4,4,128) tensor in a (4*4*128) tensor net = tf.keras.layers.Flatten()(net) # End definition: feature extractor # Classification layer out = tf.keras.layers.Dense(10)(net) # building the whole model model_cnn = tf.keras.Model(inputs=inputs, outputs=out) model_cnn.summary() ###Output _____no_output_____ ###Markdown DifferenzeIl numero di parametri della rete FC è 1,738,890 mentre la CNN ha solo 279,114 parametri.La rete neurale convoluzionale ha un numero di parametri di \~6 volte inferiore, il ché significa **\~600%** di parametri in meno.Il numero di parametri della CNN aumenta all'aumentare della profondita, ma aumenta solo perché abbiamo arbitrariamente deciso di mettere più parametri apprendibili (numero di filtri) nei layer "deep" della rete.La rete completamente connessa, invece, ha un numero di parametri che diminuisce layer dopo layer (perché abbiamo definito l'archiettura in questo modo), ma **il solo layer di input** ha più parametri di tutta la CNN. Definizione e riuso di oggetti KerasLa CNN è in tutto e per tutto un classificatore, quindi possiamo riutilizzare la categorical cross-entropy loss.Possiamo quindi definire un oggetto callable (una Keras loss) e ritutilizzarla per il train di entrambi i modelli. 
Alla fine, la keras loss altro non fa che mettere in relazione l'output prodotto dalla rete e la predizione attesa; non avendo alcuno stato al suo interno possiamo utilizzarla senza alcun problema per il train di due modelli.Lo stesso ragionamento si può applicare anche all'ottimizzatore (solo finché utilizziamo SGD ed altri ottimizzatori senza variabili), deve solo applicare la regola di aggiornamento e non ha alcuno stato.È invece **sbagliato** riutilizzare la stessa `tf.GradientTape` dato che questa tiene traccia di quanto accade all'interno dello step di train ed il suo contenuto **viene distrutto** nel momento in cui viene invocato il metodo `.gradient`.Ed è **sbagliato** anche riutilizzare gli stessi oggetti `tf.keras.metric` in quanto anch'essi dotati di uno stato (e lo stato e relativo alle performance dello specifico modello). ###Code # Loss is a callable object loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True) # Metrics accuracy_cnn = tf.keras.metrics.Accuracy() mean_loss_cnn = tf.keras.metrics.Mean(name="loss") accuracy_fc = tf.keras.metrics.Accuracy() mean_loss_fc = tf.keras.metrics.Mean(name="loss") # Define the optimizer optimizer = tf.keras.optimizers.SGD(learning_rate=1e-2) def compute_loss(input_samples, model): predictions = model(input_samples["image"]) loss_value = loss(input_samples["label"], predictions) return loss_value @tf.function def train_step(input_samples, model): with tf.GradientTape() as tape: loss_value = compute_loss(input_samples, model) gradient = tape.gradient(loss_value, model.trainable_variables) optimizer.apply_gradients(zip(gradient, model.trainable_variables)) return loss_value def measure_metrics(input_samples, model, id): predicted_labels = tf.argmax(model(input_samples["image"]), axis=1) if id == "cnn": accuracy_cnn.update_state(tf.argmax(input_samples["label"], axis=1), predicted_labels) mean_loss_cnn.update_state(compute_loss(input_samples, model)) else: accuracy_fc.update_state(tf.argmax(input_samples["label"], axis=1), predicted_labels) mean_loss_fc.update_state(compute_loss(input_samples, model)) ###Output _____no_output_____ ###Markdown LoggingDato che vogliamo confrontare le performance del modello FC e del modello CNN, è necessario creare il corretto numero di `FileWriter' ed usarli correttamente.Dato che TensorBoard utilizza la struttura delle cartelle per creare curve differenti sullo stesso grafico, possiamo definire sei diversi writer nelle directory corrette: ###Code # FC writers train_writer_fc = tf.summary.create_file_writer("logs/train/fc") validation_writer_fc = tf.summary.create_file_writer("logs/validation/fc") test_writer_fc = tf.summary.create_file_writer("logs/test/fc") # CNN writers train_writer_cnn = tf.summary.create_file_writer("logs/train/cnn") validation_writer_cnn = tf.summary.create_file_writer("logs/validation/cnn") test_writer_cnn = tf.summary.create_file_writer("logs/test/cnn") ###Output _____no_output_____ ###Markdown Training loopSiamo ora pronti per definire il training loop.La funzione accetterà il modello, il dataset, e l'ID ("cnn" o "fc") corretto ed allenerà il modello per il numero desiderato di epoche.Dato che vogliamo fare due train distinti, ma plottarli sullo stesso grafico come se fossero stati eseguiti in parallelo, dobbiamo ricordarci di azzerare la variable `global_step` e `epoch_counter` al termine del primo training. 
###Code global_step = tf.Variable(0, dtype=tf.int64, trainable=False) epoch_counter = tf.Variable(0, dtype=tf.int64, trainable=False) def train_loop(num_epochs, model, dataset, id): if id == "cnn": mean_loss = mean_loss_cnn accuracy = accuracy_cnn train_writer = train_writer_cnn else: mean_loss = mean_loss_fc accuracy = accuracy_fc train_writer = train_writer_fc # Loop for epoch in tf.range(epoch_counter, num_epochs): for input_samples in dataset: loss_value = train_step(input_samples, model) measure_metrics(input_samples, model, id) global_step.assign_add(1) if tf.equal(tf.math.mod(global_step, 100), 0): mean_loss_value = mean_loss.result() accuracy_value = accuracy.result() mean_loss.reset_states() accuracy.reset_states() tf.print(f"[{global_step.numpy()}] loss value: ", mean_loss_value," - train acc: ", accuracy_value) with train_writer.as_default(): tf.summary.scalar("loss", mean_loss_value, step=global_step) tf.summary.scalar("accuracy", accuracy_value, step=global_step) tf.summary.image("images", tf.reshape(input_samples["image"], (-1, 32,32,3)), step=global_step, max_outputs=5) # end of epoch: measure performance on validation set and log the values on tensorboard tf.print(f"Epoch {epoch.numpy() + 1 } completed") epoch_counter.assign(epoch + 1) # TODO: insert validation code here ###Output _____no_output_____ ###Markdown Tensorboard e trainingLanciamo tensorboard e subito dopo invochiamo la funzione di train: prima sul modello fc, poi resettiamo le due variabili globali, e infine alleniamo il modello cnn. ###Code %load_ext tensorboard %tensorboard --logdir logs global_step.assign(0) epoch_counter.assign(0) train_loop(num_epochs=5, model=model_fc, dataset=train_fc, id="fc") print("#### END FC MODEL TRAINING ####") global_step.assign(0) epoch_counter.assign(0) train_loop(num_epochs=5, model=model_cnn, dataset=train_cnn, id="cnn") print("#### END CNN MODEL TRAINING ####") ###Output _____no_output_____
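###Markdown The `train_loop` above leaves a `# TODO` where the end-of-epoch validation should go. A minimal sketch of that missing block, written as a standalone helper that reuses the metrics, writers and validation datasets already defined, could look like this (one possible implementation, not the only one): ###Code
def validate(model, dataset, id):
    """Measure loss/accuracy on a validation split and log them to TensorBoard."""
    if id == "cnn":
        mean_loss, accuracy, writer = mean_loss_cnn, accuracy_cnn, validation_writer_cnn
    else:
        mean_loss, accuracy, writer = mean_loss_fc, accuracy_fc, validation_writer_fc

    for input_samples in dataset:
        measure_metrics(input_samples, model, id)

    mean_loss_value = mean_loss.result()
    accuracy_value = accuracy.result()
    mean_loss.reset_states()
    accuracy.reset_states()

    with writer.as_default():
        tf.summary.scalar("loss", mean_loss_value, step=global_step)
        tf.summary.scalar("accuracy", accuracy_value, step=global_step)
    tf.print("validation loss:", mean_loss_value, "- validation acc:", accuracy_value)

# inside train_loop, at the end of each epoch, one could then call e.g.:
# validate(model, validation_cnn if id == "cnn" else validation_fc, id)
###Output _____no_output_____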
notebooks/Pandas Basic.ipynb
###Markdown Pandas Exercises This time we are going to pull data directly from the internet.Special thanks to: https://github.com/justmarkham for sharing the dataset and materials. Step 1. Import the necessary libraries ###Code import pandas as pd ###Output _____no_output_____ ###Markdown Step 2. Import the dataset from this address https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user Step 3. Assign it to a variable called users and use the 'user_id' as index ###Code users = pd.read_table('https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user', sep='|', index_col='user_id') ###Output _____no_output_____ ###Markdown Step 4. See the first 25 entries ###Code users.head(25) ###Output _____no_output_____ ###Markdown Step 5. See the last 10 entries ###Code users.tail(10) ###Output _____no_output_____ ###Markdown Step 6. What is the number of observations in the dataset? ###Code users.shape[0] ###Output _____no_output_____ ###Markdown Step 7. What is the number of columns in the dataset? ###Code users.shape[1] ###Output _____no_output_____ ###Markdown Step 8. Print the name of all the columns. ###Code users.columns ###Output _____no_output_____ ###Markdown Step 9. How is the dataset indexed? ###Code # "the index" (aka "the labels") users.index ###Output _____no_output_____ ###Markdown Step 10. What is the data type of each column? ###Code users.dtypes ###Output _____no_output_____ ###Markdown Step 11. Print only the occupation column ###Code users.occupation #OR users['occupation'] ###Output _____no_output_____ ###Markdown Step 12. How many different occupations there are in this dataset? ###Code users.occupation.nunique() ###Output _____no_output_____ ###Markdown Step 13. What is the most frequent occupation? ###Code users.occupation.value_counts().head() ###Output _____no_output_____ ###Markdown Step 14. Summarize the DataFrame. ###Code users.describe() #Notice: By default, only the numeric columns are returned. ###Output _____no_output_____ ###Markdown Step 15. Summarize all the columns ###Code users.describe(include="all") #Notice: By default, only the numeric columns are returned. ###Output _____no_output_____ ###Markdown Step 16. Summarize only the occupation column ###Code users.occupation.describe() ###Output _____no_output_____ ###Markdown Step 17. What is the mean age of users? ###Code round(users.age.mean()) ###Output _____no_output_____ ###Markdown Step 18. What is the age with least occurrence? ###Code users.age.value_counts().tail() #7, 10, 11, 66 and 73 years -> only 1 occurrence ###Output _____no_output_____
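###Markdown A short addendum to step 18: reading the tail of `value_counts()` shows the rarest ages, but the answer can also be computed directly. This sketch assumes the `users` dataframe loaded above. ###Code
# ages tied for the smallest number of occurrences
age_counts = users.age.value_counts()
print(age_counts[age_counts == age_counts.min()])
###Output _____no_output_____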
training/Postal-parser-pipelines.ipynb
###Markdown Import ###Code import sparknlp spark=sparknlp.start() from pyspark.ml import Pipeline from sparknlp.annotator import * from sparknlp.base import * from pyspark.sql.functions import * from pyspark.sql.types import * import pyspark.sql.functions as F import re from pyspark.sql import Row spark=sparknlp.start() ###Output _____no_output_____ ###Markdown Create inital spark dataframe by reading a openaddress CSV sample ###Code df=spark.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').option("encoding", "utf-8").load('sample_of_toronto.csv') df.show() def make_address_dataframe(df): columns_to_drop = ['LON', 'LAT', 'HASH', 'ID', 'DISTRICT', 'REGION'] df = df.drop(*columns_to_drop) df = df.withColumnRenamed('NUMBER', 'house_number') df=df.withColumnRenamed('STREET', 'road') df=df.withColumnRenamed('UNIT', 'unit') df=df.withColumnRenamed('CITY', 'city') df=df.withColumnRenamed('POSTCODE', 'postcode') return df df=make_address_dataframe(df) df.show() ###Output +------------+----------------+----+---------+--------+ |house_number| road|unit| city|postcode| +------------+----------------+----+---------+--------+ | 22|Lloyd George Ave|null|Etobicoke| null| | 3|Lloyd George Ave|null|Etobicoke| null| | 7A|Lloyd George Ave|null|Etobicoke| null| | 58| Foch Ave|null|Etobicoke| null| | 54| Foch Ave|null|Etobicoke| null| | 60| Foch Ave|null|Etobicoke| null| | 62| Foch Ave|null|Etobicoke| null| | 64| Foch Ave|null|Etobicoke| null| | 46| Jellicoe Ave|null|Etobicoke| null| | 44| Jellicoe Ave|null|Etobicoke| null| | 11| Jellicoe Ave|null|Etobicoke| null| | 9| Jellicoe Ave|null|Etobicoke| null| | 7| Jellicoe Ave|null|Etobicoke| null| | 12| Jellicoe Ave|null|Etobicoke| null| | 17| Owen Dr|null|Etobicoke| null| | 15B| Owen Dr|null|Etobicoke| null| | 15| Owen Dr|null|Etobicoke| null| | 29| Forty First St|null|Etobicoke| null| | 2A| Forty Second St|null|Etobicoke| null| | 7| Forty Second St|null|Etobicoke| null| +------------+----------------+----+---------+--------+ only showing top 20 rows ###Markdown Create text, text_token, and label for the df ###Code def text_and_label_maker(df): def clean_NULL(a): a=a.split(" //// ") while "NULL" in a: a.remove("NULL") while '[NULL]' in a: a.remove("[NULL]") a=" //// ".join(a) return a def split(a): address=[] a=a.split(' //// ') for i in range(len(a)): if len(a[i].split(" "))>1: b=a[i].split(" ") for i in range(len(b)): address.append(b[i]) else: address.append(a[i]) return address def remove_annotation(a): a="".join(re.sub(re.compile(r'\s+'), '', a).split("////")) return a def unit_func(u): a=[] if int(u)>1: a.append("B-UNIT") for i in range(1,int(u)): a.append("I-UNIT") elif int(u)>0: a.append("B-UNIT") else: a.append('NULL') return a def house_func(h): a=[] if int(h)>1: a.append("B-House_number") for i in range(1,int(h)): a.append("I-House_number") elif int(h)>0: a.append("B-House_number") else: a.append('NULL') return a def road_func(r): a=[] if int(r)>1: a.append("B-Street") for i in range(1,int(r)): a.append("I-Street") elif int(r)>0: a.append("B-Street") else: a.append('NULL') return a def post_func(p): a=[] if int(p)>1: a.append("B-Postcode") for i in range(1,int(p)): a.append("I-Postcode") elif int(p)>0: a.append("B-Postcode") else: a.append('NULL') return a def city_func(c): a=[] if int(c)>1: a.append("B-City") for i in range(1,int(c)): a.append("I-City") elif int(c)>0: a.append("B-City") else: a.append('NULL') return a def label(a): a=str(a).replace(']','') a=str(a).replace('[','') a=str(a).replace(',',' ') 
a=str(a).replace("'","") a=a.split(' //// ') a=" ".join(a) return a #udf_functions concat_udf = F.udf(lambda cols: " //// ".join([x if x is not None else "NULL" for x in cols]), StringType()) NULL_udf = F.udf(lambda address: clean_NULL(address)) split_udf=F.udf(lambda address: split(address)) len_token_udf=F.udf(lambda x: len(str(x).split(' ')) if x is not None else 0) remove_annotation_udf=F.udf(lambda x:" ".join(x.split("////"))) tagging1_udf=F.udf(lambda u: unit_func(u)) tagging2_udf=F.udf(lambda h: house_func(h)) tagging3_udf=F.udf(lambda r: road_func(r)) tagging4_udf=F.udf(lambda p: post_func(p)) tagging5_udf=F.udf(lambda c: city_func(c)) label_udf=F.udf(lambda l: label(l)) df=df.withColumn("text_with_null", concat_udf(F.array("unit", "house_number", "road","postcode","city"))) df=df.withColumn("annotated_text", NULL_udf("text_with_null")).drop("text_with_null") df=df.withColumn("text_tokens", split_udf("annotated_text")) df=df.withColumn("text", remove_annotation_udf("annotated_text")) df=df.withColumn("unitl", len_token_udf("unit")) df=df.withColumn("house_numberl", len_token_udf("house_number")) df=df.withColumn("postcodel", len_token_udf("postcode")) df=df.withColumn("roadl", len_token_udf("road")) df=df.withColumn("cityl", len_token_udf("city")) df=df.withColumn("unit_taggedTokens", tagging1_udf('unitl')).drop('unitl') df=df.withColumn("house_number_taggedTokens", tagging2_udf('house_numberl')).drop('house_numberl') df=df.withColumn("road_taggedTokens", tagging3_udf('roadl')).drop('roadl') df=df.withColumn("postcode_taggedTokens", tagging4_udf('postcodel')).drop('postcodel') df=df.withColumn("city_taggedTokens", tagging5_udf('cityl')).drop('cityl') df=df.withColumn("concat_label_with_null", concat_udf(F.array("unit_taggedTokens", "house_number_taggedTokens", "road_taggedTokens", "postcode_taggedTokens","city_taggedTokens"))) df=df.withColumn("concat_label", NULL_udf("concat_label_with_null")).drop("concat_label_with_null") df=df.withColumn("label",label_udf("concat_label")).drop("concat_label") return df df=text_and_label_maker(df) print(df.select("text").limit(1).collect()) print(df.select("text_tokens").limit(1).collect()) print(df.select("label").limit(1).collect()) df.show(20) ###Output [Row(text='22 Lloyd George Ave Etobicoke')] [Row(text_tokens='[22, Lloyd, George, Ave, Etobicoke]')] [Row(label='B-House_number B-Street I-Street I-Street B-City')] +------------+----------------+----+---------+--------+--------------------+--------------------+-----------------+-------------------------+--------------------+---------------------+-----------------+--------------------+--------------------+--------------------+ |house_number| road|unit| city|postcode| text_tokens| text|unit_taggedTokens|house_number_taggedTokens| road_taggedTokens|postcode_taggedTokens|city_taggedTokens| label2| label| annotated_text| +------------+----------------+----+---------+--------+--------------------+--------------------+-----------------+-------------------------+--------------------+---------------------+-----------------+--------------------+--------------------+--------------------+ | 22|Lloyd George Ave|null|Etobicoke| null|[22, Lloyd, Georg...|22 Lloyd George...| [NULL]| [B-House_number]|[B-Street, I-Stre...| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|22 //// Lloyd Geo...| | 3|Lloyd George Ave|null|Etobicoke| null|[3, Lloyd, George...|3 Lloyd George ...| [NULL]| [B-House_number]|[B-Street, I-Stre...| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|3 //// Lloyd Geor...| 
| 7A|Lloyd George Ave|null|Etobicoke| null|[7A, Lloyd, Georg...|7A Lloyd George...| [NULL]| [B-House_number]|[B-Street, I-Stre...| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|7A //// Lloyd Geo...| | 58| Foch Ave|null|Etobicoke| null|[58, Foch, Ave, E...|58 Foch Ave E...| [NULL]| [B-House_number]|[B-Street, I-Street]| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|58 //// Foch Ave ...| | 54| Foch Ave|null|Etobicoke| null|[54, Foch, Ave, E...|54 Foch Ave E...| [NULL]| [B-House_number]|[B-Street, I-Street]| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|54 //// Foch Ave ...| | 60| Foch Ave|null|Etobicoke| null|[60, Foch, Ave, E...|60 Foch Ave E...| [NULL]| [B-House_number]|[B-Street, I-Street]| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|60 //// Foch Ave ...| | 62| Foch Ave|null|Etobicoke| null|[62, Foch, Ave, E...|62 Foch Ave E...| [NULL]| [B-House_number]|[B-Street, I-Street]| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|62 //// Foch Ave ...| | 64| Foch Ave|null|Etobicoke| null|[64, Foch, Ave, E...|64 Foch Ave E...| [NULL]| [B-House_number]|[B-Street, I-Street]| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|64 //// Foch Ave ...| | 46| Jellicoe Ave|null|Etobicoke| null|[46, Jellicoe, Av...|46 Jellicoe Ave...| [NULL]| [B-House_number]|[B-Street, I-Street]| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|46 //// Jellicoe ...| | 44| Jellicoe Ave|null|Etobicoke| null|[44, Jellicoe, Av...|44 Jellicoe Ave...| [NULL]| [B-House_number]|[B-Street, I-Street]| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|44 //// Jellicoe ...| | 11| Jellicoe Ave|null|Etobicoke| null|[11, Jellicoe, Av...|11 Jellicoe Ave...| [NULL]| [B-House_number]|[B-Street, I-Street]| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|11 //// Jellicoe ...| | 9| Jellicoe Ave|null|Etobicoke| null|[9, Jellicoe, Ave...|9 Jellicoe Ave ...| [NULL]| [B-House_number]|[B-Street, I-Street]| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|9 //// Jellicoe A...| | 7| Jellicoe Ave|null|Etobicoke| null|[7, Jellicoe, Ave...|7 Jellicoe Ave ...| [NULL]| [B-House_number]|[B-Street, I-Street]| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|7 //// Jellicoe A...| | 12| Jellicoe Ave|null|Etobicoke| null|[12, Jellicoe, Av...|12 Jellicoe Ave...| [NULL]| [B-House_number]|[B-Street, I-Street]| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|12 //// Jellicoe ...| | 17| Owen Dr|null|Etobicoke| null|[17, Owen, Dr, Et...|17 Owen Dr Et...| [NULL]| [B-House_number]|[B-Street, I-Street]| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|17 //// Owen Dr /...| | 15B| Owen Dr|null|Etobicoke| null|[15B, Owen, Dr, E...|15B Owen Dr E...| [NULL]| [B-House_number]|[B-Street, I-Street]| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|15B //// Owen Dr ...| | 15| Owen Dr|null|Etobicoke| null|[15, Owen, Dr, Et...|15 Owen Dr Et...| [NULL]| [B-House_number]|[B-Street, I-Street]| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|15 //// Owen Dr /...| | 29| Forty First St|null|Etobicoke| null|[29, Forty, First...|29 Forty First ...| [NULL]| [B-House_number]|[B-Street, I-Stre...| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|29 //// Forty Fir...| | 2A| Forty Second St|null|Etobicoke| null|[2A, Forty, Secon...|2A Forty Second...| [NULL]| [B-House_number]|[B-Street, I-Stre...| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|2A //// Forty Sec...| | 7| Forty Second St|null|Etobicoke| null|[7, 
Forty, Second...|7 Forty Second ...| [NULL]| [B-House_number]|[B-Street, I-Stre...| [NULL]| [B-City]|B-House_number B-...|B-House_number B-...|7 //// Forty Seco...| +------------+----------------+----+---------+--------+--------------------+--------------------+-----------------+-------------------------+--------------------+---------------------+-----------------+--------------------+--------------------+--------------------+ only showing top 20 rows ###Markdown provide annotation requiring for NerDL approach Label annotation ###Code def createAnnotation(token,label,text): lastBegin =0 i=0 data=[] text_tokens=token.replace(']','') text_tokens=text_tokens.replace('[','') text_tokens=text_tokens.split(',') tags=label.split(" ") while "" in tags: tags.remove("") for i in range(len(text_tokens)): a=Row( annotatorType="named_entity", begin=lastBegin, end=lastBegin + len(text_tokens[i]) - 1, result=tags[i], metadata={'word': text_tokens[i]}, embeddings=[0.00] ) lastBegin += len(text_tokens[i])+1 data.append(a) return {'text':text,'label':data} ###Output _____no_output_____ ###Markdown document, sentence, tokenizer, and pose annotation ###Code def get_formatting_model(): document = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentence = SentenceDetector()\ .setInputCols(['document'])\ .setOutputCol('sentence') tokenizer = Tokenizer() \ .setInputCols(["sentence"]) \ .setOutputCol("token") pos = PerceptronModel.pretrained() \ .setInputCols(["sentence", "token"]) \ .setOutputCol("pos") formatting_pipeline = Pipeline( stages = [ document, sentence, tokenizer, pos ] ) empty_data = spark.createDataFrame([['']]).toDF("text") formatting_model = formatting_pipeline.fit(empty_data) return formatting_model ###Output _____no_output_____ ###Markdown Get the final dataframe ready to pass to the training pipeline ###Code def format(df): df=df.select(['text_tokens', 'text','label']) data_rdd = df.rdd.map(lambda row: row.asDict()) data_rdd = data_rdd.map(lambda x: createAnnotation(x['text_tokens'],x['label'],x['text'])) Schema = StructType([StructField("text", StringType(), False), StructField('label',ArrayType( StructType([ StructField("annotatorType", StringType(), False), StructField("begin", IntegerType(), False), StructField("end", IntegerType(), False), StructField("result", StringType(), False), StructField("metadata", MapType(StringType(), StringType())), StructField("embeddings", ArrayType(FloatType()), False) ])))]) data = spark.createDataFrame(data_rdd, schema=Schema) formatting_model=get_formatting_model() training_data=formatting_model.transform(data) return training_data training_data=format(df) training_data.show() training_data.printSchema() ###Output root |-- text: string (nullable = false) |-- label: array (nullable = true) | |-- element: struct (containsNull = true) | | |-- annotatorType: string (nullable = false) | | |-- begin: integer (nullable = false) | | |-- end: integer (nullable = false) | | |-- result: string (nullable = false) | | |-- metadata: map (nullable = true) | | | |-- key: string | | | |-- value: string (valueContainsNull = true) | | |-- embeddings: array (nullable = false) | | | |-- element: float (containsNull = true) |-- document: array (nullable = true) | |-- element: struct (containsNull = true) | | |-- annotatorType: string (nullable = true) | | |-- begin: integer (nullable = false) | | |-- end: integer (nullable = false) | | |-- result: string (nullable = true) | | |-- metadata: map (nullable = true) | | | |-- key: string | | | |-- value: string 
(valueContainsNull = true) | | |-- embeddings: array (nullable = true) | | | |-- element: float (containsNull = false) |-- sentence: array (nullable = true) | |-- element: struct (containsNull = true) | | |-- annotatorType: string (nullable = true) | | |-- begin: integer (nullable = false) | | |-- end: integer (nullable = false) | | |-- result: string (nullable = true) | | |-- metadata: map (nullable = true) | | | |-- key: string | | | |-- value: string (valueContainsNull = true) | | |-- embeddings: array (nullable = true) | | | |-- element: float (containsNull = false) |-- token: array (nullable = true) | |-- element: struct (containsNull = true) | | |-- annotatorType: string (nullable = true) | | |-- begin: integer (nullable = false) | | |-- end: integer (nullable = false) | | |-- result: string (nullable = true) | | |-- metadata: map (nullable = true) | | | |-- key: string | | | |-- value: string (valueContainsNull = true) | | |-- embeddings: array (nullable = true) | | | |-- element: float (containsNull = false) |-- pos: array (nullable = true) | |-- element: struct (containsNull = true) | | |-- annotatorType: string (nullable = true) | | |-- begin: integer (nullable = false) | | |-- end: integer (nullable = false) | | |-- result: string (nullable = true) | | |-- metadata: map (nullable = true) | | | |-- key: string | | | |-- value: string (valueContainsNull = true) | | |-- embeddings: array (nullable = true) | | | |-- element: float (containsNull = false) ###Markdown Building Training Pipeline ###Code bert_annotator = BertEmbeddings.pretrained('bert_base_cased', 'en') \ .setInputCols(["sentence",'token'])\ .setOutputCol("bert")\ .setCaseSensitive(False)\ .setPoolingLayer(0) training_data = bert_annotator.transform(training_data) training_data.show() nerTagger = NerDLApproach()\ .setInputCols(["sentence", "token", "bert"])\ .setLabelColumn("label")\ .setOutputCol("ner")\ .setMaxEpochs(1)\ .setLr(0.001)\ .setPo(0.005)\ .setBatchSize(8)\ .setRandomSeed(0)\ .setVerbose(1)\ .setValidationSplit(0.2)\ .setEvaluationLogExtended(True) \ .setEnableOutputLogs(True)\ .setIncludeConfidence(True)\ NER_pipeline = Pipeline( stages = [ bert_annotator, nerTagger ]) Ner_model = NER_pipeline.fit(training_data) import pyspark.sql.functions as F predictions = Ner_model.transform(training_data) predictions.select(F.explode(F.arrays_zip('token.result','label.result','ner.result')).alias("cols")) \ .select(F.expr("cols['0']").alias("token"), F.expr("cols['1']").alias("ground_truth"), F.expr("cols['2']").alias("prediction")).show(truncate=False) ###Output +---------+--------------+--------------+ |token |ground_truth |prediction | +---------+--------------+--------------+ |22 |B-House_number|B-House_number| |Lloyd |B-Street |O | |George |I-Street |O | |Ave |I-Street |O | |Etobicoke|B-City |O | |3 |B-House_number|B-House_number| |Lloyd |B-Street |O | |George |I-Street |O | |Ave |I-Street |O | |Etobicoke|B-City |O | |7A |B-House_number|B-House_number| |Lloyd |B-Street |O | |George |I-Street |O | |Ave |I-Street |O | |Etobicoke|B-City |O | |58 |B-House_number|B-House_number| |Foch |B-Street |O | |Ave |I-Street |O | |Etobicoke|B-City |O | |54 |B-House_number|B-House_number| +---------+--------------+--------------+ only showing top 20 rows
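###Markdown Once the pipeline is fitted, it is usually worth persisting it so the NER model can be reused without retraining. A minimal sketch using the standard Spark ML save/load API is shown below; the output path is just an example location. ###Code
# save the fitted pipeline (BERT embeddings + NerDL model) to disk
Ner_model.write().overwrite().save("models/postal_ner_pipeline")

# ...and load it back later for inference on new, similarly prepared data
from pyspark.ml import PipelineModel

restored_model = PipelineModel.load("models/postal_ner_pipeline")
restored_predictions = restored_model.transform(training_data)
restored_predictions.select("ner.result").show(5, truncate=False)
###Output _____no_output_____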
Course1Part4Lesson4.ipynb
###Markdown Copyright 2019 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf class myCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs={}): if(logs.get('accuracy')>0.6): print("\nReached 60% accuracy so cancelling training!") self.model.stop_training = True mnist = tf.keras.datasets.fashion_mnist (x_train, y_train),(x_test, y_test) = mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 callbacks = myCallback() model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(512, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax) ]) model.compile(optimizer=tf.optimizers.Adam(), loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(x_train, y_train, epochs=10, callbacks=[callbacks]) ###Output _____no_output_____
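###Markdown The test split is loaded and normalised above but never used. A quick follow-up cell evaluating the trained model on it (reusing `model`, `x_test` and `y_test`) gives a sense of how well the 60%-accuracy early stop generalises: ###Code
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print('test accuracy:', round(float(test_acc), 4))
###Output _____no_output_____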
submodules/resource/d2l-zh/pytorch/chapter_recurrent-neural-networks/sequence.ipynb
###Markdown 序列模型:label:`sec_sequence`想象一下你正在看网飞(Netflix,一个国外的视频网站)上的电影。作为一名忠实的用户,你对每一部电影都给出评价,毕竟一部好电影需要更多的支持和认可。然而事实证明,事情并不那么简单。随着时间的推移,人们对电影的看法会发生很大的变化。事实上,心理学家甚至对这些现象起了名字:* *锚定*(anchoring)效应:基于其他人的意见做出评价。 例如,奥斯卡颁奖后,受到关注的电影的评分会上升,尽管它还是原来那部电影。 这种影响将持续几个月,直到人们忘记了这部电影曾经获得的奖项。 结果表明( :cite:`Wu.Ahmed.Beutel.ea.2017`),这种效应会使评分提高半个百分点以上。* *享乐适应*(hedonic adaption):人们迅速接受并且适应一种更好或者更坏的情况 作为新的常态。 例如,在看了很多好电影之后,人们会强烈期望下部电影会更好。 因此,在许多精彩的电影被看过之后,即使是一部普通的也可能被认为是糟糕的。* *季节性*(seasonality):少有观众喜欢在八月看圣诞老人的电影。* 有时,电影会由于导演或演员在制作中的不当行为变得不受欢迎。* 有些电影因为其极度糟糕只能成为小众电影。*Plan9from Outer Space*和*Troll2*就因为这个原因而臭名昭著的。简而言之,电影评分决不是固定不变的。因此,使用时间动力学可以得到更准确的电影推荐 :cite:`Koren.2009`。当然,序列数据不仅仅是关于电影评分的。下面给出了更多的场景:* 在使用应用程序时,许多用户都有很强的特定习惯。 例如,在学生放学后社交媒体应用更受欢迎。在市场开放时股市交易软件更常用。* 预测明天的股价要比过去的股价更困难,尽管两者都只是估计一个数字。 毕竟,先见之明比事后诸葛亮难得多。 在统计学中,前者(对超出已知观测范围进行预测)称为*外推法*(extrapolation), 而后者(在现有观测值之间进行估计)称为*内插法*(interpolation)。* 在本质上,音乐、语音、文本和视频都是连续的。 如果它们的序列被我们重排,那么就会失去原有的意义。 比如,一个文本标题“狗咬人”远没有“人咬狗”那么令人惊讶,尽管组成两句话的字完全相同。* 地震具有很强的相关性,即大地震发生后,很可能会有几次小余震, 这些余震的强度比非大地震后的余震要大得多。 事实上,地震是时空相关的,即余震通常发生在很短的时间跨度和很近的距离内。* 人类之间的互动也是连续的,这可以从微博上的争吵和辩论中看出。 统计工具处理序列数据需要统计工具和新的深度神经网络架构。为了简单起见,我们以 :numref:`fig_ftse100`所示的股票价格(富时100指数)为例。![近30年的富时100指数](../img/ftse100.png):width:`400px`:label:`fig_ftse100`其中,用$x_t$表示价格,即在*时间步*(time step)$t \in \mathbb{Z}^+$时,观察到的价格$x_t$。请注意,$t$对于本文中的序列通常是离散的,并在整数或其子集上变化。假设一个交易员想在$t$日的股市中表现良好,于是通过以下途径预测$x_t$:$$x_t \sim P(x_t \mid x_{t-1}, \ldots, x_1).$$ 自回归模型为了实现这个预测,交易员可以使用回归模型,例如在 :numref:`sec_linear_concise`中训练的模型。仅有一个主要问题:输入数据的数量,输入$x_{t-1}, \ldots, x_1$本身因$t$而异。也就是说,输入数据的数量这个数字将会随着我们遇到的数据量的增加而增加,因此需要一个近似方法来使这个计算变得容易处理。本章后面的大部分内容将围绕着如何有效估计$P(x_t \mid x_{t-1}, \ldots, x_1)$展开。简单地说,它归结为以下两种策略。第一种策略,假设在现实情况下相当长的序列$x_{t-1}, \ldots, x_1$可能是不必要的,因此我们只需要满足某个长度为$\tau$的时间跨度,即使用观测序列$x_{t-1}, \ldots, x_{t-\tau}$。当下获得的最直接的好处就是参数的数量总是不变的,至少在$t > \tau$时如此,这就使我们能够训练一个上面提及的深度网络。这种模型被称为*自回归模型*(autoregressive models),因为它们是对自己执行回归。第二种策略,如 :numref:`fig_sequence-model`所示,是保留一些对过去观测的总结$h_t$,并且同时更新预测$\hat{x}_t$和总结$h_t$。这就产生了基于$\hat{x}_t = P(x_t \mid h_{t})$估计$x_t$,以及公式$h_t = g(h_{t-1}, x_{t-1})$更新的模型。由于$h_t$从未被观测到,这类模型也被称为*隐变量自回归模型*(latent autoregressive models)。![隐变量自回归模型](../img/sequence-model.svg):label:`fig_sequence-model`这两种情况都有一个显而易见的问题:如何生成训练数据?一个经典方法是使用历史观测来预测下一个未来观测。显然,我们并不指望时间会停滞不前。然而,一个常见的假设是虽然特定值$x_t$可能会改变,但是序列本身的动力学不会改变。这样的假设是合理的,因为新的动力学一定受新的数据影响,而我们不可能用目前所掌握的数据来预测新的动力学。统计学家称不变的动力学为*静止的*(stationary)。因此,整个序列的估计值都将通过以下的方式获得:$$P(x_1, \ldots, x_T) = \prod_{t=1}^T P(x_t \mid x_{t-1}, \ldots, x_1).$$注意,如果我们处理的是离散的对象(如单词),而不是连续的数字,则上述的考虑仍然有效。唯一的差别是,对于离散的对象,我们需要使用分类器而不是回归模型来估计$P(x_t \mid x_{t-1}, \ldots, x_1)$。 马尔可夫模型回想一下,在自回归模型的近似法中,我们使用$x_{t-1}, \ldots, x_{t-\tau}$而不是$x_{t-1}, \ldots, x_1$来估计$x_t$。只要这种是近似精确的,我们就说序列满足*马尔可夫条件*(Markov condition)。特别是,如果$\tau = 1$,得到一个*一阶马尔可夫模型*(first-order Markov model),$P(x)$由下式给出:$$P(x_1, \ldots, x_T) = \prod_{t=1}^T P(x_t \mid x_{t-1}) \text{ 当 } P(x_1 \mid x_0) = P(x_1).$$当假设$x_t$仅是离散值时,这样的模型特别棒,因为在这种情况下,使用动态规划可以沿着马尔可夫链精确地计算结果。例如,我们可以高效地计算$P(x_{t+1} \mid x_{t-1})$:$$\begin{aligned}P(x_{t+1} \mid x_{t-1})&= \frac{\sum_{x_t} P(x_{t+1}, x_t, x_{t-1})}{P(x_{t-1})}\\&= \frac{\sum_{x_t} P(x_{t+1} \mid x_t, x_{t-1}) P(x_t, x_{t-1})}{P(x_{t-1})}\\&= \sum_{x_t} P(x_{t+1} \mid x_t) P(x_t \mid x_{t-1})\end{aligned}$$利用这一事实,我们只需要考虑过去观察中的一个非常短的历史:$P(x_{t+1} \mid x_t, x_{t-1}) = P(x_{t+1} \mid x_t)$。隐马尔可夫模型中的动态规划超出了本节的范围(我们将在 :numref:`sec_bi_rnn`再次遇到),而动态规划这些计算工具已经在控制算法和强化学习算法广泛使用。 因果关系原则上,将$P(x_1, \ldots, x_T)$倒序展开也没什么问题。毕竟,基于条件概率公式,我们总是可以写出:$$P(x_1, \ldots, x_T) = \prod_{t=T}^1 P(x_t \mid x_{t+1}, \ldots, 
x_T).$$事实上,如果基于一个马尔可夫模型,我们还可以得到一个反向的条件概率分布。然而,在许多情况下,数据存在一个自然的方向,即在时间上是前进的。很明显,未来的事件不能影响过去。因此,如果我们改变$x_t$,可能会影响未来发生的事情$x_{t+1}$,但不能反过来。也就是说,如果我们改变$x_t$,基于过去事件得到的分布不会改变。因此,解释$P(x_{t+1} \mid x_t)$应该比解释$P(x_t \mid x_{t+1})$更容易。例如,在某些情况下,对于某些可加性噪声$\epsilon$,显然我们可以找到$x_{t+1} = f(x_t) + \epsilon$,而反之则不行 :cite:`Hoyer.Janzing.Mooij.ea.2009`。这是个好消息,因为这个前进方向通常也是我们感兴趣的方向。彼得斯等人写的这本书 :cite:`Peters.Janzing.Scholkopf.2017`已经解释了关于这个主题的更多内容,而我们仅仅触及了它的皮毛。 训练在了解了上述统计工具后,让我们在实践中尝试一下!首先,我们生成一些数据:(**使用正弦函数和一些可加性噪声来生成序列数据,时间步为$1, 2, \ldots, 1000$。**) ###Code %matplotlib inline import torch from torch import nn from d2l import torch as d2l T = 1000 # 总共产生1000个点 time = torch.arange(1, T + 1, dtype=torch.float32) x = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,)) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) ###Output _____no_output_____ ###Markdown 接下来,我们将这个序列转换为模型的“特征-标签”(feature-label)对。基于嵌入维度$\tau$,我们[**将数据映射为数据对$y_t = x_t$和$\mathbf{x}_t = [x_{t-\tau}, \ldots, x_{t-1}]$。**]你可能已经注意到,这比我们提供的数据样本少了$\tau$个,因为我们没有足够的历史记录来描述前$\tau$个数据样本。一个简单的解决办法是:如果拥有足够长的序列就丢弃这几项;另一个方法是用零填充序列。在这里,我们仅使用前600个“特征-标签”对进行训练。 ###Code tau = 4 features = torch.zeros((T - tau, tau)) for i in range(tau): features[:, i] = x[i: T - tau + i] labels = x[tau:].reshape((-1, 1)) batch_size, n_train = 16, 600 # 只有前n_train个样本用于训练 train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True) ###Output _____no_output_____ ###Markdown 在这里,我们[**使用一个相当简单的架构训练模型:一个拥有两个全连接层的多层感知机**],ReLU激活函数和平方损失。 ###Code # 初始化网络权重的函数 def init_weights(m): if type(m) == nn.Linear: nn.init.xavier_uniform_(m.weight) # 一个简单的多层感知机 def get_net(): net = nn.Sequential(nn.Linear(4, 10), nn.ReLU(), nn.Linear(10, 1)) net.apply(init_weights) return net # 平方损失。注意:MSELoss计算平方误差时不带系数1/2 loss = nn.MSELoss(reduction='none') ###Output _____no_output_____ ###Markdown 现在,准备[**训练模型**]了。实现下面的训练代码的方式与前面几节(如 :numref:`sec_linear_concise`)中的循环训练基本相同。因此,我们不会深入探讨太多细节。 ###Code def train(net, train_iter, loss, epochs, lr): trainer = torch.optim.Adam(net.parameters(), lr) for epoch in range(epochs): for X, y in train_iter: trainer.zero_grad() l = loss(net(X), y) l.sum().backward() trainer.step() print(f'epoch {epoch + 1}, ' f'loss: {d2l.evaluate_loss(net, train_iter, loss):f}') net = get_net() train(net, train_iter, loss, 5, 0.01) ###Output epoch 1, loss: 0.075906 epoch 2, loss: 0.057385 epoch 3, loss: 0.055220 epoch 4, loss: 0.053874 epoch 5, loss: 0.054573 ###Markdown 预测由于训练损失很小,因此我们期望模型能有很好的工作效果。让我们看看这在实践中意味着什么。首先是检查[**模型预测下一个时间步**]的能力,也就是*单步预测*(one-step-ahead prediction)。 ###Code onestep_preds = net(features) d2l.plot([time, time[tau:]], [x.detach().numpy(), onestep_preds.detach().numpy()], 'time', 'x', legend=['data', '1-step preds'], xlim=[1, 1000], figsize=(6, 3)) ###Output _____no_output_____ ###Markdown 正如我们所料,单步预测效果不错。即使这些预测的时间步超过了$600+4$(`n_train + tau`),其结果看起来仍然是可信的。然而有一个小问题:如果数据观察序列的时间步只到$604$,我们需要一步一步地向前迈进:$$\hat{x}_{605} = f(x_{601}, x_{602}, x_{603}, x_{604}), \\\hat{x}_{606} = f(x_{602}, x_{603}, x_{604}, \hat{x}_{605}), \\\hat{x}_{607} = f(x_{603}, x_{604}, \hat{x}_{605}, \hat{x}_{606}),\\\hat{x}_{608} = f(x_{604}, \hat{x}_{605}, \hat{x}_{606}, \hat{x}_{607}),\\\hat{x}_{609} = f(\hat{x}_{605}, \hat{x}_{606}, \hat{x}_{607}, \hat{x}_{608}),\\\ldots$$通常,对于直到$x_t$的观测序列,其在时间步$t+k$处的预测输出$\hat{x}_{t+k}$称为$k$*步预测*($k$-step-ahead-prediction)。由于我们的观察已经到了$x_{604}$,它的$k$步预测是$\hat{x}_{604+k}$。换句话说,我们必须使用我们自己的预测(而不是原始数据)来[**进行多步预测**]。让我们看看效果如何。 ###Code multistep_preds = torch.zeros(T) multistep_preds[: n_train + tau] = x[: n_train + tau] for i 
in range(n_train + tau, T): multistep_preds[i] = net( multistep_preds[i - tau:i].reshape((1, -1))) d2l.plot([time, time[tau:], time[n_train + tau:]], [x.detach().numpy(), onestep_preds.detach().numpy(), multistep_preds[n_train + tau:].detach().numpy()], 'time', 'x', legend=['data', '1-step preds', 'multistep preds'], xlim=[1, 1000], figsize=(6, 3)) ###Output _____no_output_____ ###Markdown 如上面的例子所示,绿线的预测显然并不理想。经过几个预测步骤之后,预测的结果很快就会衰减到一个常数。为什么这个算法效果这么差呢?事实是由于错误的累积:假设在步骤$1$之后,我们积累了一些错误$\epsilon_1 = \bar\epsilon$。于是,步骤$2$的输入被扰动了$\epsilon_1$,结果积累的误差是依照次序的$\epsilon_2 = \bar\epsilon + c \epsilon_1$,其中$c$为某个常数,后面的预测误差依此类推。因此误差可能会相当快地偏离真实的观测结果。例如,未来$24$小时的天气预报往往相当准确,但超过这一点,精度就会迅速下降。我们将在本章及后续章节中讨论如何改进这一点。基于$k = 1, 4, 16, 64$,通过对整个序列预测的计算,让我们[**更仔细地看一下$k$步预测**]的困难。 ###Code max_steps = 64 features = torch.zeros((T - tau - max_steps + 1, tau + max_steps)) # 列i(i<tau)是来自x的观测,其时间步从(i+1)到(i+T-tau-max_steps+1) for i in range(tau): features[:, i] = x[i: i + T - tau - max_steps + 1] # 列i(i>=tau)是来自(i-tau+1)步的预测,其时间步从(i+1)到(i+T-tau-max_steps+1) for i in range(tau, tau + max_steps): features[:, i] = net(features[:, i - tau:i]).reshape(-1) steps = (1, 4, 16, 64) d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps], [features[:, (tau + i - 1)].detach().numpy() for i in steps], 'time', 'x', legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000], figsize=(6, 3)) ###Output _____no_output_____
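###Markdown (Added note, not part of the original chapter.) The plot above shows visually that the quality of the $k$-step predictions degrades as $k$ grows. One way to make that observation quantitative is to compute the mean squared error of the $k$-step predictions against the true series for each horizon $k$. The sketch below assumes the `x`, `features`, `tau`, `T` and `max_steps` variables from the cells above are still in scope; it is a rough diagnostic only, not part of the original exercise.

###Code
# Rough sketch: mean squared error of the k-step predictions as a function of k.
# Column (tau + k - 1) of `features` holds the k-step predictions, and the matching
# ground truth is x[tau + k - 1 : T - max_steps + k] (same indexing as the plot above).
mse_per_step = [float(((features[:, tau + k - 1]
                        - x[tau + k - 1: T - max_steps + k]) ** 2).mean())
                for k in range(1, max_steps + 1)]
d2l.plot(torch.arange(1, max_steps + 1), [mse_per_step], 'k (steps ahead)',
         'mean squared error', xlim=[1, max_steps], figsize=(6, 3))
###Output _____no_output_____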
notebooks/Evaluations/Continuous_Timeseries/King_County/Hourly_Timeseries/PointWilliams/2016_PointWilliams_Timeseries.ipynb
###Markdown This notebook will hopefully contain timeseries that plot continuous data from moorings alongside model output. ###Code import sys sys.path.append('/ocean/kflanaga/MEOPAR/analysis-keegan/notebooks/Tools') import numpy as np import matplotlib.pyplot as plt import os import pandas as pd import netCDF4 as nc import xarray as xr import datetime as dt from salishsea_tools import evaltools as et, viz_tools import gsw import matplotlib.gridspec as gridspec import matplotlib as mpl import matplotlib.dates as mdates import cmocean as cmo import scipy.interpolate as sinterp import pickle import cmocean import json import f90nml import Keegan_eval_tools as ket from collections import OrderedDict from matplotlib.colors import LogNorm fs=16 mpl.rc('xtick', labelsize=fs) mpl.rc('ytick', labelsize=fs) mpl.rc('legend', fontsize=fs) mpl.rc('axes', titlesize=fs) mpl.rc('axes', labelsize=fs) mpl.rc('figure', titlesize=fs) mpl.rc('font', size=fs) mpl.rc('font', family='sans-serif', weight='normal', style='normal') import warnings #warnings.filterwarnings('ignore') from IPython.display import Markdown, display %matplotlib inline saveloc='/ocean/kflanaga/MEOPAR/savedData/King_CountyData/hourly_pickle_files' year=2019 Mooring='PointWilliams' # Parameters saveloc = "/ocean/kflanaga/MEOPAR/savedData/King_CountyData/hourly_pickle_files" year = 2016 Mooring = "PointWilliams" ##### Loading in pickle file data with open(os.path.join(saveloc,f'hourly_data_{Mooring}_{year}.pkl'),'rb') as hh: data=pickle.load(hh) grid=xr.open_mfdataset(f'/ocean/kflanaga/MEOPAR/savedData/201905_grid_data/ts_HC201905_{year}_{Mooring}.nc') %%time tt=grid.time_centered vot=grid.votemper.isel(deptht=0,y=0,x=0) vos=grid.vosaline.isel(deptht=0,y=0,x=0) obsvar='CT' fig,ax=plt.subplots(1,1,figsize=(14,7)) ps=[] p0,=ax.plot(data['dtUTC'],data[obsvar],'.',color='blue',label=f'Observed ') ps.append(p0) p0,=ax.plot(tt,vot,'-',color='red',label='Modeled') ps.append(p0) ax.legend(handles=ps) ax.set_ylabel(f'{obsvar}') ax.set_xlabel('Date') ax.set_title('Temperature timeseries') plt.setp(ax.get_xticklabels(), rotation=30, horizontalalignment='right') M = 15 xticks = mpl.ticker.MaxNLocator(M) ax.xaxis.set_major_locator(xticks) yearsFmt = mdates.DateFormatter('%d %b %y') ax.xaxis.set_major_formatter(yearsFmt) obsvar='SA' fig,ax=plt.subplots(1,1,figsize=(14,7)) ps=[] p0,=ax.plot(data['dtUTC'],data[obsvar],'.',color='blue',label=f'Observed') ps.append(p0) p0,=ax.plot(tt,vos,'-',color='red',label='Modeled') ps.append(p0) ax.legend(handles=ps) ax.set_ylabel(f'{obsvar}') ax.set_xlabel('Date') ax.set_title('Salinity timeseries') plt.setp(ax.get_xticklabels(), rotation=30, horizontalalignment='right') M = 15 xticks = mpl.ticker.MaxNLocator(M) ax.xaxis.set_major_locator(xticks) yearsFmt = mdates.DateFormatter('%d %b %y') ax.xaxis.set_major_formatter(yearsFmt) grid.close() bio=xr.open_mfdataset(f'/ocean/kflanaga/MEOPAR/savedData/201905_ptrc_data/ts_HC201905_{year}_{Mooring}.nc') ik=0 ij=0 ii=0 %%time tt=bio.time_counter mod_nitrate=(bio.nitrate.isel(deptht=ik,y=ij,x=ii)) diatom=bio.diatoms.isel(deptht=ik,y=ij,x=ii) flagellate=bio.flagellates.isel(deptht=ik,y=ij,x=ii) ciliate=bio.ciliates.isel(deptht=ik,y=ij,x=ii) mod_Chl=(diatom+flagellate+ciliate)*1.8 data.columns obsvar='Chl' modvar=mod_Chl fig,ax=plt.subplots(1,1,figsize=(14,7)) ps=[] p0,=ax.plot(data['dtUTC'],data[obsvar],'.',color='blue',label=f'Observed ') ps.append(p0) p0,=ax.plot(tt,modvar,'-',color='red',label='Modeled') ps.append(p0) ax.legend(handles=ps) ax.set_ylabel(f'{obsvar}') 
ax.set_xlabel('Date') ax.set_title('Chlorophyll Timeseries') plt.setp(ax.get_xticklabels(), rotation=30, horizontalalignment='right') M = 15 xticks = mpl.ticker.MaxNLocator(M) ax.xaxis.set_major_locator(xticks) yearsFmt = mdates.DateFormatter('%d %b %y') ax.xaxis.set_major_formatter(yearsFmt) obsvar='NO23' modvar=mod_nitrate fig,ax=plt.subplots(1,1,figsize=(14,7)) ps=[] p0,=ax.plot(data['dtUTC'],data[obsvar],'.',color='blue',label=f'Observed ') ps.append(p0) p0,=ax.plot(tt,modvar,'-',color='red',label='Modeled') ps.append(p0) ax.legend(handles=ps) ax.set_ylabel(f'{obsvar}') ax.set_ylim((0,40)) ax.set_xlabel('Date') ax.set_title('Nitrate Timeseries') plt.setp(ax.get_xticklabels(), rotation=30, horizontalalignment='right') M = 15 xticks = mpl.ticker.MaxNLocator(M) ax.xaxis.set_major_locator(xticks) yearsFmt = mdates.DateFormatter('%d %b %y') ax.xaxis.set_major_formatter(yearsFmt) ###Output _____no_output_____
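###Markdown (Added note, not part of the original notebook.) The panels above compare the observed and modeled series by eye only. A rough way to summarize the agreement is to interpolate the hourly model output onto the observation times and report a bias and RMSE. The sketch below is illustrative only: `quick_stats` is a hypothetical helper, and it assumes the `data`, `tt`, `mod_Chl`, `mod_nitrate` and `year` variables defined above are still in scope (it is not one of the group's standard evaluation tools).

###Code
# Rough sketch of simple skill metrics (bias and RMSE) for one observed/modeled pair.
def quick_stats(obs_times, obs_vals, mod_times, mod_vals):
    """Interpolate hourly model output onto observation times and return N, bias, RMSE."""
    ref = pd.Timestamp(f'{year}-01-01')
    mod_sec = (pd.to_datetime(mod_times.values) - ref).total_seconds()
    obs_sec = (pd.to_datetime(obs_times.values) - ref).total_seconds()
    mod_on_obs = np.interp(obs_sec, mod_sec, np.asarray(mod_vals, dtype=float))
    obs = np.asarray(obs_vals, dtype=float)
    keep = ~np.isnan(obs)
    diff = mod_on_obs[keep] - obs[keep]
    return {'N': int(keep.sum()),
            'bias': float(diff.mean()),
            'RMSE': float(np.sqrt((diff ** 2).mean()))}

quick_stats(data['dtUTC'], data['Chl'], tt, mod_Chl)
###Output _____no_output_____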
Day14_and_15_RNN_and_LSTM/Building_a_Recurrent_Neural_Network_Step_by_Step_v3b.ipynb
###Markdown Building your Recurrent Neural Network - Step by StepWelcome to Course 5's first assignment! In this assignment, you will implement key components of a Recurrent Neural Network in numpy.Recurrent Neural Networks (RNN) are very effective for Natural Language Processing and other sequence tasks because they have "memory". They can read inputs $x^{\langle t \rangle}$ (such as words) one at a time, and remember some information/context through the hidden layer activations that get passed from one time-step to the next. This allows a unidirectional RNN to take information from the past to process later inputs. A bidirectional RNN can take context from both the past and the future. **Notation**:- Superscript $[l]$ denotes an object associated with the $l^{th}$ layer. - Superscript $(i)$ denotes an object associated with the $i^{th}$ example. - Superscript $\langle t \rangle$ denotes an object at the $t^{th}$ time-step. - **Sub**script $i$ denotes the $i^{th}$ entry of a vector.Example: - $a^{(2)[3]}_5$ denotes the activation of the 2nd training example (2), 3rd layer [3], 4th time step , and 5th entry in the vector. Pre-requisites* We assume that you are already familiar with `numpy`. * To refresh your knowledge of numpy, you can review course 1 of this specialization "Neural Networks and Deep Learning". * Specifically, review the week 2 assignment ["Python Basics with numpy (optional)"](https://www.coursera.org/learn/neural-networks-deep-learning/item/Zh0CU). Be careful when modifying the starter code* When working on graded functions, please remember to only modify the code that is between the```Python START CODE HERE```and```Python END CODE HERE```* In particular, Be careful to not modify the first line of graded routines. These start with:```Python GRADED FUNCTION: routine_name```* The automatic grader (autograder) needs these to locate the function.* Even a change in spacing will cause issues with the autograder. * It will return 'failed' if these are modified or missing." Updates for 3b If you were working on the notebook before this update...* The current notebook is version "3b".* You can find your original work saved in the notebook with the previous version name ("v3a") * To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory. List of updates* `rnn_cell_backward` - fixed error in equations - harmonize rnn backward diagram with rnn_forward diagram and fixed Wax multiple (changed from at to xt). - clarified dba batch as summing 'm' examples - aligned equations* `lstm_cell_backward` - aligned equations* `lstm_forward` - fixed typo, Wb to bf* `lstm_cell_forward` - changed c_next_tmp.shape to a_next_tmp.shape in test case - clarified dbxx batch as summing 'm' examples Let's first import all the packages that you will need during this assignment. ###Code import numpy as np from rnn_utils import * ###Output _____no_output_____ ###Markdown 1 - Forward propagation for the basic Recurrent Neural NetworkLater this week, you will generate music using an RNN. The basic RNN that you will implement has the structure below. In this example, $T_x = T_y$. **Figure 1**: Basic RNN model Dimensions of input $x$ Input with $n_x$ number of units* For a single timestep of a single input example, $x^{(i) \langle t \rangle }$ is a one-dimensional input vector.* Using language as an example, a language with a 5000 word vocabulary could be one-hot encoded into a vector that has 5000 units. 
So $x^{(i)\langle t \rangle}$ would have the shape (5000,). * We'll use the notation $n_x$ to denote the number of units in a single timestep of a single training example. Time steps of size $T_{x}$* A recurrent neural network has multiple time steps, which we'll index with $t$.* In the lessons, we saw a single training example $x^{(i)}$ consist of multiple time steps $T_x$. For example, if there are 10 time steps, $T_{x} = 10$ Batches of size $m$* Let's say we have mini-batches, each with 20 training examples. * To benefit from vectorization, we'll stack 20 columns of $x^{(i)}$ examples.* For example, this tensor has the shape (5000,20,10). * We'll use $m$ to denote the number of training examples. * So the shape of a mini-batch is $(n_x,m,T_x)$ 3D Tensor of shape $(n_{x},m,T_{x})$* The 3-dimensional tensor $x$ of shape $(n_x,m,T_x)$ represents the input $x$ that is fed into the RNN. Taking a 2D slice for each time step: $x^{\langle t \rangle}$* At each time step, we'll use a mini-batches of training examples (not just a single example).* So, for each time step $t$, we'll use a 2D slice of shape $(n_x,m)$.* We're referring to this 2D slice as $x^{\langle t \rangle}$. The variable name in the code is `xt`. Definition of hidden state $a$* The activation $a^{\langle t \rangle}$ that is passed to the RNN from one time step to another is called a "hidden state." Dimensions of hidden state $a$* Similar to the input tensor $x$, the hidden state for a single training example is a vector of length $n_{a}$.* If we include a mini-batch of $m$ training examples, the shape of a mini-batch is $(n_{a},m)$.* When we include the time step dimension, the shape of the hidden state is $(n_{a}, m, T_x)$* We will loop through the time steps with index $t$, and work with a 2D slice of the 3D tensor. * We'll refer to this 2D slice as $a^{\langle t \rangle}$. * In the code, the variable names we use are either `a_prev` or `a_next`, depending on the function that's being implemented.* The shape of this 2D slice is $(n_{a}, m)$ Dimensions of prediction $\hat{y}$* Similar to the inputs and hidden states, $\hat{y}$ is a 3D tensor of shape $(n_{y}, m, T_{y})$. * $n_{y}$: number of units in the vector representing the prediction. * $m$: number of examples in a mini-batch. * $T_{y}$: number of time steps in the prediction.* For a single time step $t$, a 2D slice $\hat{y}^{\langle t \rangle}$ has shape $(n_{y}, m)$.* In the code, the variable names are: - `y_pred`: $\hat{y}$ - `yt_pred`: $\hat{y}^{\langle t \rangle}$ Here's how you can implement an RNN: **Steps**:1. Implement the calculations needed for one time-step of the RNN.2. Implement a loop over $T_x$ time-steps in order to process all the inputs, one at a time. 1.1 - RNN cellA recurrent neural network can be seen as the repeated use of a single cell. You are first going to implement the computations for a single time-step. The following figure describes the operations for a single time-step of an RNN cell. **Figure 2**: Basic RNN cell. Takes as input $x^{\langle t \rangle}$ (current input) and $a^{\langle t - 1\rangle}$ (previous hidden state containing information from the past), and outputs $a^{\langle t \rangle}$ which is given to the next RNN cell and also used to predict $\hat{y}^{\langle t \rangle}$ rnn cell versus rnn_cell_forward* Note that an RNN cell outputs the hidden state $a^{\langle t \rangle}$. * The rnn cell is shown in the figure as the inner box which has solid lines. 
* The function that we will implement, `rnn_cell_forward`, also calculates the prediction $\hat{y}^{\langle t \rangle}$ * The rnn_cell_forward is shown in the figure as the outer box that has dashed lines. **Exercise**: Implement the RNN-cell described in Figure (2).**Instructions**:1. Compute the hidden state with tanh activation: $a^{\langle t \rangle} = \tanh(W_{aa} a^{\langle t-1 \rangle} + W_{ax} x^{\langle t \rangle} + b_a)$.2. Using your new hidden state $a^{\langle t \rangle}$, compute the prediction $\hat{y}^{\langle t \rangle} = softmax(W_{ya} a^{\langle t \rangle} + b_y)$. We provided the function `softmax`.3. Store $(a^{\langle t \rangle}, a^{\langle t-1 \rangle}, x^{\langle t \rangle}, parameters)$ in a `cache`.4. Return $a^{\langle t \rangle}$ , $\hat{y}^{\langle t \rangle}$ and `cache` Additional Hints* [numpy.tanh](https://www.google.com/search?q=numpy+tanh&rlz=1C5CHFA_enUS854US855&oq=numpy+tanh&aqs=chrome..69i57j0l5.1340j0j7&sourceid=chrome&ie=UTF-8)* We've created a `softmax` function that you can use. It is located in the file 'rnn_utils.py' and has been imported.* For matrix multiplication, use [numpy.dot](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) ###Code # GRADED FUNCTION: rnn_cell_forward def rnn_cell_forward(xt, a_prev, parameters): """ Implements a single forward step of the RNN-cell as described in Figure (2) Arguments: xt -- your input data at timestep "t", numpy array of shape (n_x, m). a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m) parameters -- python dictionary containing: Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x) Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a) Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a) ba -- Bias, numpy array of shape (n_a, 1) by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1) Returns: a_next -- next hidden state, of shape (n_a, m) yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m) cache -- tuple of values needed for the backward pass, contains (a_next, a_prev, xt, parameters) """ # Retrieve parameters from "parameters" Wax = parameters["Wax"] Waa = parameters["Waa"] Wya = parameters["Wya"] ba = parameters["ba"] by = parameters["by"] ### START CODE HERE ### (≈2 lines) # compute next activation state using the formula given above a_next = np.tanh(np.dot(Waa, a_prev) + np.dot(Wax, xt) + ba) # compute output of the current cell using the formula given above yt_pred = softmax(np.dot(Wya, a_next) + by) ### END CODE HERE ### # store values you need for backward propagation in cache cache = (a_next, a_prev, xt, parameters) return a_next, yt_pred, cache np.random.seed(1) xt_tmp = np.random.randn(3,10) a_prev_tmp = np.random.randn(5,10) parameters_tmp = {} parameters_tmp['Waa'] = np.random.randn(5,5) parameters_tmp['Wax'] = np.random.randn(5,3) parameters_tmp['Wya'] = np.random.randn(2,5) parameters_tmp['ba'] = np.random.randn(5,1) parameters_tmp['by'] = np.random.randn(2,1) a_next_tmp, yt_pred_tmp, cache_tmp = rnn_cell_forward(xt_tmp, a_prev_tmp, parameters_tmp) print("a_next[4] = \n", a_next_tmp[4]) print("a_next.shape = \n", a_next_tmp.shape) print("yt_pred[1] =\n", yt_pred_tmp[1]) print("yt_pred.shape = \n", yt_pred_tmp.shape) ###Output a_next[4] = [ 0.59584544 0.18141802 0.61311866 0.99808218 0.85016201 0.99980978 -0.18887155 0.99815551 0.6531151 0.82872037] a_next.shape = (5, 10) yt_pred[1] = [ 0.9888161 0.01682021 0.21140899 
0.36817467 0.98988387 0.88945212 0.36920224 0.9966312 0.9982559 0.17746526] yt_pred.shape = (2, 10) ###Markdown **Expected Output**: ```Pythona_next[4] = [ 0.59584544 0.18141802 0.61311866 0.99808218 0.85016201 0.99980978 -0.18887155 0.99815551 0.6531151 0.82872037]a_next.shape = (5, 10)yt_pred[1] = [ 0.9888161 0.01682021 0.21140899 0.36817467 0.98988387 0.88945212 0.36920224 0.9966312 0.9982559 0.17746526]yt_pred.shape = (2, 10)``` 1.2 - RNN forward pass - A recurrent neural network (RNN) is a repetition of the RNN cell that you've just built. - If your input sequence of data is 10 time steps long, then you will re-use the RNN cell 10 times. - Each cell takes two inputs at each time step: - $a^{\langle t-1 \rangle}$: The hidden state from the previous cell. - $x^{\langle t \rangle}$: The current time-step's input data.- It has two outputs at each time step: - A hidden state ($a^{\langle t \rangle}$) - A prediction ($y^{\langle t \rangle}$)- The weights and biases $(W_{aa}, b_{a}, W_{ax}, b_{x})$ are re-used each time step. - They are maintained between calls to rnn_cell_forward in the 'parameters' dictionary. **Figure 3**: Basic RNN. The input sequence $x = (x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, ..., x^{\langle T_x \rangle})$ is carried over $T_x$ time steps. The network outputs $y = (y^{\langle 1 \rangle}, y^{\langle 2 \rangle}, ..., y^{\langle T_x \rangle})$. **Exercise**: Code the forward propagation of the RNN described in Figure (3).**Instructions**:* Create a 3D array of zeros, $a$ of shape $(n_{a}, m, T_{x})$ that will store all the hidden states computed by the RNN.* Create a 3D array of zeros, $\hat{y}$, of shape $(n_{y}, m, T_{x})$ that will store the predictions. - Note that in this case, $T_{y} = T_{x}$ (the prediction and input have the same number of time steps).* Initialize the 2D hidden state `a_next` by setting it equal to the initial hidden state, $a_{0}$.* At each time step $t$: - Get $x^{\langle t \rangle}$, which is a 2D slice of $x$ for a single time step $t$. - $x^{\langle t \rangle}$ has shape $(n_{x}, m)$ - $x$ has shape $(n_{x}, m, T_{x})$ - Update the 2D hidden state $a^{\langle t \rangle}$ (variable name `a_next`), the prediction $\hat{y}^{\langle t \rangle}$ and the cache by running `rnn_cell_forward`. - $a^{\langle t \rangle}$ has shape $(n_{a}, m)$ - Store the 2D hidden state in the 3D tensor $a$, at the $t^{th}$ position. - $a$ has shape $(n_{a}, m, T_{x})$ - Store the 2D $\hat{y}^{\langle t \rangle}$ prediction (variable name `yt_pred`) in the 3D tensor $\hat{y}_{pred}$ at the $t^{th}$ position. - $\hat{y}^{\langle t \rangle}$ has shape $(n_{y}, m)$ - $\hat{y}$ has shape $(n_{y}, m, T_x)$ - Append the cache to the list of caches.* Return the 3D tensor $a$ and $\hat{y}$, as well as the list of caches. Additional Hints- [np.zeros](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html)- If you have a 3 dimensional numpy array and are indexing by its third dimension, you can use array slicing like this: `var_name[:,:,i]`. ###Code # GRADED FUNCTION: rnn_forward def rnn_forward(x, a0, parameters): """ Implement the forward propagation of the recurrent neural network described in Figure (3). Arguments: x -- Input data for every time-step, of shape (n_x, m, T_x). 
a0 -- Initial hidden state, of shape (n_a, m) parameters -- python dictionary containing: Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a) Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x) Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a) ba -- Bias numpy array of shape (n_a, 1) by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1) Returns: a -- Hidden states for every time-step, numpy array of shape (n_a, m, T_x) y_pred -- Predictions for every time-step, numpy array of shape (n_y, m, T_x) caches -- tuple of values needed for the backward pass, contains (list of caches, x) """ # Initialize "caches" which will contain the list of all caches caches = [] # Retrieve dimensions from shapes of x and parameters["Wya"] n_x, m, T_x = x.shape n_y, n_a = parameters["Wya"].shape ### START CODE HERE ### # initialize "a" and "y_pred" with zeros (≈2 lines) a = np.zeros((n_a, m, T_x)) y_pred = np.zeros((n_y, m, T_x)) # Initialize a_next (≈1 line) a_next = a0 # loop over all T_x time-steps of the input 'x' (1 line) for t in range(T_x): # Update next hidden state, compute the prediction, get the cache (≈2 lines) xt = x[:, :, t] a_next, yt_pred, cache = rnn_cell_forward(xt, a_next, parameters) # Save the value of the new "next" hidden state in a (≈1 line) a[:,:,t] = a_next # Save the value of the prediction in y (≈1 line) y_pred[:,:,t] = yt_pred # Append "cache" to "caches" (≈1 line) caches.append(cache) ### END CODE HERE ### # store values needed for backward propagation in cache caches = (caches, x) return a, y_pred, caches np.random.seed(1) x_tmp = np.random.randn(3,10,4) a0_tmp = np.random.randn(5,10) parameters_tmp = {} parameters_tmp['Waa'] = np.random.randn(5,5) parameters_tmp['Wax'] = np.random.randn(5,3) parameters_tmp['Wya'] = np.random.randn(2,5) parameters_tmp['ba'] = np.random.randn(5,1) parameters_tmp['by'] = np.random.randn(2,1) a_tmp, y_pred_tmp, caches_tmp = rnn_forward(x_tmp, a0_tmp, parameters_tmp) print("a[4][1] = \n", a_tmp[4][1]) print("a.shape = \n", a_tmp.shape) print("y_pred[1][3] =\n", y_pred_tmp[1][3]) print("y_pred.shape = \n", y_pred_tmp.shape) print("caches[1][1][3] =\n", caches_tmp[1][1][3]) print("len(caches) = \n", len(caches_tmp)) ###Output a[4][1] = [-0.99999375 0.77911235 -0.99861469 -0.99833267] a.shape = (5, 10, 4) y_pred[1][3] = [ 0.79560373 0.86224861 0.11118257 0.81515947] y_pred.shape = (2, 10, 4) caches[1][1][3] = [-1.1425182 -0.34934272 -0.20889423 0.58662319] len(caches) = 2 ###Markdown **Expected Output**:```Pythona[4][1] = [-0.99999375 0.77911235 -0.99861469 -0.99833267]a.shape = (5, 10, 4)y_pred[1][3] = [ 0.79560373 0.86224861 0.11118257 0.81515947]y_pred.shape = (2, 10, 4)caches[1][1][3] = [-1.1425182 -0.34934272 -0.20889423 0.58662319]len(caches) = 2``` Congratulations! You've successfully built the forward propagation of a recurrent neural network from scratch. Situations when this RNN will perform better:- This will work well enough for some applications, but it suffers from the vanishing gradient problem. - The RNN works best when each output $\hat{y}^{\langle t \rangle}$ can be estimated using "local" context. 
- "Local" context refers to information that is close to the prediction's time step $t$.- More formally, local context refers to inputs $x^{\langle t' \rangle}$ and predictions $\hat{y}^{\langle t \rangle}$ where $t'$ is close to $t$.In the next part, you will build a more complex LSTM model, which is better at addressing vanishing gradients. The LSTM will be better able to remember a piece of information and keep it saved for many timesteps. 2 - Long Short-Term Memory (LSTM) networkThe following figure shows the operations of an LSTM-cell. **Figure 4**: LSTM-cell. This tracks and updates a "cell state" or memory variable $c^{\langle t \rangle}$ at every time-step, which can be different from $a^{\langle t \rangle}$. Note, the $softmax^{*}$ includes a dense layer and softmaxSimilar to the RNN example above, you will start by implementing the LSTM cell for a single time-step. Then you can iteratively call it from inside a "for-loop" to have it process an input with $T_x$ time-steps. Overview of gates and states - Forget gate $\mathbf{\Gamma}_{f}$* Let's assume we are reading words in a piece of text, and plan to use an LSTM to keep track of grammatical structures, such as whether the subject is singular ("puppy") or plural ("puppies"). * If the subject changes its state (from a singular word to a plural word), the memory of the previous state becomes outdated, so we "forget" that outdated state.* The "forget gate" is a tensor containing values that are between 0 and 1. * If a unit in the forget gate has a value close to 0, the LSTM will "forget" the stored state in the corresponding unit of the previous cell state. * If a unit in the forget gate has a value close to 1, the LSTM will mostly remember the corresponding value in the stored state. Equation$$\mathbf{\Gamma}_f^{\langle t \rangle} = \sigma(\mathbf{W}_f[\mathbf{a}^{\langle t-1 \rangle}, \mathbf{x}^{\langle t \rangle}] + \mathbf{b}_f)\tag{1} $$ Explanation of the equation:* $\mathbf{W_{f}}$ contains weights that govern the forget gate's behavior. * The previous time step's hidden state $[a^{\langle t-1 \rangle}$ and current time step's input $x^{\langle t \rangle}]$ are concatenated together and multiplied by $\mathbf{W_{f}}$. * A sigmoid function is used to make each of the gate tensor's values $\mathbf{\Gamma}_f^{\langle t \rangle}$ range from 0 to 1.* The forget gate $\mathbf{\Gamma}_f^{\langle t \rangle}$ has the same dimensions as the previous cell state $c^{\langle t-1 \rangle}$. * This means that the two can be multiplied together, element-wise.* Multiplying the tensors $\mathbf{\Gamma}_f^{\langle t \rangle} * \mathbf{c}^{\langle t-1 \rangle}$ is like applying a mask over the previous cell state.* If a single value in $\mathbf{\Gamma}_f^{\langle t \rangle}$ is 0 or close to 0, then the product is close to 0. * This keeps the information stored in the corresponding unit in $\mathbf{c}^{\langle t-1 \rangle}$ from being remembered for the next time step.* Similarly, if one value is close to 1, the product is close to the original value in the previous cell state. * The LSTM will keep the information from the corresponding unit of $\mathbf{c}^{\langle t-1 \rangle}$, to be used in the next time step. Variable names in the codeThe variable names in the code are similar to the equations, with slight differences. 
* `Wf`: forget gate weight $\mathbf{W}_{f}$* `bf`: forget gate bias $\mathbf{b}_{f}$* `ft`: forget gate $\Gamma_f^{\langle t \rangle}$ Candidate value $\tilde{\mathbf{c}}^{\langle t \rangle}$* The candidate value is a tensor containing information from the current time step that **may** be stored in the current cell state $\mathbf{c}^{\langle t \rangle}$.* Which parts of the candidate value get passed on depends on the update gate.* The candidate value is a tensor containing values that range from -1 to 1.* The tilde "~" is used to differentiate the candidate $\tilde{\mathbf{c}}^{\langle t \rangle}$ from the cell state $\mathbf{c}^{\langle t \rangle}$. Equation$$\mathbf{\tilde{c}}^{\langle t \rangle} = \tanh\left( \mathbf{W}_{c} [\mathbf{a}^{\langle t - 1 \rangle}, \mathbf{x}^{\langle t \rangle}] + \mathbf{b}_{c} \right) \tag{3}$$ Explanation of the equation* The 'tanh' function produces values between -1 and +1. Variable names in the code* `cct`: candidate value $\mathbf{\tilde{c}}^{\langle t \rangle}$ - Update gate $\mathbf{\Gamma}_{i}$* We use the update gate to decide what aspects of the candidate $\tilde{\mathbf{c}}^{\langle t \rangle}$ to add to the cell state $c^{\langle t \rangle}$.* The update gate decides what parts of a "candidate" tensor $\tilde{\mathbf{c}}^{\langle t \rangle}$ are passed onto the cell state $\mathbf{c}^{\langle t \rangle}$.* The update gate is a tensor containing values between 0 and 1. * When a unit in the update gate is close to 1, it allows the value of the candidate $\tilde{\mathbf{c}}^{\langle t \rangle}$ to be passed onto the hidden state $\mathbf{c}^{\langle t \rangle}$ * When a unit in the update gate is close to 0, it prevents the corresponding value in the candidate from being passed onto the hidden state.* Notice that we use the subscript "i" and not "u", to follow the convention used in the literature. Equation$$\mathbf{\Gamma}_i^{\langle t \rangle} = \sigma(\mathbf{W}_i[a^{\langle t-1 \rangle}, \mathbf{x}^{\langle t \rangle}] + \mathbf{b}_i)\tag{2} $$ Explanation of the equation* Similar to the forget gate, here $\mathbf{\Gamma}_i^{\langle t \rangle}$, the sigmoid produces values between 0 and 1.* The update gate is multiplied element-wise with the candidate, and this product ($\mathbf{\Gamma}_{i}^{\langle t \rangle} * \tilde{c}^{\langle t \rangle}$) is used in determining the cell state $\mathbf{c}^{\langle t \rangle}$. Variable names in code (Please note that they're different than the equations)In the code, we'll use the variable names found in the academic literature. These variables don't use "u" to denote "update".* `Wi` is the update gate weight $\mathbf{W}_i$ (not "Wu") * `bi` is the update gate bias $\mathbf{b}_i$ (not "bu")* `it` is the forget gate $\mathbf{\Gamma}_i^{\langle t \rangle}$ (not "ut") - Cell state $\mathbf{c}^{\langle t \rangle}$* The cell state is the "memory" that gets passed onto future time steps.* The new cell state $\mathbf{c}^{\langle t \rangle}$ is a combination of the previous cell state and the candidate value. 
Equation$$ \mathbf{c}^{\langle t \rangle} = \mathbf{\Gamma}_f^{\langle t \rangle}* \mathbf{c}^{\langle t-1 \rangle} + \mathbf{\Gamma}_{i}^{\langle t \rangle} *\mathbf{\tilde{c}}^{\langle t \rangle} \tag{4} $$ Explanation of equation* The previous cell state $\mathbf{c}^{\langle t-1 \rangle}$ is adjusted (weighted) by the forget gate $\mathbf{\Gamma}_{f}^{\langle t \rangle}$* and the candidate value $\tilde{\mathbf{c}}^{\langle t \rangle}$, adjusted (weighted) by the update gate $\mathbf{\Gamma}_{i}^{\langle t \rangle}$ Variable names and shapes in the code* `c`: cell state, including all time steps, $\mathbf{c}$ shape $(n_{a}, m, T)$* `c_next`: new (next) cell state, $\mathbf{c}^{\langle t \rangle}$ shape $(n_{a}, m)$* `c_prev`: previous cell state, $\mathbf{c}^{\langle t-1 \rangle}$, shape $(n_{a}, m)$ - Output gate $\mathbf{\Gamma}_{o}$* The output gate decides what gets sent as the prediction (output) of the time step.* The output gate is like the other gates. It contains values that range from 0 to 1. Equation$$ \mathbf{\Gamma}_o^{\langle t \rangle}= \sigma(\mathbf{W}_o[\mathbf{a}^{\langle t-1 \rangle}, \mathbf{x}^{\langle t \rangle}] + \mathbf{b}_{o})\tag{5}$$ Explanation of the equation* The output gate is determined by the previous hidden state $\mathbf{a}^{\langle t-1 \rangle}$ and the current input $\mathbf{x}^{\langle t \rangle}$* The sigmoid makes the gate range from 0 to 1. Variable names in the code* `Wo`: output gate weight, $\mathbf{W_o}$* `bo`: output gate bias, $\mathbf{b_o}$* `ot`: output gate, $\mathbf{\Gamma}_{o}^{\langle t \rangle}$ - Hidden state $\mathbf{a}^{\langle t \rangle}$* The hidden state gets passed to the LSTM cell's next time step.* It is used to determine the three gates ($\mathbf{\Gamma}_{f}, \mathbf{\Gamma}_{u}, \mathbf{\Gamma}_{o}$) of the next time step.* The hidden state is also used for the prediction $y^{\langle t \rangle}$. Equation$$ \mathbf{a}^{\langle t \rangle} = \mathbf{\Gamma}_o^{\langle t \rangle} * \tanh(\mathbf{c}^{\langle t \rangle})\tag{6} $$ Explanation of equation* The hidden state $\mathbf{a}^{\langle t \rangle}$ is determined by the cell state $\mathbf{c}^{\langle t \rangle}$ in combination with the output gate $\mathbf{\Gamma}_{o}$.* The cell state state is passed through the "tanh" function to rescale values between -1 and +1.* The output gate acts like a "mask" that either preserves the values of $\tanh(\mathbf{c}^{\langle t \rangle})$ or keeps those values from being included in the hidden state $\mathbf{a}^{\langle t \rangle}$ Variable names and shapes in the code* `a`: hidden state, including time steps. $\mathbf{a}$ has shape $(n_{a}, m, T_{x})$* 'a_prev`: hidden state from previous time step. $\mathbf{a}^{\langle t-1 \rangle}$ has shape $(n_{a}, m)$* `a_next`: hidden state for next time step. $\mathbf{a}^{\langle t \rangle}$ has shape $(n_{a}, m)$ - Prediction $\mathbf{y}^{\langle t \rangle}_{pred}$* The prediction in this use case is a classification, so we'll use a softmax.The equation is:$$\mathbf{y}^{\langle t \rangle}_{pred} = \textrm{softmax}(\mathbf{W}_{y} \mathbf{a}^{\langle t \rangle} + \mathbf{b}_{y})$$ Variable names and shapes in the code* `y_pred`: prediction, including all time steps. $\mathbf{y}_{pred}$ has shape $(n_{y}, m, T_{x})$. Note that $(T_{y} = T_{x})$ for this example.* `yt_pred`: prediction for the current time step $t$. $\mathbf{y}^{\langle t \rangle}_{pred}$ has shape $(n_{y}, m)$ 2.1 - LSTM cell**Exercise**: Implement the LSTM cell described in the Figure (4).**Instructions**:1. 
Concatenate the hidden state $a^{\langle t-1 \rangle}$ and input $x^{\langle t \rangle}$ into a single matrix: $$concat = \begin{bmatrix} a^{\langle t-1 \rangle} \\ x^{\langle t \rangle} \end{bmatrix}$$ 2. Compute all the formulas 1 through 6 for the gates, hidden state, and cell state.3. Compute the prediction $y^{\langle t \rangle}$. Additional Hints* You can use [numpy.concatenate](https://docs.scipy.org/doc/numpy/reference/generated/numpy.concatenate.html). Check which value to use for the `axis` parameter.* The functions `sigmoid()` and `softmax` are imported from `rnn_utils.py`.* [numpy.tanh](https://docs.scipy.org/doc/numpy/reference/generated/numpy.tanh.html)* Use [np.dot](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) for matrix multiplication.* Notice that the variable names `Wi`, `bi` refer to the weights and biases of the **update** gate. There are no variables named "Wu" or "bu" in this function. ###Code # GRADED FUNCTION: lstm_cell_forward def lstm_cell_forward(xt, a_prev, c_prev, parameters): """ Implement a single forward step of the LSTM-cell as described in Figure (4) Arguments: xt -- your input data at timestep "t", numpy array of shape (n_x, m). a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m) c_prev -- Memory state at timestep "t-1", numpy array of shape (n_a, m) parameters -- python dictionary containing: Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x) bf -- Bias of the forget gate, numpy array of shape (n_a, 1) Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x) bi -- Bias of the update gate, numpy array of shape (n_a, 1) Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x) bc -- Bias of the first "tanh", numpy array of shape (n_a, 1) Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x) bo -- Bias of the output gate, numpy array of shape (n_a, 1) Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a) by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1) Returns: a_next -- next hidden state, of shape (n_a, m) c_next -- next memory state, of shape (n_a, m) yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m) cache -- tuple of values needed for the backward pass, contains (a_next, c_next, a_prev, c_prev, xt, parameters) Note: ft/it/ot stand for the forget/update/output gates, cct stands for the candidate value (c tilde), c stands for the cell state (memory) """ # Retrieve parameters from "parameters" Wf = parameters["Wf"] # forget gate weight bf = parameters["bf"] Wi = parameters["Wi"] # update gate weight (notice the variable name) bi = parameters["bi"] # (notice the variable name) Wc = parameters["Wc"] # candidate value weight bc = parameters["bc"] Wo = parameters["Wo"] # output gate weight bo = parameters["bo"] Wy = parameters["Wy"] # prediction weight by = parameters["by"] # Retrieve dimensions from shapes of xt and Wy n_x, m = xt.shape n_y, n_a = Wy.shape ### START CODE HERE ### # Concatenate a_prev and xt (≈1 line) concat = np.concatenate((a_prev, xt), axis = 0) # Compute values for ft (forget gate), it (update gate), # cct (candidate value), c_next (cell state), # ot (output gate), a_next (hidden state) (≈6 lines) ft = sigmoid((np.dot(Wf, concat)) + bf) # forget gate it = sigmoid((np.dot(Wi, concat)) + bi) # update gate cct = np.tanh((np.dot(Wc, concat)) + bc) # candidate value c_next = it * cct + ft *c_prev # cell state ot = 
softmax is imported from rnn_utils and used below per the prediction equation.
    ot = sigmoid(np.dot(Wo, concat) + bo)        # output gate a_next = ot * np.tanh(c_next) # hidden state # Compute prediction of the LSTM cell (≈1 line) yt_pred = softmax(np.dot(Wy, a_next) + by) ### END CODE HERE ### # store values needed for backward propagation in cache cache = (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters) return a_next, c_next, yt_pred, cache np.random.seed(1) xt_tmp = np.random.randn(3,10) a_prev_tmp = np.random.randn(5,10) c_prev_tmp = np.random.randn(5,10) parameters_tmp = {} parameters_tmp['Wf'] = np.random.randn(5, 5+3) parameters_tmp['bf'] = np.random.randn(5,1) parameters_tmp['Wi'] = np.random.randn(5, 5+3) parameters_tmp['bi'] = np.random.randn(5,1) parameters_tmp['Wo'] = np.random.randn(5, 5+3) parameters_tmp['bo'] = np.random.randn(5,1) parameters_tmp['Wc'] = np.random.randn(5, 5+3) parameters_tmp['bc'] = np.random.randn(5,1) parameters_tmp['Wy'] = np.random.randn(2,5) parameters_tmp['by'] = np.random.randn(2,1) a_next_tmp, c_next_tmp, yt_tmp, cache_tmp = lstm_cell_forward(xt_tmp, a_prev_tmp, c_prev_tmp, parameters_tmp) print("a_next[4] = \n", a_next_tmp[4]) print("a_next.shape = ", a_next_tmp.shape) print("c_next[2] = \n", c_next_tmp[2]) print("c_next.shape = ", c_next_tmp.shape) print("yt[1] =", yt_tmp[1]) print("yt.shape = ", yt_tmp.shape) print("cache[1][3] =\n", cache_tmp[1][3]) print("len(cache) = ", len(cache_tmp)) ###Output a_next[4] = [-0.66408471 0.0036921 0.02088357 0.22834167 -0.85575339 0.00138482 0.76566531 0.34631421 -0.00215674 0.43827275] a_next.shape = (5, 10) c_next[2] = [ 0.63267805 1.00570849 0.35504474 0.20690913 -1.64566718 0.11832942 0.76449811 -0.0981561 -0.74348425 -0.26810932] c_next.shape = (5, 10) yt[1] = [ 0.79913913 0.15986619 0.22412122 0.15606108 0.97057211 0.31146381 0.00943007 0.12666353 0.39380172 0.07828381] yt.shape = (2, 10) cache[1][3] = [-0.16263996 1.03729328 0.72938082 -0.54101719 0.02752074 -0.30821874 0.07651101 -1.03752894 1.41219977 -0.37647422] len(cache) = 10 ###Markdown **Expected Output**:```Pythona_next[4] = [-0.66408471 0.0036921 0.02088357 0.22834167 -0.85575339 0.00138482 0.76566531 0.34631421 -0.00215674 0.43827275]a_next.shape = (5, 10)c_next[2] = [ 0.63267805 1.00570849 0.35504474 0.20690913 -1.64566718 0.11832942 0.76449811 -0.0981561 -0.74348425 -0.26810932]c_next.shape = (5, 10)yt[1] = [ 0.79913913 0.15986619 0.22412122 0.15606108 0.97057211 0.31146381 0.00943007 0.12666353 0.39380172 0.07828381]yt.shape = (2, 10)cache[1][3] = [-0.16263996 1.03729328 0.72938082 -0.54101719 0.02752074 -0.30821874 0.07651101 -1.03752894 1.41219977 -0.37647422]len(cache) = 10``` 2.2 - Forward pass for LSTMNow that you have implemented one step of an LSTM, you can iterate it in a for-loop to process a sequence of $T_x$ inputs. **Figure 5**: LSTM over multiple time-steps. **Exercise:** Implement `lstm_forward()` to run an LSTM over $T_x$ time-steps. **Instructions*** Get the dimensions $n_x, n_a, n_y, m, T_x$ from the shape of the variables: `x` and `parameters`.* Initialize the 3D tensors $a$, $c$ and $y$. - $a$: hidden state, shape $(n_{a}, m, T_{x})$ - $c$: cell state, shape $(n_{a}, m, T_{x})$ - $y$: prediction, shape $(n_{y}, m, T_{x})$ (Note that $T_{y} = T_{x}$ in this example). - **Note** Setting one variable equal to the other is a "copy by reference". In other words, don't do `c = a`, otherwise both these variables point to the same underlying variable.* Initialize the 2D tensor $a^{\langle t \rangle}$ - $a^{\langle t \rangle}$ stores the hidden state for time step $t$. 
The variable name is `a_next`. - $a^{\langle 0 \rangle}$, the initial hidden state at time step 0, is passed in when calling the function. The variable name is `a0`. - $a^{\langle t \rangle}$ and $a^{\langle 0 \rangle}$ represent a single time step, so they both have the shape $(n_{a}, m)$ - Initialize $a^{\langle t \rangle}$ by setting it to the initial hidden state ($a^{\langle 0 \rangle}$) that is passed into the function.* Initialize $c^{\langle t \rangle}$ with zeros. - The variable name is `c_next`. - $c^{\langle t \rangle}$ represents a single time step, so its shape is $(n_{a}, m)$ - **Note**: create `c_next` as its own variable with its own location in memory. Do not initialize it as a slice of the 3D tensor $c$. In other words, **don't** do `c_next = c[:,:,0]`.* For each time step, do the following: - From the 3D tensor $x$, get a 2D slice $x^{\langle t \rangle}$ at time step $t$. - Call the `lstm_cell_forward` function that you defined previously, to get the hidden state, cell state, prediction, and cache. - Store the hidden state, cell state and prediction (the 2D tensors) inside the 3D tensors. - Also append the cache to the list of caches. ###Code # GRADED FUNCTION: lstm_forward def lstm_forward(x, a0, parameters): """ Implement the forward propagation of the recurrent neural network using an LSTM-cell described in Figure (4). Arguments: x -- Input data for every time-step, of shape (n_x, m, T_x). a0 -- Initial hidden state, of shape (n_a, m) parameters -- python dictionary containing: Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x) bf -- Bias of the forget gate, numpy array of shape (n_a, 1) Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x) bi -- Bias of the update gate, numpy array of shape (n_a, 1) Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x) bc -- Bias of the first "tanh", numpy array of shape (n_a, 1) Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x) bo -- Bias of the output gate, numpy array of shape (n_a, 1) Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a) by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1) Returns: a -- Hidden states for every time-step, numpy array of shape (n_a, m, T_x) y -- Predictions for every time-step, numpy array of shape (n_y, m, T_x) c -- The value of the cell state, numpy array of shape (n_a, m, T_x) caches -- tuple of values needed for the backward pass, contains (list of all the caches, x) """ # Initialize "caches", which will track the list of all the caches caches = [] ### START CODE HERE ### Wy = parameters['Wy'] # saving parameters['Wy'] in a local variable in case students use Wy instead of parameters['Wy'] # Retrieve dimensions from shapes of x and parameters['Wy'] (≈2 lines) n_x, m, T_x = x.shape n_y, n_a = parameters['Wy'].shape # initialize "a", "c" and "y" with zeros (≈3 lines) a = np.zeros((n_a, m, T_x)) c = np.zeros((n_a, m, T_x)) y = np.zeros((n_y, m, T_x)) # Initialize a_next and c_next (≈2 lines) a_next = a0 c_next = np.zeros((n_a, m)) # loop over all time-steps for t in range(T_x): # Get the 2D slice 'xt' from the 3D input 'x' at time step 't' xt = x[:, :, t] # Update next hidden state, next memory state, compute the prediction, get the cache (≈1 line) a_next, c_next, yt, cache = lstm_cell_forward(xt, a_next, c_next, parameters) # Save the value of the new "next" hidden state in a (≈1 line) a[:,:,t] = a_next # Save the value of the 
next cell state (≈1 line) c[:,:,t] = c_next # Save the value of the prediction in y (≈1 line) y[:,:,t] = yt # Append the cache into caches (≈1 line) caches.append(cache) ### END CODE HERE ### # store values needed for backward propagation in cache caches = (caches, x) return a, y, c, caches np.random.seed(1) x_tmp = np.random.randn(3,10,7) a0_tmp = np.random.randn(5,10) parameters_tmp = {} parameters_tmp['Wf'] = np.random.randn(5, 5+3) parameters_tmp['bf'] = np.random.randn(5,1) parameters_tmp['Wi'] = np.random.randn(5, 5+3) parameters_tmp['bi']= np.random.randn(5,1) parameters_tmp['Wo'] = np.random.randn(5, 5+3) parameters_tmp['bo'] = np.random.randn(5,1) parameters_tmp['Wc'] = np.random.randn(5, 5+3) parameters_tmp['bc'] = np.random.randn(5,1) parameters_tmp['Wy'] = np.random.randn(2,5) parameters_tmp['by'] = np.random.randn(2,1) a_tmp, y_tmp, c_tmp, caches_tmp = lstm_forward(x_tmp, a0_tmp, parameters_tmp) print("a[4][3][6] = ", a_tmp[4][3][6]) print("a.shape = ", a_tmp.shape) print("y[1][4][3] =", y_tmp[1][4][3]) print("y.shape = ", y_tmp.shape) print("caches[1][1][1] =\n", caches_tmp[1][1][1]) print("c[1][2][1]", c_tmp[1][2][1]) print("len(caches) = ", len(caches_tmp)) ###Output a[4][3][6] = 0.172117767533 a.shape = (5, 10, 7) y[1][4][3] = 0.744999740353 y.shape = (2, 10, 7) caches[1][1][1] = [ 0.82797464 0.23009474 0.76201118 -0.22232814 -0.20075807 0.18656139 0.41005165] c[1][2][1] -0.855544916718 len(caches) = 2 ###Markdown **Expected Output**:```Pythona[4][3][6] = 0.172117767533a.shape = (5, 10, 7)y[1][4][3] = 0.95087346185y.shape = (2, 10, 7)caches[1][1][1] = [ 0.82797464 0.23009474 0.76201118 -0.22232814 -0.20075807 0.18656139 0.41005165]c[1][2][1] -0.855544916718len(caches) = 2``` Congratulations! You have now implemented the forward passes for the basic RNN and the LSTM. When using a deep learning framework, implementing the forward pass is sufficient to build systems that achieve great performance. The rest of this notebook is optional, and will not be graded. 3 - Backpropagation in recurrent neural networks (OPTIONAL / UNGRADED)In modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers do not need to bother with the details of the backward pass. If however you are an expert in calculus and want to see the details of backprop in RNNs, you can work through this optional portion of the notebook. When in an earlier [course](https://www.coursera.org/learn/neural-networks-deep-learning/lecture/0VSHe/derivatives-with-a-computation-graph) you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in recurrent neural networks you can calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are quite complicated and we did not derive them in lecture. However, we will briefly present them below. Note that this notebook does not implement the backward path from the Loss 'J' backwards to 'a'. This would have included the dense layer and softmax which are a part of the forward path. This is assumed to be calculated elsewhere and the result passed to rnn_backward in 'da'. It is further assumed that loss has been adjusted for batch size (m) and division by the number of examples is not required here. This section is optional and ungraded. It is more difficult and has fewer details regarding its implementation. 
This section only implements key elements of the full path. 3.1 - Basic RNN backward passWe will start by computing the backward pass for the basic RNN-cell and then in the following sections, iterate through the cells. **Figure 6**: RNN-cell's backward pass. Just like in a fully-connected neural network, the derivative of the cost function $J$ backpropagates through the time steps of the RNN by following the chain-rule from calculus. Internal to the cell, the chain-rule is also used to calculate $(\frac{\partial J}{\partial W_{ax}},\frac{\partial J}{\partial W_{aa}},\frac{\partial J}{\partial b})$ to update the parameters $(W_{ax}, W_{aa}, b_a)$. The operation can utilize the cached results from the forward path. Recall from lecture, the shorthand for the partial derivative of cost relative to a variable is dVariable. For example, $\frac{\partial J}{\partial W_{ax}}$ is $dW_{ax}$. This will be used throughout the remaining sections. **Figure 7**: This implementation of rnn_cell_backward does **not** include the output dense layer and softmax which are included in rnn_cell_forward. $da_{next}$ is $\frac{\partial{J}}{\partial a^{\langle t \rangle}}$ and includes loss from previous stages and current stage output logic. The addition shown in green will be part of your implementation of rnn_backward. EquationsTo compute the rnn_cell_backward you can utilize the following equations. It is a good exercise to derive them by hand. Here, $*$ denotes element-wise multiplication while the absence of a symbol indicates matrix multiplication.\begin{align}\displaystyle a^{\langle t \rangle} &= \tanh(W_{ax} x^{\langle t \rangle} + W_{aa} a^{\langle t-1 \rangle} + b_{a})\tag{-} \\[8pt]\displaystyle \frac{\partial \tanh(x)} {\partial x} &= 1 - \tanh^2(x) \tag{-} \\[8pt]\displaystyle {dW_{ax}} &= (da_{next} * ( 1-\tanh^2(W_{ax}x^{\langle t \rangle}+W_{aa} a^{\langle t-1 \rangle} + b_{a}) )) x^{\langle t \rangle T}\tag{1} \\[8pt]\displaystyle dW_{aa} &= (da_{next} * ( 1-\tanh^2(W_{ax}x^{\langle t \rangle}+W_{aa} a^{\langle t-1 \rangle} + b_{a}) )) a^{\langle t-1 \rangle T}\tag{2} \\[8pt]\displaystyle db_a& = \sum_{batch}( da_{next} * ( 1-\tanh^2(W_{ax}x^{\langle t \rangle}+W_{aa} a^{\langle t-1 \rangle} + b_{a}) ))\tag{3} \\[8pt]\displaystyle dx^{\langle t \rangle} &= { W_{ax}}^T (da_{next} * ( 1-\tanh^2(W_{ax}x^{\langle t \rangle}+W_{aa} a^{\langle t-1 \rangle} + b_{a}) ))\tag{4} \\[8pt]\displaystyle da_{prev} &= { W_{aa}}^T(da_{next} * ( 1-\tanh^2(W_{ax}x^{\langle t \rangle}+W_{aa} a^{\langle t-1 \rangle} + b_{a}) ))\tag{5}\end{align} Implementing rnn_cell_backwardThe results can be computed directly by implementing the equations above. However, the above can optionally be simplified by computing 'dz' and utlilizing the chain rule. This can be further simplified by noting that $\tanh(W_{ax}x^{\langle t \rangle}+W_{aa} a^{\langle t-1 \rangle} + b_{a})$ was computed and saved in the forward pass. To calculate dba, the 'batch' above is a sum across all 'm' examples (axis= 1). Note that you should use the keepdims = True option.It may be worthwhile to review Course 1 [Derivatives with a computational graph](https://www.coursera.org/learn/neural-networks-deep-learning/lecture/0VSHe/derivatives-with-a-computation-graph) through [Backpropagation Intuition](https://www.coursera.org/learn/neural-networks-deep-learning/lecture/6dDj7/backpropagation-intuition-optional), which decompose the calculation into steps using the chain rule. 
Matrix vector derivatives are described [here](http://cs231n.stanford.edu/vecDerivs.pdf), though the equations above incorporate the required transformations.Note rnn_cell_backward does __not__ include the calculation of loss from $y^{\langle t \rangle}$; this is incorporated into the incoming da_next. This is a slight mismatch with rnn_cell_forward, which includes a dense layer and softmax. Note: in the code: $\displaystyle dx^{\langle t \rangle}$ is represented by dxt, $\displaystyle d W_{ax}$ is represented by dWax, $\displaystyle da_{prev}$ is represented by da_prev, $\displaystyle dW_{aa}$ is represented by dWaa, $\displaystyle db_{a}$ is represented by dba, dz is not derived above but can optionally be derived by students to simplify the repeated calculations. ###Code def rnn_cell_backward(da_next, cache): """ Implements the backward pass for the RNN-cell (single time-step). Arguments: da_next -- Gradient of loss with respect to next hidden state cache -- python dictionary containing useful values (output of rnn_cell_forward()) Returns: gradients -- python dictionary containing: dx -- Gradients of input data, of shape (n_x, m) da_prev -- Gradients of previous hidden state, of shape (n_a, m) dWax -- Gradients of input-to-hidden weights, of shape (n_a, n_x) dWaa -- Gradients of hidden-to-hidden weights, of shape (n_a, n_a) dba -- Gradients of bias vector, of shape (n_a, 1) """ # Retrieve values from cache (a_next, a_prev, xt, parameters) = cache # Retrieve values from parameters Wax = parameters["Wax"] Waa = parameters["Waa"] Wya = parameters["Wya"] ba = parameters["ba"] by = parameters["by"] ### START CODE HERE ### # compute the gradient of the loss with respect to z, using a_next = tanh(z) (optional) (≈1 line) dz = da_next * (1 - a_next ** 2) # compute the gradient of the loss with respect to Wax (≈2 lines) dxt = np.dot(Wax.T, dz) dWax = np.dot(dz, xt.T) # compute the gradient with respect to Waa (≈2 lines) da_prev = np.dot(Waa.T, dz) dWaa = np.dot(dz, a_prev.T) # compute the gradient with respect to b, summing over the batch (≈1 line) dba = np.sum(dz, axis=1, keepdims=True) ### END CODE HERE ### # Store the gradients in a python dictionary gradients = {"dxt": dxt, "da_prev": da_prev, "dWax": dWax, "dWaa": dWaa, "dba": dba} return gradients np.random.seed(1) xt_tmp = np.random.randn(3,10) a_prev_tmp = np.random.randn(5,10) parameters_tmp = {} parameters_tmp['Wax'] = np.random.randn(5,3) parameters_tmp['Waa'] = np.random.randn(5,5) parameters_tmp['Wya'] = np.random.randn(2,5) parameters_tmp['ba'] = np.random.randn(5,1) parameters_tmp['by'] = np.random.randn(2,1) a_next_tmp, yt_tmp, cache_tmp = rnn_cell_forward(xt_tmp, a_prev_tmp, parameters_tmp) da_next_tmp = np.random.randn(5,10) gradients_tmp = rnn_cell_backward(da_next_tmp, cache_tmp) print("gradients[\"dxt\"][1][2] =", gradients_tmp["dxt"][1][2]) print("gradients[\"dxt\"].shape =", gradients_tmp["dxt"].shape) print("gradients[\"da_prev\"][2][3] =", gradients_tmp["da_prev"][2][3]) print("gradients[\"da_prev\"].shape =", gradients_tmp["da_prev"].shape) print("gradients[\"dWax\"][3][1] =", gradients_tmp["dWax"][3][1]) print("gradients[\"dWax\"].shape =", gradients_tmp["dWax"].shape) print("gradients[\"dWaa\"][1][2] =", gradients_tmp["dWaa"][1][2]) print("gradients[\"dWaa\"].shape =", gradients_tmp["dWaa"].shape) print("gradients[\"dba\"][4] =", gradients_tmp["dba"][4]) print("gradients[\"dba\"].shape =", gradients_tmp["dba"].shape) ###Output _____no_output_____ ###Markdown **Expected Output**: **gradients["dxt"][1][2]** = -1.3872130506 **gradients["dxt"].shape** = (3, 10) **gradients["da_prev"][2][3]** = -0.152399493774 **gradients["da_prev"].shape** = (5, 10) **gradients["dWax"][3][1]** 
= 0.410772824935 **gradients["dWax"].shape** = (5, 3) **gradients["dWaa"][1][2]** = 1.15034506685 **gradients["dWaa"].shape** = (5, 5) **gradients["dba"][4]** = [ 0.20023491] **gradients["dba"].shape** = (5, 1) Backward pass through the RNNComputing the gradients of the cost with respect to $a^{\langle t \rangle}$ at every time-step $t$ is useful because it is what helps the gradient backpropagate to the previous RNN-cell. To do so, you need to iterate through all the time steps starting at the end, and at each step, you increment the overall $db_a$, $dW_{aa}$, $dW_{ax}$ and you store $dx$.**Instructions**:Implement the `rnn_backward` function. Initialize the return variables with zeros first and then loop through all the time steps while calling the `rnn_cell_backward` at each time timestep, update the other variables accordingly. * Note that this notebook does not implement the backward path from the Loss 'J' backwards to 'a'. * This would have included the dense layer and softmax which are a part of the forward path. * This is assumed to be calculated elsewhere and the result passed to rnn_backward in 'da'. * You must combine this with the loss from the previous stages when calling rnn_cell_backward (see figure 7 above).* It is further assumed that loss has been adjusted for batch size (m). * Therefore, division by the number of examples is not required here. ###Code def rnn_backward(da, caches): """ Implement the backward pass for a RNN over an entire sequence of input data. Arguments: da -- Upstream gradients of all hidden states, of shape (n_a, m, T_x) caches -- tuple containing information from the forward pass (rnn_forward) Returns: gradients -- python dictionary containing: dx -- Gradient w.r.t. the input data, numpy-array of shape (n_x, m, T_x) da0 -- Gradient w.r.t the initial hidden state, numpy-array of shape (n_a, m) dWax -- Gradient w.r.t the input's weight matrix, numpy-array of shape (n_a, n_x) dWaa -- Gradient w.r.t the hidden state's weight matrix, numpy-arrayof shape (n_a, n_a) dba -- Gradient w.r.t the bias, of shape (n_a, 1) """ ### START CODE HERE ### # Retrieve values from the first cache (t=1) of caches (≈2 lines) (caches, x) = None (a1, a0, x1, parameters) = None # Retrieve dimensions from da's and x1's shapes (≈2 lines) n_a, m, T_x = None n_x, m = None # initialize the gradients with the right sizes (≈6 lines) dx = None dWax = None dWaa = None dba = None da0 = None da_prevt = None # Loop through all the time steps for t in reversed(range(None)): # Compute gradients at time step t. 
# Remember to sum gradients from the output path (da) and the previous timesteps (da_prevt) (≈1 line) gradients = None # Retrieve derivatives from gradients (≈ 1 line) dxt, da_prevt, dWaxt, dWaat, dbat = gradients["dxt"], gradients["da_prev"], gradients["dWax"], gradients["dWaa"], gradients["dba"] # Increment global derivatives w.r.t parameters by adding their derivative at time-step t (≈4 lines) dx[:, :, t] = None dWax += None dWaa += None dba += None # Set da0 to the gradient of a which has been backpropagated through all time-steps (≈1 line) da0 = None ### END CODE HERE ### # Store the gradients in a python dictionary gradients = {"dx": dx, "da0": da0, "dWax": dWax, "dWaa": dWaa,"dba": dba} return gradients np.random.seed(1) x_tmp = np.random.randn(3,10,4) a0_tmp = np.random.randn(5,10) parameters_tmp = {} parameters_tmp['Wax'] = np.random.randn(5,3) parameters_tmp['Waa'] = np.random.randn(5,5) parameters_tmp['Wya'] = np.random.randn(2,5) parameters_tmp['ba'] = np.random.randn(5,1) parameters_tmp['by'] = np.random.randn(2,1) a_tmp, y_tmp, caches_tmp = rnn_forward(x_tmp, a0_tmp, parameters_tmp) da_tmp = np.random.randn(5, 10, 4) gradients_tmp = rnn_backward(da_tmp, caches_tmp) print("gradients[\"dx\"][1][2] =", gradients_tmp["dx"][1][2]) print("gradients[\"dx\"].shape =", gradients_tmp["dx"].shape) print("gradients[\"da0\"][2][3] =", gradients_tmp["da0"][2][3]) print("gradients[\"da0\"].shape =", gradients_tmp["da0"].shape) print("gradients[\"dWax\"][3][1] =", gradients_tmp["dWax"][3][1]) print("gradients[\"dWax\"].shape =", gradients_tmp["dWax"].shape) print("gradients[\"dWaa\"][1][2] =", gradients_tmp["dWaa"][1][2]) print("gradients[\"dWaa\"].shape =", gradients_tmp["dWaa"].shape) print("gradients[\"dba\"][4] =", gradients_tmp["dba"][4]) print("gradients[\"dba\"].shape =", gradients_tmp["dba"].shape) ###Output _____no_output_____ ###Markdown **Expected Output**: **gradients["dx"][1][2]** = [-2.07101689 -0.59255627 0.02466855 0.01483317] **gradients["dx"].shape** = (3, 10, 4) **gradients["da0"][2][3]** = -0.314942375127 **gradients["da0"].shape** = (5, 10) **gradients["dWax"][3][1]** = 11.2641044965 **gradients["dWax"].shape** = (5, 3) **gradients["dWaa"][1][2]** = 2.30333312658 **gradients["dWaa"].shape** = (5, 5) **gradients["dba"][4]** = [-0.74747722] **gradients["dba"].shape** = (5, 1) 3.2 - LSTM backward pass 3.2.1 One Step backwardThe LSTM backward pass is slightly more complicated than the forward pass. **Figure 8**: lstm_cell_backward. Note the output functions, while part of the lstm_cell_forward, are not included in lstm_cell_backward The equations for the LSTM backward pass are provided below. (If you enjoy calculus exercises feel free to try deriving these from scratch yourself.) 3.2.2 gate derivativesNote the location of the gate derivatives ($\gamma$..) between the dense layer and the activation function (see graphic above). This is convenient for computing parameter derivatives in the next step. 
\begin{align}d\gamma_o^{\langle t \rangle} &= da_{next}*\tanh(c_{next}) * \Gamma_o^{\langle t \rangle}*\left(1-\Gamma_o^{\langle t \rangle}\right)\tag{7} \\[8pt]dp\widetilde{c}^{\langle t \rangle} &= \left(dc_{next}*\Gamma_u^{\langle t \rangle}+ \Gamma_o^{\langle t \rangle}* (1-\tanh^2(c_{next})) * \Gamma_u^{\langle t \rangle} * da_{next} \right) * \left(1-\left(\widetilde c^{\langle t \rangle}\right)^2\right) \tag{8} \\[8pt]d\gamma_u^{\langle t \rangle} &= \left(dc_{next}*\widetilde{c}^{\langle t \rangle} + \Gamma_o^{\langle t \rangle}* (1-\tanh^2(c_{next})) * \widetilde{c}^{\langle t \rangle} * da_{next}\right)*\Gamma_u^{\langle t \rangle}*\left(1-\Gamma_u^{\langle t \rangle}\right)\tag{9} \\[8pt]d\gamma_f^{\langle t \rangle} &= \left(dc_{next}* c_{prev} + \Gamma_o^{\langle t \rangle} * (1-\tanh^2(c_{next})) * c_{prev} * da_{next}\right)*\Gamma_f^{\langle t \rangle}*\left(1-\Gamma_f^{\langle t \rangle}\right)\tag{10}\end{align} 3.2.3 parameter derivatives $ dW_f = d\gamma_f^{\langle t \rangle} \begin{bmatrix} a_{prev} \\ x_t\end{bmatrix}^T \tag{11} $$ dW_u = d\gamma_u^{\langle t \rangle} \begin{bmatrix} a_{prev} \\ x_t\end{bmatrix}^T \tag{12} $$ dW_c = dp\widetilde c^{\langle t \rangle} \begin{bmatrix} a_{prev} \\ x_t\end{bmatrix}^T \tag{13} $$ dW_o = d\gamma_o^{\langle t \rangle} \begin{bmatrix} a_{prev} \\ x_t\end{bmatrix}^T \tag{14}$To calculate $db_f, db_u, db_c, db_o$ you just need to sum across all 'm' examples (axis= 1) on $d\gamma_f^{\langle t \rangle}, d\gamma_u^{\langle t \rangle}, dp\widetilde c^{\langle t \rangle}, d\gamma_o^{\langle t \rangle}$ respectively. Note that you should have the `keepdims = True` option.$\displaystyle db_f = \sum_{batch}d\gamma_f^{\langle t \rangle}\tag{15}$$\displaystyle db_u = \sum_{batch}d\gamma_u^{\langle t \rangle}\tag{16}$$\displaystyle db_c = \sum_{batch}d\gamma_c^{\langle t \rangle}\tag{17}$$\displaystyle db_o = \sum_{batch}d\gamma_o^{\langle t \rangle}\tag{18}$Finally, you will compute the derivative with respect to the previous hidden state, previous memory state, and input.$ da_{prev} = W_f^T d\gamma_f^{\langle t \rangle} + W_u^T d\gamma_u^{\langle t \rangle}+ W_c^T dp\widetilde c^{\langle t \rangle} + W_o^T d\gamma_o^{\langle t \rangle} \tag{19}$Here, to account for concatenation, the weights for equations 19 are the first n_a, (i.e. $W_f = W_f[:,:n_a]$ etc...)$ dc_{prev} = dc_{next}*\Gamma_f^{\langle t \rangle} + \Gamma_o^{\langle t \rangle} * (1- \tanh^2(c_{next}))*\Gamma_f^{\langle t \rangle}*da_{next} \tag{20}$$ dx^{\langle t \rangle} = W_f^T d\gamma_f^{\langle t \rangle} + W_u^T d\gamma_u^{\langle t \rangle}+ W_c^T dp\widetilde c^{\langle t \rangle} + W_o^T d\gamma_o^{\langle t \rangle}\tag{21} $where the weights for equation 21 are from n_a to the end, (i.e. $W_f = W_f[:,n_a:]$ etc...)**Exercise:** Implement `lstm_cell_backward` by implementing equations $7-21$ below. Note: In the code:$d\gamma_o^{\langle t \rangle}$ is represented by `dot`, $dp\widetilde{c}^{\langle t \rangle}$ is represented by `dcct`, $d\gamma_u^{\langle t \rangle}$ is represented by `dit`, $d\gamma_f^{\langle t \rangle}$ is represented by `dft` ###Code def lstm_cell_backward(da_next, dc_next, cache): """ Implement the backward pass for the LSTM-cell (single time-step). 
Arguments: da_next -- Gradients of next hidden state, of shape (n_a, m) dc_next -- Gradients of next cell state, of shape (n_a, m) cache -- cache storing information from the forward pass Returns: gradients -- python dictionary containing: dxt -- Gradient of input data at time-step t, of shape (n_x, m) da_prev -- Gradient w.r.t. the previous hidden state, numpy array of shape (n_a, m) dc_prev -- Gradient w.r.t. the previous memory state, of shape (n_a, m, T_x) dWf -- Gradient w.r.t. the weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x) dWi -- Gradient w.r.t. the weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x) dWc -- Gradient w.r.t. the weight matrix of the memory gate, numpy array of shape (n_a, n_a + n_x) dWo -- Gradient w.r.t. the weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x) dbf -- Gradient w.r.t. biases of the forget gate, of shape (n_a, 1) dbi -- Gradient w.r.t. biases of the update gate, of shape (n_a, 1) dbc -- Gradient w.r.t. biases of the memory gate, of shape (n_a, 1) dbo -- Gradient w.r.t. biases of the output gate, of shape (n_a, 1) """ # Retrieve information from "cache" (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters) = cache ### START CODE HERE ### # Retrieve dimensions from xt's and a_next's shape (≈2 lines) n_x, m = None n_a, m = None # Compute gates related derivatives, you can find their values can be found by looking carefully at equations (7) to (10) (≈4 lines) dot = None dcct = None dit = None dft = None # Compute parameters related derivatives. Use equations (11)-(18) (≈8 lines) dWf = None dWi = None dWc = None dWo = None dbf = None dbi = None dbc = None dbo = None # Compute derivatives w.r.t previous hidden state, previous memory state and input. Use equations (19)-(21). 
(≈3 lines) da_prev = None dc_prev = None dxt = None ### END CODE HERE ### # Save gradients in dictionary gradients = {"dxt": dxt, "da_prev": da_prev, "dc_prev": dc_prev, "dWf": dWf,"dbf": dbf, "dWi": dWi,"dbi": dbi, "dWc": dWc,"dbc": dbc, "dWo": dWo,"dbo": dbo} return gradients np.random.seed(1) xt_tmp = np.random.randn(3,10) a_prev_tmp = np.random.randn(5,10) c_prev_tmp = np.random.randn(5,10) parameters_tmp = {} parameters_tmp['Wf'] = np.random.randn(5, 5+3) parameters_tmp['bf'] = np.random.randn(5,1) parameters_tmp['Wi'] = np.random.randn(5, 5+3) parameters_tmp['bi'] = np.random.randn(5,1) parameters_tmp['Wo'] = np.random.randn(5, 5+3) parameters_tmp['bo'] = np.random.randn(5,1) parameters_tmp['Wc'] = np.random.randn(5, 5+3) parameters_tmp['bc'] = np.random.randn(5,1) parameters_tmp['Wy'] = np.random.randn(2,5) parameters_tmp['by'] = np.random.randn(2,1) a_next_tmp, c_next_tmp, yt_tmp, cache_tmp = lstm_cell_forward(xt_tmp, a_prev_tmp, c_prev_tmp, parameters_tmp) da_next_tmp = np.random.randn(5,10) dc_next_tmp = np.random.randn(5,10) gradients_tmp = lstm_cell_backward(da_next_tmp, dc_next_tmp, cache_tmp) print("gradients[\"dxt\"][1][2] =", gradients_tmp["dxt"][1][2]) print("gradients[\"dxt\"].shape =", gradients_tmp["dxt"].shape) print("gradients[\"da_prev\"][2][3] =", gradients_tmp["da_prev"][2][3]) print("gradients[\"da_prev\"].shape =", gradients_tmp["da_prev"].shape) print("gradients[\"dc_prev\"][2][3] =", gradients_tmp["dc_prev"][2][3]) print("gradients[\"dc_prev\"].shape =", gradients_tmp["dc_prev"].shape) print("gradients[\"dWf\"][3][1] =", gradients_tmp["dWf"][3][1]) print("gradients[\"dWf\"].shape =", gradients_tmp["dWf"].shape) print("gradients[\"dWi\"][1][2] =", gradients_tmp["dWi"][1][2]) print("gradients[\"dWi\"].shape =", gradients_tmp["dWi"].shape) print("gradients[\"dWc\"][3][1] =", gradients_tmp["dWc"][3][1]) print("gradients[\"dWc\"].shape =", gradients_tmp["dWc"].shape) print("gradients[\"dWo\"][1][2] =", gradients_tmp["dWo"][1][2]) print("gradients[\"dWo\"].shape =", gradients_tmp["dWo"].shape) print("gradients[\"dbf\"][4] =", gradients_tmp["dbf"][4]) print("gradients[\"dbf\"].shape =", gradients_tmp["dbf"].shape) print("gradients[\"dbi\"][4] =", gradients_tmp["dbi"][4]) print("gradients[\"dbi\"].shape =", gradients_tmp["dbi"].shape) print("gradients[\"dbc\"][4] =", gradients_tmp["dbc"][4]) print("gradients[\"dbc\"].shape =", gradients_tmp["dbc"].shape) print("gradients[\"dbo\"][4] =", gradients_tmp["dbo"][4]) print("gradients[\"dbo\"].shape =", gradients_tmp["dbo"].shape) ###Output _____no_output_____ ###Markdown **Expected Output**: **gradients["dxt"][1][2]** = 3.23055911511 **gradients["dxt"].shape** = (3, 10) **gradients["da_prev"][2][3]** = -0.0639621419711 **gradients["da_prev"].shape** = (5, 10) **gradients["dc_prev"][2][3]** = 0.797522038797 **gradients["dc_prev"].shape** = (5, 10) **gradients["dWf"][3][1]** = -0.147954838164 **gradients["dWf"].shape** = (5, 8) **gradients["dWi"][1][2]** = 1.05749805523 **gradients["dWi"].shape** = (5, 8) **gradients["dWc"][3][1]** = 2.30456216369 **gradients["dWc"].shape** = (5, 8) **gradients["dWo"][1][2]** = 0.331311595289 **gradients["dWo"].shape** = (5, 8) **gradients["dbf"][4]** = [ 0.18864637] **gradients["dbf"].shape** = (5, 1) **gradients["dbi"][4]** = [-0.40142491] **gradients["dbi"].shape** = (5, 1) **gradients["dbc"][4]** = [ 0.25587763] **gradients["dbc"].shape** = (5, 1) **gradients["dbo"][4]** = [ 0.13893342] **gradients["dbo"].shape** = (5, 1) 3.3 Backward pass through the LSTM RNNThis part is very similar to 
the `rnn_backward` function you implemented above. You will first create variables of the same dimension as your return variables. You will then iterate over all the time steps starting from the end and call the one step function you implemented for LSTM at each iteration. You will then update the parameters by summing them individually. Finally return a dictionary with the new gradients. **Instructions**: Implement the `lstm_backward` function. Create a for loop starting from $T_x$ and going backward. For each step call `lstm_cell_backward` and update the your old gradients by adding the new gradients to them. Note that `dxt` is not updated but is stored. ###Code def lstm_backward(da, caches): """ Implement the backward pass for the RNN with LSTM-cell (over a whole sequence). Arguments: da -- Gradients w.r.t the hidden states, numpy-array of shape (n_a, m, T_x) caches -- cache storing information from the forward pass (lstm_forward) Returns: gradients -- python dictionary containing: dx -- Gradient of inputs, of shape (n_x, m, T_x) da0 -- Gradient w.r.t. the previous hidden state, numpy array of shape (n_a, m) dWf -- Gradient w.r.t. the weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x) dWi -- Gradient w.r.t. the weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x) dWc -- Gradient w.r.t. the weight matrix of the memory gate, numpy array of shape (n_a, n_a + n_x) dWo -- Gradient w.r.t. the weight matrix of the save gate, numpy array of shape (n_a, n_a + n_x) dbf -- Gradient w.r.t. biases of the forget gate, of shape (n_a, 1) dbi -- Gradient w.r.t. biases of the update gate, of shape (n_a, 1) dbc -- Gradient w.r.t. biases of the memory gate, of shape (n_a, 1) dbo -- Gradient w.r.t. biases of the save gate, of shape (n_a, 1) """ # Retrieve values from the first cache (t=1) of caches. (caches, x) = caches (a1, c1, a0, c0, f1, i1, cc1, o1, x1, parameters) = caches[0] ### START CODE HERE ### # Retrieve dimensions from da's and x1's shapes (≈2 lines) n_a, m, T_x = None n_x, m = None # initialize the gradients with the right sizes (≈12 lines) dx = None da0 = None da_prevt = None dc_prevt = None dWf = None dWi = None dWc = None dWo = None dbf = None dbi = None dbc = None dbo = None # loop back over the whole sequence for t in reversed(range(None)): # Compute all gradients using lstm_cell_backward gradients = None # Store or add the gradient to the parameters' previous step's gradient da_prevt = None dc_prevt = None dx[:,:,t] = None dWf += None dWi += None dWc += None dWo += None dbf += None dbi += None dbc += None dbo += None # Set the first activation's gradient to the backpropagated gradient da_prev. 
da0 = None ### END CODE HERE ### # Store the gradients in a python dictionary gradients = {"dx": dx, "da0": da0, "dWf": dWf,"dbf": dbf, "dWi": dWi,"dbi": dbi, "dWc": dWc,"dbc": dbc, "dWo": dWo,"dbo": dbo} return gradients np.random.seed(1) x_tmp = np.random.randn(3,10,7) a0_tmp = np.random.randn(5,10) parameters_tmp = {} parameters_tmp['Wf'] = np.random.randn(5, 5+3) parameters_tmp['bf'] = np.random.randn(5,1) parameters_tmp['Wi'] = np.random.randn(5, 5+3) parameters_tmp['bi'] = np.random.randn(5,1) parameters_tmp['Wo'] = np.random.randn(5, 5+3) parameters_tmp['bo'] = np.random.randn(5,1) parameters_tmp['Wc'] = np.random.randn(5, 5+3) parameters_tmp['bc'] = np.random.randn(5,1) parameters_tmp['Wy'] = np.zeros((2,5)) # unused, but needed for lstm_forward parameters_tmp['by'] = np.zeros((2,1)) # unused, but needed for lstm_forward a_tmp, y_tmp, c_tmp, caches_tmp = lstm_forward(x_tmp, a0_tmp, parameters_tmp) da_tmp = np.random.randn(5, 10, 4) gradients_tmp = lstm_backward(da_tmp, caches_tmp) print("gradients[\"dx\"][1][2] =", gradients_tmp["dx"][1][2]) print("gradients[\"dx\"].shape =", gradients_tmp["dx"].shape) print("gradients[\"da0\"][2][3] =", gradients_tmp["da0"][2][3]) print("gradients[\"da0\"].shape =", gradients_tmp["da0"].shape) print("gradients[\"dWf\"][3][1] =", gradients_tmp["dWf"][3][1]) print("gradients[\"dWf\"].shape =", gradients_tmp["dWf"].shape) print("gradients[\"dWi\"][1][2] =", gradients_tmp["dWi"][1][2]) print("gradients[\"dWi\"].shape =", gradients_tmp["dWi"].shape) print("gradients[\"dWc\"][3][1] =", gradients_tmp["dWc"][3][1]) print("gradients[\"dWc\"].shape =", gradients_tmp["dWc"].shape) print("gradients[\"dWo\"][1][2] =", gradients_tmp["dWo"][1][2]) print("gradients[\"dWo\"].shape =", gradients_tmp["dWo"].shape) print("gradients[\"dbf\"][4] =", gradients_tmp["dbf"][4]) print("gradients[\"dbf\"].shape =", gradients_tmp["dbf"].shape) print("gradients[\"dbi\"][4] =", gradients_tmp["dbi"][4]) print("gradients[\"dbi\"].shape =", gradients_tmp["dbi"].shape) print("gradients[\"dbc\"][4] =", gradients_tmp["dbc"][4]) print("gradients[\"dbc\"].shape =", gradients_tmp["dbc"].shape) print("gradients[\"dbo\"][4] =", gradients_tmp["dbo"][4]) print("gradients[\"dbo\"].shape =", gradients_tmp["dbo"].shape) ###Output _____no_output_____
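###Markdown
 Reference sketch (added for clarity, not part of the graded exercise): the cell below is a minimal NumPy transcription of equations $(7)$–$(21)$, assuming the cache layout `(a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)` produced by `lstm_cell_forward` and the parameter keys `Wf`, `Wi`, `Wc`, `Wo` used in the test cells above. You can run it on the same random inputs to cross-check the values returned by your own `lstm_cell_backward`.
###Code
import numpy as np

def lstm_cell_backward_sketch(da_next, dc_next, cache):
    # Unpack the cache produced by lstm_cell_forward (layout assumed from the test cells above)
    (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters) = cache
    n_a, m = a_next.shape

    # Shared term: derivative of tanh(c_next), reused by several equations
    dtanh_c = 1 - np.tanh(c_next) ** 2

    # Gate derivatives, equations (7)-(10)
    dot  = da_next * np.tanh(c_next) * ot * (1 - ot)
    dcct = (dc_next * it + ot * dtanh_c * it * da_next) * (1 - cct ** 2)
    dit  = (dc_next * cct + ot * dtanh_c * cct * da_next) * it * (1 - it)
    dft  = (dc_next * c_prev + ot * dtanh_c * c_prev * da_next) * ft * (1 - ft)

    # Parameter derivatives, equations (11)-(18): products with the stacked [a_prev; x_t]
    concat = np.concatenate((a_prev, xt), axis=0)
    dWf = np.dot(dft, concat.T)
    dWi = np.dot(dit, concat.T)
    dWc = np.dot(dcct, concat.T)
    dWo = np.dot(dot, concat.T)
    dbf = np.sum(dft, axis=1, keepdims=True)
    dbi = np.sum(dit, axis=1, keepdims=True)
    dbc = np.sum(dcct, axis=1, keepdims=True)
    dbo = np.sum(dot, axis=1, keepdims=True)

    # Derivatives w.r.t. previous hidden state, previous memory state and input, equations (19)-(21)
    Wf, Wi, Wc, Wo = parameters["Wf"], parameters["Wi"], parameters["Wc"], parameters["Wo"]
    da_prev = (np.dot(Wf[:, :n_a].T, dft) + np.dot(Wi[:, :n_a].T, dit)
               + np.dot(Wc[:, :n_a].T, dcct) + np.dot(Wo[:, :n_a].T, dot))
    dc_prev = dc_next * ft + ot * dtanh_c * ft * da_next
    dxt = (np.dot(Wf[:, n_a:].T, dft) + np.dot(Wi[:, n_a:].T, dit)
           + np.dot(Wc[:, n_a:].T, dcct) + np.dot(Wo[:, n_a:].T, dot))

    return {"dxt": dxt, "da_prev": da_prev, "dc_prev": dc_prev,
            "dWf": dWf, "dbf": dbf, "dWi": dWi, "dbi": dbi,
            "dWc": dWc, "dbc": dbc, "dWo": dWo, "dbo": dbo}
###Output
 _____no_output_____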
notebooks/.ipynb_checkpoints/09-DroneDesign_Optimization_Visu3D-checkpoint.ipynb
###Markdown Sizing Code with Voilà + Visualization 3D Drone _Written by Marc Budinger, Aitor Ochotorena (INSA Toulouse) and Scott Delbecq (ISAE Supaero)_ This section presents the optimal preliminary design of a multirotor drone. The application of the surrogate models and scaling laws in a sizing code enable to estimate the key parameters and characteristics of the drone components. A Notebook with an interactive web design is to be created through the use of [Voilà](https://blog.jupyter.org/and-voil%C3%A0-f6a2c08a4a93) and their widgets. Once the optimization is completed, a parametric design of the resulting drone is included with the [OpenJSCAD](https://en.wikibooks.org/wiki/OpenJSCAD_User_Guide) tool. ###Code # necessary packages for the optimization import scipy import scipy.optimize from math import pi from math import sqrt import math import timeit import time import numpy as np # necessary packages for the web layout in voilà import ipywidgets as widgets from ipywidgets import interactive from IPython.display import display import pandas as pd # necessary packages for the 3D Visualization import os from IPython.display import IFrame import bs4 from urllib.request import urlopen import github ###Output _____no_output_____ ###Markdown Problem Definition Creation of a web interface based on the employment of text fields and toggle buttons to input the specifications of the system. ###Code #1st column data definition style = {'description_width': '240pt'} layout = {'width': '500pt'} label_layout = widgets.Layout(width='50px') specs = widgets.HTML(value = f"<b><font color='red'>{'Specifications:'}</b>") M_pay = widgets.FloatSlider( value=4., min=1, max=100.0, step=.1, description='Load mass [kg]:', readout_format='.1f', style=style, layout=layout ) M_load = widgets.FloatText(description="", continuous_update=False,layout=label_layout) widgets.link((M_pay, 'value'), (M_load, 'value')) M_load_w=widgets.HBox([M_pay,M_load]) t_hf = widgets.FloatSlider( value=18, min=1, max=100.0, step=1, description='Hover flight time [min]', readout_format='.0f', style=style, layout=layout ) t_h = widgets.FloatText(description="", continuous_update=False,layout=label_layout) widgets.link((t_hf, 'value'), (t_h, 'value')) t_h_w=widgets.HBox([t_hf,t_h]) MTOW_slider = widgets.FloatSlider( value=10, min=1, max=400.0, step=1, description='Maximum take-off weight [kg]', readout_format='.0f', style=style, layout=layout ) MTOW = widgets.FloatText(description="", continuous_update=False,layout=label_layout) widgets.link((MTOW_slider, 'value'), (MTOW, 'value')) MTOW_w=widgets.HBox([MTOW_slider,MTOW]) k_maxthrust_slider = widgets.FloatSlider( value=3, min=1.1, max=4, step=.1, description='Ratio max thrust-hover [-]', readout_format='.1f', style=style, layout=layout ) k_maxthrust = widgets.FloatText(description="", continuous_update=False,layout=label_layout) widgets.link((k_maxthrust_slider, 'value'), (k_maxthrust, 'value')) k_maxthrust_w=widgets.HBox([k_maxthrust_slider,k_maxthrust]) archit = widgets.HTML(value = f"<b><font color='red'>{'Architecture'}</b>") Narm_slider = widgets.FloatSlider( value=8, min=3, max=12, step=1, description='Number of arms [-]', readout_format='.0f', style=style, layout=layout ) Narm = widgets.FloatText(description="", continuous_update=False,layout=label_layout) widgets.link((Narm_slider, 'value'), (Narm, 'value')) Narm_w=widgets.HBox([Narm_slider,Narm]) Np_arm=widgets.ToggleButtons(options=['Single rotor setup', 'Coaxial setup'], description='Number of propellers per arm:',style=style, 
layout=layout, disabled=False) Mod=widgets.ToggleButtons(options=['Direct Drive', 'Gear Drive'], description='Motor configuration:', tooltips=['No speed reductor', 'Motor with reduction'],style=style, layout=layout) vertical = widgets.HTML(value = f"<b><font color='red'>{'Vertical climb'}</b>") V_cl_slider = widgets.FloatSlider( value=8, min=1, max=10, step=1, description='Rate of climb [m/s]', readout_format='.0f', style=style, layout=layout ) V_cl = widgets.FloatText(description="", continuous_update=False,layout=label_layout) widgets.link((V_cl_slider, 'value'), (V_cl, 'value')) V_cl_w = widgets.HBox([V_cl_slider,V_cl]) A_top_slider = widgets.FloatSlider( value=0.09, min=0.01, max=1, step=0.01, description='Top surface [m^2]', readout_format='.2f', style=style, layout=layout ) A_top = widgets.FloatText(description="", continuous_update=False,layout=label_layout) widgets.link((A_top_slider, 'value'), (A_top, 'value')) A_top_w = widgets.HBox([A_top_slider,A_top]) objectif =widgets.HTML(value = f"<b><font color='red'>{'Objective'}</b>") Obj=widgets.ToggleButtons(options=['Minimize mass', 'Maximize time'], description='Objective:',style=style, layout=layout, disabled=False) Narm_slider = widgets.FloatSlider( value=8, min=3, max=12, step=1, description='Number of arms [-]', readout_format='.0f', style=style, layout=layout ) Narm = widgets.FloatText(description="", continuous_update=False,layout=label_layout) widgets.link((Narm_slider, 'value'), (Narm, 'value')) Narm_w=widgets.HBox([Narm_slider,Narm]) Np_arm=widgets.ToggleButtons(options=['Single rotor setup', 'Coaxial setup'], description='Number of propellers per arm:',style=style, layout=layout, disabled=False) Mod=widgets.ToggleButtons(options=['Direct Drive', 'Gear Drive'], description='Motor configuration:', #disabled=False, button_style='', # 'success', 'info', 'warning', 'danger' or '' tooltips=['No speed reductor', 'Motor with reduction', 'Description of fast'], style=style, layout=layout ) vertical = widgets.HTML(value = f"<b><font color='red'>{'Vertical climb'}</b>") C_D_slider = widgets.FloatSlider( value=1.18, min=1, max=3, step=0.1, description='Drag coefficient [-]', readout_format='.1f', style=style, layout=layout ) C_D = widgets.FloatText(description="", continuous_update=False,layout=label_layout) widgets.link((C_D_slider, 'value'), (C_D, 'value')) C_D_w = widgets.HBox([C_D_slider,C_D]) prop = widgets.HTML(value = f"<b><font color='red'>{'Propeller characteristics'}</b>") NDmax_slider = widgets.FloatSlider( value=105000/60*0.0254, min=0.1, max=100, step=1, description='Max Rotational Speed [Hz*m]', readout_format='.2f', style=style, layout=layout ) NDmax = widgets.FloatText(description="", readout_format='.2f', continuous_update=False,layout=label_layout) widgets.link((NDmax_slider, 'value'), (NDmax, 'value')) NDmax_w = widgets.HBox([NDmax_slider,NDmax]) objectif =widgets.HTML(value = f"<b><font color='red'>{'Objective and algorithm '}</b>") Obj=widgets.ToggleButtons(options=['Minimize mass', 'Maximize time'], description='Objective:', disabled=False, button_style='', # 'success', 'info', 'warning', 'danger' or '' #tooltips=['No speed reductor', 'Motor with reduction', 'Description of fast'], # icons=['check'] * 3 style=style, layout=layout ) Algo=widgets.ToggleButtons(options=['SLSQP', 'Differential evolution'], description='Resolution algorithm:', value='Differential evolution', disabled=False, style=style, layout=layout ) 
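# Assemble the specification widgets defined above into a single vertical layout.
# Each HBox pairs a FloatSlider with a FloatText kept in sync through widgets.link,
# so a value can be set either by dragging the slider or by typing it directly.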
w1=widgets.VBox([specs,M_load_w,t_h_w,MTOW_w,k_maxthrust_w,archit,Narm_w,Np_arm,Mod,vertical,V_cl_w,C_D_w,A_top_w,prop,NDmax_w,objectif,Obj,Algo]) # Specifications display(w1) ###Output _____no_output_____ ###Markdown Sizing code The set of equations of a sizing code can generate typical issues such : - Underconstrained set of equations: the lacking equations can come from additional scenarios, estimation models or additional sizing variable. - overconstrained equations often due to the selection of a component on multiple critera: the adding of over-sizing coefficients and constraints in the optimization problem can generally fix this issue - algebraic loops often due to selection criteria requiring informations generally available after the selection Concerning overconstraints components, we have here:- Brushless motors with multiple torque and voltage constraints (hover and transient vertical displacement) Multiple algebraic loops appears in the sizing problem:- The thrust depends of the total mass which depend of components required for generating this thrustThe final optimization problem depends thus of these parameters:- $\beta_{pro}=pitch/diameter$ ratio to define the propeller- $k_{os}$ over sizing coefficient on the load mass to estimate the final total mass- $k_{mot}$ over sizing coeffcient on the motor torque to estimate the max torque with the hover flight conditions- $k_{speed,mot}$ over sizing coeffcient on the motor speed to take into account voltage limits during hover or take-off flight- $k_{ND}$ slow down propeller coef : ND = kNDmax / k_ND- $k_{D}$ aspect ratio e_arm/D_out_arm (thickness/diameter) for the beam of the frame- $k_{mb}$ ratio battery mass / payload mass- $k_{vb}$ over sizing coefficient for the battery voltage More details in the setting up of sizing code can be found in the [following paper](https://www.researchgate.net/profile/Marc_Budinger/publication/277933677_Computer-aided_definition_of_sizing_procedures_and_optimization_problems_of_mechatronic_systems/links/55969de508ae793d137c7ea5/Computer-aided-definition-of-sizing-procedures-and-optimization-problems-of-mechatronic-systems.pdf): > Reysset, A., Budinger, M., & Maré, J. C. (2015). Computer-aided definition of sizing procedures and optimization problems of mechatronic systems. 
Concurrent Engineering, 23(4), 320-332.The sizing code is defined here in a function which can give:- an evaluation of the objective: here the total mass- an evaluation of the constraints: Here is an non-exhaustive XDSM diagram of the multirotor sizing code:![XDSM](img/xdsm_multirotor_optimization_ALGO3.png) ###Code #### 3.- Sizing Code # ----------------------- # sizing code # ----------------------- # inputs: # - param: optimisation variables vector (reduction ratio, oversizing coefficient) # - arg: selection of output # output: # - objective if arg='Obj', problem characteristics if arg='Prt', constraints other else def SizingCode(param, arg): # Design variables # --- k_M=param[0] # over sizing coefficient on the load mass k_mot=param[1] # over sizing coefficient on the motor torque k_speed_mot=param[2] # over sizing coefficient on the motor speed k_vb=param[3] # over sizing coefficient for the battery voltage k_ND=param[4] # slow down propeller coef : ND = kNDmax / k_ND D_ratio=param[5] # aspect ratio e/c (thickness/side) for the beam of the frame k_Mb=param[6] # over sizing coefficient on the battery load mass beta=param[7] # pitch/diameter ratio of the propeller J=param[8] # advance ratio k_ESC=param[9] # over sizing coefficient on the ESC power if Mod.value=='Gear Drive': Nred=param[10] # Reduction Ratio [-] # Hover, Climbing & Take-Off thrust # --- rho_air=1.18 #[kg/m^3] air density if Np_arm.value=='Single rotor setup': Npro_arm=1 Npro=Npro_arm*(Narm.value) # [-] Propellers number else: Npro_arm=2 Npro=Npro_arm*(Narm.value) # [-] Propellers number Mtotal=k_M*(M_load.value) # [kg] Estimation of the total mass (or equivalent weight of dynamic scenario) F_pro_hov=Mtotal*(9.81)/Npro # [N] Thrust per propeller for hover F_pro_to=F_pro_hov*(k_maxthrust.value) # [N] Max Thrust per propeller F_pro_cl=(Mtotal*9.81+0.5*rho_air*(C_D.value)*(A_top.value)*(V_cl.value)**2)/Npro # [N] Thrust per propeller for climbing # Propeller characteristicss # Ref : APC static C_t_sta=4.27e-02 + 1.44e-01 * beta # Thrust coef with T=C_T.rho.n^2.D^4 C_p_sta=-1.48e-03 + 9.72e-02 * beta # Power coef with P=C_p.rho.n^3.D^5 Dpro_ref=11*.0254 # [m] diameter Mpro_ref=0.53*0.0283 # [kg] mass # Ref: APC dynamics C_t_dyn=0.02791-0.06543*J+0.11867*beta+0.27334*beta**2-0.28852*beta**3+0.02104*J**3-0.23504*J**2+0.18677*beta*J**2 # thrust coef for APC props in dynamics C_p_dyn=0.01813-0.06218*beta+0.00343*J+0.35712*beta**2-0.23774*beta**3+0.07549*beta*J-0.1235*J**2 # power coef for APC props in dynamics #Choice of diameter and rotational speed from a maximum thrust Dpro=(F_pro_to/(C_t_sta*rho_air*((NDmax.value)*k_ND)**2))**0.5 # [m] Propeller diameter pitch=Dpro*beta #[m] pitch diameter n_pro_to=(NDmax.value)*k_ND/Dpro # [Hz] Propeller speed n_pro_cl=sqrt(F_pro_cl/(C_t_dyn*rho_air*Dpro**4)) # [Hz] climbing speed # Propeller selection with take-off scenario Wpro_to=n_pro_to*2*3.14 # [rad/s] Propeller speed Mpro=Mpro_ref*(Dpro/Dpro_ref)**3 # [kg] Propeller mass Ppro_to=C_p_sta*rho_air*n_pro_to**3*Dpro**5# [W] Power per propeller Qpro_to=Ppro_to/Wpro_to # [N.m] Propeller torque # Propeller torque& speed for hover n_pro_hover=sqrt(F_pro_hov/(C_t_sta*rho_air*Dpro**4)) # [Hz] hover speed Wpro_hover=n_pro_hover*2*3.14 # [rad/s] Propeller speed Ppro_hover=C_p_sta*rho_air*n_pro_hover**3*Dpro**5# [W] Power per propeller Qpro_hover=Ppro_hover/Wpro_hover # [N.m] Propeller torque V_bat_est=k_vb*1.84*(Ppro_to)**(0.36) # [V] battery voltage estimation #Propeller torque &speed for climbing Wpro_cl=n_pro_cl*2*3.14 # [rad/s] Propeller speed 
for climbing Ppro_cl=C_p_dyn*rho_air*n_pro_cl**3*Dpro**5# [W] Power per propeller for climbing Qpro_cl=Ppro_cl/Wpro_cl # [N.m] Propeller torque for climbing # Motor selection & scaling laws # --- # Motor reference sized from max thrust # Ref : AXI 5325/16 GOLD LINE Tmot_ref=2.32 # [N.m] rated torque Tmot_max_ref=85/70*Tmot_ref # [N.m] max torque Rmot_ref=0.03 # [Ohm] resistance Mmot_ref=0.575 # [kg] mass Ktmot_ref=0.03 # [N.m/A] torque coefficient Tfmot_ref=0.03 # [N.m] friction torque (zero load, nominal speed) Lmot_ref=0.088# [m] motor length ref #Motor speeds: if Mod.value=='Gear Drive': W_hover_motor=Wpro_hover*Nred # [rad/s] Nominal motor speed with reduction W_cl_motor=Wpro_cl*Nred # [rad/s] Motor Climb speed with reduction W_to_motor=Wpro_to*Nred # [rad/s] Motor take-off speed with reduction else: W_hover_motor=Wpro_hover # [rad/s] Nominal motor speed W_cl_motor=Wpro_cl # [rad/s] Motor Climb speed W_to_motor=Wpro_to # [rad/s] Motor take-off speed #Motor torque: if Mod.value=='Gear Drive': Tmot_hover=Qpro_hover/Nred # [N.m] motor nominal torque with reduction Tmot_to=Qpro_to/Nred # [N.m] motor take-off torque with reduction Tmot_cl=Qpro_cl/Nred # [N.m] motor climbing torque with reduction else: Tmot_hover=Qpro_hover# [N.m] motor take-off torque Tmot_to=Qpro_to # [N.m] motor take-off torque Tmot_cl=Qpro_cl # [N.m] motor climbing torque Tmot=k_mot*Tmot_hover# [N.m] required motor nominal torque for reductor Tmot_max=Tmot_max_ref*(Tmot/Tmot_ref)**(1) # [N.m] max torque Mmot=Mmot_ref*(Tmot/Tmot_ref)**(3/3.5) # [kg] Motor mass Lmot=Lmot_ref*(Mmot/Mmot_ref)**(1/3)# [m] Motor length # Selection with take-off speed Ktmot=V_bat_est/(k_speed_mot*W_to_motor) # [N.m/A] or [V/(rad/s)] Kt motor (RI term is missing) Rmot=Rmot_ref*(Tmot/Tmot_ref)**(-5/3.5)*(Ktmot/Ktmot_ref)**(2) # [Ohm] motor resistance Tfmot=Tfmot_ref*(Tmot/Tmot_ref)**(3/3.5) # [N.m] Friction torque # Hover current and voltage Imot_hover = (Tmot_hover+Tfmot)/Ktmot # [I] Current of the motor per propeller Umot_hover = Rmot*Imot_hover + W_hover_motor*Ktmot # [V] Voltage of the motor per propeller P_el_hover = Umot_hover*Imot_hover # [W] Hover : output electrical power # Take-Off current and voltage Imot_to = (Tmot_to+Tfmot)/Ktmot # [I] Current of the motor per propeller Umot_to = Rmot*Imot_to + W_to_motor*Ktmot # [V] Voltage of the motor per propeller P_el_to = Umot_to*Imot_to # [W] Takeoff : output electrical power # Climbing current and voltage Imot_cl = (Tmot_cl+Tfmot)/Ktmot # [I] Current of the motor per propeller for climbing Umot_cl = Rmot*Imot_cl + W_cl_motor*Ktmot # [V] Voltage of the motor per propeller for climbing P_el_cl = Umot_cl*Imot_cl # [W] Power : output electrical power for climbing #Gear box model if Mod.value=='Gear Drive': mg1=0.0309*Nred**2+0.1944*Nred+0.6389 # Ratio input pinion to mating gear WF=1+1/mg1+mg1+mg1**2+Nred**2/mg1+Nred**2 # Weight Factor (ƩFd2/C) [-] k_sd=1000 # Surface durability factor [lb/in] C=2*8.85*Tmot_hover/k_sd # Coefficient (C=2T/K) [in3] Fd2=WF*C # Solid rotor volume [in3] Mgear=Fd2*0.3*0.4535 # Mass reducer [kg] (0.3 is a coefficient evaluated for aircraft application and 0.4535 to pass from lb to kg) Fdp2=C*(Nred+1)/Nred # Solid rotor pinion volume [in3] dp=(Fdp2/0.7)**(1/3)*0.0254 # Pinion diameter [m] (0.0254 to pass from in to m) dg=Nred*dp # Gear diameter [m] di=mg1*dp # Inler diameter [m] # Battery selection & scaling laws sized from hover # --- # Battery # Ref : Prolitex TP3400-4SPX25 Mbat_ref=.329 # [kg] mass Cbat_ref= 3.400*3600#[A.s] Vbat_ref=4*3.7#[V] Imax_ref=170#[A] 
Volbat_ref=150.930#[cm^3] Lbat_ref=0.135 #[m] Ncel=V_bat_est/3.7# [-] Cell number, round (up value) V_bat=3.7*Ncel # [V] Battery voltage Mbat=k_Mb*(M_load.value) # Battery mass # Hover --> autonomy C_bat = Mbat/Mbat_ref*Cbat_ref/V_bat*Vbat_ref # [A.s] Capacity of the battery Vol_bat=Volbat_ref*(C_bat*V_bat/(Cbat_ref*Vbat_ref))# [cm^3] Volume of the battery I_bat = (P_el_hover*Npro)/.95/V_bat # [I] Current of the battery t_hf = .8*C_bat/I_bat/60 # [min] Hover time Imax=Imax_ref*C_bat/Cbat_ref # [A] max current battery # ESC sized from max speed # Ref : Turnigy K_Force 70HV Pesc_ref=3108 # [W] Power Vesc_ref=44.4 #[V]Voltage Mesc_ref=.115 # [kg] Mass P_esc=k_ESC*(P_el_to*V_bat/Umot_to) # [W] power electronic power max thrust P_esc_cl=P_el_cl*V_bat/Umot_cl # [W] power electronic power max climb Mesc = Mesc_ref*(P_esc/Pesc_ref) # [kg] Mass ESC Vesc = Vesc_ref*(P_esc/Pesc_ref)**(1/3)# [V] ESC voltage # Frame sized from max thrust # --- Mfra_ref=.347 #[kg] MK7 frame Marm_ref=0.14#[kg] Mass of all arms # Length calculation # sep= 2*pi/Narm #[rad] interior angle separation between propellers Lbra=Dpro/2/(math.sin(pi/(Narm.value))) #[m] length of the arm # Static stress # Sigma_max=200e6/4 # [Pa] Alu max stress (2 reduction for dynamic, 2 reduction for stress concentration) Sigma_max=280e6/4 # [Pa] Composite max stress (2 reduction for dynamic, 2 reduction for stress concentration) # Tube diameter & thickness Dout=(F_pro_to*Lbra*32/(pi*Sigma_max*(1-D_ratio**4)))**(1/3) # [m] outer diameter of the beam D_ratio # [m] inner diameter of the beam # Mass Marm=pi/4*(Dout**2-(D_ratio*Dout)**2)*Lbra*1700*(Narm.value) # [kg] mass of the arms Mfra=Mfra_ref*(Marm/Marm_ref)# [kg] mass of the frame # Thrust Bearing reference # Ref : SKF 31309/DF Life=5000 # Life time [h] k_bear=1 Cd_bear_ref=2700 # Dynamic reference Load [N] C0_bear_ref=1500 # Static reference load[N] Db_ref=0.032 # Exterior reference diameter [m] Lb_ref=0.007 # Reference lenght [m] db_ref=0.020 # Interior reference diametere [m] Mbear_ref=0.018 # Reference mass [kg] # Thrust bearing model""" L10=(60*(Wpro_hover*60/2/3.14)*(Life/10**6)) # Nominal endurance [Hours of working] Cd_ap=(2*F_pro_hov*L10**(1/3))/2 # Applied load on bearing [N] Fmax=2*4*F_pro_to/2 C0_bear=k_bear*Fmax # Static load [N] Cd_bear=Cd_bear_ref/C0_bear_ref**(1.85/2)*C0_bear**(1.85/2) # Dynamic Load [N] Db=Db_ref/C0_bear_ref**0.5*C0_bear**0.5 # Bearing exterior Diameter [m] db=db_ref/C0_bear_ref**0.5*C0_bear**0.5 # Bearing interior Diameter [m] Lb=Lb_ref/C0_bear_ref**0.5*C0_bear**0.5 # Bearing lenght [m] Mbear=Mbear_ref/C0_bear_ref**1.5*C0_bear**1.5 # Bearing mass [kg] # Objective and Constraints sum up # --- if Mod.value=='Direct Drive': Mtotal_final = (Mesc+Mpro+Mmot+Mbear)*Npro+(M_load.value)+Mbat+Mfra+Marm #total mass without reducer else: Mtotal_final = (Mesc+Mpro+Mmot+Mgear+Mbear)*Npro+(M_load.value)+Mbat+Mfra+Marm #total mass with reducer if Obj.value=='Maximize time': constraints = [(Mtotal-Mtotal_final)/Mtotal_final, ((NDmax.value)-n_pro_cl*Dpro)/(NDmax.value), (Tmot_max-Tmot_to)/Tmot_max, (Tmot_max-Tmot_cl)/Tmot_max, (-J*n_pro_cl*Dpro+(V_cl.value)), 0.01+(J*n_pro_cl*Dpro-(V_cl.value)), (V_bat-Umot_to)/V_bat, (V_bat-Umot_cl)/V_bat, (V_bat-Vesc)/V_bat, (V_bat*Imax-Umot_to*Imot_to*Npro/0.95)/(V_bat*Imax), (V_bat*Imax-Umot_cl*Imot_cl*Npro/0.95)/(V_bat*Imax), (P_esc-P_esc_cl)/P_esc, ((MTOW.value)-Mtotal_final)/Mtotal_final ] else: constraints = [(Mtotal-Mtotal_final)/Mtotal_final, ((NDmax.value)-n_pro_cl*Dpro)/(NDmax.value), (Tmot_max-Tmot_to)/Tmot_max, 
(Tmot_max-Tmot_cl)/Tmot_max, (-J*n_pro_cl*Dpro+(V_cl.value)), 0.01+(J*n_pro_cl*Dpro-(V_cl.value)), (V_bat-Umot_to)/V_bat, (V_bat-Umot_cl)/V_bat, (V_bat-Vesc)/V_bat, (V_bat*Imax-Umot_to*Imot_to*Npro/0.95)/(V_bat*Imax), (V_bat*Imax-Umot_cl*Imot_cl*Npro/0.95)/(V_bat*Imax), (P_esc-P_esc_cl)/P_esc, (t_hf-(t_h.value))/t_hf, ] # Objective and contraints # Objective and contraints if arg=='Obj': if Obj.value=='Maximize time': return 1/t_hf # for time maximisation else: return Mtotal_final # for mass optimisation if arg=='ObjP': P=0 # Penalisation nulle if Obj.value=='Minimize mass': for C in constraints: if (C<0.): P=P-1e9*C return Mtotal_final+P # for mass optimisation else: for C in constraints: if (C<0.): P=P-1e9*C return 1/t_hf+P # for time optimisation elif arg=='Prt': col_names_opt = ['Type', 'Name', 'Min', 'Value', 'Max', 'Unit', 'Comment'] df_opt = pd.DataFrame() df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'k_M', 'Min': bounds[0][0], 'Value': k_M, 'Max': bounds[0][1], 'Unit': '[-]', 'Comment': 'over sizing coefficient on the load mass '}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'k_mot', 'Min': bounds[1][0], 'Value': k_mot, 'Max': bounds[1][1], 'Unit': '[-]', 'Comment': 'over sizing coefficient on the motor torque '}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'k_speed_mot', 'Min': bounds[2][0], 'Value': k_speed_mot, 'Max': bounds[2][1], 'Unit': '[-]', 'Comment': 'over sizing coefficient on the motor speed'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'k_vb', 'Min': bounds[3][0], 'Value': k_vb, 'Max': bounds[3][1], 'Unit': '[-]', 'Comment': 'over sizing coefficient for the battery voltage'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'k_ND', 'Min': bounds[4][0], 'Value': k_ND, 'Max': bounds[4][1], 'Unit': '[-]', 'Comment': 'Ratio ND/NDmax'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'D_ratio', 'Min': bounds[5][0], 'Value': D_ratio, 'Max': bounds[5][1], 'Unit': '[-]', 'Comment': 'aspect ratio e/c (thickness/side) for the beam of the frame'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'k_Mb', 'Min': bounds[6][0], 'Value': k_Mb, 'Max': bounds[6][1], 'Unit': '[-]', 'Comment': 'over sizing coefficient on the battery load mass '}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'beta_pro', 'Min': bounds[7][0], 'Value': beta, 'Max': bounds[7][1], 'Unit': '[-]', 'Comment': 'pitch/diameter ratio of the propeller'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'J', 'Min': bounds[8][0], 'Value': J, 'Max': bounds[8][1], 'Unit': '[-]', 'Comment': 'Advance ratio'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'k_ESC', 'Min': bounds[9][0], 'Value': k_ESC, 'Max': bounds[9][1], 'Unit': '[-]', 'Comment': 'over sizing coefficient on the ESC power'}])[col_names_opt] if Mod.value=='Gear Drive': df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'N_red', 'Min': bounds[10][0], 'Value': Nred, 'Max': bounds[10][1], 'Unit': '[-]', 'Comment': 'Reduction ratio'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Constraints', 'Name': 'Const 0', 'Min': 0, 'Value': constraints[0], 'Max': '-', 'Unit': '[-]', 'Comment': '(Mtotal-Mtotal_final)'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Constraints', 'Name': 'Const 1', 'Min': 0, 'Value': constraints[1], 'Max': '-', 'Unit': '[-]', 'Comment': 
'(NDmax-n_pro_cl*Dpro)/NDmax'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Constraints', 'Name': 'Const 2', 'Min': 0, 'Value': constraints[2], 'Max': '-', 'Unit': '[-]', 'Comment': '(Tmot_max-Tmot_to)/Tmot_max'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Constraints', 'Name': 'Const 3', 'Min': 0, 'Value': constraints[3], 'Max': '-', 'Unit': '[-]', 'Comment': '(Tmot_max-Tmot_cl)/Tmot_max'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Constraints', 'Name': 'Const 4', 'Min': 0, 'Value': constraints[4], 'Max': '-', 'Unit': '[-]', 'Comment': '(-J*n_pro_cl*Dpro+V_cl)'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Constraints', 'Name': 'Const 5', 'Min': 0, 'Value': constraints[5], 'Max': '-', 'Unit': '[-]', 'Comment': '0.01+(+J*n_pro_cl*Dpro-V_cl)'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Constraints', 'Name': 'Const 6', 'Min': 0, 'Value': constraints[6], 'Max': '-', 'Unit': '[-]', 'Comment': '(V_bat-Umot_to)/V_bat'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Constraints', 'Name': 'Const 7', 'Min': 0, 'Value': constraints[7], 'Max': '-', 'Unit': '[-]', 'Comment': '(V_bat-Umot_cl)/V_bat'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Constraints', 'Name': 'Const 8', 'Min': 0, 'Value': constraints[8], 'Max': '-', 'Unit': '[-]', 'Comment': '(V_bat-Vesc)/V_bat'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Constraints', 'Name': 'Const 9', 'Min': 0, 'Value': constraints[9], 'Max': '-', 'Unit': '[-]', 'Comment': '(V_bat*Imax-Umot_to*Imot_to*Npro/0.95)/(V_bat*Imax)'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Constraints', 'Name': 'Const 10', 'Min': 0, 'Value': constraints[10], 'Max': '-', 'Unit': '[-]', 'Comment': '(V_bat*Imax-Umot_cl*Imot_cl*Npro/0.95)/(V_bat*Imax)'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Constraints', 'Name': 'Const 11', 'Min': 0, 'Value': constraints[11], 'Max': '-', 'Unit': '[-]', 'Comment': '(P_esc-P_esc_cl)/P_esc'}])[col_names_opt] if Obj.value=='Minimize mass': df_opt = df_opt.append([{'Type': 'Constraints', 'Name': 'Const 12', 'Min': 0, 'Value': constraints[12], 'Max': '-', 'Unit': '[-]', 'Comment': '(t_hf-t_h)/t_hf'}])[col_names_opt] else: df_opt = df_opt.append([{'Type': 'Constraints', 'Name': 'Const 12', 'Min': 0, 'Value': constraints[12], 'Max': '-', 'Unit': '[-]', 'Comment': '(MTOW-Mtotal_final)/Mtotal_final'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Objective', 'Name': 'Objective', 'Min': 0, 'Value': Mtotal_final, 'Max': '-', 'Unit': '[kg]', 'Comment': 'Total mass'}])[col_names_opt] col_names = ['Type', 'Name', 'Value', 'Unit', 'Comment'] df = pd.DataFrame() df = df.append([{'Type': 'Propeller', 'Name': 'F_pro_to', 'Value': F_pro_to, 'Unit': '[N]', 'Comment': 'Thrust for 1 propeller during Take Off'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'F_pro_cl', 'Value': F_pro_cl, 'Unit': '[N]', 'Comment': 'Thrust for 1 propeller during Take Off'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'F_pro_hov', 'Value': F_pro_hov, 'Unit': '[N]', 'Comment': 'Thrust for 1 propeller during Hover'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'rho_air', 'Value': rho_air, 'Unit': '[kg/m^3]', 'Comment': 'Air density'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'ND_max', 'Value': (NDmax.value), 'Unit': '[Hz.m]', 'Comment': 'Max speed limit (N.D max)'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'Dpro_ref', 'Value': Dpro_ref, 'Unit': '[m]', 'Comment': 'Reference propeller diameter'}])[col_names] df = df.append([{'Type': 'Propeller', 
'Name': 'M_pro_ref', 'Value': Mpro_ref, 'Unit': '[kg]', 'Comment': 'Reference propeller mass'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'C_t_sta', 'Value': C_t_sta, 'Unit': '[-]', 'Comment': 'Static thrust coefficient of the propeller'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'C_t_dyn', 'Value': C_t_dyn, 'Unit': '[-]', 'Comment': 'Dynamic thrust coefficient of the propeller'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'C_p_sta', 'Value': C_p_sta, 'Unit': '[-]', 'Comment': 'Static power coefficient of the propeller'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'C_p_dyn', 'Value': C_p_dyn, 'Unit': '[-]', 'Comment': 'Dynamic power coefficient of the propeller'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'D_pro', 'Value': Dpro, 'Unit': '[m]', 'Comment': 'Diameter of the propeller'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'n_pro_cl', 'Value': n_pro_cl, 'Unit': '[Hz]', 'Comment': 'Rev speed of the propeller during climbing'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'n_pro_to', 'Value': n_pro_to, 'Unit': '[Hz]', 'Comment': 'Rev speed of the propeller during takeoff'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'n_pro_hov', 'Value': n_pro_hover, 'Unit': '[Hz]', 'Comment': 'Rev speed of the propeller during hover'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'P_pro_cl', 'Value': Ppro_cl, 'Unit': '[W]', 'Comment': 'Power on the mechanical shaft of the propeller during climbing'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'P_pro_to', 'Value': Ppro_to, 'Unit': '[W]', 'Comment': 'Power on the mechanical shaft of the propeller during takeoff'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'P_pro_hov', 'Value': Ppro_hover, 'Unit': '[W]', 'Comment': 'Power on the mechanical shaft of the propeller during hover'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'M_pro', 'Value': Mpro, 'Unit': '[kg]', 'Comment': 'Mass of the propeller'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'Omega_pro_cl', 'Value': Wpro_cl, 'Unit': '[rad/s]', 'Comment': 'Rev speed of the propeller during climbing'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'Omega_pro_to', 'Value': Wpro_to, 'Unit': '[rad/s]', 'Comment': 'Rev speed of the propeller during takeoff'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'Omega_pro_hov', 'Value': Wpro_hover, 'Unit': '[rad/s]', 'Comment': 'Rev speed of the propeller during hover'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'T_pro_hov', 'Value': Qpro_hover, 'Unit': '[N.m]', 'Comment': 'Torque on the mechanical shaft of the propeller during hover'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'T_pro_to', 'Value': Qpro_to, 'Unit': '[N.m]', 'Comment': 'Torque on the mechanical shaft of the propeller during takeoff'}])[col_names] df = df.append([{'Type': 'Propeller', 'Name': 'T_pro_cl', 'Value': Qpro_cl, 'Unit': '[N.m]', 'Comment': 'Torque on the mechanical shaft of the propeller during climbing'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'T_max_mot_ref', 'Value': Tmot_max_ref, 'Unit': '[N.m]', 'Comment': 'Max torque'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'R_mot_ref', 'Value': Rmot_ref, 'Unit': '[Ohm]', 'Comment': 'Resistance'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'M_mot_ref', 'Value': Mmot_ref, 'Unit': '[kg]', 'Comment': 'Reference motor mass'}])[col_names] df = df.append([{'Type': 'Motor', 
'Name': 'K_mot_ref', 'Value': Ktmot_ref, 'Unit': '[N.m/A]', 'Comment': 'Torque coefficient'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'T_mot_fr_ref', 'Value': Tfmot_ref, 'Unit': '[N.m]', 'Comment': 'Friction torque (zero load, nominal speed)'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'T_nom_mot', 'Value': Tmot_hover, 'Unit': '[N.m]', 'Comment': 'Continuous of the selected motor torque'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'T_mot_to', 'Value': Tmot_to, 'Unit': '[N.m]', 'Comment': 'Transient torque possible for takeoff'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'T_max_mot', 'Value': Tmot_max, 'Unit': '[N.m]', 'Comment': 'Transient torque possible for climbing'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'R_mot', 'Value': Rmot, 'Unit': '[Ohm]', 'Comment': 'Resistance'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'M_mot', 'Value': Mmot, 'Unit': '[kg]', 'Comment': 'Motor mass'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'K_mot', 'Value': Ktmot, 'Unit': '[N.m/A', 'Comment': 'Torque constant of the selected motor'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'T_mot_fr', 'Value': Tfmot, 'Unit': '[N.m]', 'Comment': 'Friction torque of the selected motor'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'I_mot_hov', 'Value': Imot_hover, 'Unit': '[A]', 'Comment': 'Motor current for hover'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'I_mot_to', 'Value': Imot_to, 'Unit': '[A]', 'Comment': 'Motor current for takeoff'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'I_mot_cl', 'Value': Imot_cl, 'Unit': '[A]', 'Comment': 'Motor current for climbing'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'U_mot_cl', 'Value': Umot_hover, 'Unit': '[V]', 'Comment': 'Motor voltage for climbing'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'U_mot_to', 'Value': Umot_to, 'Unit': '[V]', 'Comment': 'Motor voltage for takeoff'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'U_mot', 'Value': Umot_hover, 'Unit': '[V]', 'Comment': 'Nominal voltage '}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'P_el_mot_cl', 'Value': P_el_cl, 'Unit': '[W]', 'Comment': 'Motor electrical power for climbing'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'P_el_mot_to', 'Value': P_el_to, 'Unit': '[W]', 'Comment': 'Motor electrical power for takeoff'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'P_el_mot_hov', 'Value': P_el_hover, 'Unit': '[W]', 'Comment': 'Motor electrical power for hover'}])[col_names] df = df.append([{'Type': 'Motor', 'Name': 'Lmot', 'Value': Lmot, 'Unit': '[m]', 'Comment': 'Motor length'}])[col_names] df = df.append([{'Type': 'Battery & ESC', 'Name': 'M_bat_ref', 'Value': Mbat_ref, 'Unit': '[kg]', 'Comment': 'Mass of the reference battery '}])[col_names] df = df.append([{'Type': 'Battery & ESC', 'Name': 'M_esc_ref', 'Value': Mesc_ref, 'Unit': '[kg]', 'Comment': 'Reference ESC mass '}])[col_names] df = df.append([{'Type': 'Battery & ESC', 'Name': 'P_esc_ref', 'Value': Pesc_ref, 'Unit': '[W]', 'Comment': 'Reference ESC power '}])[col_names] df = df.append([{'Type': 'Battery & ESC', 'Name': 'N_s_bat', 'Value': np.ceil(Ncel), 'Unit': '[-]', 'Comment': 'Number of battery cells '}])[col_names] df = df.append([{'Type': 'Battery & ESC', 'Name': 'U_bat', 'Value': V_bat, 'Unit': '[V]', 'Comment': 'Battery voltage '}])[col_names] df = df.append([{'Type': 'Battery & ESC', 'Name': 'M_bat', 'Value': Mbat, 'Unit': '[kg]', 'Comment': 'Battery mass 
'}])[col_names] df = df.append([{'Type': 'Battery & ESC', 'Name': 'C_bat', 'Value': C_bat, 'Unit': '[A.s]', 'Comment': 'Battery capacity '}])[col_names] df = df.append([{'Type': 'Battery & ESC', 'Name': 'I_bat', 'Value': I_bat, 'Unit': '[A]', 'Comment': 'Battery current '}])[col_names] df = df.append([{'Type': 'Battery & ESC', 'Name': 't_hf', 'Value': t_hf, 'Unit': '[min]', 'Comment': 'Hovering time '}])[col_names] df = df.append([{'Type': 'Battery & ESC', 'Name': 'P_esc', 'Value': P_esc, 'Unit': '[W]', 'Comment': 'Power electronic power (corner power or apparent power) '}])[col_names] df = df.append([{'Type': 'Battery & ESC', 'Name': 'M_esc', 'Value': Mesc, 'Unit': '[kg]', 'Comment': 'ESC mass '}])[col_names] df = df.append([{'Type': 'Battery & ESC', 'Name': 'V_esc', 'Value': Vesc, 'Unit': '[V]', 'Comment': 'ESC voltage '}])[col_names] df = df.append([{'Type': 'Battery & ESC', 'Name': 'Vol_bat', 'Value': Vol_bat, 'Unit': '[cm^3]', 'Comment': 'Volume battery '}])[col_names] df = df.append([{'Type': 'Frame', 'Name': 'N_arm', 'Value': (Narm.value), 'Unit': '[-]', 'Comment': 'Number of arms '}])[col_names] df = df.append([{'Type': 'Frame', 'Name': 'sigma_max', 'Value': Sigma_max, 'Unit': '[Pa]', 'Comment': 'Max admisible stress'}])[col_names] df = df.append([{'Type': 'Frame', 'Name': 'L_arm', 'Value': Lbra, 'Unit': '[m]', 'Comment': 'Length of the arm'}])[col_names] df = df.append([{'Type': 'Frame', 'Name': 'D_out', 'Value': Dout, 'Unit': '[m]', 'Comment': 'Outer diameter of the arm (tube)'}])[col_names] df = df.append([{'Type': 'Frame', 'Name': 'Marm', 'Value': Marm, 'Unit': '[kg]', 'Comment': '1 Arm mass'}])[col_names] df = df.append([{'Type': 'Frame', 'Name': 'M_frame', 'Value': Mfra, 'Unit': '[kg]', 'Comment': 'Frame mass'}])[col_names] df = df.append([{'Type': 'Specifications', 'Name': 'M_load', 'Value': (M_load.value), 'Unit': '[kg]', 'Comment': 'Payload mass'}])[col_names] df = df.append([{'Type': 'Specifications', 'Name': 't_hf', 'Value': (t_h.value), 'Unit': '[min]', 'Comment': 'Hovering time '}])[col_names] df = df.append([{'Type': 'Specifications', 'Name': 'k_maxthrust', 'Value': (k_maxthrust.value), 'Unit': '[-]', 'Comment': 'Ratio max thrust'}])[col_names] df = df.append([{'Type': 'Specifications', 'Name': 'N_arm', 'Value': (Narm.value), 'Unit': '[-]', 'Comment': 'Number of arms '}])[col_names] df = df.append([{'Type': 'Specifications', 'Name': 'N_pro', 'Value': Npro, 'Unit': '[-]', 'Comment': 'Total number of propellers'}])[col_names] df = df.append([{'Type': 'Specifications', 'Name': 'V_cl', 'Value': (V_cl.value), 'Unit': '[m/s]', 'Comment': 'Climb speed'}])[col_names] df = df.append([{'Type': 'Specifications', 'Name': 'CD', 'Value': (C_D.value), 'Unit': '[-]', 'Comment': 'Drag coefficient'}])[col_names] df = df.append([{'Type': 'Specifications', 'Name': 'A_top', 'Value': (A_top.value), 'Unit': '[m^2]', 'Comment': 'Top surface'}])[col_names] df = df.append([{'Type': 'Specifications', 'Name': 'MTOW', 'Value': (MTOW.value), 'Unit': '[kg]', 'Comment': 'Max takeoff Weight'}])[col_names] items = sorted(df['Type'].unique().tolist())+['Optimization'] # print('\033[1m' + "\033[4mPress F2 to render the piece\033[0m") # load the file page = urlopen('https://aitorochotorena.github.io/multirotor-all/notebooks/visu.html').read() soup = bs4.BeautifulSoup(page, "lxml") # delete old code soup.body.div.div.clear() # new code piece = (''' function getParameterDefinitions() { return [ { name: 'batpars', type: 'group', caption: 'Battery params.' 
}, { name: 'Vol_bat', type: 'number', step:1, initial:'''+str(Vol_bat)+''', caption: 'Volume battery[cm^3]' }, { name: 'proppars', type: 'group', caption: 'Propeller params.' }, { name: 'Dpro', type: 'number', step:1, initial:'''+str(Dpro*100)+''', caption: 'Propeller diameter [cm]' }, { name: 'pitch', type: 'number', step:1, initial:'''+str(pitch*100)+''', caption: 'Pitch [cm]' }, { name: 'motpars', type: 'group', caption: 'Motor params.' }, { name: 'Lmot', type: 'number', step:1, initial:'''+str(Lmot*100)+''', caption: 'Motor length [cm]' }, { name: 'config', type: 'group', caption: 'Configuration' }, { name: 'Npro', type: 'number', step:1, initial:'''+str(Npro)+''', caption: 'Number of propeller [-]' }, { name: 'Nproarm', type: 'number', step:1, initial:'''+str(Npro_arm)+''', caption: 'Number of propeller per arm [-]' }, { name: 'Lbra', type: 'number', step:1, initial:'''+str(Lbra*100)+''', caption: 'Arm length [cm]' }, { name: 'Dout', type: 'number', step:1, initial:'''+str(Dout*100)+''', caption: 'Side arm [cm]' }, { name: 'position', type: 'group', caption: 'Positioning' }, { name: 'x', type: 'number', step:1, initial: 0, caption: 'X pos' }, { name: 'y', type: 'number', step:1, initial: 0, caption: 'Y pos' }, { name: 'z', type: 'number', step:1, initial: 0, caption: 'Z pos' }, { name: 'a', type: 'number', step:1, initial: 0, caption: 'X rot' }, { name: 'b', type: 'number', step:1, initial: 0, caption: 'Y rot' }, { name: 'c', type: 'number', step:1, initial: 0, caption: 'Z rot' } ]; } function main(p) { let Vol_bat=p.Vol_bat, Lbat = 3*(Vol_bat/6)**(1/3), Hbat = 2*(Vol_bat/6)**(1/3), Wbat = (Vol_bat/6)**(1/3), Lmoteur = p.Lmot, Lhd=0.25*Lmoteur, Dhd=0.25*2.54, // this shaft diameter is commonly used along the series of APC MR propellers Dmot=0.7*Lmoteur; // geometric ratio used for AXI 2208,2212,2217 let N = p.N; Dpro = p.Dpro, pitch=p.pitch, pitch_angle= Math.atan(pitch/(Math.PI*Dpro))*180/Math.PI, Dsha=0.05*Dpro, L1= Math.sin(pitch_angle), L2= Dsha/2*L1, L3= Math.cos(pitch_angle), L4= Dsha/2*L3, Hsha=(Dsha/4*Math.sin(Math.atan(pitch/(Math.PI*Dpro)))+0.125*Math.cos(Math.atan(pitch/(Math.PI*Dpro))))*2, Lbra=p.Lbra, Dout=p.Dout, Npro=p.Npro, Nproarm=p.Nproarm, Narm=Npro/Nproarm; let x = p.x, a = p.a, y = p.y, b = p.b, z = p.z, c = p.c; //Battery geometry: let battery = union( cylinder({h: Hbat, r: Wbat/2}).translate([0,Wbat/2,0]) , cube(size=[Lbat, Wbat, Hbat]).translate([0,0,0]), cylinder({h: Hbat, r: Wbat/2}).translate([Lbat,Wbat/2,0]) ); //Motor geometry: let moteur = union( cylinder({h : Lhd, r : Dhd}).translate([0,0,0.75*Lmoteur]), cylinder({h : Lmoteur-Lhd, r : Dmot}) ); function clock(second) { var t = 400*(second); var s = cylinder({r:0.1,h:6}).setColor(0,0,1).rotateX(-t/60*360); return union(s).rotateY(-90).translate([0,0,5]); } function loop() { var d = new Date(); self.postMessage({cmd:'rendered', objects: [ clock(d.getSeconds()).toCompactBinary() ]}); } //Propeller geometry: let propeller = union( cube(size=[Dpro/2, Dsha/2, 0.001*Dpro]).translate([0,-Dsha/4,0]).rotateX(pitch_angle), cylinder({h: Hsha, r: Dsha/2}).translate([0,0,-Hsha/2]) , cube(size=[Dpro/2, Dsha/2, 0.001*Dpro]).translate([-Dpro/2,-Dsha/4,0]).rotateX(pitch_angle) ); //Motor and propeller iteration: var cubes = new Array(); for(i=0; i<Npro; i++) { cubes[i] = rotate([0,0,i/Narm*360], translate([Lbra,0,0], moteur.setColor(0,1,0),propeller.setColor(0,0,1).translate([0, 0, Lmoteur+Hsha/2]) )); } //Coaxial configuration: var coaxial = new Array(); for(i=0; i<Npro; i++) { coaxial[i] = cubes[i].mirroredZ(); } //Arm 
connection to battery: var arm = new Array(); for(i=0; i<Npro; i++) { arm[i] = rotate([0,0,i/Narm*360], cube(size=[Lbra, Dout/2, Dout/2]).rotateX(90)); } if (Nproarm == 2) { return union(battery.setColor(1,0,0).translate([-Lbat/2, 0, -Hbat/2]).rotateX(90),cubes,arm,coaxial).translate([x, y, z]).rotateX(a).rotateY(b).rotateZ(c); } else{ return union(battery.setColor(0,0,1).translate([-Lbat/2, 0, -Hbat/2]).rotateX(90),cubes,arm).translate([x, y, z]).rotateX(a).rotateY(b).rotateZ(c); } } ''') # insert it into the document soup.body.div.div.append(piece) #overwrite the html file with open('visu.html', "w") as outf: outf.write(str(soup)) #turn the beautifulsoup into a string soup_str=str(soup) ### Push the updated soup to github in order to visualize the 3D visu through Github Pages # using name and password g = github.Github("aitorochotorena","pythonic94") # using temporary token (necessary to update token) # g = github.Github("e307806ac4f09c82f3ecaa82ea75203a86bb5928") repo = g.get_user().get_repo("multirotor-all") file = repo.get_contents("notebooks/visu.html") # update repo.update_file("notebooks/visu.html","Update",soup_str, file.sha) return df, df_opt else: return constraints ###Output _____no_output_____ ###Markdown Optimization results and piece rendering ###Code from IPython.display import display, clear_output from ipywidgets import widgets button = widgets.Button(description="Calculate") display(button) bounds=[(1,400),#k_M (1,20),#k_mot (1,10),#k_speed_mot (1,5),#k_vb (0.01,1),#k_ND (0.05,.99),#D_ratio (.01,60),#k_Mb (0.3,0.6),#beta (0.01,0.5),#J (1,15),#k_ESC (1,20),#Nred ] # df_pro_mod=df_pro[df_pro['TYPE']==pro.value] # df_bat_mod=df_bat[df_bat['TYPE']==bat.value] # df_esc_mod=df_esc[df_esc['TYPE']==esc.value] # df_mot_mod=df_mot[df_mot['TYPE']==mot.value] output = widgets.Output() @output.capture() def on_button_clicked(b): clear_output() # Vector of parameters if Mod.value=='Gear Drive': parameters = np.array((bounds[0][0],bounds[1][0],bounds[2][0],bounds[3][0],bounds[4][0], bounds[5][0], bounds[6][0], bounds[7][0], bounds[8][0], bounds[9][0], bounds[10][0])) else: parameters = np.array((bounds[0][0],bounds[1][0],bounds[2][0],bounds[3][0],bounds[4][0], bounds[5][0], bounds[6][0], bounds[7][0], bounds[8][0], bounds[9][0])) # optimization with SLSQP algorithm contrainte=lambda x: SizingCode(x, 'Const') objectif=lambda x: SizingCode(x, 'Obj') objectifP=lambda x: SizingCode(x, 'ObjP') # Differential evolution omptimisation start = time.time() print("Loading") print("Statement for using this calculator: \n All values are calculated and may deviate from the real.\n Before flight you have to recheck the actual max. values. \n All max. values must stay within the limits of the manufacturers.\n A commercial use is forbidden. We reject any liability! 
\n DroneApp is subject to copyright and intellectual property rights.") if Algo.value=='SLSQP': if Mod.value=='Gear Drive': result = scipy.optimize.fmin_slsqp(func=objectif, x0=parameters, bounds=[(1,400),#k_M (1,20),#k_mot (1,10),#k_speed_mot (1,5),#k_vb (0.001,1),#k_ND (0.05,.99),#D_ratio (.001,60),#k_Mb (0.3,0.6),#beta (0.001,0.5),#J (1,15),#k_ESC (1,20)]#Nred ,f_ieqcons=contrainte, iter=1500, acc=1e-12) else: result = scipy.optimize.fmin_slsqp(func=objectif, x0=parameters, bounds=[(1,400),#k_M (1,20),#k_mot (1,10),#k_speed_mot (1,5),#k_vb (0.001,1),#k_ND (0.05,.99),#D_ratio (.001,60),#k_Mb (0.3,0.6),#beta (0.001,0.5),#J (1,15)]#k_ESC ,f_ieqcons=contrainte, iter=1500, acc=1e-12) else: if Mod.value=='Gear Drive': result = scipy.optimize.differential_evolution(func=objectifP, bounds=[(1,400),#k_M (1,20),#k_mot (1,10),#k_speed_mot (1,5),#k_vb (0.001,1),#k_ND (0.05,.99),#D_ratio (.001,60),#k_Mb (0.3,0.6),#beta (0.0001,0.5),#J (1,15),#k_ESC (1,20),#Nred ],maxiter=400, tol=1e-12) else: result = scipy.optimize.differential_evolution(func=objectifP, bounds=[(1,400),#k_M (1,20),#k_mot (1,10),#k_speed_mot (1,5),#k_vb (0.001,1),#k_ND (0.05,.99),#D_ratio (.001,60),#k_Mb (0.3,0.6),#beta (0.0001,0.5),#J (1,15),#k_ESC ],maxiter=400, tol=1e-12) print("end") # Final characteristics after optimization end = time.time() print("Operation time: %.5f s" %(end - start)) print("-----------------------------------------------") print("Final characteristics after optimization :") if Algo.value=='SLSQP': data=SizingCode(result, 'Prt')[0] data_opt=SizingCode(result, 'Prt')[1] else: data=SizingCode(result.x, 'Prt')[0] data_opt=SizingCode(result.x, 'Prt')[1] print("-----------------------------------------------") pd.options.display.float_format = '{:,.3f}'.format def view(x=''): #if x=='All': return display(df) if x=='Optimization' : return display(data_opt) return display(data[data['Type']==x]) items = sorted(data['Type'].unique().tolist())+['Optimization'] w = widgets.Select(options=items) return display(interactive(view, x=w)),print('\033[1m' + "\033[4mPress F2 to render the piece\033[0m"), display(IFrame('https://aitorochotorena.github.io/multirotor-all/notebooks/visu.html', width = 900, height = 600)) button.on_click(on_button_clicked) display(output) ###Output _____no_output_____
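###Markdown The cell above handles constraints differently depending on the algorithm: SLSQP receives the inequality constraints directly through `f_ieqcons`, while `differential_evolution` is only given the `'ObjP'` objective, which presumably folds constraint violations into the objective as a penalty, since no constraint function is passed to it. The sketch below illustrates that pattern on a toy problem; `objective`, `constraints`, the penalty weight and the bounds are made-up stand-ins, not the notebook's actual `SizingCode` quantities.
###Code
import numpy as np
import scipy.optimize

# Toy stand-ins for the notebook's objective and inequality constraints
# (the real ones live inside SizingCode and depend on the component models).
def objective(x):
    return x[0] ** 2 + x[1] ** 2               # quantity to minimise (e.g. total mass)

def constraints(x):
    # Same convention as f_ieqcons above: every entry must end up >= 0
    return np.array([x[0] + x[1] - 1.0])

def penalized_objective(x, weight=1e3):
    # differential_evolution gets no constraint function here, so violations
    # (negative constraint values) are added to the objective as a penalty.
    violation = np.minimum(constraints(x), 0.0)
    return objective(x) + weight * np.sum(violation ** 2)

bounds = [(0, 5), (0, 5)]
x0 = np.array([2.0, 2.0])

res_slsqp = scipy.optimize.fmin_slsqp(func=objective, x0=x0, bounds=bounds,
                                      f_ieqcons=constraints, iter=500, acc=1e-12)
res_de = scipy.optimize.differential_evolution(func=penalized_objective,
                                               bounds=bounds, tol=1e-12)
print('SLSQP:', res_slsqp, ' DE:', res_de.x)
###Output _____no_output_____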
Statistics/Measure of dispersion.ipynb
###Markdown Measure of Dispersion or Variability1. Variability describes how far apart data points lie from each other and from the center of a distribution. Along with measures of central tendency, measures of variability give you descriptive statistics that summarize your data.2. Variability is also referred to as spread, scatter or dispersion3. Dispersion or spread is the degree of scatter or variation of the variable about a centeral value. 4. Degree to which numerical data tend to spread about an average value is called variation or dispersion of data. Objective of Dispersion1. To judge the reliablity of measure of centeral tendency- if Variability is more than mean is not representative of the series then median is best.2. To make a comparative study of variability of the two series.3. To identify the causes of variability with a view to control it. The Following measure of dispersion1. Range2. Interquartile range3. Semi-interquartile range or quartile deviation4. Average deviation or mean deviation5. Standard deviation or root mean square deviation from arithmatic mean6. Lorenz curve Why does variability matter?While the central tendency, or average, tells you where most of your points lie, variability summarizes how far apart they are. This is important because it tells you whether the points tend to be clustered around the center or more widely spread out.Low variability is ideal because it means that you can better predict information about the population based on sample data. High variability means that the values are less consistent, so it’s harder to make predictions.Data sets can have the same central tendency but different levels of variability or vice versa. If you know only the central tendency or the variability, you can’t say anything about the other aspect. Both of them together give you a complete picture of your data. Range The range tells you the spread of your data from the lowest to the highest value in the distribution. It’s the easiest measure of variability to calculate. To find the range, simply subtract the lowest value from the highest value in the data set. Range example You have 8 data points from Sample A. Data (minutes) 72 110 134 190 238 287 305 324 The highest value (H) is 324 and the lowest (L) is 72. R = H – L R = 324 – 72 = 252 The range of your data is 252 minutes. ###Code a= [72, 110, 134, 190, 238, 287, 305, 324] range = max(a) - min(a) print(range) ###Output 252 ###Markdown Advantage of Range1. Range is easily calcualted and readily understood. Disadvantage of Range1. It is affected by fluctuation of sampling.2. It is not based on all the observation of the series.3. It cannot be used in the case of open-end distribution. Uses of Range1. Quality control for ex. Nutbold example.2. Variation in money rate, share values, gold prices etc.3. Weather forecasting. Interquartile RangeThe interquartile range gives you the spread of the middle of your distribution. OR It is a measure of where the "Middle fifty" in a dataset.For any distribution that’s ordered from low to high, the interquartile range contains half of the values. While the first quartile (Q1) contains the first 25% of values, the fourth quartile (Q4) contains the last 25% of values.The interquartile range is the third quartile (Q3) minus the first quartile (Q1). This gives us the range of the middle half of a data set. IQR = Q3 - Q1 ![iqr.png](attachment:iqr.png) Note:- 1. IQR measures the centeral tendency and spread, respectively but are robust agaibst outliers and non normal data.2. 
The IQR tells how spread out the middle values are. It can also be used to tell when some of the other values are "too far" from the centeral value(outliers). ###Code from scipy import stats data = [32, 36, 46, 47, 56, 69, 75, 79, 79, 88, 89, 91, 92, 93, 96, 97, 101, 105, 112, 116] # Interquartile range (IQR) IQR = stats.iqr(data, interpolation = 'midpoint') print(IQR) ###Output 34.0 ###Markdown Semi-Interquartile RangeIt is a midpoint of IQR i.e it is half of the difference between third quartile and first quartile. It is also called quartile deviation. Semi IQR = (Q3 - Q1) /2 Note:- 1. It is a good measure of spread of skewed distribution because it tell that how much data is varied from centeral data(i.e mean and median) ###Code semi_iqr = IQR / 2 semi_iqr ###Output _____no_output_____ ###Markdown Standard DeviationThe standard deviation is the average amount of variability in your dataset. It tells you, on average, how far each score lies from the mean. The larger the standard deviation, the more variable the data set is. Uses1. It is commonly used to measure confidence in statistical calculations. For example, the margin of error in calculating marks of an exam is determined by calculating the expected standard deviation in the results if the same exam were to be conducted multiple times.2. It is very useful in the field of financial studies as well as it helps to determine the margin of profit and loss. The standard deviation is also important, where the standard deviation on the rate of return on an investment is a measure of the volatility of the investment. There are six steps for finding the standard deviation by hand:1. List each score and find their mean.2. Subtract the mean from each score to get the deviation from the mean.3. Square each of these deviations.4. Add up all of the squared deviations.5. Divide the sum of the squared deviations by n – 1 (for a sample) or N (for a population).6. Find the square root of the number you found. Standard deviation formula for populationsIf you have data from the entire population, use the population standard deviation formula:![population-standard-deviation-formula-2.png](attachment:population-standard-deviation-formula-2.png)where, σ = population standard deviation ∑ = sum of… X = each value μ = population mean N = number of values in the population Standard deviation formula for samplesIf you have data from a sample, use the sample standard deviation formula:![sample-standard-deviation-formula-2.png](attachment:sample-standard-deviation-formula-2.png)where, s = sample standard deviation ∑ = sum of… X = each value x̅ = sample mean n = number of values in the sample Why use n – 1 for sample standard deviation?Samples are used to make statistical inferences about the population that they came from.When you have population data, you can get an exact value for population standard deviation. Since you collect data from every population member, the standard deviation reflects the precise amount of variability in your distribution, the population.But when you use sample data, your sample standard deviation is always used as an estimate of the population standard deviation. 
Using n in this formula tends to give you a biased estimate that consistently underestimates variability.Reducing the sample n to n – 1 makes the standard deviation artificially large, giving you a conservative estimate of variability.While this is not an unbiased estimate, it is a less biased estimate of standard deviation: it is better to overestimate rather than underestimate variability in samples.The difference between biased and conservative estimates of standard deviation gets much smaller when you have a large sample size. ###Code import numpy speed = [86,87,88,86,87,85,86] x = numpy.std(speed) print(x) import statistics # creating a simple data - set sample = [86,87,88,86,87,85,86] # Prints standard deviation # xbar is set to default value of 1 print("Standard Deviation of sample is % s " % (statistics.stdev(sample))) ###Output Standard Deviation of sample is 0.9759000729485332 ###Markdown VarianceThe variance is the average of squared deviations from the mean. A deviation from the mean is how far a score lies from the mean.Variance is the square of the standard deviation. This means that the units of variance are much larger than those of a typical value of a data set.While it’s harder to interpret the variance number intuitively, it’s important to calculate variance for comparing different data sets in statistical tests like ANOVAs.Variance reflects the degree of spread in the data set. The more spread the data, the larger the variance is in relation to the mean. Variance formula for populations![population-variance-formula.png](attachment:population-variance-formula.png)where, σ2 = population variance Σ = sum of… Χ = each value μ = population mean Ν = number of values in the population Variance formula for samples![sample-variance-formula.png](attachment:sample-variance-formula.png)where, s2 = sample variance Σ = sum of… Χ = each value x̄ = sample mean n = number of values in the sample ###Code import statistics # Creating a sample of data sample = [2.74, 1.23, 2.63, 2.22, 3, 1.98] # Prints variance of the sample set # Function will automatically calculate # it's mean and set it as xbar print("Variance of sample set is % s" %(statistics.variance(sample))) import numpy as np # 1D array arr = [2.74, 1.23, 2.63, 2.22, 3, 1.98] print("arr : ", arr) print("var of arr : ", np.var(arr)) ###Output arr : [2.74, 1.23, 2.63, 2.22, 3, 1.98] var of arr : 0.34103333333333335
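###Markdown Mean Deviation (Average Deviation) The list of measures at the top also includes the average or mean deviation, which was not demonstrated above. A minimal sketch, taking the deviations about the arithmetic mean (it can equally be taken about the median):
###Code
import numpy as np

# Creating a sample of data (same toy sample as in the variance example)
sample = np.array([2.74, 1.23, 2.63, 2.22, 3, 1.98])

# Mean deviation: average absolute distance of the observations from their mean
mean_deviation = np.mean(np.abs(sample - sample.mean()))
print("Mean deviation of sample set is %s" % mean_deviation)
###Output _____no_output_____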
.ipynb_checkpoints/sessionOne_GettingStarted-checkpoint.ipynb
###Markdown Getting Started with Python A) The Read-Eval-Print-Loop or REPL* Python command line environment: * R-E-P-L * Read what we type in * Evaluate it * print it * loop back to the beginning 'Quick example using the command line'* Simple arithmetic* variables* underscore to refer to most recent var + in expression* only works in REPL* assignment has no return side effects* Example of print() {parenthesis in py 3 not in 2}* print is now a function call instead B) White Space Significance* Control flow structures such as: * For loops * While loops * Functions etc.* are all terminated with a colon which indicates that a body of the construct is to follow. ###Code for i in range(0, 11): x = i * 10 print(x) ###Output 0 10 20 30 40 50 60 70 80 90 100 ###Markdown Example:* For loop in the REPL* indentation * Four spaces per level of indentation - more on the rules later* each new block/construct introduces a new level of indentation* This practice ensures a uniform structure that enhances readability 3 Advantages of this approach1. Requires readable code - good practice in any language2. No clutter - {} not required3. Human and computer can't be out of sync Rules:1. Prefer four spaces over tabbing - however, ide's and code editors can be configured to translate a tab into four space chars2. Never mix spaces and tabs3. be consistent on consecutive lines4. only deviate to improve readability C) Python Culture and Zen * Development of py language is managed through a series of Documents - Python Enhancement Proposals (PEP)* PEP 8 - how to format code * 4 spaces for indentation* PEP 20 - ZEN of Python * A bit like a list of commandements that enforce best practice * import this - from the REPL * Nuggets of wisdom Readability counts* Clarity matters * so readability makes for valuable code * in turn, improves your coding quality and allows for others to easily interpret your work Python Standard Library* import keyword to import modules * import module_name (Quick example using the math module)--> Factorial and square root* import is a statement that doesn't return a value* access the contents of the module by using name of module + "dot" + name of attribute* dot is used to drill down into the module attributes available ###Code #importing the math module import math #using math module to calculate the square root math.sqrt(25) # Using the math module to calculate the factorial math.factorial(5) n = 5 k = 3 math.factorial(n) / (math.factorial(k) * math.factorial(n-k)) ###Output _____no_output_____ ###Markdown Using Help()* how do we determine what other functions are included in the math module? * to do this we can use the help function ###Code # using help to get more information on math module help(math) ###Output Help on module math: NAME math MODULE REFERENCE https://docs.python.org/3.6/library/math The following documentation is automatically generated from the Python source files. It may be incomplete, incorrect or include features that are considered implementation detail and may vary between Python implementations. When in doubt, consult the module reference at the location listed above. DESCRIPTION This module is always available. It provides access to the mathematical functions defined by the C standard. FUNCTIONS acos(...) acos(x) Return the arc cosine (measured in radians) of x. acosh(...) acosh(x) Return the inverse hyperbolic cosine of x. asin(...) asin(x) Return the arc sine (measured in radians) of x. asinh(...) asinh(x) Return the inverse hyperbolic sine of x. atan(...) 
atan(x) Return the arc tangent (measured in radians) of x. atan2(...) atan2(y, x) Return the arc tangent (measured in radians) of y/x. Unlike atan(y/x), the signs of both x and y are considered. atanh(...) atanh(x) Return the inverse hyperbolic tangent of x. ceil(...) ceil(x) Return the ceiling of x as an Integral. This is the smallest integer >= x. copysign(...) copysign(x, y) Return a float with the magnitude (absolute value) of x but the sign of y. On platforms that support signed zeros, copysign(1.0, -0.0) returns -1.0. cos(...) cos(x) Return the cosine of x (measured in radians). cosh(...) cosh(x) Return the hyperbolic cosine of x. degrees(...) degrees(x) Convert angle x from radians to degrees. erf(...) erf(x) Error function at x. erfc(...) erfc(x) Complementary error function at x. exp(...) exp(x) Return e raised to the power of x. expm1(...) expm1(x) Return exp(x)-1. This function avoids the loss of precision involved in the direct evaluation of exp(x)-1 for small x. fabs(...) fabs(x) Return the absolute value of the float x. factorial(...) factorial(x) -> Integral Find x!. Raise a ValueError if x is negative or non-integral. floor(...) floor(x) Return the floor of x as an Integral. This is the largest integer <= x. fmod(...) fmod(x, y) Return fmod(x, y), according to platform C. x % y may differ. frexp(...) frexp(x) Return the mantissa and exponent of x, as pair (m, e). m is a float and e is an int, such that x = m * 2.**e. If x is 0, m and e are both 0. Else 0.5 <= abs(m) < 1.0. fsum(...) fsum(iterable) Return an accurate floating point sum of values in the iterable. Assumes IEEE-754 floating point arithmetic. gamma(...) gamma(x) Gamma function at x. gcd(...) gcd(x, y) -> int greatest common divisor of x and y hypot(...) hypot(x, y) Return the Euclidean distance, sqrt(x*x + y*y). isclose(...) isclose(a, b, *, rel_tol=1e-09, abs_tol=0.0) -> bool Determine whether two floating point numbers are close in value. rel_tol maximum difference for being considered "close", relative to the magnitude of the input values abs_tol maximum difference for being considered "close", regardless of the magnitude of the input values Return True if a is close in value to b, and False otherwise. For the values to be considered close, the difference between them must be smaller than at least one of the tolerances. -inf, inf and NaN behave similarly to the IEEE 754 Standard. That is, NaN is not close to anything, even itself. inf and -inf are only close to themselves. isfinite(...) isfinite(x) -> bool Return True if x is neither an infinity nor a NaN, and False otherwise. isinf(...) isinf(x) -> bool Return True if x is a positive or negative infinity, and False otherwise. isnan(...) isnan(x) -> bool Return True if x is a NaN (not a number), and False otherwise. ldexp(...) ldexp(x, i) Return x * (2**i). lgamma(...) lgamma(x) Natural logarithm of absolute value of Gamma function at x. log(...) log(x[, base]) Return the logarithm of x to the given base. If the base not specified, returns the natural logarithm (base e) of x. log10(...) log10(x) Return the base 10 logarithm of x. log1p(...) log1p(x) Return the natural logarithm of 1+x (base e). The result is computed in a way which is accurate for x near zero. log2(...) log2(x) Return the base 2 logarithm of x. modf(...) modf(x) Return the fractional and integer parts of x. Both results carry the sign of x and are floats. pow(...) pow(x, y) Return x**y (x to the power of y). radians(...) radians(x) Convert angle x from degrees to radians. sin(...) 
sin(x) Return the sine of x (measured in radians). sinh(...) sinh(x) Return the hyperbolic sine of x. sqrt(...) sqrt(x) Return the square root of x. tan(...) tan(x) Return the tangent of x (measured in radians). tanh(...) tanh(x) Return the hyperbolic tangent of x. trunc(...) trunc(x:Real) -> Integral Truncates x to the nearest Integral toward 0. Uses the __trunc__ magic method. DATA e = 2.718281828459045 inf = inf nan = nan pi = 3.141592653589793 tau = 6.283185307179586 FILE /home/dbutler/anaconda3/lib/python3.6/lib-dynload/math.cpython-36m-x86_64-linux-gnu.so ###Markdown Neater usage of module functions* show how using module_name.attribute can become verbose using:Another way to import a specific function from a module into the current namespace by using: ###Code from math import factorial n = 5 k = 3 factorial(n) / (factorial(k) * factorial(n-k)) ###Output _____no_output_____ ###Markdown Third form of the import statement:* allows us to rename the imported function ###Code from math import factorial as fac n = 5 k = 3 fac(n) / (fac(k) * fac(n-k)) ###Output _____no_output_____
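###Markdown Help on a single attribute `help()` also accepts a single function, which is often easier to read than the full module listing shown earlier. A small example (its output is omitted here):
###Code
# importing the math module
import math

# using help to get more information on just one function
help(math.factorial)
###Output _____no_output_____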
complete_solutions/2018-09-13_adeck_error_calculations.ipynb
###Markdown Optimizing Using Pandas DataFrame Solution for Stack Overflow question 2018-09-13https://stackoverflow.com/questions/52305104/optimizing-using-pandas-data-frame/5231058452310584The original code required about 6 mintes to process 100000 rows on my computer.The new code required 9.5 seconds. ###Code import pandas as pd import numpy as np import datetime from haversine import haversine def main_function(df, row): """ The main difference here is that everything is vectorized Returns: DataFrame """ df_new = pd.DataFrame() df_storage = pd.DataFrame() pos_datetime = df.POSDATETIME.isin([row['POSDATETIME']]) # creates a Boolean map array_len = len(pos_datetime) new_index = pos_datetime.index df_new['StormID'] = df.loc[pos_datetime, 'STORMID'] df_new['ModelBaseTime'] = df.loc[pos_datetime, 'MODELDATETIME'] df_new['Model'] = df.loc[pos_datetime, 'MODEL'] df_new['Tau'] = df.loc[pos_datetime, 'TAU'] # Distance df_new['LatCARQ'] = pd.DataFrame(np.full((array_len, 1), row['LAT']), index=new_index).loc[pos_datetime, 0] df_new['LonCARQ'] = pd.DataFrame(np.full((array_len, 1), row['LON']), index=new_index).loc[pos_datetime, 0] df_new['LatModel'] = df.loc[pos_datetime, 'LAT'] df_new['LonModel'] = df.loc[pos_datetime, 'LON'] def calc_dist_error(row): return round(haversine((row['LatCARQ'], row['LonCARQ']), (row['LatModel'], row['LonModel']), miles=True)) if row['LatModel'] != 0.0 else None df_new['DistError'] = df_new.apply(calc_dist_error, axis=1) # Wind df_new['WindCARQ'] = pd.DataFrame(np.full((array_len, 1), row['WIND']), index=new_index).loc[pos_datetime, 0] df_new['WindModel'] = df.loc[pos_datetime, 'WIND'] df_storage['row_WIND'] = pd.DataFrame(np.full((array_len, 1), row['WIND']), index=new_index).loc[pos_datetime, 0] df_storage['df_WIND'] = df.loc[pos_datetime, 'WIND'] def wind_error_calc(row): return (row['row_WIND'] - row['df_WIND']) if row['df_WIND'] != 0 else None df_new['WindError'] = df_storage.apply(wind_error_calc, axis=1) # Air Pressure df_new['PresCARQ'] = pd.DataFrame(np.full((array_len, 1), row['PRES']), index=new_index).loc[pos_datetime, 0] df_new['PresModel'] = df.loc[pos_datetime, 'PRES'] df_storage['row_PRES'] = pd.DataFrame(np.full((array_len, 1), row['PRES']), index=new_index).loc[pos_datetime, 0] df_storage['df_PRES'] = df.loc[pos_datetime, 'PRES'] def pres_error_calc(row): return abs(row['row_PRES'] - row['df_PRES']) if row['df_PRES'] != 0 else None df_new['PresError'] = df_storage.apply(pres_error_calc, axis=1) del(df_storage) return df_new def calculate_adeck_errors(in_file): """ Retruns: DataFrame """ print(f'Starting Data Calculations: {datetime.datetime.now().strftime("%I:%M:%S%p on %B %d, %Y")}') pd.set_option('max_columns', 20) pd.set_option('max_rows', 300) # read in the raw csv adeck_df = pd.read_csv(in_file) adeck_df['MODELDATETIME'] = pd.to_datetime(adeck_df['MODELDATETIME'], format='%Y-%m-%d %H:%M') adeck_df['POSDATETIME'] = pd.to_datetime(adeck_df['POSDATETIME'], format='%Y-%m-%d %H:%M') #extract only the carq items and remove duplicates carq_data = adeck_df[(adeck_df.MODEL == 'CARQ') & (adeck_df.TAU == 0)].drop_duplicates(keep='last') print('Len carq_data: ', len(carq_data)) #remove carq items from original final_df = adeck_df[adeck_df.MODEL != 'CARQ'] print('Len final_df: ', len(final_df)) df_out_new = pd.DataFrame() for index, row in carq_data.iterrows(): test_df = main_function(final_df, row) # function call df_out_new = df_out_new.append(test_df, sort=False) df_out_new = df_out_new.reset_index(drop=True) df_out_new = 
df_out_new.where((pd.notnull(df_out_new)), None) print(f'Finishing Data Calculations: {datetime.datetime.now().strftime("%I:%M:%S%p on %B %d, %Y")}') return df_out_new in_file = 'data/aal062018.csv' df = calculate_adeck_errors(in_file) len(df) df.head(20) ###Output _____no_output_____ ###Markdown Original Code ###Code def calculate_adeck_errors(in_file): print(f'Starting Data Calculations: {datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y")}') pd.set_option('display.max_columns', 12) # read in the raw csv adeck_df = pd.read_csv(in_file) #print(adeck_df) #extract only the carq items and remove duplicates carq_data = adeck_df[(adeck_df.MODEL == 'CARQ') & (adeck_df.TAU == 0)].drop_duplicates(keep='last') #print(carq_data) #remove carq items from original final_df = adeck_df[adeck_df.MODEL != 'CARQ'] #print(final_df) row_list = [] for index, row in carq_data.iterrows(): position_time = row['POSDATETIME'] for index, arow in final_df.iterrows(): if arow['POSDATETIME'] == position_time: # match, so do calculations storm_id = arow['STORMID'] model_base_time = arow['MODELDATETIME'] the_hour = arow['TAU'] the_model = arow['MODEL'] point1 = float(row['LAT']), float(row['LON']) point2 = float(arow['LAT']), float(arow['LON']) if arow['LAT'] == 0.0: dist_error = None else: dist_error = int(round(haversine(point1, point2, miles=True))) if arow['WIND'] != 0: wind_error = int(abs(int(row['WIND']) - int(arow['WIND']))) else: wind_error = None if arow['PRES'] != 0: pressure_error = int(abs(int(row['PRES']) - int(arow['PRES']))) else: pressure_error = None lat_carq = row['LAT'] lon_carq = row['LON'] lat_model = arow['LAT'] lon_model = arow['LON'] wind_carq = row['WIND'] wind_model = arow['WIND'] pres_carq = row['PRES'] pres_model = arow['PRES'] row_list.append([storm_id, model_base_time, the_model, the_hour, lat_carq, lon_carq, lat_model, lon_model, dist_error, wind_carq, wind_model, wind_error, pres_carq, pres_model, pressure_error]) result_df = pd.DataFrame(row_list) result_df = result_df.where((pd.notnull(result_df)), None) result_cols = ['StormID', 'ModelBasetime', 'Model' , 'Tau', 'LatCARQ', 'LonCARQ', 'LatModel', 'LonModel', 'DistError', 'WindCARQ', 'WindModel','WindError', 'PresCARQ', 'PresModel','PresError'] result_df.columns = result_cols return result_df in_file = 'data/aal062018.csv' df = calculate_adeck_errors(infile) ###Output _____no_output_____
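###Markdown The speed-up (about six minutes down to under ten seconds) comes mainly from replacing the inner `iterrows()` loop of the original code with a Boolean mask (`isin`) and column-wise operations. The toy benchmark below illustrates the general cost difference between row-wise iteration and whole-column arithmetic; the column name, sizes and reference value are made up and unrelated to the A-deck file.
###Code
import time
import numpy as np
import pandas as pd

# Synthetic stand-in for one numeric column of the A-deck data
toy = pd.DataFrame({'WIND': np.random.randint(0, 150, 50000)})
ref_wind = 100

# Row by row, the way the original nested loops work
start = time.time()
errors_loop = [abs(ref_wind - row['WIND']) for _, row in toy.iterrows()]
loop_time = time.time() - start

# Whole-column (vectorized) arithmetic
start = time.time()
errors_vec = (ref_wind - toy['WIND']).abs()
vec_time = time.time() - start

print(f'iterrows: {loop_time:.3f} s, vectorized: {vec_time:.4f} s')
###Output _____no_output_____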
data-prep/ML_split.ipynb
###Markdown Id Based on Yes Chips ###Code def get_chip_count(chip_file_loc): chip_count = {} with open(chip_file_loc) as r: chips = json.load(r) for chip in chips: poly_id = int(chip.split("/")[5]) if poly_id in chip_count.keys(): chip_count[poly_id] += 1 else: chip_count[poly_id] = 1 return chip_count chip_file_loc = "yes_chips_s3.json" chip_count = get_chip_count(chip_file_loc) sum(chip_count.values()) def train_test_split(regions_loc,polygons_loc,chip_count,skip_poly_ids=[]): with open(regions_loc) as r, open(polygons_loc) as p: pol_groups = {} regions = json.load(r)["features"] polygons = json.load(p)["features"] r_col = GeometryCollection([shape(feature["geometry"]).buffer(0) for feature in regions]) p_col = GeometryCollection([shape(feature["geometry"]).buffer(0) for feature in polygons]) poly_list = skip_poly_ids.copy() for i,poly_1 in enumerate(r_col,1): for j, poly_2 in enumerate(p_col,1): if j not in poly_list: if poly_1.intersects(poly_2): poly_list.append(j) if i in list(pol_groups.keys()): pol_groups[i].append({j:chip_count[j]}) else: pol_groups[i] = [{j:chip_count[j]}] return pol_groups regions_loc = "/Volumes/Lacie/zhenyadata/Project_Canopy_Data/PC_Data/Geometry/custom_congo_basin_regions/congo_basin_custom_regions.geojson" polygons_loc = "/Volumes/Lacie/zhenyadata/Project_Canopy_Data/PC_Data/Geometry/labelled_boxes/v3/labels.geojson" pol_groups = train_test_split(regions_loc,polygons_loc,chip_count,skip_poly_ids=[95,96,97,98]) sum_chips = 0 for k1 in pol_groups.keys(): for k2 in pol_groups[k1]: for v1 in k2.values(): sum_chips += v1 sum_chips train_test = {"train":[],"test":[]} counter = 0 for r_id in pol_groups.keys(): vals_1 = [] for p_list in pol_groups[r_id]: vals_1.append(list(p_list.values())[0]) sum_vals = sum(vals_1) r_thresh = sum_vals * .84 pol_groups_sorted = sorted(pol_groups[r_id],key=lambda i:list(i.values())[0],reverse=True) temp_sum = 0 for p_list in pol_groups_sorted: counter += 1 temp_sum += list(p_list.values())[0] if temp_sum < r_thresh: train_test["train"].append(list(p_list.keys())[0]) else: train_test["test"].append(list(p_list.keys())[0]) train_test pols = [] for k,v in pol_groups.items(): for p in v: pols.append(p) tot_train = 0 tot_test = 0 for pol_id in train_test["train"]: for pol_dict in pols: if pol_id in pol_dict.keys(): tot_train += pol_dict[pol_id] for pol_id in train_test["test"]: for pol_dict in pols: if pol_id in pol_dict.keys(): tot_test += pol_dict[pol_id] tot_train tot_test tot_test / (tot_train + tot_test) import json with open('train_test_polygons.json', 'w') as fp: json.dump(train_test, fp) ###Output _____no_output_____
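###Markdown A quick sanity check on the split (this reuses `train_test`, `pol_groups` and `chip_count` from the cells above): no polygon ID should appear in both lists, and the chips of all region-assigned polygons should land in exactly one of the two sides.
###Code
# Polygon IDs must be disjoint between the two splits
overlap = set(train_test['train']) & set(train_test['test'])
assert not overlap, 'polygons present in both splits: %s' % overlap

# Every chip assigned to a region should end up in exactly one split
assigned = sum(v for group in pol_groups.values() for d in group for v in d.values())
train_chips = sum(chip_count[p] for p in train_test['train'])
test_chips = sum(chip_count[p] for p in train_test['test'])
print('assigned:', assigned, ' train + test:', train_chips + test_chips)
print('test fraction: %.3f' % (test_chips / (train_chips + test_chips)))
###Output _____no_output_____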
Notebooks/Performance-Evaluation/FT_AutoEncoder.ipynb
###Markdown Import libraries ###Code from google.colab import drive from pathlib import Path from matplotlib import pyplot as plt import pandas as pd import numpy as np import time import os import csv import concurrent.futures ###Output _____no_output_____ ###Markdown Utility functions Create annot and load descriptors ###Code def create_annot(path): image_list = list(Path(path).glob('*/*.jpg')) # the identity name is in the path (the name of the parent directory) names_list = [i.parent.name for i in image_list] # get the identity of each image # keep info in a pandas DataFrame annot = pd.DataFrame({'identity': names_list, 'image_path': image_list}) return annot def concatenate_annots(list_of_paths): concat_annot = pd.DataFrame() with concurrent.futures.ThreadPoolExecutor() as executor: annots = [executor.submit(create_annot, path) for path in list_of_paths] for annot in annots: new_annot = annot.result() concat_annot = concat_annot.append(new_annot, ignore_index = True) return concat_annot def load_descriptors(path): with open(path, 'rb') as file: return np.load(file) def concatenate_descriptors(list_of_paths): concat_descriptors = None with concurrent.futures.ThreadPoolExecutor() as executor: descriptors = [executor.submit(load_descriptors, path) for path in list_of_paths] for descriptor in descriptors: new_descriptor = descriptor.result() if concat_descriptors is None: concat_descriptors = new_descriptor else: concat_descriptors = np.concatenate([concat_descriptors, new_descriptor]) return concat_descriptors ###Output _____no_output_____ ###Markdown Create pivots ###Code def generate_pivots(descriptors, n, strategy="rnd"): if strategy == "kMED": kmedoids = sklearn_extra.cluster.KMedoids(n_clusters=n).fit(descriptors) return kmedoids.cluster_centers_ if strategy != "rnd": print(strategy, "was not implemented. 
Random pivots were returned") pivots_id = np.random.choice(np.arange(len(descriptors)), size=n) return descriptors[pivots_id] def generate_list_of_pivots(descriptors, t, n, strategy="rnd"): list_of_pivots = [] with concurrent.futures.ThreadPoolExecutor() as executor: pivots = [executor.submit(generate_pivots, descriptors, n, strategy) for i in range(t)] for pivot in concurrent.futures.as_completed(pivots): new_pivot = pivot.result() list_of_pivots.append(new_pivot) return list_of_pivots ###Output _____no_output_____ ###Markdown Save test results ###Code def save_results(dir, file_name, results): with open(os.path.join(dir, file_name +".csv"), 'w') as f: writer = csv.writer(f) # write the header writer.writerow(["CLASS", "AP", "QUERY TIME"]) # write the data for r in results: writer.writerow(r) ###Output _____no_output_____ ###Markdown Test Performance ###Code drive.mount('/content/drive', force_remount=True) ###Output Mounted at /content/drive ###Markdown Create annot and load descriptors for the database ###Code db_annot = concatenate_annots(['/content/drive/MyDrive/CV_Birds/train', '/content/drive/MyDrive/CV_Birds/mirflickr25k']) db_annot db_descriptors = concatenate_descriptors(['/content/drive/MyDrive/CV_Birds/features/training/AutoEncoder/512to128withPace64.npy','/content/drive/MyDrive/CV_Birds/features/distractor/AutoEncoder/512to128withPace64.npy']) db_descriptors.shape ###Output _____no_output_____ ###Markdown Create annot and load descriptors for the test set ###Code query_annot = create_annot('/content/drive/MyDrive/CV_Birds/test') query_annot query_descriptors = load_descriptors('/content/drive/MyDrive/CV_Birds/features/test/AutoEncoder/512to128withPace64.npy') query_descriptors.shape ###Output _____no_output_____ ###Markdown To run our tests we select only the first image of each species within the test set. Please note that within the test set we have 5 images per species. ###Code queries_indexes = [x for x in range(325*5) if x%5 == 0] ###Output _____no_output_____ ###Markdown Create PP-Index ###Code def get_descriptor_from_id(id_object): return db_descriptors[id_object] %cd "/content/drive/MyDrive/CV_Birds/Notebooks/PP-Index" %run PPIndex.ipynb # generate pivots pivots = generate_pivots(db_descriptors, 40, "rnd") # cosine tree cosine_tree = PrefixTree(pivots, length=3, distance_metric='cosine', base_directory="/content/drive/MyDrive/CV_Birds/indexes/fine_tuning/tree/cosine", tree_file='tree_structure') if cosine_tree.is_empty(): cosine_tree.insert_objects_into_tree(range(len(db_descriptors))) cosine_tree.save() # euclidean tree euclidean_tree = PrefixTree(pivots, length=3, distance_metric='euclidean', base_directory="/content/drive/MyDrive/CV_Birds/indexes/fine_tuning/tree/euclidean", tree_file='tree_structure') if euclidean_tree.is_empty(): euclidean_tree.insert_objects_into_tree(range(len(db_descriptors))) euclidean_tree.save() ###Output Tree retrieved from disk ###Markdown Compute mAP ###Code birds_db = db_annot.loc[db_annot['identity'] != 'mirflickr'] counts = birds_db.groupby('identity').count() print("Minimum number of images per species:", int(counts.min())) print("Maximum number of images per species:", int(counts.max())) print("Average number of images:", float(counts.sum()/325)) ###Output Minimum number of images per species: 116 Maximum number of images per species: 249 Average number of images: 145.63692307692307 ###Markdown Since at most we have 249 images per species, we use $n=250$. 
###Code n = 250 ###Output _____no_output_____ ###Markdown The formula for Average Precision is the following:> $AP@n=\frac{1}{GTP}\sum_{k=1}^{n}P@k×rel@k$where $GTP$ refers to the total number of ground truth positives, $n$ refers to the total number of images we are interested in, $P@k$ refers to the precision@k and $rel@k$ is a relevance function. The relevance function is an indicator function which equals 1 if the document at rank $k$ is relevant and equals to 0 otherwise. ###Code def compute_ap(query_index, retrieved_ids): query_identity = query_annot['identity'][query_index] print(query_index//5, query_identity) GTP = len(db_annot.loc[db_annot['identity'] == query_identity]) relevant = 0 precision_summation = 0 for k, id in enumerate(retrieved_ids): if db_annot['identity'][id] == query_identity: # relevant result relevant = relevant + 1 precision_at_k = relevant/(k+1) precision_summation = precision_summation + precision_at_k return (query_identity, precision_summation/GTP) ###Output _____no_output_____ ###Markdown For each query, $Q$, we can calculate a corresponding $AP$. Then, the $mAP$ is simply the mean of all the queries that were made.> $mAP = \frac{1}{N}\sum_{i=1}^{N}AP_i$In our case, $N=325$ (one query per species) Simple tree Cosine ###Code def cosine_tree_queries(query_index, n): start_time = time.time() ids, distances = cosine_tree.find_nearest_neighbors(query_descriptors[query_index], n) end_time = time.time() ids = ids.tolist() return compute_ap(query_index, ids) + (end_time - start_time,) aps = [] for query_index in queries_indexes: aps.append(cosine_tree_queries(query_index, n)) aps ap_at_n = np.array([ap[1] for ap in aps]) query_time = np.array(([ap[2] for ap in aps])) mAP_at_n = np.mean(ap_at_n, axis=0) avg_query_time = np.mean(query_time, axis=0) print("mAP:", mAP_at_n) print("avg. query time: ", avg_query_time) save_results('/content/drive/MyDrive/CV_Birds/performance/fine_tuning/index/AutoEncoder', 'AE_FT_tree_cosine_results', aps) ###Output _____no_output_____ ###Markdown Euclidean ###Code def euclidean_tree_queries(query_index, n): start_time = time.time() ids, distances = euclidean_tree.find_nearest_neighbors(query_descriptors[query_index], n) end_time = time.time() ids = ids.tolist() return compute_ap(query_index, ids) + (end_time - start_time,) aps = [] for query_index in queries_indexes: aps.append(euclidean_tree_queries(query_index, n)) aps ap_at_n = np.array([ap[1] for ap in aps]) query_time = np.array(([ap[2] for ap in aps])) mAP_at_n = np.mean(ap_at_n, axis=0) avg_query_time = np.mean(query_time, axis=0) print("mAP:", mAP_at_n) print("avg. query time: ", avg_query_time) save_results('/content/drive/MyDrive/CV_Birds/performance/fine_tuning/index/AutoEncoder', 'AE_FT_tree_euclidean_results', aps) ###Output _____no_output_____ ###Markdown Tree with query perturbation Cosine ###Code def cosine_pert_tree_queries(query_index, n): start_time = time.time() ids, distances = cosine_tree.find_nearest_neighbors_with_query_perturbation(query_descriptors[query_index], n, perturbations=3) end_time = time.time() ids = ids.tolist() return compute_ap(query_index, ids) + (end_time - start_time,) aps = [] for query_index in queries_indexes: aps.append(cosine_pert_tree_queries(query_index, n)) aps ap_at_n = np.array([ap[1] for ap in aps]) query_time = np.array(([ap[2] for ap in aps])) mAP_at_n = np.mean(ap_at_n, axis=0) avg_query_time = np.mean(query_time, axis=0) print("mAP:", mAP_at_n) print("avg. 
query time: ", avg_query_time) save_results('/content/drive/MyDrive/CV_Birds/performance/fine_tuning/index/AutoEncoder', 'AE_FT_pert_tree_cosine_results', aps) ###Output _____no_output_____ ###Markdown Euclidean ###Code def euclidean_pert_tree_queries(query_index, n): start_time = time.time() ids, distances = euclidean_tree.find_nearest_neighbors_with_query_perturbation(query_descriptors[query_index], n, perturbations=3) end_time = time.time() ids = ids.tolist() return compute_ap(query_index, ids) + (end_time - start_time,) aps = [] for query_index in queries_indexes: aps.append(euclidean_pert_tree_queries(query_index, n)) aps ap_at_n = np.array([ap[1] for ap in aps]) query_time = np.array(([ap[2] for ap in aps])) mAP_at_n = np.mean(ap_at_n, axis=0) avg_query_time = np.mean(query_time, axis=0) print("mAP:", mAP_at_n) print("avg. query time: ", avg_query_time) save_results('/content/drive/MyDrive/CV_Birds/performance/fine_tuning/index/AutoEncoder', 'AE_FT_pert_tree_euclidean_results', aps) ###Output _____no_output_____
08_lab/autoencoder_seminar.ipynb
###Markdown ###Code import scipy as sp import scipy.misc import matplotlib.pyplot as plt import numpy as np import torch from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F %matplotlib inline import tensorflow as tf (X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data() X_train = (X_train / 255).astype('float32') X_test = (X_test / 255).astype('float32') device = torch.device("cuda:0") def preprocess_data(X, y, classification): X_preprocessed = torch.tensor(X, dtype=torch.float).unsqueeze(1) if classification: y_preprocessed = torch.tensor(y, dtype=torch.long) else: y_preprocessed = torch.tensor(y).unsqueeze(1) return X_preprocessed.to(device), y_preprocessed.to(device) def get_batches(X, y, batch_size, shuffle=False, classification=False): if shuffle: shuffle_ids = np.random.permutation(len(X)) X = X[shuffle_ids].copy() y = y[shuffle_ids].copy() for i_picture in range(0, len(X), batch_size): # Get batch and preprocess it: batch_X = X[i_picture:i_picture + batch_size] batch_y = y[i_picture:i_picture + batch_size] # 'return' the batch (see the link above to # better understand what 'yield' does) yield preprocess_data(batch_X, batch_y, classification) from IPython.display import clear_output class Logger: def __init__(self): self.train_loss_batch = [] self.train_loss_epoch = [] self.test_loss_batch = [] self.test_loss_epoch = [] self.train_batches_per_epoch = 0 self.test_batches_per_epoch = 0 self.epoch_counter = 0 def fill_train(self, loss): self.train_loss_batch.append(loss) self.train_batches_per_epoch += 1 def fill_test(self, loss): self.test_loss_batch.append(loss) self.test_batches_per_epoch += 1 def finish_epoch(self): self.train_loss_epoch.append(np.mean( self.train_loss_batch[-self.train_batches_per_epoch:] )) self.test_loss_epoch.append(np.mean( self.test_loss_batch[-self.test_batches_per_epoch:] )) self.train_batches_per_epoch = 0 self.test_batches_per_epoch = 0 clear_output() print("epoch #{} \t train_loss: {:.8} \t test_loss: {:.8}".format( self.epoch_counter, self.train_loss_epoch[-1], self.test_loss_epoch [-1] )) self.epoch_counter += 1 plt.figure(figsize=(11, 5)) plt.subplot(1, 2, 1) plt.plot(self.train_loss_batch, label='train loss') plt.xlabel('# batch iteration') plt.ylabel('loss') plt.legend() plt.subplot(1, 2, 2) plt.plot(self.train_loss_epoch, label='average train loss') plt.plot(self.test_loss_epoch , label='average test loss' ) plt.legend() plt.xlabel('# epoch') plt.ylabel('loss') plt.show(); class Reshape(torch.nn.Module): def __init__(self, *shape): super(Reshape, self).__init__() self.shape = shape def forward(self, x): return x.reshape(x.shape[0], *self.shape) def create_encoder(): return torch.nn.Sequential( nn.Conv2d(1, 16, 3, padding=1), nn.LeakyReLU(), nn.MaxPool2d(2), # 14x14 nn.Conv2d(16, 32, 3, padding=1), nn.LeakyReLU(), nn.MaxPool2d(2), # 7x7 nn.Conv2d(32, 64, 3), # 5x5 nn.LeakyReLU(), nn.Conv2d(64, 128, 3), # 3x3 nn.LeakyReLU(), nn.Conv2d(128,256, 3), # 1x1 nn.LeakyReLU(), nn.Conv2d(256, 32, 1), Reshape(32) ) def create_decoder(): return nn.Sequential( Reshape(32, 1, 1), nn.ConvTranspose2d(32, 256, 3, dilation=2), # 2x2 nn.LeakyReLU(), nn.ConvTranspose2d(256, 128, 3, dilation=2), # 4x4 nn.LeakyReLU(), nn.ConvTranspose2d(128, 64, 3, dilation=2), # 8x8 nn.LeakyReLU(), nn.ConvTranspose2d(64, 32, 3, dilation=2), # 16x16 nn.LeakyReLU(), nn.ConvTranspose2d(32, 16,3, dilation=2), # 28x28 nn.LeakyReLU(), nn.ConvTranspose2d(16, 3,3, dilation=1), nn.LeakyReLU(), nn.ConvTranspose2d(3, 1,3, dilation=2), 
nn.LeakyReLU(), nn.ConvTranspose2d(1, 1,2, dilation=1), nn.Sigmoid() ) encoder = create_encoder() decoder = create_decoder() autoencoder = torch.nn.Sequential( encoder, decoder ).to(device) optimiser = torch.optim.Adam(autoencoder.parameters(), lr=0.003) loss_function = torch.nn.functional.mse_loss num_epochs = 20 batch_size = 256 def fit(model, loss_function, optimizer, _X_train, _y_train, _X_test, _y_test, num_epochs, batch_size, classification=False): logger = Logger() for i_epoch in range(num_epochs): model.train() # setting the model to training mode for batch_X, batch_y in get_batches(_X_train, _y_train, batch_size=batch_size, shuffle=True, classification=classification): predictions = model(batch_X) # compute the predictions loss = loss_function(predictions, batch_y) # compute the loss logger.fill_train(loss.item()) model.zero_grad() # zero the gradients loss.backward() # compute new gradients optimizer.step() # do an optimization step # Now, let's evaluate on the test part: model.eval() # setting the model to evaluatioin mode for batch_X, batch_y in get_batches(_X_test, _y_test, batch_size=batch_size, classification=classification): loss = loss_function(model(batch_X), batch_y) logger.fill_test(loss.item()) logger.finish_epoch() fit(autoencoder, loss_function, optimiser, X_train, X_train, X_test, X_test, num_epochs, batch_size, classification=False) X_test[:10].reshape(28, 280) plt.figure(figsize=(10, 10)) plt.imshow(np.transpose(X_test[:10], (1,0,2)).reshape(28, 280), cmap='Greys') plt.axis('off'); plt.figure(figsize=(10, 10)) encoder_reconstruction = autoencoder(torch.tensor(X_test[:10]).unsqueeze(1).to(device)).cpu().detach()[:, 0, ...] plt.imshow(np.transpose(encoder_reconstruction, (1,0,2)).reshape(28, 280), cmap='Greys') plt.axis('off'); ###Output _____no_output_____ ###Markdown Now, lets make a classifier ###Code for param in encoder.parameters(): param.requires_grad_(False) classifier = nn.Sequential( encoder, #nn.ReLU(), nn.Linear(32, 10), #nn.ReLU(), #nn.Linear(10, 10) ).to(device) optimiser = torch.optim.Adam(classifier.parameters(), lr=0.005) loss_function = torch.nn.functional.cross_entropy num_epochs = 70 batch_size = 256 fit(classifier, loss_function, optimiser, X_train[:300], y_train[:300], X_test, y_test, num_epochs, batch_size, classification=True) ## Test accuracy def get_accuracy(model, X, y): return (torch.argmax(model(torch.tensor(X).unsqueeze(1).to(device)), dim=1).cpu().detach().numpy() == y).mean() print(get_accuracy(classifier, X_test, y_test)) print(get_accuracy(classifier, X_train[:300], y_train[:300])) encoder = create_encoder() for param in encoder.parameters(): param.requires_grad_(True) classifier = nn.Sequential( encoder, #nn.ReLU(), nn.Linear(32, 10), #nn.ReLU(), #nn.Linear(10, 10) ).to(device) optimiser = torch.optim.Adam(classifier.parameters(), lr=0.005) loss_function = torch.nn.functional.cross_entropy num_epochs = 70 batch_size = 256 fit(classifier, loss_function, optimiser, X_train[:300], y_train[:300], X_test, y_test, num_epochs, batch_size, classification=True) ###Output _____no_output_____ ###Markdown What do we observe on the training curve? 
###Code print(get_accuracy(classifier, X_test, y_test)) print(get_accuracy(classifier, X_train[:300], y_train[:300])) ###Output _____no_output_____ ###Markdown Semi-supervised ###Code X_train_labeled, X_train_unlabeled = X_train[:300], X_train[300:] y_train_labeled = y_train[:300] def gen_untrained(batch_size): ids = np.arange(len(X_train_unlabeled)) np.random.shuffle(ids) for i in range(0, len(X_train_unlabeled), batch_size): yield X_train_unlabeled[ids][i:i+batch_size] unlabeled_generator = gen_untrained(256) ###Output _____no_output_____ ###Markdown Remember, what we want to do here is to create a class, that do two things: it acts both like a Autoencoder and classifier, so it should give you two outputs - a reconstructed image and classification probability vector ###Code class UnsupervisedAE(nn.Module): def __init__(self): super().__init__() self.encoder = <YOUR CODE> self.decoder = <YOUR CODE> self.classifier = <YOUR CODE> def forward(self, X): x_compresed = <YOUR CODE> x_reco = <YOUR CODE> x_class = <YOUR CODE> return x_reco, x_class ###Output _____no_output_____ ###Markdown Define our losses ###Code unsup_ae = UnsupervisedAE().to(device) optimiser = <YOUR CODE> mse_loss = <YOUR CODE> ce_loss = <YOUR CODE> N_EPOCHS = 100 BATCH_SIZE = 16 LAMBDA = 0.3 history_ae = [] history_cl = [] history_tot = [] for i_epoch in range(N_EPOCHS): print("Working on ep #", i_epoch) ids = np.arange(len(X_train_labeled)) np.random.shuffle(ids) for i_image in range(0, len(X_train_labeled), BATCH_SIZE): X_batch = torch.tensor(X_train_labeled[ids][i_image:i_image + BATCH_SIZE]).unsqueeze(1).to(device) y_batch = torch.tensor(y_train_labeled[ids][i_image:i_image + BATCH_SIZE], dtype=torch.long).to(device) try: X_batch_unlabled = torch.tensor(unlabeled_generator.__next__()).unsqueeze(1).to(device) except StopIteration: unlabeled_generator = gen_untrained(256) X_batch_unlabled = torch.tensor(unlabeled_generator.__next__()).unsqueeze(1).to(device) epoch_ae_loss = 0 epoch_cl_loss = 0 epoch_total_loss = 0 # So, here we need to do two things: predict reconstructed image and our MSE loss on the UNLABELED dataset reco_image, _ = <YOUR CODE> ae_loss = <YOUR CODE> # here, we want to predict the classification loss of the labeled data _, class_preds = <YOUR CODE> cass_loss = <YOUR CODE> # And here we just want to make the sum of the losses with some regularisation coefficient loss = <YOUR CODE> loss.backward() optimiser.step() unsup_ae.zero_grad() epoch_ae_loss += ae_loss.item() epoch_cl_loss += cass_loss.item() epoch_total_loss += loss.item() history_ae.append(epoch_ae_loss) history_cl.append(epoch_cl_loss) history_tot.append(epoch_total_loss) if i_epoch % 1 == 0: clear_output(wait=True) plt.figure(figsize=(12, 8)) plt.plot(history_ae, label='ae loss') plt.plot(history_cl, label='cl loss') plt.plot(history_tot, label='total') plt.legend() plt.xlabel('epoch') plt.ylabel('loss') plt.show(); history_tot[-1] ## Test accuracy def get_accuracy(model, X, y): return (torch.argmax(model(torch.tensor(X).unsqueeze(1).to(device))[1], dim=1).cpu().detach().numpy() == y).mean() print(get_accuracy(unsup_ae, X_test, y_test)) #print(get_accuracy(classifier, X_train[:300], y_train[:300])) ###Output _____no_output_____
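###Markdown For reference, one possible way of filling in the `<YOUR CODE>` placeholders above. This is only a sketch, not the official seminar solution; it reuses `create_encoder`, `create_decoder` and `device` defined earlier in this notebook, and the learning rate and loss weighting are arbitrary choices.
###Code
class UnsupervisedAEExample(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = create_encoder()        # image -> 32-dim code
        self.decoder = create_decoder()        # 32-dim code -> reconstructed image
        self.classifier = nn.Linear(32, 10)    # 32-dim code -> class logits

    def forward(self, X):
        x_compressed = self.encoder(X)
        x_reco = self.decoder(x_compressed)
        x_class = self.classifier(x_compressed)
        return x_reco, x_class

unsup_ae = UnsupervisedAEExample().to(device)
optimiser = torch.optim.Adam(unsup_ae.parameters(), lr=1e-3)
mse_loss = F.mse_loss
ce_loss = F.cross_entropy

# Inside the training loop the four remaining placeholders then become:
#   reco_image, _ = unsup_ae(X_batch_unlabled)            # reconstruction branch, unlabeled batch
#   ae_loss = mse_loss(reco_image, X_batch_unlabled)
#   _, class_preds = unsup_ae(X_batch)                    # classification branch, labeled batch
#   cass_loss = ce_loss(class_preds, y_batch)
#   loss = LAMBDA * ae_loss + (1 - LAMBDA) * cass_loss    # weighted sum of the two losses
###Output _____no_output_____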