code/week2_dataprep/Lab4_OutliersWithLog_Boston.ipynb
###Markdown Ways to Detect and Remove the OutliersWhile working on a Data Science project, what is it, that you look for? What is the most important part of the EDA phase? There are certain things which, if are not done in the EDA phase, can affect further statistical/Machine Learning modelling. One of them is finding “Outliers”. In this post we will try to understand what is an outlier? Why is it important to identify the outliers? What are the methods to outliers? Don’t worry, we won’t just go through the theory part but we will do some coding and plotting of the data too.Credit: https://towardsdatascience.com/ways-to-detect-and-remove-the-outliers-404d16608dba ###Code #Import the libraries import numpy as np import pandas as pd from sklearn.datasets import load_boston,load_iris #Load the data boston = load_boston() #Find features and target x = boston.data y = boston.target #Find the dic keys print(boston.keys()) #find features name columns = boston.feature_names columns #Description of dataset print(boston.DESCR) #Create dataframe boston_df = pd.DataFrame(boston.data) boston_df.columns = columns boston_df_o = boston_df boston_df.shape #Oulier detection - Univarite - Boxplot import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline sns.boxplot(x=boston_df['DIS']) #Check the correlation between features before multivariate outlier analysis import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline plt.figure(figsize= (10,10), dpi=100) sns.heatmap(boston_df.corr()) #Multivariate outlier analysis fig, ax = plt.subplots(figsize=(16,8)) ax.scatter(boston_df['INDUS'], boston_df['TAX']) ax.set_xlabel('Proportion of non-retail business acres per town') ax.set_ylabel('Full-value property-tax rate per $10,000') plt.show() from scipy import stats import numpy as np z = np.abs(stats.zscore(boston_df)) print(z) z.shape threshold = 3 print(np.where(z > 3)) #print(boston_df[np.where(z > 3)]) print(z[55][1]) ###Output _____no_output_____ ###Markdown Removing Outliers ###Code boston_df_o = boston_df_o[(z < 3).all(axis=1)] boston_df.shape boston_df_o.shape boston_df_o1 = boston_df Q1 = boston_df_o1.quantile(0.25) Q3 = boston_df_o1.quantile(0.75) IQR = Q3 - Q1 print(IQR) boston_df_out = boston_df_o1[~((boston_df_o1 < (Q1 - 1.5 * IQR)) |(boston_df_o1 > (Q3 + 1.5 * IQR))).any(axis=1)] boston_df_out.shape ###Output _____no_output_____
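###Markdown A minimal, self-contained sketch of the same two filters used above (z-score and IQR) on synthetic data. `load_boston` was deprecated in scikit-learn 1.0 and removed in 1.2, so a stand-in DataFrame is used here instead of the Boston housing data; the filtering logic mirrors the notebook's cells. ###Code
import numpy as np
import pandas as pd
from scipy import stats

rng = np.random.default_rng(0)
df = pd.DataFrame({
    "feat_a": rng.normal(10, 2, 500),
    "feat_b": rng.lognormal(1.0, 0.8, 500),   # skewed column with natural outliers
})

# Z-score filter: keep rows whose values are all within 3 standard deviations.
z = np.abs(stats.zscore(df))
df_z = df[(z < 3).all(axis=1)]

# IQR filter: keep rows inside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] for every column.
q1, q3 = df.quantile(0.25), df.quantile(0.75)
iqr = q3 - q1
keep = ~((df < (q1 - 1.5 * iqr)) | (df > (q3 + 1.5 * iqr))).any(axis=1)
df_iqr = df[keep]

print(df.shape, df_z.shape, df_iqr.shape)
###Output _____no_output_____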
doc/collections.ipynb
###Markdown Histogram collections**WARNING**: Experimental functionality that will probably be redesigned in version 0.6. ###Code import numpy as np np.random.seed(42) from physt import h1 from physt.histogram_collection import HistogramCollection from physt.plotting import matplotlib from physt.plotting import set_default_backend from physt.plotting import vega from physt.plotting import matplotlib set_default_backend("matplotlib") data1 = np.random.normal(100, 15, 2000) h_a = h1(data1, "fixed_width", bin_width=10, name="first") h_a.plot(); data2 = np.random.normal(80, 10, 2000) h_b = h1(data2, h_a.binning, name="second") h_b.plot(); collection = HistogramCollection(h_a, h_b, title="Combination") collection.create("third", np.random.normal(148, 5, 300)) ###Output _____no_output_____ ###Markdown Plotting in matplotlib ###Code # The default collection.plot(); # Add some options collection.plot.line(alpha=.5, lw=8, xlabel="Index"); ###Output _____no_output_____ ###Markdown Plotting in vega ###Code set_default_backend("vega") collection.plot.scatter(legend=False) collection.plot.line(lw=7, legend=True, alpha=.5) ###Output _____no_output_____
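###Markdown A library-free sketch of the idea behind the collection above: several datasets histogrammed over one shared set of fixed-width bin edges so they can be drawn and compared together. This is illustrative only and does not use the experimental `physt` API. ###Code
import numpy as np
import matplotlib.pyplot as plt

np.random.seed(42)
datasets = {
    "first": np.random.normal(100, 15, 2000),
    "second": np.random.normal(80, 10, 2000),
    "third": np.random.normal(148, 5, 300),
}

# Fixed-width bins of width 10 covering the combined range, mirroring
# h1(..., "fixed_width", bin_width=10) in the notebook above.
combined = np.concatenate(list(datasets.values()))
lo = np.floor(combined.min() / 10) * 10
hi = np.ceil(combined.max() / 10) * 10
edges = np.arange(lo, hi + 10, 10)

fig, ax = plt.subplots()
for name, values in datasets.items():
    counts, _ = np.histogram(values, bins=edges)
    ax.step(edges[:-1], counts, where="post", label=name)
ax.set_xlabel("value")
ax.set_ylabel("count")
ax.legend(title="Combination")
plt.show()
###Output _____no_output_____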
replication/SNAREseq_SCOT_alignment.ipynb
###Markdown Notebook for running SCOT on SNARE-seq Cell Mixture Data**Note:** This version of the notebook runs a new setting for SCOT, where we use correlation as a metric for building kNN graphs and use connectivity information from this graph in intra-domain similarity matrices fed into the optimal transport algorithm. **Access to the raw dataset:** Gene Expression Omnibus accession no. GSE126074. SNARE-seq data in `/data` folder containes the version with dimensionality reduction techniques applied from the original SNARE-seq paper (https://www.nature.com/articles/s41587-019-0290-0) SCOT software has been updated on 20 September 2020. It now outputs error statements for convergence issues at low epsilon values. When it runs into numerical instabilities in convergence, it outputs the original data, the GW distance from the uniform coupling, and a converged flag = False. If you run into such an error, please try using a larger epsilon value for the entropic regularization. If you have any questions, e-mail: [email protected], [email protected], [email protected] ###Code import sys sys.path.insert(1, '../src/') import utils as ut import evals as evals import scot2 as sc import numpy as np X=np.load("../data/scatac_feat.npy") y=np.load("../data/scrna_feat.npy") print("Dimensions of input datasets are: ", "X= ", X.shape, " y= ", y.shape) # initialize SCOT object scot=sc.SCOT(X, y) # call the alignment with l2 normalization X_new, y_new = scot.align(k=50, e=0.0005, normalize=True) ###Output It. |Err ------------------- 0|2.152968e-03| 10|6.118185e-04| 20|8.246992e-05| 30|3.894956e-05| 40|3.216470e-05| 50|3.068255e-05| 60|2.850152e-05| 70|2.484922e-05| 80|2.042939e-05| 90|1.618798e-05| 100|1.259759e-05| 110|9.709774e-06| 120|7.426016e-06| 130|5.636091e-06| 140|4.246817e-06| 150|3.179940e-06| 160|2.368862e-06| 170|1.757536e-06| 180|1.299939e-06| 190|9.592410e-07| It. |Err ------------------- 200|7.066033e-07| 210|5.198315e-07| 220|3.820633e-07| 230|2.806101e-07| 240|2.059904e-07| 250|1.511563e-07| 260|1.108881e-07| 270|8.133075e-08| 280|5.964305e-08| 290|4.373381e-08| 300|3.206565e-08| 310|2.350916e-08| 320|1.723517e-08| 330|1.263514e-08| 340|9.262639e-09| 350|6.790190e-09| 360|4.977643e-09| 370|3.648897e-09| 380|2.674833e-09| 390|1.960783e-09| It. |Err ------------------- 400|1.437344e-09| 410|1.053637e-09| 420|7.723608e-10| ###Markdown Evaluate results: ###Code fracs=evals.calc_domainAveraged_FOSCTTM(X_new, y_new) print("Average FOSCTTM score for this alignment with X onto Y is: ", np.mean(fracs)) import matplotlib.pyplot as plt legend_label="SCOT alignment FOSCTTM \n average value: "+str(np.mean(fracs)) plt.plot(np.arange(len(fracs)), np.sort(fracs), "r--", label=legend_label) plt.legend() plt.xlabel("Cells") plt.ylabel("Sorted FOSCTTM") plt.show() ###Output _____no_output_____ ###Markdown Visualize Projections ###Code import matplotlib.pyplot as plt from sklearn.decomposition import PCA pca=PCA(n_components=2) Xy_pca=pca.fit_transform(np.concatenate((X_new, y_new), axis=0)) X_pca=Xy_pca[0: 1047,] y_pca=Xy_pca[1047:,] plt.scatter(X_pca[:,0], X_pca[:,1], c="k", s=15, label="Chromatin Accessibility") plt.scatter(y_pca[:,0], y_pca[:,1], c="r", s=15, label="Gene Expression") plt.legend() plt.title("Colored based on domains") plt.show() ###Output _____no_output_____
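###Markdown For reference, a small illustrative re-implementation of the FOSCTTM idea used above ("fraction of samples closer than the true match"). This is a sketch, not the `evals` module shipped with the SCOT repository, and it assumes row *i* of one domain is the true match of row *i* in the other. ###Code
import numpy as np
from scipy.spatial.distance import cdist

def foscttm(x_aligned, y_aligned):
    """Average fraction of cross-domain samples closer than the true match."""
    n = x_aligned.shape[0]
    d = cdist(x_aligned, y_aligned)           # pairwise distances across domains
    true_d = np.diag(d)                       # distance of each sample to its true match
    frac_x = (d < true_d[:, None]).sum(axis=1) / (n - 1)  # X -> Y direction
    frac_y = (d < true_d[None, :]).sum(axis=0) / (n - 1)  # Y -> X direction
    return np.mean((frac_x + frac_y) / 2)

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 5))
y = x + rng.normal(scale=0.1, size=(100, 5))  # noisy "aligned" copy of x
print(foscttm(x, y))                          # close to 0 for a good alignment
###Output _____no_output_____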
Hierarchical-Clustering/.ipynb_checkpoints/Hierarchical Clustering Lab-zh-checkpoint.ipynb
###Markdown 层次聚类 Lab在此 notebook 中,我们将使用 sklearn 对[鸢尾花数据集](https://archive.ics.uci.edu/ml/datasets/iris)执行层次聚类。该数据集包含 4 个维度/属性和 150 个样本。每个样本都标记为某种鸢尾花品种(共三种)。在此练习中,我们将忽略标签和基于属性的聚类,并将不同层次聚类技巧的结果与实际标签进行比较,看看在这种情形下哪种技巧的效果最好。然后,我们将可视化生成的聚类层次。 1. 导入鸢尾花数据集 ###Code from sklearn import datasets iris = datasets.load_iris() ###Output _____no_output_____ ###Markdown 查看数据集中的前 10 个样本 ###Code iris.data[:10] iris.target ###Output _____no_output_____ ###Markdown 2. 聚类现在使用 sklearn 的 [```AgglomerativeClustering```](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html) 进行层次聚类 ###Code from sklearn.cluster import AgglomerativeClustering # Hierarchical clustering # Ward is the default linkage algorithm, so we'll start with that ward = AgglomerativeClustering(n_clusters=3) ward_pred = ward.fit_predict(iris.data) ###Output _____no_output_____ ###Markdown 并且尝试完全连接法和平均连接法**练习**:* 通过完全连接法进行层次聚类,将预测的标签存储在变量 ```complete_pred``` 中* 通过平均连接法进行层次聚类,将预测的标签存储在变量 ```avg_pred``` 中注意:请查看 [```AgglomerativeClustering```](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html) 文档以查找要作为 ```linkage``` 值传递的合适值 ###Code # Hierarchical clustering using complete linkage # TODO: Create an instance of AgglomerativeClustering with the appropriate parameters complete = # Fit & predict # TODO: Make AgglomerativeClustering fit the dataset and predict the cluster labels complete_pred = # Hierarchical clustering using average linkage # TODO: Create an instance of AgglomerativeClustering with the appropriate parameters avg = # Fit & predict # TODO: Make AgglomerativeClustering fit the dataset and predict the cluster labels avg_pred = ###Output _____no_output_____ ###Markdown 为了判断哪个聚类结果与样本的原始标签更匹配,我们可以使用 ```adjusted_rand_score```,它是一个*外部聚类有效性指标*,分数在 -1 到 1 之间,1 表示两个聚类在对数据集中的样本进行分组时完全一样(无论每个聚类分配的标签如何)。在这门课程的稍后部分会讨论聚类有效性指标。 ###Code from sklearn.metrics import adjusted_rand_score ward_ar_score = adjusted_rand_score(iris.target, ward_pred) ###Output _____no_output_____ ###Markdown **练习**:* 计算通过完全连接法和平均连接法得出的聚类的调整离差平方和(ward)分数 ###Code # TODO: Calculated the adjusted Rand score for the complete linkage clustering labels complete_ar_score = # TODO: Calculated the adjusted Rand score for the average linkage clustering labels avg_ar_score = ###Output _____no_output_____ ###Markdown 哪个算法的调整兰德分数更高? ###Code print( "Scores: \nWard:", ward_ar_score,"\nComplete: ", complete_ar_score, "\nAverage: ", avg_ar_score) ###Output _____no_output_____ ###Markdown 3. 
标准化对聚类的影响可以改进该聚类结果吗?我们再看看数据集 ###Code iris.data[:15] ###Output _____no_output_____ ###Markdown 查看该数据集后,可以看出第四列的值比其他列要小,因此它的方差对聚类处理流程的影响更新(因为聚类是基于距离的)。我们对数据集进行[标准化](https://en.wikipedia.org/wiki/Feature_scaling) ,使每个维度都位于 0 到 1 之间,以便在聚类流程中具有相等的权重。方法是让每列减去最小值,然后除以范围。sklearn 提供了一个叫做 ```preprocessing.normalize()``` 的实用工具,可以帮助我们完成这一步 ###Code from sklearn import preprocessing normalized_X = preprocessing.normalize(iris.data) normalized_X[:10] ###Output _____no_output_____ ###Markdown 现在所有列都在 0 到 1 这一范围内了。这么转换之后对数据集进行聚类会形成更好的聚类吗?(与样本的原始标签更匹配) ###Code ward = AgglomerativeClustering(n_clusters=3) ward_pred = ward.fit_predict(normalized_X) complete = AgglomerativeClustering(n_clusters=3, linkage="complete") complete_pred = complete.fit_predict(normalized_X) avg = AgglomerativeClustering(n_clusters=3, linkage="average") avg_pred = avg.fit_predict(normalized_X) ward_ar_score = adjusted_rand_score(iris.target, ward_pred) complete_ar_score = adjusted_rand_score(iris.target, complete_pred) avg_ar_score = adjusted_rand_score(iris.target, avg_pred) print( "Scores: \nWard:", ward_ar_score,"\nComplete: ", complete_ar_score, "\nAverage: ", avg_ar_score) ###Output _____no_output_____ ###Markdown 4. 通过 scipy 进行谱系图可视化我们来可视化分数最高的聚类结果。为此,我们需要使用 Scipy 的 [```linkage```](https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html) 函数再次进行聚类,以便获取稍后用来可视化层次关系的连接矩阵 ###Code # Import scipy's linkage function to conduct the clustering from scipy.cluster.hierarchy import linkage # Specify the linkage type. Scipy accepts 'ward', 'complete', 'average', as well as other values # Pick the one that resulted in the highest Adjusted Rand Score linkage_type = 'ward' linkage_matrix = linkage(normalized_X, linkage_type) ###Output _____no_output_____ ###Markdown 使用 scipy 的 [dendrogram](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.cluster.hierarchy.dendrogram.html) 函数进行绘制 ###Code from scipy.cluster.hierarchy import dendrogram import matplotlib.pyplot as plt plt.figure(figsize=(22,18)) # plot using 'dendrogram()' dendrogram(linkage_matrix) plt.show() ###Output _____no_output_____ ###Markdown 5. 通过 Seaborn 的 ```clustermap``` 进行可视化 python 的 [seaborn](http://seaborn.pydata.org/index.html) 绘制库可以绘制[聚类图](http://seaborn.pydata.org/generated/seaborn.clustermap.html),它是一种更详细地可视化数据集的谱系图。它也会进行聚类,因此我们只需传入数据集和想要的连接类型,它将在后台使用 scipy 进行聚类 ###Code import seaborn as sns sns.clustermap(normalized_X, figsize=(12,18), method=linkage_type, cmap='viridis') # Expand figsize to a value like (18, 50) if you want the sample labels to be readable # Draw back is that you'll need more scrolling to observe the dendrogram plt.show() ###Output _____no_output_____
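###Markdown One possible way to fill in the lab's TODO cells above; the normalized section later in the same notebook uses identical calls, so only the linkage arguments and the adjusted Rand scores are added here. ###Code
from sklearn import datasets
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import adjusted_rand_score

iris = datasets.load_iris()

# Hierarchical clustering with complete linkage
complete = AgglomerativeClustering(n_clusters=3, linkage="complete")
complete_pred = complete.fit_predict(iris.data)

# Hierarchical clustering with average linkage
avg = AgglomerativeClustering(n_clusters=3, linkage="average")
avg_pred = avg.fit_predict(iris.data)

complete_ar_score = adjusted_rand_score(iris.target, complete_pred)
avg_ar_score = adjusted_rand_score(iris.target, avg_pred)
print(complete_ar_score, avg_ar_score)
###Output _____no_output_____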
Ex3_SteelPlates.ipynb
###Markdown ML Application Example Classification using Steel Plates Faults Data SetThe task of this example is to implement a complete Data Driven pipeline (load, data-analysis, visualisation, model selection and optimization, prediction) on a specific Dataset. In this example the challenge is to perform a classification with different models to find the most accurate prediction. Dataset The notebook will upload a public available dataset: https://archive.ics.uci.edu/ml/datasets/steel+plates+faults Source: Semeion, Research Center of Sciences of Communication, Via Sersale 117, 00128, Rome, Italy. www.semeion.it Data Set Information: Type of dependent variables (7 Types of Steel Plates Faults): Pastry Z_Scratch K_Scatch Stains Dirtiness Bumps Other_Faults Attribute Information: 27 independent variables: X_Minimum X_Maximum Y_Minimum Y_Maximum Pixels_Areas X_Perimeter Y_Perimeter Sum_of_Luminosity Minimum_of_LuminosityMaximum_of_LuminosityLength_of_ConveyerTypeOfSteel_A300 TypeOfSteel_A400 Steel_Plate_ThicknessEdges_Index Empty_Index Square_Index Outside_X_Index Edges_X_Index Edges_Y_Index Outside_Global_Index LogOfAreas Log_X_Index Log_Y_Index Orientation_IndexLuminosity_Index SigmoidOfAreas ###Code # algebra import numpy as np # data structure import pandas as pd # data visualization import matplotlib.pylab as plt # another module for data visualization import plotly.express as px import seaborn as sns #file handling from pathlib import Path ###Output _____no_output_____ ###Markdown Data loadThe process consist in downloading the data if needed, loading the data as a Pandas dataframe ###Code filename = "Faults.NNA" separator = '\t' columns = ['X_Minimum','X_Maximum','Y_Minimum','Y_Maximum','Pixels_Areas','X_Perimeter','Y_Perimeter','Sum_of_Luminosity','Minimum_of_Luminosity','Maximum_of_Luminosity','Length_of_Conveyer', 'TypeOfSteel_A300','TypeOfSteel_A400','Steel_Plate_Thickness','Edges_Index','Empty_Index','Square_Index','Outside_X_Index','Edges_X_Index','Edges_Y_Index','Outside_Global_Index', 'LogOfAreas','Log_X_Index','Log_Y_Index','Orientation_Index','Luminosity_Index','SigmoidOfAreas','Pastry','Z_Scratch','K_Scatch','Stains','Dirtiness','Bumps','Other_Faults'] #if the dataset is not already in the working dir, it will download my_file = Path(filename) if not my_file.is_file(): print("Downloading dataset") !wget https://archive.ics.uci.edu/ml/machine-learning-databases/00198/Faults.NNA #function to semplificate the load of dataset, in case it is a csv, tsv or excel file #output is a pandas dataframe def load_csv(filename,separator,columns): try: csv_table = pd.read_csv(filename,sep=separator,names=columns,dtype='float64') except: csv_table = pd.read_excel(filename,names=columns) print("n. samples: {}".format(csv_table.shape[0])) print("n. columns: {}".format(csv_table.shape[1])) return csv_table #.dropna() data = load_csv(filename,separator,columns) ###Output _____no_output_____ ###Markdown Data Analysis and VisualizationIn this section confidence with the data is gained, data are plotted and cleaned ###Code #How does the dataset look like? data.head() Faults = ['Pastry', 'Z_Scratch', 'K_Scatch', 'Stains', 'Dirtiness', 'Bumps', 'Other_Faults'] data['class'] = (data[Faults]*np.arange(len(Faults))).sum(axis=1) #Do we have a balanced dataset? 
plt.bar(Faults,data[Faults].sum()) plt.xticks(rotation=30) plt.grid() #Name of all columns print(data.columns.values) #let's have a look at the data and their correlations, if any measurements = ['X_Minimum', 'X_Maximum', 'Y_Minimum', 'Y_Maximum', 'Pixels_Areas', 'X_Perimeter', 'Y_Perimeter', 'Sum_of_Luminosity', 'Minimum_of_Luminosity', 'Maximum_of_Luminosity', 'Length_of_Conveyer', 'TypeOfSteel_A300', 'TypeOfSteel_A400', 'Steel_Plate_Thickness', 'Edges_Index', 'Empty_Index', 'Square_Index', 'Outside_X_Index', 'Edges_X_Index', 'Edges_Y_Index', 'Outside_Global_Index', 'LogOfAreas', 'Log_X_Index', 'Log_Y_Index', 'Orientation_Index', 'Luminosity_Index', 'SigmoidOfAreas'] target = ['class'] #let's have a look only at a few parameters sns.pairplot(data[['X_Minimum', 'X_Maximum', 'Y_Minimum', 'Y_Maximum', 'Pixels_Areas', 'X_Perimeter', 'Y_Perimeter', 'Sum_of_Luminosity', 'Minimum_of_Luminosity', 'Maximum_of_Luminosity']+target],hue='class') #Let's have a look if there is the possibility to reduce the dimensionality #to see if there is the possibility to see if the fault-classes are "separable" from sklearn.decomposition import PCA aux = data[measurements] aux = (aux-aux.mean())/aux.std() pca = PCA(n_components=3) X_r = pca.fit(aux).transform(aux) y_r = data[target].values.flatten() colors = plt.cm.get_cmap('Dark2')(np.linspace(0,1,len(Faults))) lw = 2 fig = plt.figure(figsize=[10,10]) ax = plt.axes(projection='3d') ax.scatter(X_r[:,0], X_r[:,1], X_r[:,2], c=data[target].values, cmap='viridis', linewidth=0.5); #print(data[target].column) #another fancy way of doing the previous plot px.scatter_3d( x=X_r[:,0], y=X_r[:,1], z=X_r[:, 2], color=data['class'].values,color_continuous_scale='Rainbow' ) #t-distributed stochastic neighbor embedding is a statistical method for visualizing high-dimensional data by giving each datapoint a location in a two or three-dimensional map #https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html from sklearn.manifold import TSNE tsne = TSNE(n_components=3) X_r = tsne.fit_transform(aux) y_r = data[target].values.flatten() #colors = plt.cm.get_cmap('viridis')(np.linspace(0,1,len(Faults))) #lw = 2 fig = plt.figure(figsize=[10,10]) ax = plt.axes(projection='3d') ax.scatter(X_r[:,0], X_r[:,1], X_r[:,2], c=data[target].values, cmap='viridis', linewidth=0.5); #Select only the interesting variable for the model, and remove any anomalous value (e.g. "nan") data = data.dropna() ###Output _____no_output_____ ###Markdown Machine LearningHere the interesting input features and output to predict for the task are selected, the data are opportunelly preprocessed (i.e. normalized), the dataset is splitted in two separate train and test subsets, each model is trained on the training data and evaluated against a test set. 
The evaluation metrics list can be found here ###Code #the module needed for the modeling and data mining are imported #Cross-Validation from sklearn.model_selection import train_test_split #Data normalization from sklearn.preprocessing import StandardScaler #metrics to evaluate the model from sklearn.metrics import f1_score from sklearn.metrics import confusion_matrix from sklearn.metrics import plot_confusion_matrix #Selection of feature and output variable, definition of the size (fraction of the total) of the random selected test set input_features = measurements output = target test_size = 0.33 random_state = 0 #not preprocessed data unnormalized_X,y = data[input_features],data[output] # normalisation #Having features on a similar scale can help the model converge more quickly towards the minimum scaler_X = StandardScaler().fit(unnormalized_X) X = scaler_X.transform(unnormalized_X) #check if nan are present on the data after normalization to avoid trouble later sum(np.isnan(X)) # basic train-test dataset random split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state) #dictionary to help the display of the results Score_Dict = {} #function introduced to simplifies the following comparison and test of the various #return the trained model and the score of the selected metrics def fit_predict_plot(model,X_train,y_train,X_test,y_test,class_names): model.fit(X_train,y_train) pred_y_test = model.predict(X_test) conf_matrix = confusion_matrix(y_test,pred_y_test) score = f1_score(y_test,pred_y_test,average='weighted') model_name = type(model).__name__ if(model_name=='GridSearchCV'): model_name ='CV_'+type(model.estimator).__name__ #Alternative metrics are listed here:https://scikit-learn.org/stable/modules/model_evaluation.html Score_Dict[model_name]=score fig,ax = plt.subplots(1,1,figsize=[10,10]) np.set_printoptions(precision=2) plot_confusion_matrix(model,X_test,y_test,display_labels=class_names, cmap =plt.cm.Blues, normalize='true', xticks_rotation=45,ax=ax) plt.axis('tight') correctly_classified = np.sum(np.diag(conf_matrix))/np.sum(conf_matrix) print("correctly classified :: {:.2f}".format(correctly_classified)) print("f1 score :: {:.2f}".format(score)) return model,correctly_classified ###Output _____no_output_____ ###Markdown Models used in this example are: Ridge Logistic Regression kNN Support Vector Classification Random Forest Ridge Classifier ###Code #initialization, fit and evaluation of the model from sklearn.linear_model import RidgeClassifier from sklearn.model_selection import GridSearchCV estimator = RidgeClassifier() parameters = { 'alpha':np.logspace(-2,2,5)} model = GridSearchCV(estimator, parameters,cv=5) model, ridge_score = fit_predict_plot(model,X_train,y_train.values.flatten(),X_test,y_test.values.flatten(),Faults) print(model.best_params_) ###Output _____no_output_____ ###Markdown Logistic Regression ###Code #initialization, fit and evaluation of the model from sklearn import linear_model estimator = linear_model.LogisticRegression(max_iter=10000) parameters = { 'C':np.logspace(-2,3,5)} model = GridSearchCV(estimator, parameters,cv=5) model, logistic_score = fit_predict_plot(model,X_train,y_train.values.flatten(),X_test,y_test.values.flatten(),Faults) print(model.best_params_) ###Output _____no_output_____ ###Markdown kNN ###Code #initialization, fit and evaluation of the model from sklearn.neighbors import KNeighborsClassifier estimator = KNeighborsClassifier() parameters = { 'n_neighbors':[3,5,7]} model = 
GridSearchCV(estimator, parameters,cv=5) model, knn_score = fit_predict_plot(model,X_train,y_train.values.flatten(),X_test,y_test.values.flatten(),Faults) print(model.best_params_) ###Output _____no_output_____ ###Markdown SVC ###Code from sklearn.svm import SVC estimator = SVC(gamma='auto') parameters = { 'C':[0.1,1,10,100]} model = GridSearchCV(estimator, parameters,cv=5) model, svc_score = fit_predict_plot(model,X_train,y_train.values.flatten(),X_test,y_test.values.flatten(),Faults) print(model.best_params_) ###Output _____no_output_____ ###Markdown Random Forest ###Code #initialization, fit and evaluation of the model from sklearn.ensemble import RandomForestClassifier estimator = RandomForestClassifier() parameters = { 'min_samples_leaf':[1,3,5], 'class_weight':['balanced_subsample'], 'n_estimators':[10,100,200]} model = GridSearchCV(estimator, parameters,cv=5) model, rf_score = fit_predict_plot(model,X_train,y_train.values.flatten(),X_test,y_test.values.flatten(),Faults) print(model.best_params_) #print out the results in a table from IPython.display import Markdown as md from IPython.display import display table = '<table><tr><th> Model</th><th> Accuracy Metric </th></tr>' for key, value in Score_Dict.items(): table +='<tr> <td>'+key+'</td><td>' +'%.2f'%(value)+'</td></tr>' table+='</table>' display(md(table)) names = list(Score_Dict.keys()) values = list(Score_Dict.values()) plt.figure(figsize=(6, 3)) plt.bar(names, values) plt.ylabel('Accuracy Metric') plt.xticks(rotation=30) plt.grid() #plt.ylim([0.5,0.8]) ###Output _____no_output_____ ###Markdown How to deal with Unbalanced datasetThere are at least two possibilities as explaned here and here : Undersampling or Oversampling ###Code #Undersample counts = data[target].value_counts() mincounts = np.min(counts) df = [0,0,0,0,0,0,0] df_under = pd.DataFrame() for a in range(len(Faults)): df = data.loc[data[Faults[a]]==1].sample(mincounts) df_under = pd.concat([df_under, df], axis=0) plt.bar(Faults,df_under[Faults].sum()) plt.xticks(rotation=30) plt.grid() #not preprocessed data unnormalized_X,y = df_under[input_features],df_under[output] # normalisation #Having features on a similar scale can help the model converge more quickly towards the minimum scaler_X = StandardScaler().fit(unnormalized_X) X = scaler_X.transform(unnormalized_X) # basic train-test dataset random split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state) #initialization, fit and evaluation of the model from sklearn.neighbors import KNeighborsClassifier estimator = KNeighborsClassifier() parameters = { 'n_neighbors':[3,5,7,9]} model = GridSearchCV(estimator, parameters,cv=5) model, knn_score = fit_predict_plot(model,X_train,y_train.values.flatten(),X_test,y_test.values.flatten(),Faults) print(model.best_params_) #initialization, fit and evaluation of the model from sklearn.ensemble import RandomForestClassifier estimator = RandomForestClassifier() parameters = { 'min_samples_leaf':[1,3,5], 'class_weight':['balanced_subsample'], 'n_estimators':[10,100,200,300]} model = GridSearchCV(estimator, parameters,cv=5) model, rf_score = fit_predict_plot(model,X_train,y_train.values.flatten(),X_test,y_test.values.flatten(),Faults) print(model.best_params_) #Oversample counts = data[target].value_counts() maxcounts = np.max(counts) df_over = pd.DataFrame() for a in range(len(Faults)): df = data.loc[data[Faults[a]]==1].sample(maxcounts,replace=True) df_over = pd.concat([df_over, df], axis=0) 
plt.bar(Faults,df_over[Faults].sum()) plt.xticks(rotation=30) plt.grid() #not preprocessed data unnormalized_X,y = df_over[input_features],df_over[output] # normalisation #Having features on a similar scale can help the model converge more quickly towards the minimum scaler_X = StandardScaler().fit(unnormalized_X) X = scaler_X.transform(unnormalized_X) # basic train-test dataset random split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state) #initialization, fit and evaluation of the model from sklearn.neighbors import KNeighborsClassifier estimator = KNeighborsClassifier() parameters = { 'n_neighbors':[3,5,7,9]} model = GridSearchCV(estimator, parameters,cv=5) model, knn_score = fit_predict_plot(model,X_train,y_train.values.flatten(),X_test,y_test.values.flatten(),Faults) print(model.best_params_) #initialization, fit and evaluation of the model from sklearn.ensemble import RandomForestClassifier estimator = RandomForestClassifier() parameters = { 'min_samples_leaf':[1,3,5], 'class_weight':['balanced_subsample'], 'n_estimators':[10,100,200,300]} model = GridSearchCV(estimator, parameters,cv=5) model, rf_score = fit_predict_plot(model,X_train,y_train.values.flatten(),X_test,y_test.values.flatten(),Faults) print(model.best_params_) ###Output _____no_output_____
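###Markdown The per-class resampling above can be written as one reusable helper. This is a sketch that follows the same logic (sample every class down to the rarest count without replacement, or up to the most frequent count with replacement) and assumes the `data` DataFrame and `Faults` list defined earlier in the notebook. ###Code
import pandas as pd

def resample_per_class(df, class_columns, mode="under", random_state=0):
    """Balance a one-hot labelled DataFrame by under- or oversampling each class."""
    counts = df[class_columns].sum()
    target_n = int(counts.min() if mode == "under" else counts.max())
    parts = []
    for col in class_columns:
        subset = df.loc[df[col] == 1]
        parts.append(subset.sample(n=target_n,
                                   replace=(mode == "over"),
                                   random_state=random_state))
    return pd.concat(parts, axis=0).reset_index(drop=True)

# Usage with the notebook's objects:
# df_under = resample_per_class(data, Faults, mode="under")
# df_over  = resample_per_class(data, Faults, mode="over")
###Output _____no_output_____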
08_LinearModels/8D_LR_SVM.ipynb
###Markdown Task-D: Collinear features and their effect on linear models ###Code %matplotlib inline import warnings warnings.filterwarnings("ignore") import pandas as pd import numpy as np from sklearn.datasets import load_iris from sklearn.linear_model import SGDClassifier from sklearn.model_selection import GridSearchCV import seaborn as sns import matplotlib.pyplot as plt data = pd.read_csv('task_d.csv') data.head() X = data.drop(['target'], axis=1).values Y = data['target'].values ###Output _____no_output_____ ###Markdown Doing perturbation test to check the presence of collinearity Task: 1 Logistic Regression1. Finding the Correlation between the features a. check the correlation between the features b. plot heat map of correlation matrix using seaborn heatmap2. Finding the best model for the given data a. Train Logistic regression on data(X,Y) that we have created in the above cell b. Find the best hyper prameter alpha with hyper parameter tuning using k-fold cross validation (grid search CV or random search CV make sure you choose the alpha in log space) c. Creat a new Logistic regression with the best alpha (search for how to get the best hyper parameter value), name the best model as 'best_model' 3. Getting the weights with the original data a. train the 'best_model' with X, Y b. Check the accuracy of the model 'best_model_accuracy' c. Get the weights W using best_model.coef_4. Modifying original data a. Add a noise(order of 10^-2) to each element of X and get the new data set X' (X' = X + e) b. Train the same 'best_model' with data (X', Y) c. Check the accuracy of the model 'best_model_accuracy_edited' d. Get the weights W' using best_model.coef_ 5. Checking deviations in metric and weights a. find the difference between 'best_model_accuracy_edited' and 'best_model_accuracy' b. find the absolute change between each value of W and W' ==> |(W-W')| c. print the top 4 features which have higher % change in weights compare to the other feature Task: 2 Linear SVM1. 
Do the same steps (2, 3, 4, 5) we have done in the above task 1.Do write the observations based on the results you get from the deviations of weights in both Logistic Regression and linear SVM `TASK1 : Logistic Regression ` ###Code from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score alphas = np.logspace(-2, 1, 20).tolist() corr = data.drop(['target'], axis=1).corr() mask = np.zeros_like(corr) mask[np.triu_indices_from(mask)] = True with sns.axes_style("white"): f, ax = plt.subplots(figsize=(7, 5)) ax = sns.heatmap(corr, mask=mask, vmax=.3, square=True, cmap='mako', annot=True) LR = LogisticRegression(random_state=0) parameters = {'C': alphas} clf = GridSearchCV(LR, parameters, cv=5, scoring='accuracy') clf = clf.fit(X, Y) clf.best_params_, clf.best_score_, clf.best_estimator_ best_model = LogisticRegression(random_state=0, C=0.01, max_iter=10) best_model.fit(X, Y) pred = best_model.predict(X) print(accuracy_score(Y, pred)) print("Best-Weights : ", best_model.coef_) # X_dash = X + e noise = [] for i in range(X.shape[1]): noise.append(np.array([j*l for j in X[:,i] for l in np.random.uniform(1*10**-2, 9*10**-2, 1)])) X_dash = np.array(noise).reshape(100, 7) fig = plt.figure(figsize=(20,8)) plt.subplot(3, 1, 1); plt.plot(X); plt.title("X - Datapoints") plt.subplot(3, 1, 2); plt.plot(X_dash); plt.title("X-dash - Datapoints") plt.subplot(3, 1, 3); plt.plot(np.subtract(X, X_dash)); plt.title("X-delta - Datapoints") fig.show() best_model_edited = LogisticRegression(C=0.01, random_state=0, max_iter=10) best_model_edited.fit(X_dash, Y) pred = best_model_edited.predict(X) print(accuracy_score(Y, pred)) print("Best-Model-Weights : \n", best_model.coef_, "\n") print("Best-Model-Edited-Weights : \n", best_model_edited.coef_) print("Weight-Delta:") top_4 = np.argsort(np.subtract(best_model.coef_, best_model_edited.coef_)).flatten().tolist() print("Top4 Affected features : ", [list(data.columns)[i] for i in sorted(top_4, reverse=True)[:4]]) ###Output Weight-Delta: Top4 Affected features : ['w', '2*z+3*x*x', '2*y', 'x*x'] ###Markdown `OBSERVATION : ``1. Classifier overfits within 10 epochs, due to colinear features.``2. Weight-Delta clearly indicates, one features changes affects the colinear feature weights.``3. 
Top-4 Features are colinnear features.` `TASK2 : Linear SVM ` ###Code from sklearn.svm import LinearSVC LR = LinearSVC(random_state=0) parameters = {'C': alphas} clf = GridSearchCV(LR, parameters, cv=5, scoring='accuracy') clf = clf.fit(X, Y) clf.best_params_, clf.best_score_, clf.best_estimator_ best_model = LinearSVC(random_state=0, C=0.01, max_iter=10) best_model.fit(X, Y) pred = best_model.predict(X) print(accuracy_score(Y, pred)) print("Best-Weights : ", best_model.coef_) # X_dash = X + e noise = [] for i in range(X.shape[1]): noise.append(np.array([j*l for j in X[:,i] for l in np.random.uniform(1*10**-2, 9*10**-2, 1)])) X_dash = np.array(noise).reshape(100, 7) fig = plt.figure(figsize=(20,8)) plt.subplot(3, 1, 1); plt.plot(X); plt.title("X - Datapoints") plt.subplot(3, 1, 2); plt.plot(X_dash); plt.title("X-dash - Datapoints") plt.subplot(3, 1, 3); plt.plot(np.subtract(X, X_dash)); plt.title("X-delta - Datapoints") fig.show() best_model_edited = LinearSVC(C=0.01, random_state=0, max_iter=10) best_model_edited.fit(X_dash, Y) pred = best_model_edited.predict(X) print("Accuracy Score : ", accuracy_score(Y, pred), "\n") print("Best-Model-Weights : \n", best_model.coef_, "\n") print("Best-Model-Edited-Weights : \n", best_model_edited.coef_) print("Weight-Delta:") top_4 = np.argsort(np.subtract(best_model.coef_, best_model_edited.coef_)).flatten().tolist() print("Top4 Affected features : ", [list(data.columns)[i] for i in sorted(top_4, reverse=True)[:4]]) ###Output Weight-Delta: Top4 Affected features : ['w', '2*z+3*x*x', '2*y', 'x*x']
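###Markdown Note that the perturbation cells above multiply each feature value by a random factor in [0.01, 0.09], which rescales the data, while the task text asks for additive noise (X' = X + e with e on the order of 1e-2). Below is a sketch of the additive version on synthetic collinear features, so the weight shift of the collinear pair can be compared against the independent ones. ###Code
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
n = 500
x = rng.normal(size=n)
z = rng.normal(size=n)
X = np.column_stack([x,
                     2 * x + 0.01 * rng.normal(size=n),  # nearly collinear with x
                     z,
                     rng.normal(size=n)])
y = (x + z + 0.1 * rng.normal(size=n) > 0).astype(int)

model = LogisticRegression(C=1.0, max_iter=1000)
w = model.fit(X, y).coef_.ravel().copy()

X_pert = X + rng.normal(scale=1e-2, size=X.shape)        # X' = X + e
w_pert = model.fit(X_pert, y).coef_.ravel()

print("absolute weight change per feature:", np.abs(w - w_pert))
###Output _____no_output_____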
2-data-split-tcr.ipynb
###Markdown Train / test data split This notebook performs two important tasks:1. Generates negative training examples by randomly mispairing TCRs and epitopes from the positive training examples collated in the previous notebook.2. Splits the positive and negative training examples into train and test subsets, ensuring randomisation and mutual exclusivity by TCR. ###Code import pandas as pd import numpy as np from sklearn.model_selection import train_test_split # Set a random seed for reproducibility seed = 3 # Read in the collated dataframe from the previous notebook as the set of positive training examples path = 'data/input/collated/collated.csv' positives = pd.read_csv(path, sep=',', dtype=str) # Drop the 'source' column, which is not needed at this stage positives.drop('source', axis=1, inplace=True) positives ###Output _____no_output_____ ###Markdown Create the negative set ###Code # Collect a list of TCRs from the positive training examples tcr = positives.drop('epitope', axis=1) # Collect a list of epitopes from the positive training examples negepitopes = pd.DataFrame(positives['epitope'].unique(), columns=['epitope']) negepitopes # Determine the number of times to reuse each epitope to achieve a balanced dataset n = round(len(tcr)/len(negepitopes)) negatives = pd.DataFrame(columns=positives.columns) # Randomly mispair the TCRs and epitopes to generate the negative training examples for i in range(n): batch = tcr.sample(negepitopes.size, random_state=seed+i) batch.reset_index(inplace=True, drop=True) batch['epitope'] = negepitopes['epitope'] negatives = negatives.append(batch, ignore_index=True) # Remove any negative examples generated that already exist in the positive set intersection = pd.merge(positives, negatives, how='inner', on=['cdr3a', 'cdr3b', 'epitope'], suffixes=['', '_del']) intersection.drop(intersection.columns[intersection.columns.str.contains('_del')], axis=1, inplace=True) negatives = pd.concat([negatives, intersection, intersection]).drop_duplicates(keep=False) # Add labels positives['y'] = 1. negatives['y'] = 0. # Union the negatives and positives data = positives.append(negatives, ignore_index=True) # Shuffle the data data = data.sample(frac=1, random_state=seed).reset_index(drop=True) display(data) # Split the train and test data ensuring that they do not include any of the same TCRs tcr.drop_duplicates(inplace=True) train_tcrs, test_tcrs = train_test_split(tcr, test_size=0.10, random_state=seed) train = pd.merge(train_tcrs, data, how='inner') test = pd.merge(test_tcrs, data, how='inner') # Output the train and test sets path = 'data/input/collated/train.csv' train.to_csv(path, index=False) path = 'data/input/collated/test.csv' test.to_csv(path, index=False) ###Output _____no_output_____
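###Markdown A quick sanity check for the split above, assuming the column names used in this notebook (`cdr3a`, `cdr3b`, `y`) and the output paths written in the last cell: the train and test sets should share no TCRs, and each should contain both labels. ###Code
import pandas as pd

train = pd.read_csv('data/input/collated/train.csv')
test = pd.read_csv('data/input/collated/test.csv')

# TCRs present in both subsets (expected to be empty)
shared = pd.merge(train[['cdr3a', 'cdr3b']].drop_duplicates(),
                  test[['cdr3a', 'cdr3b']].drop_duplicates(),
                  how='inner', on=['cdr3a', 'cdr3b'])
print("TCRs shared between train and test:", len(shared))

print("train label counts:\n", train['y'].value_counts())
print("test label counts:\n", test['y'].value_counts())
###Output _____no_output_____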
Data Warehouse/Amazon United Kingdom/.ipynb_checkpoints/Amazon_UK - Hair Products - Conditioner-checkpoint.ipynb
###Markdown Completed ###Code import requests from bs4 import BeautifulSoup import csv import re import json import sqlite3 import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from time import sleep import os from collections import Counter import pickle import warnings import time warnings.filterwarnings("ignore") from wordcloud import WordCloud import matplotlib.pyplot as plt import PIL from PIL import Image, ImageFilter from selenium import webdriver from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.chrome.options import Options import boto3 import botocore %matplotlib inline # Use proxy and headers for safe web scraping # os.environ['HTTPS_PROXY'] = 'http://3.112.188.39:8080' # pd.options.mode.chained_assignment = None headers = { 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/' '537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'} countries_link = {'USA':'https://www.amazon.com', 'Australia':'https://www.amazon.com.au', 'UK':'https://www.amazon.co.uk', 'India':'https://www.amazon.in', 'Japan':'https://www.amazon.co.jp/', 'UAE':'https://amazon.ae'} ###Output _____no_output_____ ###Markdown List of Products ###Code amazon_usa = {'health_and_beauty':{'hair_products':{'shampoo':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11057241%2Cn%3A17911764011%2Cn%3A11057651&dc&', 'conditioner':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11057241%2Cn%3A17911764011%2Cn%3A11057251&dc&', 'hair_scalp_treatment':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11057241%2Cn%3A11057431&dc&', 'treatment_oil':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11057241%2Cn%3A10666439011&dc&', 'hair_loss':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11057241%2Cn%3A10898755011&dc&'}, 'skin_care':{'body':{'cleansers':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060521%2Cn%3A11056281&dc&', 'moisturizers':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060521%2Cn%3A11060661&dc&', 'treatments':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060521%2Cn%3A11056421&dc&'}, 'eyes':{'creams':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11061941%2Cn%3A7730090011&dc&', 'gels':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11061941%2Cn%3A7730092011&dc&', 'serums':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11061941%2Cn%3A7730098011&dc&'}, 'face':{'f_cleansers':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060711%2Cn%3A11060901&dc&', 'f_moisturizers':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060711%2Cn%3A11060901&dc&', 'scrubs':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060711%2Cn%3A11061091&dc&', 'toners':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060711%2Cn%3A11061931&dc&', 
'f_treatments':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060711%2Cn%3A11061931&dc&'}, 'lipcare':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A3761351&dc&'}}, 'food':{'tea':{'herbal':'https://www.amazon.com/s?k=tea&i=grocery&rh=n%3A16310101%2Cn%3A16310231%2Cn%3A16521305011%2Cn%3A16318401%2Cn%3A16318511&dc&', 'green':'https://www.amazon.com/s?k=tea&i=grocery&rh=n%3A16310101%2Cn%3A16310231%2Cn%3A16521305011%2Cn%3A16318401%2Cn%3A16318471&dc&', 'black':'https://www.amazon.com/s?k=tea&i=grocery&rh=n%3A16310101%2Cn%3A16310231%2Cn%3A16521305011%2Cn%3A16318401%2Cn%3A16318411&dc&', 'chai':'https://www.amazon.com/s?k=tea&i=grocery&rh=n%3A16310101%2Cn%3A16310231%2Cn%3A16521305011%2Cn%3A16318401%2Cn%3A348022011&dc&'}, 'coffee':'https://www.amazon.com/s?k=tea&i=grocery&rh=n%3A16310101%2Cn%3A16310231%2Cn%3A16521305011%2Cn%3A16318031%2Cn%3A2251593011&dc&', 'dried_fruits':{'mixed':'https://www.amazon.com/s?k=dried+fruits&i=grocery&rh=n%3A16310101%2Cn%3A6506977011%2Cn%3A9865332011%2Cn%3A9865334011%2Cn%3A9865348011&dc&', 'mangoes':'https://www.amazon.com/s?k=dried+fruits&rh=n%3A16310101%2Cn%3A9865346011&dc&'}, 'nuts':{'mixed':'https://www.amazon.com/s?k=nuts&rh=n%3A16310101%2Cn%3A16322931&dc&', 'peanuts':'https://www.amazon.com/s?k=nuts&i=grocery&rh=n%3A16310101%2Cn%3A18787303011%2Cn%3A16310221%2Cn%3A16322881%2Cn%3A16322941&dc&', 'cashews':'https://www.amazon.com/s?k=nuts&i=grocery&rh=n%3A16310101%2Cn%3A18787303011%2Cn%3A16310221%2Cn%3A16322881%2Cn%3A16322901&dc&'}}, 'supplements':{'sports':{'pre_workout':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A6973663011%2Cn%3A6973697011&dc&', 'protein':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A6973663011%2Cn%3A6973704011&dc&', 'fat_burner':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A6973663011%2Cn%3A6973679011&dc&', 'weight_gainer':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A6973663011%2Cn%3A6973725011&dc&'}, 'vitamins_dietary':{'supplements':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A3764441%2Cn%3A6939426011&dc&', 'multivitamins':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A3774861&dc&'}}, 'wellness':{'ayurveda':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A10079996011%2Cn%3A13052911%2Cn%3A13052941&dc&', 'essential_oil_set':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A10079996011%2Cn%3A13052911%2Cn%3A18502613011&dc&', 'massage_oil':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A10079996011%2Cn%3A14442631&dc&'}, 'personal_accessories':{'bags':{'women':{'clutches':'https://www.amazon.com/s?k=bags&i=fashion-womens-handbags&bbn=15743631&rh=n%3A7141123011%2Cn%3A%217141124011%2Cn%3A7147440011%2Cn%3A15743631%2Cn%3A17037745011&dc&', 'crossbody':'https://www.amazon.com/s?k=bags&i=fashion-womens-handbags&bbn=15743631&rh=n%3A7141123011%2Cn%3A%217141124011%2Cn%3A7147440011%2Cn%3A15743631%2Cn%3A2475899011&dc&', 'fashion':'https://www.amazon.com/s?k=bags&i=fashion-womens-handbags&bbn=15743631&rh=n%3A7141123011%2Cn%3A%217141124011%2Cn%3A7147440011%2Cn%3A15743631%2Cn%3A16977745011&dc&', 'hobo':'https://www.amazon.com/s?k=bags&i=fashion-womens-handbags&bbn=15743631&rh=n%3A7141123011%2Cn%3A%217141124011%2Cn%3A7147440011%2Cn%3A15743631%2Cn%3A16977747011&dc&'}}, 
'jewelry':{'anklets':'https://www.amazon.com/s?i=fashion-womens-intl-ship&bbn=16225018011&rh=n%3A16225018011%2Cn%3A7192394011%2Cn%3A7454897011&dc&', 'bracelets':'https://www.amazon.com/s?i=fashion-womens-intl-ship&bbn=16225018011&rh=n%3A16225018011%2Cn%3A7192394011%2Cn%3A7454898011&dc&', 'earrings':'https://www.amazon.com/s?i=fashion-womens-intl-ship&bbn=16225018011&rh=n%3A16225018011%2Cn%3A7192394011%2Cn%3A7454917011&dc&', 'necklaces':'https://www.amazon.com/s?i=fashion-womens-intl-ship&bbn=16225018011&rh=n%3A16225018011%2Cn%3A7192394011%2Cn%3A7454917011&dc&', 'rings':'https://www.amazon.com/s?i=fashion-womens-intl-ship&bbn=16225018011&rh=n%3A16225018011%2Cn%3A7192394011%2Cn%3A7454939011&dc&'}, 'artisan_fabrics':'https://www.amazon.com/s?k=fabrics&rh=n%3A2617941011%2Cn%3A12899121&dc&'}} amazon_uk = {'health_and_beauty':{'hair_products':{'shampoo':'https://www.amazon.co.uk/b/ref=amb_link_5?ie=UTF8&node=74094031&pf_rd_m=A3P5ROKL5A1OLE&pf_rd_s=merchandised-search-leftnav&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_t=101&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_i=66469031', 'conditioner':'https://www.amazon.co.uk/b/ref=amb_link_6?ie=UTF8&node=2867976031&pf_rd_m=A3P5ROKL5A1OLE&pf_rd_s=merchandised-search-leftnav&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_t=101&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_i=66469031', 'hair_loss':'https://www.amazon.co.uk/b/ref=amb_link_11?ie=UTF8&node=2867979031&pf_rd_m=A3P5ROKL5A1OLE&pf_rd_s=merchandised-search-leftnav&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_t=101&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_i=66469031', 'hair_scalp_treatment':'https://www.amazon.co.uk/b/ref=amb_link_7?ie=UTF8&node=2867977031&pf_rd_m=A3P5ROKL5A1OLE&pf_rd_s=merchandised-search-leftnav&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_t=101&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_i=66469031', 'treatment_oil':'https://www.amazon.co.uk/hair-oil-argan/b/ref=amb_link_8?ie=UTF8&node=2867981031&pf_rd_m=A3P5ROKL5A1OLE&pf_rd_s=merchandised-search-leftnav&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_t=101&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_i=66469031'}, 'skin_care':{'body':{'cleanser':'https://www.amazon.co.uk/s/ref=lp_344269031_nr_n_3?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A344269031%2Cn%3A344282031&bbn=344269031&ie=UTF8&qid=1581612722&rnid=344269031', 'moisturizers':'https://www.amazon.co.uk/s/ref=lp_344269031_nr_n_1?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A344269031%2Cn%3A2805272031&bbn=344269031&ie=UTF8&qid=1581612722&rnid=344269031'}, 'eyes':{'creams':'https://www.amazon.co.uk/s/ref=lp_118465031_nr_n_0?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118465031%2Cn%3A344259031&bbn=118465031&ie=UTF8&qid=1581612984&rnid=118465031', 'gels':'https://www.amazon.co.uk/s/ref=lp_118465031_nr_n_1?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118465031%2Cn%3A344258031&bbn=118465031&ie=UTF8&qid=1581613044&rnid=118465031', 
'serums':'https://www.amazon.co.uk/s/ref=lp_118465031_nr_n_3?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118465031%2Cn%3A344257031&bbn=118465031&ie=UTF8&qid=1581613044&rnid=118465031'}, 'face':{'cleansers':'https://www.amazon.co.uk/s/ref=lp_118466031_nr_n_1?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118466031%2Cn%3A344265031&bbn=118466031&ie=UTF8&qid=1581613120&rnid=118466031', 'moisturizers':'https://www.amazon.co.uk/s/ref=lp_118466031_nr_n_3?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118466031%2Cn%3A2805291031&bbn=118466031&ie=UTF8&qid=1581613120&rnid=118466031', 'toners':'https://www.amazon.co.uk/s/ref=lp_118466031_nr_n_0?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118466031%2Cn%3A344267031&bbn=118466031&ie=UTF8&qid=1581613120&rnid=118466031', 'treatments':'https://www.amazon.co.uk/s?bbn=118466031&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118466031%2Cn%3A18918424031&dc&fst=as%3Aoff&qid=1581613120&rnid=118466031&ref=lp_118466031_nr_n_7'}, 'lipcare':'https://www.amazon.co.uk/s/ref=lp_118464031_nr_n_4?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118467031&bbn=118464031&ie=UTF8&qid=1581613357&rnid=118464031'}}, 'food':{'tea':{'herbal':'https://www.amazon.co.uk/s?k=tea&i=grocery&rh=n%3A340834031%2Cn%3A358584031%2Cn%3A11711401%2Cn%3A406567031&dc&qid=1581613483&rnid=344155031&ref=sr_nr_n_1', 'green':'https://www.amazon.co.uk/s?k=tea&i=grocery&rh=n%3A340834031%2Cn%3A358584031%2Cn%3A11711401%2Cn%3A406566031&dc&qid=1581613483&rnid=344155031&ref=sr_nr_n_3', 'black':'https://www.amazon.co.uk/s?k=tea&i=grocery&rh=n%3A340834031%2Cn%3A358584031%2Cn%3A11711401%2Cn%3A406564031&dc&qid=1581613483&rnid=344155031&ref=sr_nr_n_2'}, 'coffee':'https://www.amazon.co.uk/s?k=coffee&rh=n%3A340834031%2Cn%3A11711391&dc&qid=1581613715&rnid=1642204031&ref=sr_nr_n_2', 'dried_fruits':{'mixed':'https://www.amazon.co.uk/s?k=dried+fruits&rh=n%3A340834031%2Cn%3A9733163031&dc&qid=1581613770&rnid=1642204031&ref=sr_nr_n_2'}, 'nuts':{'mixed':'https://www.amazon.co.uk/s?k=mixed&rh=n%3A359964031&ref=nb_sb_noss', 'peanuts':'https://www.amazon.co.uk/s?k=peanuts&rh=n%3A359964031&ref=nb_sb_noss', 'cashews':'https://www.amazon.co.uk/s?k=cashew&rh=n%3A359964031&ref=nb_sb_noss'}}, 'supplements':{'sports':{'pre_workout':'https://www.amazon.co.uk/b/?node=5977685031&ref_=Oct_s9_apbd_odnav_hd_bw_b35Hc3L_1&pf_rd_r=C5MZHH5TH5F868B6FQWD&pf_rd_p=8086b6c9-ae16-5c3c-a879-030afa4ee08f&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=2826478031', 'protein':'https://www.amazon.co.uk/b/?node=2826510031&ref_=Oct_s9_apbd_odnav_hd_bw_b35Hc3L_0&pf_rd_r=C5MZHH5TH5F868B6FQWD&pf_rd_p=8086b6c9-ae16-5c3c-a879-030afa4ee08f&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=2826478031', 'fat_burner':'https://www.amazon.co.uk/b/?node=5977737031&ref_=Oct_s9_apbd_odnav_hd_bw_b35Hc3L_2&pf_rd_r=C5MZHH5TH5F868B6FQWD&pf_rd_p=8086b6c9-ae16-5c3c-a879-030afa4ee08f&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=2826478031'}, 'vitamins_dietary':{'supplements':'https://www.amazon.co.uk/b/?_encoding=UTF8&node=2826534031&bbn=65801031&ref_=Oct_s9_apbd_odnav_hd_bw_b35Hdc7_2&pf_rd_r=AY01DQVCB4SE7VVE7MTK&pf_rd_p=1ecdbf02-af23-502a-b7ab-9916ddd6690c&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=2826484031', 
'multivitamins':'https://www.amazon.co.uk/b/?_encoding=UTF8&node=2826506031&bbn=65801031&ref_=Oct_s9_apbd_odnav_hd_bw_b35Hdc7_1&pf_rd_r=AY01DQVCB4SE7VVE7MTK&pf_rd_p=1ecdbf02-af23-502a-b7ab-9916ddd6690c&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=2826484031'}}, 'wellness':{'massage_oil':'https://www.amazon.co.uk/b/?node=3360479031&ref_=Oct_s9_apbd_odnav_hd_bw_b50nmJ_4&pf_rd_r=GYVYF52HT2004EDTY67W&pf_rd_p=3f8e4361-c00b-588b-a07d-ff259bf98bbc&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=74073031', 'ayurveda':'https://www.amazon.co.uk/s?k=ayurveda&rh=n%3A65801031%2Cn%3A2826449031&dc&qid=1581686978&rnid=1642204031&ref=sr_nr_n_22'}, 'personal_accessories':{'bags':{'women':{'clutches':'https://www.amazon.co.uk/b/?node=1769563031&ref_=Oct_s9_apbd_odnav_hd_bw_b1vkt8h_3&pf_rd_r=VC8RX89R4V4JJ5TEBANF&pf_rd_p=cefca17f-8dac-5c80-848f-812aff1bfdd7&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=1769559031', 'crossbody':'https://www.amazon.co.uk/b/?node=1769564031&ref_=Oct_s9_apbd_odnav_hd_bw_b1vkt8h_1&pf_rd_r=VC8RX89R4V4JJ5TEBANF&pf_rd_p=cefca17f-8dac-5c80-848f-812aff1bfdd7&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=1769559031', 'fashion':'https://www.amazon.co.uk/b/?node=1769560031&ref_=Oct_s9_apbd_odnav_hd_bw_b1vkt8h_5&pf_rd_r=VC8RX89R4V4JJ5TEBANF&pf_rd_p=cefca17f-8dac-5c80-848f-812aff1bfdd7&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=1769559031', 'hobo':'https://www.amazon.co.uk/b/?node=1769565031&ref_=Oct_s9_apbd_odnav_hd_bw_b1vkt8h_4&pf_rd_r=VC8RX89R4V4JJ5TEBANF&pf_rd_p=cefca17f-8dac-5c80-848f-812aff1bfdd7&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=1769559031'}}, 'jewelry':{'anklets':'https://www.amazon.co.uk/s/ref=lp_10382835031_nr_n_0?fst=as%3Aoff&rh=n%3A193716031%2Cn%3A%21193717031%2Cn%3A10382835031%2Cn%3A10382860031&bbn=10382835031&ie=UTF8&qid=1581687575&rnid=10382835031', 'bracelets':'https://www.amazon.co.uk/s/ref=lp_10382835031_nr_n_1?fst=as%3Aoff&rh=n%3A193716031%2Cn%3A%21193717031%2Cn%3A10382835031%2Cn%3A10382861031&bbn=10382835031&ie=UTF8&qid=1581687575&rnid=10382835031', 'earrings':'https://www.amazon.co.uk/s/ref=lp_10382835031_nr_n_4?fst=as%3Aoff&rh=n%3A193716031%2Cn%3A%21193717031%2Cn%3A10382835031%2Cn%3A10382865031&bbn=10382835031&ie=UTF8&qid=1581687575&rnid=10382835031', 'necklaces':'https://www.amazon.co.uk/s/ref=lp_10382835031_nr_n_7?fst=as%3Aoff&rh=n%3A193716031%2Cn%3A%21193717031%2Cn%3A10382835031%2Cn%3A10382868031&bbn=10382835031&ie=UTF8&qid=1581687575&rnid=10382835031', 'rings':'https://www.amazon.co.uk/s/ref=lp_10382835031_nr_n_10?fst=as%3Aoff&rh=n%3A193716031%2Cn%3A%21193717031%2Cn%3A10382835031%2Cn%3A10382871031&bbn=10382835031&ie=UTF8&qid=1581687575&rnid=10382835031'}, 'artisan_fabrics':'https://www.amazon.co.uk/s?k=fabric&rh=n%3A11052681%2Cn%3A3063518031&dc&qid=1581687726&rnid=1642204031&ref=a9_sc_1'}} amazon_india = {'health_and_beauty':{'hair_products':{'shampoo':'https://www.amazon.in/b/ref=s9_acss_bw_cg_btyH1_2a1_w?ie=UTF8&node=1374334031&pf_rd_m=A1K21FY43GMZF8&pf_rd_s=merchandised-search-5&pf_rd_r=JHDJ4QHM0APVS05NGF4G&pf_rd_t=101&pf_rd_p=41b9c06b-1514-47de-a1c6-f4f13fb55ffe&pf_rd_i=1374305031', 'conditioner':'https://www.amazon.in/b/ref=s9_acss_bw_cg_btyH1_2b1_w?ie=UTF8&node=1374306031&pf_rd_m=A1K21FY43GMZF8&pf_rd_s=merchandised-search-5&pf_rd_r=CBABMCW6C69JRBGZNWWP&pf_rd_t=101&pf_rd_p=41b9c06b-1514-47de-a1c6-f4f13fb55ffe&pf_rd_i=1374305031', 'treatment_oil':''}, 'skin_care':[], 'wellness_product':[]}, 'food':{'tea':[], 'coffee':[], 'dried_fruits':[], 'nuts':[], 'supplements':[]}, 
'personal_accessories':{'bags':[], 'jewelry':[], 'artisan_fabrics':[]}} amazon_aus = {'health_and_beauty':{'hair_products':{'shampoo':'https://www.amazon.com.au/b/?_encoding=UTF8&node=5150253051&bbn=4851917051&ref_=Oct_s9_apbd_odnav_hd_bw_b5cXATz&pf_rd_r=6SEM7GFDN7CQ2W4KXM9M&pf_rd_p=9dd4b462-1094-5e36-890d-bb1b694c8b53&pf_rd_s=merchandised-search-12&pf_rd_t=BROWSE&pf_rd_i=5150070051', 'conditioner':'https://www.amazon.com.au/b/?_encoding=UTF8&node=5150226051&bbn=4851917051&ref_=Oct_s9_apbd_odnav_hd_bw_b5cXATz&pf_rd_r=6SEM7GFDN7CQ2W4KXM9M&pf_rd_p=9dd4b462-1094-5e36-890d-bb1b694c8b53&pf_rd_s=merchandised-search-12&pf_rd_t=BROWSE&pf_rd_i=5150070051'}, 'skin_care':[], 'wellness_product':[]}, 'food':{'tea':[], 'coffee':[], 'dried_fruits':[], 'nuts':[], 'supplements':[]}, 'personal_accessories':{'bags':[], 'jewelry':[], 'artisan_fabrics':[]}} amazon = {'USA':amazon_usa, 'UK':amazon_uk, 'India':amazon_india, 'Australia':amazon_aus} def hover(browser, xpath): ''' This function makes an automated mouse hovering in the selenium webdriver element based on its xpath. PARAMETER --------- browser: Selenium based webbrowser xpath: str xpath of the element in the webpage where hover operation has to be performed. ''' element_to_hover_over = browser.find_element_by_xpath(xpath) hover = ActionChains(browser).move_to_element(element_to_hover_over) hover.perform() element_to_hover_over.click() def browser(link): '''This funtion opens a selenium based chromebrowser specifically tuned to work for amazon product(singular item) webpages. Few functionality includes translation of webpage, clicking the initial popups, and hovering over product imagesso that the images can be scrape PARAMETER --------- link: str Amazon Product item link RETURN ------ driver: Selenium web browser with operated functions ''' options = Options() prefs = { "translate_whitelists": {"ja":"en","de":'en'}, "translate":{"enabled":"true"} } # helium = r'C:\Users\Dell-pc\AppData\Local\Google\Chrome\User Data\Default\Extensions\njmehopjdpcckochcggncklnlmikcbnb\4.2.12_0' # options.add_argument(helium) options.add_experimental_option("prefs", prefs) options.headless = True driver = webdriver.Chrome(chrome_options=options) driver.get(link) try: driver.find_element_by_xpath('//*[@id="nav-main"]/div[1]/div[2]/div/div[3]/span[1]/span/input').click() except: pass try: hover(driver,'//*[@id="altImages"]/ul/li[3]') except: pass try: driver.find_element_by_xpath('//*[@id="a-popover-6"]/div/header/button/i').click() except: pass try: hover(driver,'//*[@id="altImages"]/ul/li[4]') except: pass try: driver.find_element_by_xpath('//*[@id="a-popover-6"]/div/header/button/i').click() except: pass try: hover(driver,'//*[@id="altImages"]/ul/li[5]') except: pass try: driver.find_element_by_xpath('//*[@id="a-popover-6"]/div/header/button/i').click() except: pass try: hover(driver,'//*[@id="altImages"]/ul/li[6]') except: pass try: driver.find_element_by_xpath('//*[@id="a-popover-6"]/div/header/button/i').click() except: pass try: hover(driver,'//*[@id="altImages"]/ul/li[7]') except: pass try: driver.find_element_by_xpath('//*[@id="a-popover-6"]/div/header/button/i').click() except: pass try: hover(driver,'//*[@id="altImages"]/ul/li[8]') except: pass try: driver.find_element_by_xpath('//*[@id="a-popover-6"]/div/header/button/i').click() except: pass try: hover(driver,'//*[@id="altImages"]/ul/li[9]') except: pass try: driver.find_element_by_xpath('//*[@id="a-popover-6"]/div/header/button/i').click() except: pass return driver def scroll_temp(driver): ''' Automated 
Scroller in Selenium Webbrowser PARAMETER --------- driver: Selenium Webbrowser ''' pre_scroll_height = driver.execute_script('return document.body.scrollHeight;') run_time, max_run_time = 0, 2 while True: iteration_start = time.time() # Scroll webpage, the 100 allows for a more 'aggressive' scroll driver.execute_script('window.scrollTo(0,0.6*document.body.scrollHeight);') post_scroll_height = driver.execute_script('return document.body.scrollHeight;') scrolled = post_scroll_height != pre_scroll_height timed_out = run_time >= max_run_time if scrolled: run_time = 0 pre_scroll_height = post_scroll_height elif not scrolled and not timed_out: run_time += time.time() - iteration_start elif not scrolled and timed_out: break # def scroll(driver): # scroll_temp(driver) # from selenium.common.exceptions import NoSuchElementException # try: # element = driver.find_element_by_xpath('//*[@id="reviewsMedley"]/div/div[1]') # except NoSuchElementException: # try: # element = driver.find_element_by_xpath('//*[@id="reviewsMedley"]') # except NoSuchElementException: # element = driver.find_element_by_xpath('//*[@id="detail-bullets_feature_div"]') # actions = ActionChains(driver) # actions.move_to_element(element).perform() def scroll(driver): scroll_temp(driver) from selenium.common.exceptions import NoSuchElementException try: try: element = driver.find_element_by_xpath('//*[@id="reviewsMedley"]/div/div[1]') except NoSuchElementException: try: element = driver.find_element_by_xpath('//*[@id="reviewsMedley"]') except NoSuchElementException: element = driver.find_element_by_xpath('//*[@id="detail-bullets_feature_div"]') actions = ActionChains(driver) actions.move_to_element(element).perform() except NoSuchElementException: pass def browser_link(product_link,country): '''Returns all the web link of the products based on the first page of the product category. It captures product link of all the pages for that specific product. PARAMETER --------- link: str The initial web link of the product page. 
This is generally the first page of the all the items for that specfic product RETURN ------ links: list It is a list of strings which contains all the links of the items for the specific product ''' driver = browser(product_link) soup = BeautifulSoup(driver.page_source, 'lxml') try: pages_soup = soup.findAll("ul",{"class":"a-pagination"}) pages = int(pages_soup[0].findAll("li",{'class':'a-disabled'})[1].text) except: pass try: pages_soup = soup.findAll("div",{"id":"pagn"}) pages = int(pages_soup[0].findAll("span",{'class':'pagnDisabled'})[0].text) except: try: pages_soup = soup.findAll("div",{"id":"pagn"}) pages = int(pages_soup[0].findAll("span",{'class':'pagnDisabled'})[1].text) except: pass print(pages) links = [] for page in range(1,pages+1): print(page) link_page = product_link + '&page=' + str(page) driver_temp = browser(link_page) time.sleep(2) soup_temp = BeautifulSoup(driver_temp.page_source, 'lxml') try: search = soup_temp.findAll("div",{"id":"mainResults"}) temp_search = search[1].findAll("a",{'class':'a-link-normal s-access-detail-page s-color-twister-title-link a-text-normal'}) for i in range(len(temp_search)): if country == 'Australia': link = temp_search[i].get('href') else: link = countries_link[country] + temp_search[i].get('href') links.append(link) print(len(links)) except: try: search = soup_temp.findAll("div",{"class":"s-result-list s-search-results sg-row"}) temp_search = search[1].findAll("h2") if len(temp_search) < 2: for i in range(len(search[0].findAll("h2"))): temp = search[0].findAll("h2")[i] for j in range(len(temp.findAll('a'))): link = countries_link[country]+temp.findAll('a')[j].get('href') links.append(link) print(len(links)) else: for i in range(len(search[1].findAll("h2"))): temp = search[1].findAll("h2")[i] for j in range(len(temp.findAll('a'))): link = countries_link[country]+temp.findAll('a')[j].get('href') links.append(link) print(len(links)) except: pass try: search = soup_temp.findAll("div",{"id":"mainResults"}) temp_search = search[0].findAll("a",{'class':'a-link-normal s-access-detail-page s-color-twister-title-link a-text-normal'}) for i in range(len(temp_search)): if country == 'Australia': link = temp_search[i].get('href') else: link = countries_link[country] + temp_search[i].get('href') links.append(link) print(len(links)) except: try: search = soup_temp.findAll("div",{"class":"s-result-list s-search-results sg-row"}) temp_search = search[1].findAll("h2") if len(temp_search) < 2: for i in range(len(search[0].findAll("h2"))): temp = search[0].findAll("h2")[i] for j in range(len(temp.findAll('a'))): link = countries_link[country]+temp.findAll('a')[j].get('href') links.append(link) print(len(links)) else: for i in range(len(search[1].findAll("h2"))): temp = search[1].findAll("h2")[i] for j in range(len(temp.findAll('a'))): link = countries_link[country]+temp.findAll('a')[j].get('href') links.append(link) print(len(links)) except: print('Not Scrapable') links = [] return links def indexes(amazon_links,link_list): amazon_dict = amazon_links if len(link_list) == 5: return amazon_dict[link_list[0]][link_list[1]][link_list[2]][link_list[3]][link_list[4]] elif len(link_list) == 4: return amazon_dict[link_list[0]][link_list[1]][link_list[2]][link_list[3]] elif len(link_list) == 3: return amazon_dict[link_list[0]][link_list[1]][link_list[2]] elif len(link_list) == 2: return amazon_dict[link_list[0]][link_list[1]] elif len(link_list) == 1: return amazon_dict[link_list[0]] else: return print("Invalid Product") def products_links(country, **kwargs): 
amazon_links = amazon[country] directory_temp = [] for key, value in kwargs.items(): directory_temp.append(value) directory = '/'.join(directory_temp) print(directory) product_link = indexes(amazon_links,directory_temp) main_links = browser_link(product_link,country=country) return main_links,directory ###Output _____no_output_____ ###Markdown Product Scraper Function ###Code def delete_images(filename): import os file_path = '/home/jishu/Amazon_AU/' os.remove(file_path + filename) def upload_s3(filename,key): key_id = 'AKIAWR6YW7N5ZKW35OJI' access_key = 'h/xrcI9A2SRU0ds+zts4EClKAqbzU+/iXdiDcgzm' bucket_name = 'amazon-data-ecfullfill' s3 = boto3.client('s3',aws_access_key_id=key_id, aws_secret_access_key=access_key) try: s3.upload_file(filename,bucket_name,key) except FileNotFoundError: pass def product_info(link,directory,country): '''Get all the product information of an Amazon Product''' #Opening Selenium Webdrive with Amazon product driver = browser(link) time.sleep(4) scroll(driver) time.sleep(2) #Initializing BeautifulSoup operation in selenium browser selenium_soup = BeautifulSoup(driver.page_source, 'lxml') time.sleep(2) #Product Title try: product_title = driver.find_element_by_xpath('//*[@id="productTitle"]').text except: product_title = 'Not Scrapable' print(product_title) #Ratings - Star try: rating_star = float(selenium_soup.findAll('span',{'class':'a-icon-alt'})[0].text.split()[0]) except: rating_star = 'Not Scrapable' print(rating_star) #Rating - Overall try: overall_rating = int(selenium_soup.findAll('span',{'id':'acrCustomerReviewText'})[0].text.split()[0].replace(',','')) except: overall_rating = 'Not Scrapable' print(overall_rating) #Company try: company = selenium_soup.findAll('a',{'id':'bylineInfo'})[0].text except: company = 'Not Scrapable' print(country) #Price try: if country=='UAE': denomination = selenium_soup.findAll('span',{'id':'priceblock_ourprice'})[0].text[:3] price = float(selenium_soup.findAll('span',{'id':'priceblock_ourprice'})[0].text[3:]) else: denomination = selenium_soup.findAll('span',{'id':'priceblock_ourprice'})[0].text[0] price = float(selenium_soup.findAll('span',{'id':'priceblock_ourprice'})[0].text[1:]) except: try: if country=='UAE': try: price = float(selenium_soup.findAll('span',{'id':'priceblock_ourprice'})[0].text[3:].replace(',','')) except: price = float(selenium_soup.findAll('span',{'id':'priceblock_dealprice'})[0].text[3:].replace(',','')) else: try: price = float(selenium_soup.findAll('span',{'id':'priceblock_ourprice'})[0].text[3:].replace(',','')) except: price = float(selenium_soup.findAll('span',{'id':'priceblock_dealprice'})[0].text[3:].replace(',','')) except: denomination = 'Not Scrapable' price = 'Not Scrapable' print(denomination,price) #Product Highlights try: temp_ph = selenium_soup.findAll('ul',{'class':'a-unordered-list a-vertical a-spacing-none'})[0].findAll('li') counter_ph = len(temp_ph) product_highlights = [] for i in range(counter_ph): raw = temp_ph[i].text clean = raw.strip() product_highlights.append(clean) product_highlights = '<CPT14>'.join(product_highlights) except: try: temp_ph = selenium_soup.findAll('div',{'id':'rich-product-description'})[0].findAll('p') counter_ph = len(temp_ph) product_highlights = [] for i in range(counter_ph): raw = temp_ph[i].text clean = raw.strip() product_highlights.append(clean) product_highlights = '<CPT14>'.join(product_highlights) except: product_highlights = 'Not Available' print(product_highlights) #Product Details/Dimensions: #USA try: temp_pd = 
selenium_soup.findAll('div',{'class':'content'})[0].findAll('ul')[0].findAll('li') counter_pd = len(temp_pd) for i in range(counter_pd): try: if re.findall('ASIN',temp_pd[i].text)[0]: try: asin = temp_pd[i].text.split(' ')[1] except: pass except IndexError: pass try: if re.findall('Product Dimensions|Product Dimension|Product dimensions',temp_pd[i].text)[0]: pd_temp = temp_pd[i].text.strip().split('\n')[2].strip().split(';') try: product_length = float(pd_temp[0].split('x')[0]) except IndexError: pass try: product_width = float(pd_temp[0].split('x')[1]) except IndexError: pass try: product_height = float(pd_temp[0].split('x')[2].split(' ')[1]) except IndexError: pass try: pd_unit = pd_temp[0].split('x')[2].split(' ')[2] except IndexError: pass try: product_weight = float(pd_temp[1].split(' ')[1]) except IndexError: pass try: weight_unit = pd_temp[1].split(' ')[2] except IndexError: pass except: pass try: if re.findall('Shipping Weight|Shipping weight|shipping weight',temp_pd[i].text)[0]: sweight_temp = temp_pd[i].text.split(':')[1].strip().split(' ') shipping_weight = float(sweight_temp[0]) shipping_weight_unit = sweight_temp[1] except IndexError: pass try: if re.findall('Amazon Best Sellers Rank|Amazon Bestsellers Rank',temp_pd[i].text)[0]: x = temp_pd[i].text.replace('\n','').split(' ') indexes = [] for j,k in enumerate(x): if re.findall('#',k): indexes.append(j) try: best_seller_cat = int(temp_pd[i].text.strip().replace('\n','').split(' ')[3].replace(',','')) best_seller_prod = int(x[indexes[0]].split('#')[1].split('in')[0]) except: try: best_seller_cat = x[indexes[0]].split('#')[1] except: pass try: best_seller_prod = x[indexes[1]].split('#')[1].split('in')[0] except: pass except IndexError: pass print(asin) except: pass try: temp_pd = selenium_soup.findAll('div',{'class':'content'})[1].findAll('ul')[0].findAll('li') counter_pd = len(temp_pd) for i in range(counter_pd): try: if re.findall('ASIN',temp_pd[i].text)[0]: try: asin = temp_pd[i].text.split(' ')[1] except: pass except IndexError: pass try: if re.findall('Product Dimensions|Product Dimension|Product dimensions',temp_pd[i].text)[0]: pd_temp = temp_pd[i].text.strip().split('\n')[2].strip().split(';') try: product_length = float(pd_temp[0].split('x')[0]) except IndexError: pass try: product_width = float(pd_temp[0].split('x')[1]) except IndexError: pass try: product_height = float(pd_temp[0].split('x')[2].split(' ')[1]) except IndexError: pass try: pd_unit = pd_temp[0].split('x')[2].split(' ')[2] except IndexError: pass try: product_weight = float(pd_temp[1].split(' ')[1]) except IndexError: pass try: weight_unit = pd_temp[1].split(' ')[2] except IndexError: pass except: pass try: if re.findall('Shipping Weight|Shipping weight|shipping weight',temp_pd[i].text)[0]: sweight_temp = temp_pd[i].text.split(':')[1].strip().split(' ') shipping_weight = float(sweight_temp[0]) shipping_weight_unit = sweight_temp[1] except IndexError: pass try: if re.findall('Amazon Best Sellers Rank|Amazon Bestsellers Rank',temp_pd[i].text)[0]: x = temp_pd[i].text.replace('\n','').split(' ') indexes = [] for j,k in enumerate(x): if re.findall('#',k): indexes.append(j) try: best_seller_cat = int(temp_pd[i].text.strip().replace('\n','').split(' ')[3].replace(',','')) best_seller_prod = int(x[indexes[0]].split('#')[1].split('in')[0]) except: try: best_seller_cat = x[indexes[0]].split('#')[1] except: pass try: best_seller_prod = x[indexes[1]].split('#')[1].split('in')[0] except: pass except IndexError: pass print(asin) except: pass #India try: temp_pd = 
selenium_soup.findAll('div',{'class':'content'})[0].findAll('ul')[0].findAll('li') counter_pd = len(temp_pd) for i in range(counter_pd): try: if re.findall('ASIN',temp_pd[i].text)[0]: asin = temp_pd[i].text.split(' ')[1] except: pass try: if re.findall('Product Dimensions|Product Dimension|Product dimensions',temp_pd[i].text)[0]: pd_temp = temp_pd[i].text.strip().split('\n')[2].strip().split(' ') try: product_length = float(pd_temp[0]) except: pass try: product_width = float(pd_temp[2]) except: pass try: product_height = float(pd_temp[4]) except: pass try: pd_unit = pd_temp[5] except: pass try: product_weight = float(pd_temp[1].split(' ')[1]) except: pass try: weight_unit = pd_temp[1].split(' ')[2] except: pass print(asin) except IndexError: pass try: if re.findall('Shipping Weight|Shipping weight|shipping weight',temp_pd[i].text)[0]: sweight_temp = temp_pd[i].text.split(':')[1].strip().split(' ') shipping_weight = float(sweight_temp[0]) shipping_weight_unit = sweight_temp[1] except IndexError: pass try: if re.findall('Item Weight|Product Weight|Item weight|Product weight|Boxed-product Weight',temp_pd[i].text)[0]: pd_weight_temp = temp_pd[i].text.replace('\n','').strip().split(' ')[1].strip() product_weight = float(pd_weight_temp.split(' ')[0]) weight_unit = pd_weight_temp.split(' ')[1] except IndexError: pass try: if re.findall('Amazon Best Sellers Rank|Amazon Bestsellers Rank',temp_pd[i].text)[0]: x = temp_pd[i].text.strip().replace('\n','').split(' ') indexes = [] for j,k in enumerate(x): if re.findall('#',k): indexes.append(j) try: best_seller_cat = int(temp_pd[i].text.strip().replace('\n','').split(' ')[3].replace(',','')) best_seller_prod = int(x[indexes[0]].split('#')[1].split('in')[0]) except: try: best_seller_cat = x[indexes[0]].split('#')[1] except: pass try: best_seller_prod = x[indexes[1]].split('#')[1].split('in')[0] except: pass except IndexError: pass print(asin) except: pass try: try: asin = list(selenium_soup.findAll('div',{'class':'pdTab'})[1].findAll('tr')[0].findAll('td')[1])[0] except: pass try: dimensions = list(selenium_soup.findAll('div',{'class':'pdTab'})[0].findAll('tr')[0].findAll('td')[1])[0] except: pass try: weight_temp = list(selenium_soup.findAll('div',{'class':'pdTab'})[1].findAll('tr')[1].findAll('td')[1])[0] except: pass try: best_seller_cat = float(list(selenium_soup.findAll('div',{'class':'pdTab'})[1].findAll('tr')[5].findAll('td')[1])[0].split('\n')[-1].split(' ')[0].replace(',','')) except: pass try: best_seller_prod = int(list(list(list(list(selenium_soup.findAll('div',{'class':'pdTab'})[1].findAll('tr')[5].findAll('td')[1])[5])[1])[1])[0].replace('#','')) except: pass try: product_length = float(dimensions.split('x')[0]) except: pass try: product_width = float(dimensions.split('x')[1]) except: pass try: product_height = float(dimensions.split('x')[2].split(' ')[1]) except: pass try: product_weight = weight_temp.split(' ')[0] except: pass try: weight_unit = weight_temp.split(' ')[1] except: pass try: pd_unit = dimensions.split(' ')[-1] except: pass print(asin) except: try: for j in [0,1]: temp_pd = selenium_soup.findAll('table',{'class':'a-keyvalue prodDetTable'})[j].findAll('tr') for i in range(len(temp_pd)): if re.findall('ASIN',temp_pd[i].text): asin = temp_pd[i].text.strip().split('\n')[3].strip() if re.findall('Item Model Number|Item model number',temp_pd[i].text): bait = temp_pd[i].text.strip().split('\n')[3].strip() if re.findall('Best Sellers Rank|Amazon Best Sellers Rank|Amazon Bestsellers Rank',temp_pd[i].text): x = 
temp_pd[i].text.strip().replace('\n','').split(' ') indexes = [] for j,k in enumerate(x): if re.findall('#',k): indexes.append(j) best_seller_cat = int(x[indexes[0]].split('#')[1]) best_seller_prod = int(x[indexes[1]].split('#')[1].split('in')[0]) if re.findall('Product Dimensions|Product dimension|Product Dimension',temp_pd[i].text): dimensions = temp_pd[i].text.strip().split('\n')[3].strip().split('x') product_length = float(dimensions[0].strip()) product_width = float(dimensions[1].strip()) product_height = float(dimensions[2].strip().split(' ')[0]) pd_unit = dimensions[2].strip().split(' ')[1] if re.findall('Item Weight|Product Weight|Item weight|Boxed-product Weight',temp_pd[i].text): weight_temp = temp_pd[i].text.strip().split('\n')[3].strip() product_weight = float(weight_temp.split(' ')[0]) weight_unit = weight_temp.split(' ')[1] if re.findall('Shipping Weight|Shipping weight|shipping weight',temp_pd[i].text): sweight_temp = temp_pd[i].text.replace('\n','').strip().split(' ')[1].lstrip().split(' ') shipping_weight = float(sweight_temp[0]) shipping_weight_unit = sweight_temp[1] print(asin,bait) except: try: temp_pd = selenium_soup.findAll('div',{'id':'prodDetails'})[0].findAll('tr') for i in range(len(temp_pd)): if re.findall('ASIN',temp_pd[i].text): asin = temp_pd[i].text.strip().split('\n')[3].strip() if re.findall('Best Sellers Rank|Amazon Best Sellers Rank|Amazon Bestsellers Rank',temp_pd[i].text): x = temp_pd[i].text.strip().replace('\n','').split(' ') indexes = [] for j,k in enumerate(x): if re.findall('#',k): indexes.append(j) best_seller_cat = int(x[indexes[0]].split('#')[1]) best_seller_prod = int(x[indexes[1]].split('#')[1].split('in')[0]) if re.findall('Product Dimensions|Product dimension|Product Dimension',temp_pd[i].text): dimensions = temp_pd[i].text.strip().split('\n')[3].strip().split('x') product_length = float(dimensions[0].strip()) product_width = float(dimensions[1].strip()) product_height = float(dimensions[2].strip().split(' ')[0]) pd_unit = dimensions[2].strip().split(' ')[1] if re.findall('Item Weight|Product Weight|Item weight|Boxed-product Weight',temp_pd[i].text): weight_temp = temp_pd[i].text.strip().split('\n')[3].strip() product_weight = float(weight_temp.split(' ')[0]) weight_unit = weight_temp.split(' ')[1] if re.findall('Shipping Weight|Shipping weight|shipping weight',temp_pd[i].text): sweight_temp = temp_pd[i].text.replace('\n','').strip().split(' ')[1].lstrip().split(' ') shipping_weight = float(sweight_temp[0]) shipping_weight_unit = sweight_temp[1] except: try: temp_pd = selenium_soup.findAll('div',{'id':'detail_bullets_id'})[0].findAll('tr')[0].findAll('li') for i in range(len(temp_pd)): if re.findall('ASIN',temp_pd[i].text): asin = temp_pd[i].text.strip().split(':')[1].strip() if re.findall('Best Sellers Rank|Amazon Best Sellers Rank|Amazon Bestsellers Rank',temp_pd[i].text): x = temp_pd[i].text.strip().replace('\n','').split(' ') indexes = [] for j,k in enumerate(x): if re.findall('#',k): indexes.append(j) best_seller_cat = int(x[indexes[0]].split('#')[1]) best_seller_prod = int(x[indexes[1]].split('#')[1].split('in')[0]) if re.findall('Product Dimensions|Product dimension|Product Dimension',temp_pd[i].text): dimensions = temp_pd[i].text.strip().split('\n')[2].strip().split('x') product_length = float(dimensions[0].strip()) product_width = float(dimensions[1].strip()) product_height = float(dimensions[2].strip().split(' ')[0]) pd_unit = dimensions[2].strip().split(' ')[1] if re.findall('Item Weight|Product Weight|Item weight|Boxed-product 
Weight',temp_pd[i].text): weight_temp = temp_pd[i].text.strip().split('\n')[2].strip() product_weight = float(weight_temp.split(' ')[0]) weight_unit = weight_temp.split(' ')[1] if re.findall('Shipping Weight|Shipping weight|shipping weight',temp_pd[i].text): sweight_temp = temp_pd[i].text.replace('\n','').strip().split(' ')[1].lstrip().split(' ') shipping_weight = float(sweight_temp[0]) shipping_weight_unit = sweight_temp[1] except: pass try: print(asin) except NameError: asin = 'Not Scrapable' try: print(best_seller_cat) except NameError: best_seller_cat = 'Not Scrapable' try: print(best_seller_prod) except NameError: best_seller_prod = 'Not Scrapable' try: print(product_length) except NameError: product_length = 'Not Scrapable' try: print(product_width) except NameError: product_width = 'Not Scrapable' try: print(product_height) except NameError: product_height = 'Not Scrapable' try: print(product_weight) except NameError: product_weight = 'Not Scrapable' try: print(weight_unit) except NameError: weight_unit = 'Not Scrapable' try: print(pd_unit) except NameError: pd_unit = 'Not Scrapable' try: print(shipping_weight_unit) except NameError: shipping_weight_unit = 'Not Scrapable' try: print(shipping_weight) except NameError: shipping_weight = 'Not Scrapable' print(product_length,product_width,product_height,product_weight,asin,pd_unit, best_seller_cat,best_seller_prod,weight_unit,shipping_weight,shipping_weight_unit) #Customer Review Ratings - Overall time.sleep(0.5) try: temp_crr = selenium_soup.findAll('table',{'id':'histogramTable'})[1].findAll('a') crr_main = {} crr_temp = [] counter_crr = len(temp_crr) for i in range(counter_crr): crr_temp.append(temp_crr[i]['title']) crr_temp = list(set(crr_temp)) for j in range(len(crr_temp)): crr_temp[j] = crr_temp[j].split(' ') stopwords = ['stars','represent','of','rating','reviews','have'] for word in list(crr_temp[j]): if word in stopwords: crr_temp[j].remove(word) print(crr_temp[j]) try: if re.findall(r'%',crr_temp[j][1])[0]: crr_main.update({int(crr_temp[j][0]): int(crr_temp[j][1].replace('%',''))}) except: crr_main.update({int(crr_temp[j][1]): int(crr_temp[j][0].replace('%',''))}) except: try: temp_crr = selenium_soup.findAll('table',{'id':'histogramTable'})[1].findAll('span',{'class':'a-offscreen'}) crr_main = {} counter_crr = len(temp_crr) star = counter_crr for i in range(counter_crr): crr_main.update({star:int(temp_crr[i].text.strip().split('/n')[0].split(' ')[0].replace('%',''))}) star -= 1 except: pass try: crr_5 = crr_main[5] except: crr_5 = 0 try: crr_4 = crr_main[4] except: crr_4 = 0 try: crr_3 = crr_main[3] except: crr_3 = 0 try: crr_2 = crr_main[2] except: crr_2 = 0 try: crr_1 = crr_main[1] except: crr_1 = 0 #Customer Review Ratings - By Feature time.sleep(1) try: driver.find_element_by_xpath('//*[@id="cr-summarization-attributes-list"]/div[4]/a/span').click() temp_fr = driver.find_element_by_xpath('//*[@id="cr-summarization-attributes-list"]').text temp_fr = temp_fr.split('\n') crr_feature_title = [] crr_feature_rating = [] for i in [0,2,4]: crr_feature_title.append(temp_fr[i]) for j in [1,3,5]: crr_feature_rating.append(temp_fr[j]) crr_feature = dict(zip(crr_feature_title,crr_feature_rating)) except: try: temp_fr = driver.find_element_by_xpath('//*[@id="cr-summarization-attributes-list"]').text temp_fr = temp_fr.split('\n') crr_feature_title = [] crr_feature_rating = [] for i in [0,2,4]: crr_feature_title.append(temp_fr[i]) for j in [1,3,5]: crr_feature_rating.append(temp_fr[j]) crr_feature = 
dict(zip(crr_feature_title,crr_feature_rating)) except: crr_feature = 'Not Defined' try: crr_feature_key = list(crr_feature.keys()) except: pass try: crr_fr_1 = crr_feature[crr_feature_key[0]] except: crr_fr_1 = 0 try: crr_fr_2 = crr_feature[crr_feature_key[1]] except: crr_fr_2 = 0 try: crr_fr_3 = crr_feature[crr_feature_key[2]] except: crr_fr_3 = 0 #Tags: time.sleep(1) try: temp_tags = selenium_soup.findAll('div',{'class':'cr-lighthouse-terms'})[0] counter_tags = len(temp_tags) print('Counter Tags:',counter_tags) tags = [] for i in range(counter_tags): tags.append(temp_tags.findAll('span')[i].text.strip()) print(tags[i]) except: tags = ['None'] try: for feature in crr_feature_key: tags.append(feature) except: pass tags = list(set(tags)) tags = '<CPT14>'.join(tags) print(tags) #Images images = [] for i in [0,3,4,5,6,7,8,9]: try: images.append(selenium_soup.findAll('div',{'class':'imgTagWrapper'})[i].find('img')['src']) except: pass import urllib.request for i in range(len(images)): if asin =='Not Scrapable': product_image = "{}_{}.jpg".format(product_title,i) product_image = product_image.replace('/','') urllib.request.urlretrieve(images[i],product_image) upload_s3("{}_{}.jpg".format(product_title,i), directory+"/images/" + product_image) delete_images(product_image) else: product_image = "{}_{}.jpg".format(asin,i) product_image = product_image.replace('/','') urllib.request.urlretrieve(images[i],product_image) upload_s3("{}_{}.jpg".format(asin,i), directory+"/images/" + product_image) delete_images(product_image) return [product_title,rating_star,overall_rating,company,price, product_highlights,product_length,product_width,product_height, product_weight,asin,pd_unit,best_seller_cat,best_seller_prod, weight_unit,shipping_weight,shipping_weight_unit,crr_5,crr_4, crr_3,crr_2,crr_1,crr_fr_1,crr_fr_2,crr_fr_3,tags,directory] ###Output _____no_output_____ ###Markdown Data Wrangling ###Code def database(product_data,**kwargs): try: try: link = kwargs['link'] except KeyError: print('Error in Link') try: country = kwargs['country'] except KeyError: print("Enter Country Name") try: cat1 = kwargs['cat1'] except KeyError: pass try: cat2 = kwargs['cat2'] except KeyError: pass try: cat3 = kwargs['cat3'] except KeyError: pass try: cat4 = kwargs['cat4'] except KeyError: pass try: product = kwargs['product'] except KeyError: print("Enter Product Name") metadata = [link,country,cat1,cat2,cat3,cat4,product] except NameError: try: cat4 = None metadata = [link,country,cat1,cat2,cat3,cat4,product] except NameError: try: cat4 = None cat3 = None metadata = [link,country,cat1,cat2,cat3,cat4,product] except NameError: cat4 = None cat3 = None cat2 = None metadata = [link,country,cat1,cat2,cat3,cat4,product] conn = sqlite3.connect('{}.db'.format(product)) headers = ['link','country','cat1','cat2','cat3','cat4','product','product_title', 'rating_star','overall_rating','company','price', 'product_highlights','product_length','product_width','product_height', 'product_weight','asin','pd_unit','best_seller_cat','best_seller_prod', 'weight_unit','shipping_weight','shipping_weight_unit','crr_5','crr_4', 'crr_3','crr_2','crr_1','crr_fr_1','crr_fr_2','crr_fr_3','tags','images_link'] product_data.append(metadata) product_data = product_data[-1] + product_data[:len(product_data)-1] temp = pd.DataFrame(data= [product_data],columns=headers) temp.to_sql('Product',conn,if_exists='append') upload_s3(product+'.db',directory+'/'+product+'.db') conn.close() def checkpoint(link_list,directory,product): BUCKET_NAME = 
'amazon-data-ecfullfill' key_id = 'AKIAWR6YW7N5ZKW35OJI' access_key = 'h/xrcI9A2SRU0ds+zts4EClKAqbzU+/iXdiDcgzm' KEY = '{}/{}.db'.format(directory,product) s3 = boto3.resource('s3',aws_access_key_id=key_id, aws_secret_access_key=access_key) try: s3.Bucket(BUCKET_NAME).download_file(KEY, 'test.db') except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": print("The object does not exist.") else: raise conn = sqlite3.connect('test.db') try: df = pd.read_sql('''SELECT * FROM Product''', conn) product_link = df['link'].unique() new_list = [] for i in link_list: if i in product_link: pass else: new_list.append(i) except: new_list = link_list return new_list ###Output _____no_output_____ ###Markdown Execution ###Code #Initializing the product per Jupyter Notebook country = 'Australia' cat1 = 'health_and_beauty' cat2='hair_products' # cat3='None' # cat4 = 'None' product='shampoo' # links,directory = products_links(country=country,category=cat1,cat2=cat2,product=product) # test_1 = {'links':links,'directory':directory} # import pickle # with open('au_hair_prod_shampoo.pkl', 'wb') as f: # pickle.dump(test_1, f) with open('au_hair_prod_shampoo.pkl', 'rb') as f: file = pickle.load(f) links = file['links'] directory = 'Amazon_AU/health_and_beauty/hair_products/shampoo' #replace links with new_links if interruption for link in new_links: data = product_info(link=link,directory=directory,country=country) conn = sqlite3.connect('{}.db'.format(product)) database(product_data=data,link=link,country=country, cat1=cat1,cat2=cat2,product=product) # Run if there is an interruption new_links = checkpoint(links,directory,product) new_links[1:] len(new_links) len(links) ###Output _____no_output_____ ###Markdown Testing the datasets in S3 ###Code BUCKET_NAME = 'amazon-data-ecfullfill' # replace with your bucket name key_id = 'AKIAWR6YW7N5ZKW35OJI' access_key = 'h/xrcI9A2SRU0ds+zts4EClKAqbzU+/iXdiDcgzm' KEY = 'Amazon_USA/health_and_beauty/hair_products/shampoo/shampoo.db' # replace with your object key s3 = boto3.resource('s3',aws_access_key_id=key_id, aws_secret_access_key=access_key) try: s3.Bucket(BUCKET_NAME).download_file(KEY, 'test.db') except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": print("The object does not exist.") else: raise conn = sqlite3.connect('shampoo.db') df_USA = pd.read_sql("SELECT * FROM Product",conn) df_USA.iloc[:,:15] df_USA.iloc[:,15:] len(link_db) # def upload_s3(filename,key): # key_id = 'AKIAWR6YW7N5ZKW35OJI' # access_key = 'h/xrcI9A2SRU0ds+zts4EClKAqbzU+/iXdiDcgzm' # bucket_name = 'amazon-data-ecfullfill' # s3 = boto3.client('s3',aws_access_key_id=key_id, # aws_secret_access_key=access_key) # # s3.put_object(Bucket=bucket_name, Key='Amazon/health_and_beauty/hair_product/shampoo') # s3.upload_file(filename,bucket_name,key) ###Output _____no_output_____
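###Markdown A note on the S3 helpers above: the AWS key id and secret are hard-coded in several cells, which is easy to leak when a notebook like this is shared. The cell below is a hypothetical refactor (not part of the original scraper) that lets boto3 resolve credentials from its default chain (environment variables such as AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY, the shared ~/.aws/credentials file, or an IAM role); only the bucket name is reused from the cells above. ###Code
import boto3

# Hypothetical refactor: no explicit keys are passed, so boto3 falls back to its
# standard credential chain (env vars, ~/.aws/credentials, instance role).
BUCKET_NAME = 'amazon-data-ecfullfill'  # bucket name taken from the notebook above

def get_s3_client():
    return boto3.client('s3')

def upload_s3_safe(filename, key, bucket=BUCKET_NAME):
    """Same behaviour as upload_s3 above, without embedding credentials."""
    s3 = get_s3_client()
    try:
        s3.upload_file(filename, bucket, key)
    except FileNotFoundError:
        pass
###Output _____no_output_____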
.ipynb_checkpoints/TestProjectJupyter-checkpoint.ipynb
###Markdown UCDPA Gavin HoranHi Tom, this was meant to be a Jupyter notebook where I would access the basics of my project. I intended to use it regularly, but then I did not. I used Pycharm instead. ###Code import datetime # import nicedate import nicedate def nicedate(year_week): return datetime.datetime.strptime('{0}-1'.format(year_week), "%Y-W%W-%w") from nicedate import nicedate import matplotlib.pyplot as plt #import datetime import seaborn as sns import matplotlib.dates as mdates import pandas as pd import matplotlib.cbook as cbook # # def Nice_Date(df, dateformat): # for row in df: # row['year_week_formatted'] = datetime.datetime.strptime(d + '-1', "%Y-W%W-%w") # result = a + b # return str(result) # # # print(df_country['year_week'].head()) # # df_country['Test_Weeks'] = df.apply(lambda x: my_function(x['value_1'], x['value_2']), axis=1) # function to return a date # def niceDate(year_week): # return datetime.datetime.strptime(year_week + '-1', "%Y-W%W-%w") d = "2013-W05" a = nicedate(d) r = datetime.datetime.strptime(d + '-1', "%Y-W%W-%w") print(a) print(r) # # def format_date(df, dateformat): # """this will format the column containing dates'""" # for row in df: # row['Date'] = datetime.datetime.strptime(row['Date'], '%Y%m%d') # csv.DictWriter(str(df)+'_converted.csv', data) # return ###Output _____no_output_____
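###Markdown For reference, a small self-contained sketch of the same ISO year-week conversion applied to a pandas column (the column name 'year_week' and the demo frame are hypothetical, not taken from the original project): ###Code
import datetime
import pandas as pd

def nicedate(year_week):
    # appending '-1' resolves the year-week string to that week's Monday
    return datetime.datetime.strptime('{0}-1'.format(year_week), "%Y-W%W-%w")

df_demo = pd.DataFrame({'year_week': ['2013-W05', '2013-W06', '2013-W07']})
df_demo['week_start'] = df_demo['year_week'].apply(nicedate)
# equivalent vectorised form
df_demo['week_start_vec'] = pd.to_datetime(df_demo['year_week'] + '-1', format='%Y-W%W-%w')
print(df_demo)
###Output _____no_output_____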
notebooks/01_book.ipynb
###Markdown Deep Reinforcement Learning in Action Ch 1. Introduction Dynamic Programming ###Code def fib(n): if n <= 1: return n else: return fib(n - 1) + fib(n - 2) fib(7) mem = {0:0, 1:1} def fib_mem(n): if n not in mem: mem[n] = fib_mem(n - 1) + fib_mem(n - 2) return mem[n] fib_mem(7) ###Output _____no_output_____ ###Markdown Time Performance ###Code %timeit fib(35) # We get 5.54 seconds to run with n=35 %timeit fib_mem(35) # We get 412 ns to run with n=35 ###Output 279 ns ± 258 ns per loop (mean ± std. dev. of 7 runs, 1 loop each)
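###Markdown The hand-rolled mem dictionary above can also be expressed with functools.lru_cache, which memoizes every recursive call automatically. This is a sketch for comparison, not a listing from the book: ###Code
from functools import lru_cache

@lru_cache(maxsize=None)  # cache the result for every distinct n
def fib_cached(n):
    if n <= 1:
        return n
    return fib_cached(n - 1) + fib_cached(n - 2)

fib_cached(35)  # linear in n thanks to the cache, instead of exponential
###Output _____no_output_____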
community/aqua/chemistry/nah_uccsd.ipynb
###Markdown _*NaH dissociation curve using VQE with UCCSD*_This notebook demonstrates using the Qiskit Aqua Chemistry to plot graphs of the ground state energy of the Sodium Hydride (NaH) molecule over a range of inter-atomic distances using VQE and UCCSD. It is compared to the same energies as computed by the ExactEigensolverThis notebook populates a dictionary, that is a progammatic representation of an input file, in order to drive the Qiskit Aqua Chemistry stack. Such a dictionary can be manipulated programmatically and this is indeed the case here where we alter the molecule supplied to the driver in each loop.This notebook has been written to use the PYSCF chemistry driver. See the PYSCF chemistry driver readme if you need to install the external PySCF library that this driver requires. ###Code import numpy as np import pylab from qiskit_aqua_chemistry import AquaChemistry # Input dictionary to configure Qiskit Aqua Chemistry for the chemistry problem. aqua_chemistry_dict = { 'driver': {'name': 'PYSCF'}, 'PYSCF': {'atom': '', 'basis': 'sto3g'}, 'operator': {'name': 'hamiltonian', 'qubit_mapping': 'parity', 'two_qubit_reduction': True, 'freeze_core': True, 'orbital_reduction': []}, 'algorithm': {'name': ''}, 'optimizer': {'name': 'COBYLA', 'maxiter': 10000 }, 'variational_form': {'name': 'UCCSD'}, 'initial_state': {'name': 'HartreeFock'} } molecule = 'H .0 .0 -{0}; Na .0 .0 {0}' algorithms = ['VQE', 'ExactEigensolver'] pts = [x * 0.1 for x in range(10, 25)] pts += [x * 0.25 for x in range(10, 18)] pts += [4.5] energies = np.empty([len(algorithms), len(pts)]) hf_energies = np.empty(len(pts)) distances = np.empty(len(pts)) dipoles = np.empty([len(algorithms), len(pts)]) eval_counts = np.empty(len(pts)) print('Processing step __', end='') for i, d in enumerate(pts): print('\b\b{:2d}'.format(i), end='', flush=True) aqua_chemistry_dict['PYSCF']['atom'] = molecule.format(d/2) for j in range(len(algorithms)): aqua_chemistry_dict['algorithm']['name'] = algorithms[j] solver = AquaChemistry() result = solver.run(aqua_chemistry_dict) energies[j][i] = result['energy'] hf_energies[i] = result['hf_energy'] dipoles[j][i] = result['total_dipole_moment'] / 0.393430307 if algorithms[j] == 'VQE': eval_counts[i] = result['algorithm_retvals']['eval_count'] distances[i] = d print(' --- complete') print('Distances: ', distances) print('Energies:', energies) print('Hartree-Fock energies:', hf_energies) print('Dipoles:', dipoles) print('VQE num evaluations:', eval_counts) pylab.plot(distances, hf_energies, label='Hartree-Fock') for j in range(len(algorithms)): pylab.plot(distances, energies[j], label=algorithms[j]) pylab.xlabel('Interatomic distance') pylab.ylabel('Energy') pylab.title('NaH Ground State Energy') pylab.legend(loc='upper right') pylab.plot(distances, np.subtract(hf_energies, energies[1]), label='Hartree-Fock') pylab.plot(distances, np.subtract(energies[0], energies[1]), label='VQE') pylab.xlabel('Interatomic distance') pylab.ylabel('Energy') pylab.title('Energy difference from ExactEigensolver') pylab.legend(loc='upper left') for j in reversed(range(len(algorithms))): pylab.plot(distances, dipoles[j], label=algorithms[j]) pylab.xlabel('Interatomic distance') pylab.ylabel('Moment in debye') pylab.title('NaH Dipole Moment') pylab.legend(loc='upper right') pylab.plot(distances, eval_counts, '-o', color=[0.8500, 0.3250, 0.0980], label='VQE') pylab.xlabel('Interatomic distance') pylab.ylabel('Evaluations') pylab.title('VQE number of evaluations') pylab.legend(loc='upper left') ###Output 
_____no_output_____ ###Markdown _*NaH dissociation curve using VQE with UCCSD*_This notebook demonstrates using the Qiskit Chemistry to plot graphs of the ground state energy of the Sodium Hydride (NaH) molecule over a range of inter-atomic distances using VQE and UCCSD. It is compared to the same energies as computed by the ExactEigensolverThis notebook populates a dictionary, that is a progammatic representation of an input file, in order to drive the Qiskit Chemistry stack. Such a dictionary can be manipulated programmatically and this is indeed the case here where we alter the molecule supplied to the driver in each loop.This notebook has been written to use the PYSCF chemistry driver. See the PYSCF chemistry driver readme if you need to install the external PySCF library that this driver requires. ###Code import numpy as np import pylab from qiskit_chemistry import QiskitChemistry # Input dictionary to configure Qiskit Chemistry for the chemistry problem. qiskit_chemistry_dict = { 'driver': {'name': 'PYSCF'}, 'PYSCF': {'atom': '', 'basis': 'sto3g'}, 'operator': {'name': 'hamiltonian', 'qubit_mapping': 'parity', 'two_qubit_reduction': True, 'freeze_core': True, 'orbital_reduction': []}, 'algorithm': {'name': ''}, 'optimizer': {'name': 'COBYLA', 'maxiter': 10000 }, 'variational_form': {'name': 'UCCSD'}, 'initial_state': {'name': 'HartreeFock'} } molecule = 'H .0 .0 -{0}; Na .0 .0 {0}' algorithms = ['VQE', 'ExactEigensolver'] pts = [x * 0.1 for x in range(10, 25)] pts += [x * 0.25 for x in range(10, 18)] pts += [4.5] energies = np.empty([len(algorithms), len(pts)]) hf_energies = np.empty(len(pts)) distances = np.empty(len(pts)) dipoles = np.empty([len(algorithms), len(pts)]) eval_counts = np.empty(len(pts)) print('Processing step __', end='') for i, d in enumerate(pts): print('\b\b{:2d}'.format(i), end='', flush=True) qiskit_chemistry_dict['PYSCF']['atom'] = molecule.format(d/2) for j in range(len(algorithms)): qiskit_chemistry_dict['algorithm']['name'] = algorithms[j] solver = QiskitChemistry() result = solver.run(qiskit_chemistry_dict) energies[j][i] = result['energy'] hf_energies[i] = result['hf_energy'] dipoles[j][i] = result['total_dipole_moment'] / 0.393430307 if algorithms[j] == 'VQE': eval_counts[i] = result['algorithm_retvals']['eval_count'] distances[i] = d print(' --- complete') print('Distances: ', distances) print('Energies:', energies) print('Hartree-Fock energies:', hf_energies) print('Dipoles:', dipoles) print('VQE num evaluations:', eval_counts) pylab.plot(distances, hf_energies, label='Hartree-Fock') for j in range(len(algorithms)): pylab.plot(distances, energies[j], label=algorithms[j]) pylab.xlabel('Interatomic distance') pylab.ylabel('Energy') pylab.title('NaH Ground State Energy') pylab.legend(loc='upper right') pylab.plot(distances, np.subtract(hf_energies, energies[1]), label='Hartree-Fock') pylab.plot(distances, np.subtract(energies[0], energies[1]), label='VQE') pylab.xlabel('Interatomic distance') pylab.ylabel('Energy') pylab.title('Energy difference from ExactEigensolver') pylab.legend(loc='upper left') for j in reversed(range(len(algorithms))): pylab.plot(distances, dipoles[j], label=algorithms[j]) pylab.xlabel('Interatomic distance') pylab.ylabel('Moment in debye') pylab.title('NaH Dipole Moment') pylab.legend(loc='upper right') pylab.plot(distances, eval_counts, '-o', color=[0.8500, 0.3250, 0.0980], label='VQE') pylab.xlabel('Interatomic distance') pylab.ylabel('Evaluations') pylab.title('VQE number of evaluations') pylab.legend(loc='upper left') ###Output 
_____no_output_____
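###Markdown A natural follow-up once the curves are computed is to estimate the equilibrium bond length from the sampled points. The sketch below assumes the `distances` and `energies` arrays filled in the loop above (row 1 of `energies` holds the ExactEigensolver values) and fits a quadratic around the minimum; it is an illustration, not part of the original notebook. ###Code
import numpy as np

def equilibrium_distance(distances, energies_1d):
    i_min = int(np.argmin(energies_1d))
    lo, hi = max(i_min - 2, 0), min(i_min + 3, len(distances))
    # quadratic fit around the grid minimum gives a smoother estimate than the raw grid point
    a, b, _ = np.polyfit(distances[lo:hi], energies_1d[lo:hi], 2)
    return -b / (2 * a)

# hypothetical usage with the arrays computed above:
# print('Estimated equilibrium separation:', equilibrium_distance(distances, energies[1]))
###Output _____no_output_____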
Bayesian Neural Networks Tutorial - Code Review .ipynb
###Markdown Bayesian Neural Networks This script explores using pytorch and pyro to build, train and test a bayesian neural network. The advantage and novelty of this model is that inference is done probablistically; thereby allowing us to quantify the certainty of model results. Kind of like a built in sensitivity analysis. I like the way [this](https://towardsdatascience.com/making-your-neural-network-say-i-dont-know-bayesian-nns-using-pyro-and-pytorch-b1c24e6ab8cd) [3] tutorial phrases it: > Making Your Neural Network Say “I Don’t Know”In this tutorial, we'll go over - key differences of `Bayesian Neural Networks (BNN)` vs `Neural Networks (NN)` - how BNNs do inference & training - simple tutorial on how to build a BNN using `pytorch` and `pyro` - difference in training and prediction time complexity BNNs vs NNs Classic Neural Network Bayesian Neural Network ![image.png](https://www.researchgate.net/profile/Florian_Haese/publication/329843608/figure/fig2/AS:713727343067138@1547177267367/llustration-of-a-Bayesian-Neural-Network-BNN-A-A-Bayesian-neuron-defines-a_W640.jpg)Taken from [1]. BNN advantage What happens when you give cutting edge neural network an image of noise? ![image.png](https://miro.medium.com/max/802/0*HG51qQU8I34_fUgB.jpg)--- The most obvious advantage of these BNNs are that they allow the model to propogate it's uncertainty about a prediction, from [this]( https://krasserm.github.io/2019/03/14/bayesian-neural-networks/) tutorial, there is a great plot that shows this.![image.png](https://krasserm.github.io/img/2019-03-14/output_9_1.png)From this we can see that the BNN knows where in it's feature space it has accurate predictions, and where it does not. I've been thinking about this as a built in sensitivity analysis. With a few extra lines of code, we can have the model say, "Hey, I don't know what the answer is, don't trust me", which is a key feature when making important decisions. Other advantages include being able to include priors on our weight distributions (could be tough to know how to do this well). Also priors on our X distributions, which could be as simple as calculating p(x) over each mini-batch of the training set and using this as the prior on test sets. 
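As a concrete (hypothetical) illustration of that last idea, one could fit a simple Gaussian density to the training inputs and flag test inputs whose log-density falls well below anything seen in training; this is only a sketch of the general pattern, not part of the tutorial code below: ###Code
import numpy as np
from scipy.stats import multivariate_normal

def fit_input_density(X_train):
    mu = X_train.mean(axis=0)
    cov = np.cov(X_train, rowvar=False) + 1e-6 * np.eye(X_train.shape[1])  # regularised covariance
    return multivariate_normal(mean=mu, cov=cov)

def flag_out_of_distribution(density, X_train, X_test, quantile=0.01):
    # threshold at a low quantile of the training log-density
    threshold = np.quantile(density.logpdf(X_train), quantile)
    return density.logpdf(X_test) < threshold  # True -> "I don't know"
###Output _____no_output_____ ###Markdown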
Training a BNN Variation Inference Sampling Prediction ###Code import torch import torch.nn.functional as nnf from torch.utils.data import random_split from torch.utils.data.dataloader import DataLoader from torch.optim import SGD from torch.distributions import constraints import torchvision as torchv import torchvision.transforms as torchvt from torchvision.datasets.mnist import MNIST from torch import nn from pyro.infer import SVI, TraceMeanField_ELBO import pyro from pyro import poutine import pyro.optim as pyroopt import pyro.distributions as dist import pyro.contrib.bnn as bnn import matplotlib.pyplot as plt import seaborn as sns from torch.distributions.utils import lazy_property import math from torch.utils import data from sklearn.datasets import load_iris from matplotlib import pyplot as plt import numpy as np ###Output _____no_output_____ ###Markdown Model Global Variables ###Code LEARNING_WEIGHT = 1e-3 EPOCHS = 150 DECAY_MILESTONES = range(100, EPOCHS, 10) GAMMA=0.1 DROP_OUT_PROP = 0.1 LAYER_SIZE= 50 ###Output _____no_output_____ ###Markdown Load our data in We'll use the classic `Iris` Dataset.number of features: 4 number of observations: 150number of classes: 3 We'll save each observation separately to disk as a tensor object (.pt), so that we can build an efficient DataLoader (for learning purposes, since our volume doesn't actually make this necessary) Also, since we're just exploring the difference between these two models, we'll only split to train/test 2:1 ratio. ###Code X,Y = load_iris(return_X_y=True) # shuffle our data to avoid class bias sel = np.arange(len(Y)) np.random.shuffle(sel) X = X[sel] Y = Y[sel] for i,x in enumerate(X): torch.save(torch.tensor(x).float(), './data/%d.pt' %i) n_classes=len(set(Y)) print(f'Number of classes: {n_classes}') partition = {'train':[str(x) for x in range(0,100)], 'test':[str(x) for x in range(100,150)]} labels = {str(i):torch.tensor(j).to(torch.int64) for i,j in zip(range(150), Y)} ###Output Number of classes: 3 ###Markdown Dataset Class: first part of our DataLoader We have to define a dataset class, which is where we can tell the dataloader where to look for each observation (key -> path) ###Code class Dataset(data.Dataset): 'Characterizes a dataset for PyTorch' def __init__(self, list_IDs, labels): 'Initialization' self.labels = labels self.list_IDs = list_IDs def __len__(self): 'Denotes the total number of samples' return len(self.list_IDs) def __getitem__(self, index): 'Generates one sample of data, load from disk' ID = self.list_IDs[index] X = torch.load('data/' + ID + '.pt') y = self.labels[ID] return X, y ###Output _____no_output_____ ###Markdown Housekeeping choose the device to train on, we'll do it locally on a single thread. ###Code # CUDA for PyTorch device = torch.device('cpu') def get_lr(optimizer): for param_group in optimizer.param_groups: return param_group['lr'] ###Output _____no_output_____ ###Markdown Initialize our DataLoaders We'll use our whole training dataset as the batch, since we have so little data anyway. No need to optimize here. 
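Before wiring up the loaders, a quick sanity check on the Dataset class helps catch path or dtype mistakes early. This is a hypothetical snippet (it assumes the .pt files written in the cells above exist on disk): ###Code
# build a throwaway Dataset and load a single observation from disk
probe_set = Dataset(partition['train'], labels)
sample_x, sample_y = probe_set[0]
print(sample_x.shape, sample_x.dtype)  # expected: torch.Size([4]) torch.float32
print(sample_y)                        # a 0-d int64 tensor holding the class label (0, 1 or 2)
###Output _____no_output_____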
###Code # CUDA for PyTorch device = torch.device('cpu') # Parameters train_params = {'batch_size': 100, 'shuffle': True, 'num_workers': 0} # Generators training_set = Dataset(partition['train'], labels) train_loader = data.DataLoader(training_set, **train_params) # Parameters test_params = {'batch_size': 50, 'shuffle': False, 'num_workers': 0} test_set = Dataset(partition['test'], labels) test_loader = data.DataLoader(test_set, **test_params) ###Output _____no_output_____ ###Markdown Define our classic neural network ![](https://media.wired.com/photos/592676bf7034dc5f91beb823/16:9/w_1600,c_limit/1957_Jaguar_XKSS_0043_BH-FINAL.jpg) ###Code class Common_FCN(nn.Module): def __init__(self, n_classes=n_classes): super(Common_FCN, self).__init__() self.fc = nn.Sequential(#nn.BatchNorm1d(num_features=4), #nn.Dropout(p=DROP_OUT_PROP), nn.Linear(4, LAYER_SIZE), #nn.BatchNorm1d(num_features=LAYER_SIZE), #nn.Dropout(p=DROP_OUT_PROP), nn.ReLU(), #nn.Dropout(p=DROP_OUT_PROP), nn.Linear(LAYER_SIZE, LAYER_SIZE), nn.ReLU(), nn.Linear(LAYER_SIZE, n_classes), nn.Softmax(dim=-1)) def forward(self, inp): return self.fc(inp) ###Output _____no_output_____ ###Markdown Define our Bayesian Neural Network Initialize our model, gradient descent alg, learning rate decay ###Code FC_NN = Common_FCN() optim = torch.optim.AdamW(FC_NN.parameters(recurse=True), lr=LEARNING_WEIGHT, weight_decay=0.01, amsgrad=True)#SGD(FC_NN.parameters(recurse=True), lr=0.1, momentum=0.95) scheduler = torch.optim.lr_scheduler.MultiStepLR(optim, milestones=DECAY_MILESTONES, gamma=GAMMA) ###Output _____no_output_____ ###Markdown Initialize our model and perform the training loop ###Code if __name__ == '__main__': # need to wrap our training function in this training_state = {'train_acc':[], 'test_acc':[]} for i in range(EPOCHS): total_loss = 0.0 total = 0.0 correct = 0.0 for x, y in train_loader: FC_NN.zero_grad() pred = FC_NN.forward(x) loss = nnf.binary_cross_entropy(pred, nnf.one_hot(y, torch.tensor(n_classes)).float()) total_loss += loss total += y.size(0) correct += (pred.argmax(-1) == y).sum().item() loss.backward() tracc = correct/total*100 optim.step() scheduler.step() FC_NN.eval() # set eval mode - w/o dropout total = 0.0 correct = 0.0 for x, y in test_loader: pred = FC_NN.forward(x) total += y.size(0) correct += (pred.argmax(-1) == y).sum().item() teacc = correct/total*100 FC_NN.train() # reset to training mode - w/ dropout training_state['train_acc'].append(tracc) training_state['test_acc'].append(teacc) print('epoch: %d | learning rate: %f | train loss: %.3f | train acc: %.5f' %((i+1), get_lr(optim), total_loss, tracc), end='\r') plt.figure() plt.plot(training_state['train_acc'], 'r-', label='train acc') plt.plot(training_state['test_acc'], 'b-', label='val acc') plt.legend() plt.show() ###Output _____no_output_____
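###Markdown The deterministic network above only produces point predictions. A lightweight way to approximate the "I don't know" behaviour discussed earlier, without writing the full Pyro model, is Monte-Carlo dropout: keep dropout active at prediction time and look at the spread over repeated stochastic forward passes. The sketch below is a hedged illustration (it assumes a model whose dropout layers are actually enabled, unlike the commented-out ones above); it is not part of the original tutorial. ###Code
import torch

def mc_dropout_predict(model, x, n_samples=50):
    """Run n_samples stochastic forward passes with dropout left on."""
    model.train()  # keeps dropout active; assumes the model really contains dropout layers
    with torch.no_grad():
        preds = torch.stack([model(x) for _ in range(n_samples)])  # (n_samples, batch, classes)
    model.eval()
    return preds.mean(dim=0), preds.std(dim=0)

# hypothetical usage:
# mean_probs, std_probs = mc_dropout_predict(FC_NN, x_batch)
# uncertain = std_probs.max(dim=-1).values > 0.2  # flag inputs the model is unsure about
###Output _____no_output_____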
notebooks/tbeucler_devlog/015_MeanSquaredError_ranked_diagnostics.ipynb
###Markdown tgb - 5/28/2019 - The goal of this notebook is to develop MSE-ranked diagnostics in the hope to identify climate conditions in which our network performs poorly. What we have so far is the average error for each variable in the md.stats object. The goal is to do the opposite: The climate variable as a function of the error. 1) Loading CBRAIN ###Code from cbrain.imports import * from cbrain.data_generator import * from cbrain.cam_constants import * from cbrain.losses import * from cbrain.utils import limit_mem from cbrain.layers import * from cbrain.model_diagnostics import * import tensorflow as tf import tensorflow.math as tfm from tensorflow.keras.layers import * from tensorflow.keras.models import * import xarray as xr import numpy as np from cbrain.model_diagnostics import ModelDiagnostics from numpy import linalg as LA import matplotlib.pyplot as plt # Otherwise tensorflow will use ALL your GPU RAM for no reason limit_mem() TRAINDIR = '/local/Tom.Beucler/SPCAM_PHYS/' DATADIR = '/project/meteo/w2w/A6/S.Rasp/SP-CAM/fluxbypass_aqua/' PREFIX = '8col009_01_' %cd /filer/z-sv-pool12c/t/Tom.Beucler/SPCAM/CBRAIN-CAM coor = xr.open_dataset("/project/meteo/w2w/A6/S.Rasp/SP-CAM/fluxbypass_aqua/AndKua_aqua_SPCAM3.0_sp_fbp_f4.cam2.h1.0000-01-01-00000.nc",\ decode_times=False) lat = coor.lat; lon = coor.lon; coor.close(); config_fn = '/filer/z-sv-pool12c/t/Tom.Beucler/SPCAM/CBRAIN-CAM/pp_config/8col_rad_tbeucler_local_PostProc.yml' data_fn = '/local/Tom.Beucler/SPCAM_PHYS/8col009_01_valid.nc' dict_lay = {'SurRadLayer':SurRadLayer,'MassConsLayer':MassConsLayer,'EntConsLayer':EntConsLayer} ###Output _____no_output_____ ###Markdown 2) Calculate mean of variables conditioned on MSE 2.1) Define models tgb - 5/28/2019 - For now use the best performing LCnet and LACnet ###Code NNa = ['JNNC','JNNL'] # ACnet and LCnet NN = {}; md = {}; %cd $TRAINDIR/HDF5_DATA for i,nns in enumerate(NNa): path = TRAINDIR+'HDF5_DATA/'+nns+'.h5' NN[nns] = load_model(path,custom_objects=dict_lay) md[nns] = ModelDiagnostics(NN,config_fn,data_fn) # # NNL0.01 # path = TRAINDIR+'HDF5_DATA/NNL0.01.h5' # NN['NNL0.01'] = load_model(path,custom_objects=dict_lay) # md['NNL0.01'] = ModelDiagnostics(NN,config_fn,data_fn) ###Output /scratch-local/Tom.Beucler/SPCAM_PHYS/HDF5_DATA ###Markdown 2.2) Accumulates all variables over Nt timesteps, with a focus on P-CWV ###Code # User's choice Nt = 100 P_CONV = 1e3*24*3600 # Conversion from m/s to mm/day # Initialization CWVtot = np.zeros((len(coor.lat),len(coor.lon),1)) QVtot = np.zeros((len(coor.lat),len(coor.lon),len(coor.lev),1)) Ttot = np.copy(QVtot) CSHtot = np.copy(CWVtot) xtot = np.zeros((len(coor.lat),len(coor.lon),304,1)) PRECtot = np.zeros((len(coor.lat),len(coor.lon),1)) PREC_NN = {}; MSE_NN = {}; y_NN = {}; for iNN,NNname in enumerate(NNa): PREC_NN[NNname] = np.zeros((len(coor.lat),len(coor.lon),1)) MSE_NN[NNname] = np.copy(PREC_NN[NNname]) y_NN[NNname] = np.zeros((len(coor.lat),len(coor.lon),218,1)) for itime in range(Nt): print('itime=',itime,'/',Nt-1,end="\r") m = md[NNname] # Reference model diagnostic object # 1) Predict total precipitation on batch (liquid equivalent in mm/day) X, truth = m.valid_gen[itime] PRECIP = (np.sum(truth.values[:,-4:-2],axis=1))*P_CONV/(L_V*RHO_L) # 2) Save mean input xgeo = m.reshape_ngeo(m.valid_gen.input_transform.inverse_transform(X).values) xtot = np.concatenate((xtot,np.expand_dims(xgeo[:,:,:],axis=3)),axis=3) # 3) Water vapor QV = m.valid_gen.input_transform.inverse_transform(X)[:,m.get_input_var_idx('QBP')] QVgeo = 
m.reshape_ngeo(QV.values) QVtot = np.concatenate((QVtot,np.expand_dims(QVgeo[:,:,:],axis=3)),axis=3) CWVgeo = m.reshape_ngeo(np.expand_dims(np.sum(m.dP(itime)*QV/G,axis=1),axis=1)) CWVtot = np.concatenate((CWVtot,CWVgeo[:,:,:][:,:,:]),axis=2) # 4) Precipitation PRECgeo = m.reshape_ngeo(PRECIP) PRECtot = np.concatenate((PRECtot,PRECgeo[:,:,:][:,:,:]),axis=2) # 5) Temperature T = m.valid_gen.input_transform.inverse_transform(X)[:,m.get_input_var_idx('TBP')] Tgeo = m.reshape_ngeo(T.values)[:,:,:] Ttot = np.concatenate((Ttot,np.expand_dims(Tgeo[:,:,:],axis=3)),axis=3) CSHgeo = m.reshape_ngeo(np.expand_dims(np.sum(m.dP(itime)*C_P*T/(L_V*G),axis=1),axis=1)) CSHtot = np.concatenate((CSHtot,CSHgeo[:,:,:][:,:,:]),axis=2) # 5) Precipitation from NN prediction for iNN,NNname in enumerate(NNa): pred = md[NNname].model[NNname].predict_on_batch(X) PRECIP_NN = (np.sum(pred[:,-4:-2],axis=1))*P_CONV/(L_V*RHO_L) PREC_NN[NNname] = np.concatenate(( PREC_NN[NNname], md[NNname].reshape_ngeo(PRECIP_NN)[:,:,:][:,:,:]), axis=2) y_NN[NNname] = np.concatenate(( y_NN[NNname], md[NNname].reshape_ngeo((pred-truth.values)**2)[:,:,:][:,:,:,np.newaxis]), axis=3) MEANSE_NN = np.sum((pred-truth.values)**2,axis=1)/pred.shape[1] MSE_NN[NNname] = np.concatenate(( MSE_NN[NNname], md[NNname].reshape_ngeo(MEANSE_NN)[:,:,:][:,:,:]), axis=2) # Remove initial zero QVtot = QVtot[:,:,:,1:] Ttot = Ttot[:,:,:,1:] CWVtot = CWVtot[:,:,1:] CSHtot = CSHtot[:,:,1:] PRECtot = PRECtot[:,:,1:] xtot = xtot[:,:,:,1:] for iNN,NNname in enumerate(NNa): PREC_NN[NNname] = PREC_NN[NNname][:,:,1:] MSE_NN[NNname] = MSE_NN[NNname][:,:,1:] y_NN[NNname] = y_NN[NNname][:,:,:,1:] y_NN[NNname].mean(axis=(0,1,3)).shape rank = np.argsort(y_NN['JNNL'].mean(axis=(0,1,3)))[::-1] print(rank) print(y_NN['JNNL'].mean(axis=(0,1,3))[rank]) ###Output [214 21 22 215 20 23 24 25 19 26 29 27 111 28 18 110 112 109 52 51 217 108 50 17 113 107 211 49 114 210 53 115 106 48 54 16 116 105 76 117 55 77 75 78 79 213 119 80 216 104 74 118 212 81 15 73 103 82 56 72 142 143 102 144 141 71 47 145 14 101 138 137 140 136 139 83 134 70 135 100 146 133 57 132 99 93 92 131 84 13 98 147 69 167 168 172 169 170 130 166 171 165 164 58 148 97 163 162 129 173 209 59 161 149 85 174 160 96 12 159 175 128 68 176 123 158 127 94 152 121 46 177 124 179 95 178 91 126 151 153 125 11 120 157 122 155 154 86 90 156 200 67 187 66 150 7 10 196 1 203 182 208 207 186 89 64 0 44 206 204 205 194 183 36 9 192 88 188 191 189 45 190 193 2 195 5 197 202 87 199 201 31 8 43 198 42 181 40 41 184 38 61 32 185 30 39 65 34 60 35 33 62 180 3 63 37 6 4] [3.58962762e+03 2.89627373e+03 2.73849081e+03 2.08702377e+03 2.08678877e+03 1.68331199e+03 1.63449224e+03 1.46708258e+03 1.26161620e+03 1.19472283e+03 8.91013315e+02 8.31361394e+02 7.14128587e+02 7.03599819e+02 6.83741596e+02 6.22179930e+02 5.96507985e+02 5.24187946e+02 4.38576194e+02 4.21367158e+02 3.71807980e+02 3.69996734e+02 3.11628186e+02 2.93006256e+02 2.77250403e+02 2.31191923e+02 2.21708901e+02 2.12113498e+02 2.10626972e+02 1.96428391e+02 1.96053268e+02 1.48197382e+02 1.28285942e+02 1.14884527e+02 1.12527317e+02 1.02432411e+02 9.14123560e+01 6.78236780e+01 5.75847797e+01 5.58714946e+01 5.29674685e+01 4.95076983e+01 4.73396865e+01 4.69016849e+01 4.39716902e+01 4.27953970e+01 4.05623375e+01 3.98687212e+01 3.93765916e+01 3.89502269e+01 3.67625511e+01 3.67092290e+01 3.52985379e+01 3.15529787e+01 3.11661626e+01 2.74752333e+01 2.48466227e+01 2.03625031e+01 1.93618808e+01 1.76052755e+01 1.62084061e+01 1.60189036e+01 1.50016831e+01 1.38912131e+01 1.12827679e+01 
1.09824042e+01 1.09022048e+01 1.01577413e+01 9.69534788e+00 9.40217742e+00 8.49855943e+00 8.39287645e+00 7.92276357e+00 7.85141088e+00 7.51751939e+00 6.93095493e+00 6.67120640e+00 6.30970024e+00 6.27891811e+00 6.13160747e+00 5.79747766e+00 5.61700572e+00 5.48581625e+00 4.91692168e+00 4.09352768e+00 3.48366799e+00 3.40838648e+00 3.39314749e+00 2.95357863e+00 2.77591143e+00 2.61756103e+00 2.56070246e+00 2.50534598e+00 2.38828657e+00 2.38608754e+00 2.34611716e+00 2.31837221e+00 2.25769431e+00 2.06341681e+00 2.04469321e+00 2.03248494e+00 1.95356737e+00 1.83071148e+00 1.82850556e+00 1.72039359e+00 1.70925293e+00 1.53888775e+00 1.51954950e+00 1.22844195e+00 1.16205721e+00 1.08652945e+00 1.01536208e+00 9.55005374e-01 9.37391022e-01 8.81626246e-01 8.18504443e-01 7.61440663e-01 6.96553418e-01 6.09855294e-01 5.67824741e-01 5.60089662e-01 5.44410552e-01 4.83762096e-01 4.08279905e-01 3.80679542e-01 2.68415668e-01 2.58457401e-01 2.18358669e-01 2.16820731e-01 1.89944954e-01 1.87222267e-01 1.83874058e-01 1.69996830e-01 1.62117140e-01 1.56345076e-01 1.54186578e-01 1.43876874e-01 1.33055642e-01 1.24486077e-01 1.21721630e-01 1.17908151e-01 9.79162960e-02 8.82078368e-02 8.80911223e-02 7.45371488e-02 6.86938302e-02 6.65358272e-02 6.09260081e-02 5.71186074e-02 5.50152047e-02 4.97674627e-02 3.95525971e-02 3.71720779e-02 2.84567026e-02 2.36143929e-02 1.72241768e-02 1.13614066e-02 1.01496998e-02 8.74182520e-03 7.88774468e-03 6.50385862e-03 5.83004131e-03 4.63582847e-03 3.87802074e-03 3.54363286e-03 2.84879925e-03 2.43609330e-03 2.17755729e-03 1.88545108e-03 1.87913676e-03 1.79161869e-03 1.62361063e-03 1.39923922e-03 1.18672467e-03 1.14096399e-03 8.08444637e-04 7.56948123e-04 6.39271962e-04 5.27778213e-04 5.08029865e-04 4.72095307e-04 4.63932542e-04 4.21616810e-04 3.20078760e-04 3.10100728e-04 3.01998398e-04 2.85523689e-04 2.61460396e-04 2.30768506e-04 2.12851442e-04 1.78122595e-04 1.71559864e-04 1.70963337e-04 1.68912785e-04 1.64859116e-04 1.60645959e-04 1.58223096e-04 1.54389281e-04 1.43189736e-04 1.32100054e-04 1.14056899e-04 1.12724017e-04 1.07723675e-04 1.05862784e-04 1.03087879e-04 1.02765284e-04 1.01439013e-04 9.93121215e-05 8.93163357e-05 8.19414689e-05 7.68469881e-05 7.32960115e-05 6.93219208e-05 6.62163217e-05 5.80133103e-05 5.37063436e-05 4.89281137e-05 4.09028340e-05] ###Markdown Knowing that the networks predicts: [PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND, QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN] The previous ranking gives the largest sources of error: 214 = PRECT 21 = PHQ[21] 22 = PHQ[22] 215 = PRECTEND 20 = PHQ[20] 23 = PHQ[23] ... 
111 = TPHYSTND[21] So we can say that the largest sources of error are by far precipitation and the lower-tropospheric water vapor ###Code print(rank[25:30]) fz = 15 lw = 2 import matplotlib.pyplot as plt import matplotlib as mpl plt.rc('text', usetex=True) plt.rc('font', family='serif', size=fz) mpl.rcParams['lines.linewidth'] = lw plt.close('all') f = plt.figure(num=None, figsize=(10,4), dpi=80, facecolor='w', edgecolor='k') plt.gcf().subplots_adjust(bottom=0.15) # Make room for xlabels Lowrank = 25 # Show top Lowrank variables # Doing the variable <-> color manually because it would take too long to make it automatic plt.figure(figsize=(15,10)) barplt = plt.bar(x=np.linspace(start=1,stop=Lowrank,num=Lowrank),height=y_NN['JNNL'].mean(axis=(0,1,3))[rank][:Lowrank],\ color=['black', 'blue', 'blue', 'black', 'blue',\ 'blue','blue','blue','blue','blue',\ 'blue','blue','red','blue','blue',\ 'red','red','red','cyan','cyan',\ 'black','red','cyan','blue','red']) plt.legend((barplt[0],barplt[1],barplt[12],barplt[19]),\ ('Precipitation','Water Vapor Tendency','Temperature Tendency','Liquid Water Tendency'),\ fontsize=1.5*fz) plt.xlabel('Output Variable') plt.ylabel(r'$\textnormal{Mean squared error }\left(\mathrm{W^{2}m^{-4}}\right)$') # First use column water vapor as binning variable to test # Histogram parameters Nbin = 100 CWVmin = 20 CWVmax = 70 # Initialization CWVm = np.zeros((Nbin-1,1)) CSHm = np.copy(CWVm) Pm = np.copy(CWVm) PNNm = {}; MSENNm = {}; for iNN,NNname in enumerate(NNa): PNNm[NNname] = np.copy(Pm) MSENNm[NNname] = np.copy(Pm) for ibin,edge in enumerate(np.linspace(CWVmin,CWVmax,num=Nbin)): print('ibin=',ibin,'/',Nbin-1,' & edge=',edge,end="\r") if ibin>0: CWVm[ibin-1] = np.average(CWVtot,weights=(CWVtot>=edge_left)*(CWVtot<edge)) Pm[ibin-1] = np.average(PRECtot,weights=(CWVtot>=edge_left)*(CWVtot<edge)) CSHm[ibin-1] = np.average(CSHtot,weights=(CWVtot>=edge_left)*(CWVtot<edge)) for iNN,NNname in enumerate(NNa): PNNm[NNname][ibin-1] = np.average( PREC_NN[NNname], weights=(CWVtot>=edge_left)*(CWVtot<edge)) MSENNm[NNname][ibin-1] = np.average( MSE_NN[NNname], weights=(CWVtot>=edge_left)*(CWVtot<edge)) edge_left = edge import matplotlib.pyplot as plt plt.figure(num=None, figsize=(15,5), dpi=80, facecolor='w', edgecolor='k') for iNN,NNname in enumerate(NNa): plt.scatter(CWVm,MSENNm[NNname],label=NNname) plt.legend() ###Output _____no_output_____ ###Markdown tgb - Then flip it and use the MSE as the binning variable ###Code # Model choice MSEtot = MSE_NN['JNNL'] # Histogram parameters Nbin = 100 MSEmin = 0 MSEmax = 2e3 # Initialization CWVm = np.zeros((Nbin-1,1)) CSHm = np.copy(CWVm) Pm = np.copy(CWVm) MSEm = np.copy(CWVm) lattot = np.expand_dims(np.expand_dims(lat.values,axis=1),axis=2) for i in range(2): lattot = np.repeat(lattot,CWVtot.shape[i+1],axis=i+1) latm = np.copy(CWVm) for ibin,edge in enumerate(np.linspace(MSEmin,MSEmax,num=Nbin)): print('ibin=',ibin,'/',Nbin-1,' & edge=',edge,end="\r") if ibin>0: CWVm[ibin-1] = np.average(CWVtot,weights=(MSEtot>=edge_left)*(MSEtot<edge)) Pm[ibin-1] = np.average(PRECtot,weights=(MSEtot>=edge_left)*(MSEtot<edge)) CSHm[ibin-1] = np.average(CSHtot,weights=(MSEtot>=edge_left)*(MSEtot<edge)) MSEm[ibin-1] = np.average(MSEtot,weights=(MSEtot>=edge_left)*(MSEtot<edge)) latm[ibin-1] = np.average(lattot,weights=(MSEtot>=edge_left)*(MSEtot<edge)) edge_left = edge import matplotlib.pyplot as plt plt.rc('text', usetex=True) plt.rc('font', family='serif', size=fz) mpl.rcParams['lines.linewidth'] = 1.5*lw plt.close('all') plt.figure(num=None, 
figsize=(15,5), dpi=80, facecolor='w', edgecolor='k') fig, ax1 = plt.subplots(figsize=(7.5,5)) ax2 = ax1.twinx() ax1.plot(MSEm,Pm, 'black') ax2.plot(MSEm,CWVm, 'blue') ax1.set_xlabel(r'$\textnormal{Mean squared error }\left(\mathrm{W^{2}m^{-4}}\right)$',fontsize=1.5*fz) ax1.set_ylabel(r'$\textnormal{Precipitation }\left(\mathrm{mm\ day^{-1}}\right)$', color='black',fontsize=1.5*fz) ax2.set_ylabel(r'$\textnormal{Column Water Vapor }\left(\mathrm{kg\ m^{-2}}\right)$', color='blue',fontsize=1.5*fz) plt.show() plt.figure(num=None, figsize=(10,5), dpi=80, facecolor='w', edgecolor='k') plt.scatter(MSEm,latm) plt.xlabel(r'$\textnormal{Mean squared error }\left(\mathrm{W^{2}m^{-4}}\right)$') plt.ylabel('Expected Latitude') plt.figure(num=None, figsize=(15,5), dpi=80, facecolor='w', edgecolor='k') plt.scatter(MSEm,CWVm) plt.figure(num=None, figsize=(15,5), dpi=80, facecolor='w', edgecolor='k') plt.scatter(MSEm,CSHm) lattot = np.expand_dims(np.expand_dims(lat.values,axis=1),axis=2) lattot.shape lattot = np.repeat(lattot,CWVtot.shape[1],axis=1) lattot.shape ###Output _____no_output_____
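###Markdown A compact alternative sketch for the same conditional-averaging step (an added illustration, assuming `MSEtot`, `CWVtot`, and `np` from the cells above are still in memory): `np.digitize` assigns every sample to a bin in a single call, so the per-bin means can be computed without the explicit edge bookkeeping; empty bins simply come out as NaN. ###Code edges = np.linspace(0, 2e3, num=100)                        # same MSE bin edges as above
bin_id = np.digitize(MSEtot.ravel(), edges)                 # bin index for every sample
cwv_by_bin = np.array([CWVtot.ravel()[bin_id == b].mean()   # NaN when a bin is empty
                       for b in range(1, len(edges))])
cwv_by_bin.shape ###Output _____no_output_____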
curacion/notebooks/practico_final.ipynb
###Markdown **Analisis y Curación de Datos** Grupo 8 Comisión 2- Leonardo Rodríguez- Sergio Sulca- Emanuel Alberto Matar Allasino- Maria Emilia Fernandez- Martín Barrera- Matthew Aguerreberry--- ###Code import sys if 'ftfy' not in sys.modules: !pip install 'ftfy<5.6' if 'feedparser' not in sys.modules: !pip install feedparser import pandas as pd import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline import sqlite3 import feedparser from hashlib import md5 sns.set_context(context='talk', font_scale=1.2) ###Output Collecting ftfy<5.6 [?25l Downloading https://files.pythonhosted.org/packages/8f/86/df789c5834f15ae1ca53a8d4c1fc4788676c2e32112f6a786f2625d9c6e6/ftfy-5.5.1-py3-none-any.whl (43kB)  |███████▌ | 10kB 16.5MB/s eta 0:00:01  |███████████████ | 20kB 3.0MB/s eta 0:00:01  |██████████████████████▍ | 30kB 3.7MB/s eta 0:00:01  |█████████████████████████████▉ | 40kB 3.9MB/s eta 0:00:01  |████████████████████████████████| 51kB 2.4MB/s [?25hRequirement already satisfied: wcwidth in /usr/local/lib/python3.6/dist-packages (from ftfy<5.6) (0.2.5) Installing collected packages: ftfy Successfully installed ftfy-5.5.1 Collecting feedparser [?25l Downloading https://files.pythonhosted.org/packages/91/d8/7d37fec71ff7c9dbcdd80d2b48bcdd86d6af502156fc93846fb0102cb2c4/feedparser-5.2.1.tar.bz2 (192kB)  |████████████████████████████████| 194kB 5.4MB/s [?25hBuilding wheels for collected packages: feedparser Building wheel for feedparser (setup.py) ... [?25l[?25hdone Created wheel for feedparser: filename=feedparser-5.2.1-cp36-none-any.whl size=44940 sha256=249c03a87d2e6695a78e69f5ac3004fa84aa58b7eb791956eeefb9b9e5384f98 Stored in directory: /root/.cache/pip/wheels/8c/69/b7/f52763c41c5471df57703a0ef718a32a5e81ee35dcf6d4f97f Successfully built feedparser Installing collected packages: feedparser Successfully installed feedparser-5.2.1 ###Markdown Ejercicio 1: Tablas de Crecimiento---1. Descargar dataset en CSV ###Code df = pd.read_csv('https://raw.githubusercontent.com/DiploDatos/AnalisisYCuracion/master/input/wtageinf.csv') df ###Output _____no_output_____ ###Markdown 2. Graficar las curvas correspondientes a cada percentil para niños y niñas * a. En la columna Sex, 1 representa a niños y 2 a niñas * b. Las curvas de los percentiles correspondientes a un mismo sexo deben ir en un mismo gráfico. * c. Un gráfico para cada sexo. 
###Code fig, ax = plt.subplots(1, 2 , figsize=(20, 10)) df[df.Sex == 1].plot( x='Agemos', y=['P3', 'P5', 'P10', 'P25', 'P50', 'P75', 'P90', 'P95', 'P97'], ax=ax[0]) df[df.Sex == 2].plot( x='Agemos', y=['P3', 'P5', 'P10', 'P25', 'P50', 'P75', 'P90', 'P95', 'P97'], ax=ax[1]) ax[0].grid(True) ax[0].set_title("Boys") ax[0].legend(fontsize='x-small') ax[1].grid(True) ax[1].set_title("Girls") ax[1].legend(fontsize='x-small') plt.show() ###Output _____no_output_____ ###Markdown Ejercicio 2: Pasos Fronterizos---Descargar dataset en JSON ###Code data_json = pd.read_json("https://raw.githubusercontent.com/DiploDatos/AnalisisYCuracion/master/input/pasos.json") data_json["province"].unique() replaces = { "Cordoba": "Córdoba", "AEROPUERTO INTERNACIONAL PILOTO CIVIL NORBERTO FERNANDEZ": "Santa Cruz", "NEUQUEN": "Neuquen" } data_json = data_json.replace(replaces) data_json["province"].unique() fig, ax = plt.subplots(1, 1 , figsize=(16,8)) data_json.groupby("province").count()["location"].plot.bar(ax=ax) plt.show() ###Output _____no_output_____ ###Markdown Ejercicio 3: Representación de grafos--- ###Code edgeList = [[0, 2], [1, 3], [2, 3], [2, 4], [3, 5], [4, 5]] edgePairs = [(edge[0], edge[1]) for edge in edgeList] nodes_set = set([s for (s, _) in edgePairs] + [t for (_, t) in edgePairs]) size = max(nodes_set) + 1 adjMatrix = [[0] * size for _ in range(size)] for (s, t) in edgePairs: adjMatrix[s][t] = 1 adjMatrix adjList = {} for s in nodes_set: adjList[s] = [t for (source, t) in edgePairs if source == s] adjList ###Output _____no_output_____ ###Markdown Ejercicio 4: Ingestión de Datos - SQL--- ###Code !wget https://cdn.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip !unzip -o chinook.zip conn = sqlite3.connect('chinook.db') def query_dataset(query): c = conn.cursor() rows = c.execute(query) cols = [column[0] for column in rows.description] results= pd.DataFrame.from_records(data = rows.fetchall(), columns = cols) c.close() return results ###Output _____no_output_____ ###Markdown 4.1 Canciones de Iron Maiden```sqlselect distinct tracks.Name from artistsinner join albums on artists.ArtistId = albums.ArtistIdinner join tracks on tracks.AlbumId = albums.AlbumId where artists.Name = "Iron Maiden"order by tracks.Name asc```Query usando `pandas` ###Code iron_maiden_songs_query = """ select distinct tracks.Name from artists inner join albums on artists.ArtistId = albums.ArtistId inner join tracks on tracks.AlbumId = albums.AlbumId where artists.Name = "Iron Maiden" order by tracks.Name asc """ query_dataset(iron_maiden_songs_query) ###Output _____no_output_____ ###Markdown 4.2 Discos con más de 25 canciones```sqlselect albums.Title as "Album Title", count(TrackId) as song_num from albumsinner join tracks on albums.AlbumId = tracks.AlbumIdgroup by albums.AlbumIdhaving song_num > 25order by song_num desc``` ###Code long_albums = """ select albums.Title as "Album Title", count(TrackId) as song_num from albums inner join tracks on albums.AlbumId = tracks.AlbumId group by albums.AlbumId having song_num > 25 order by song_num desc """ query_dataset(long_albums) ###Output _____no_output_____ ###Markdown 4.3 Las 10 canciones más populares```sqlselect tracks.Name, count(playlist_track.PlaylistId) as occurences from playlist_trackinner join tracks on playlist_track.TrackId = tracks.TrackIdgroup by playlist_track.TrackIdorder by occurences desclimit 10``` ###Code top_songs = """ select tracks.Name, count(playlist_track.PlaylistId) as occurences from playlist_track inner join tracks on playlist_track.TrackId = 
tracks.TrackId group by playlist_track.TrackId order by occurences desc limit 10 """ query_dataset(top_songs) ###Output _____no_output_____ ###Markdown 4.4 Reescribir la siguiente consulta con JOIN```sqlSELECT trackid, nameFROM tracksWHERE albumid = ( SELECT albumid FROM albums WHERE title = 'Let There Be Rock');```Rta:```sqlselect tracks.TrackId, tracks.Name from tracksinner join albums on tracks.AlbumId = albums.AlbumIdwhere albums.Title = "Let There Be Rock"order by tracks.TrackId asc``` ###Code an_album_tracks = """ select tracks.TrackId, tracks.Name from tracks inner join albums on tracks.AlbumId = albums.AlbumId where albums.Title = "Let There Be Rock" order by tracks.TrackId asc """ query_dataset(an_album_tracks) ###Output _____no_output_____ ###Markdown Ejercicio 5: RSS Ingestion--- Títulos de artículos de Tecnología ###Code feed = feedparser.parse("https://www.lavoz.com.ar/rss/tecnologia.xml") titles = [e["title"] for e in feed.entries] pd.DataFrame(titles, columns=["Titulo"]) ###Output _____no_output_____ ###Markdown Ejercicio 6: Limpieza de Datos--- ###Code in_colab = 'google.colab' in sys.modules if in_colab: BASE_DIR = "https://github.com/DiploDatos/AnalisisYCuracion/raw/master/" else: BASE_DIR = ".." kickstarter_2018 = pd.read_csv(BASE_DIR + "/input/kickstarter-projects/ks-projects-201801.csv", parse_dates=["deadline","launched"], index_col=['ID']) kickstarter_2018 ###Output _____no_output_____ ###Markdown 6.1 Detección de datos duplicadosArmar una tabla con todos los proyectos con nombres duplicados, ordenados para revisar agrupados. ###Code duplicated_names = kickstarter_2018[kickstarter_2018.duplicated(subset=["name"], keep=False)].sort_values(by=["name", "launched"]) duplicated_names ###Output _____no_output_____ ###Markdown 6.2 Verificación de consistencia de la función de hashingVerificar que los proyectos que tienen nombres duplicados también tienen el hash de nombre duplicado ###Code def hashit(val): if isinstance(val, float): return str(val) return md5(val.encode('utf-8')).hexdigest() hashed_dataset = kickstarter_2018.copy() hashed_dataset["name"].apply(hashit).to_frame() duplicated_names["hash"] = duplicated_names["name"].apply(hashit) hash_nunique = duplicated_names.groupby(["name"])["hash"].agg(["nunique"]) hash_nunique.columns = ["hash_count"] hash_nunique hash_nunique[hash_nunique.hash_count > 1].any() ###Output _____no_output_____ ###Markdown 6.3 Manejo del encoding de datosComparar la cantidad de nombres raros en kickstarter_2018 con la que obtenemos al cargar `ks-projects-201801.csv` con encoding iso-8859-1 ###Code kickstarter_2018_wrong = pd.read_csv( BASE_DIR + "/input/kickstarter-projects/ks-projects-201801.csv", parse_dates=["deadline","launched"], index_col=['ID'], encoding="ISO-8859-1") import ftfy.badness as bad def weird(val): if isinstance(val, float): return 0 return bad.sequence_weirdness(val) def add_weirdness(df): df['name_weirdness'] = df['name'].apply(weird) add_weirdness(kickstarter_2018) kickstarter_2018[kickstarter_2018.name_weirdness > 1][["name_weirdness"]].describe() add_weirdness(kickstarter_2018_wrong) kickstarter_2018_wrong[kickstarter_2018_wrong.name_weirdness > 1][["name_weirdness"]].describe() ###Output _____no_output_____ ###Markdown 6.4 Preprocesamiento de variables categóricas**Codificar** *currency* con ambas estrategias (vistas en clase) ###Code from sklearn import preprocessing column='currency' le = preprocessing.LabelEncoder() le.fit(kickstarter_2018[column]) dict(zip(le.classes_,le.transform(le.classes_))) 
kickstarter_2018[column] = le.transform(kickstarter_2018[column]) kickstarter_2018.head() kickstarter_2018[column] = le.inverse_transform(kickstarter_2018[column]) kickstarter_2018.head() from sklearn.preprocessing import LabelBinarizer other_column = 'currency' lb = LabelBinarizer() lb_results = lb.fit_transform(kickstarter_2018[other_column]) pd.DataFrame(lb_results, columns=((other_column + '_') + pd.Series(lb.classes_))).head(10) ###Output _____no_output_____ ###Markdown 6.5 Validación de integridad 6.5.1 ¿Hay proyecto éxitosos que no consiguieron el objetivo? Si hay, ¿Qué porcentaje sí y cuál no? ###Code successful_not_goal = kickstarter_2018[(kickstarter_2018.state == 'successful') & (kickstarter_2018.pledged < kickstarter_2018.goal)] successful_goal = kickstarter_2018[(kickstarter_2018.state == 'successful') & (kickstarter_2018.pledged >= kickstarter_2018.goal)] successful_not_goal print('Porcentaje que SI consiguieron:') len(successful_not_goal) / len(kickstarter_2018[kickstarter_2018.state == "successful"]) print('Porcentaje que NO consiguieron:') len(successful_goal) / len(kickstarter_2018[kickstarter_2018.state == "successful"]) ###Output Porcentaje que NO consiguieron: ###Markdown 6.5.2 Calcular una tabla con la cantidad de proyectos por categoría principal y estado. ###Code cols = ['name', 'state', 'main_category'] kickstarter_2018[cols].pivot_table( values='name', index='state', columns='main_category', aggfunc='count' ) ###Output _____no_output_____ ###Markdown 6.6 Identificar valores atípicos/outliers¶ 6.6.1 Calcular los valores atípicos de `usd_goal_real` y graficar los boxplots, con y sin estos valores por categoría ###Code %matplotlib inline outliers = kickstarter_2018[kickstarter_2018.usd_goal_real > (kickstarter_2018.usd_goal_real.mean() + 3 * kickstarter_2018.usd_goal_real.std())] outliers fig = plt.figure(figsize=(16, 8)) ax1 = plt.subplot('121') kickstarter_2018.boxplot(column='usd_goal_real', ax=ax1); ax1.set_title('CON valores atipicos') ax1.set_ylim((0, 1.2e8)) ax2 = plt.subplot('122') kickstarter_2018[ kickstarter_2018.usd_goal_real <= (kickstarter_2018.usd_goal_real.mean() + 3 * kickstarter_2018.usd_goal_real.std()) ].boxplot(column='usd_goal_real', ax=ax2); ax2.set_title('SIN valores atipicos') ax2.set_ylim((0, 1.2e8)) plt.show() ###Output _____no_output_____ ###Markdown 6.6.2 Análisis por categoría principal: ###Code fig = plt.figure(figsize=(12, 12)) ax1 = plt.subplot('211') kickstarter_2018.boxplot(column='usd_goal_real',by='main_category', ax=ax1) ax1.set_title('CON valores atipicos') ax1.set_xticklabels(ax1.get_xticklabels(), rotation=45) ax1.set_ylim((0, 1.2e8)) ax2 = plt.subplot('212') kickstarter_2018.drop(outliers.index).boxplot( column='usd_goal_real',by='main_category', ax=ax2) ax2.set_title('SIN valores atipicos') ax2.set_xticklabels(ax2.get_xticklabels(), rotation=45) ax2.set_ylim((0, 1.2e8)) fig.tight_layout() ###Output _____no_output_____
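###Markdown A short complementary sketch (added as an illustration, assuming `kickstarter_2018` is still loaded as above): the same outlier question can be asked with the interquartile-range rule instead of the mean plus three standard deviations criterion used above. ###Code q1 = kickstarter_2018['usd_goal_real'].quantile(0.25)
q3 = kickstarter_2018['usd_goal_real'].quantile(0.75)
iqr = q3 - q1
iqr_outliers = kickstarter_2018[
    (kickstarter_2018.usd_goal_real < q1 - 1.5 * iqr) |
    (kickstarter_2018.usd_goal_real > q3 + 1.5 * iqr)
]
iqr_outliers.shape ###Output _____no_output_____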
examples/ibis-example.ipynb
###Markdown Ibis, Intake, and Civis * Typically, on the platform, you might be interacting with larger-than-memory tables. * For example, the City of Los Angeles 311 Dataset is on the order of 10+ GB. * Additionally, you may want to switch code between Redshift SQL and PostgreSQL. * Intake, together with Ibis as a deferred expression engine, allows us to easily switch between Postgres / Redshift and build SQL queries without having to write SQL by hand. ###Code import intake_civis import altair as alt import pandas as pd cat = intake_civis.open_postgres_catalog() list(cat) # create an Ibis object for a specific table inside postgres db_tbl = cat.public.import311.to_ibis() # subset the dataset to only street repair request types street_repair = ['Barricade Removal', 'Bus Pad/Landing', 'Curb Repair', 'Flooding', 'General Street Inspection', 'Guard/Warning Rail Maintenance', 'Gutter Repair', 'Land/Mud Slide', 'Pothole - Small Asphalt Repair', 'Resurfacing', 'Sidewalk Repair', 'Street Sweeping'] cols = ['srnumber', 'requesttype', 'createddate'] # assemble an ibis query and display it using the graphviz query view street = db_tbl[db_tbl.requesttype.isin(street_repair)] street = street[cols] street = street.mutate(date=street.createddate.cast('date')) street = street[street.createddate >= '2018-01-01'] street # execute the SQL on Postgres %time df = street.execute() # do a small groupby grouped = df.set_index('date').groupby([pd.Grouper(freq='M'), 'requesttype']).count() grouped.head() alt.Chart(grouped.reset_index()).mark_line().encode( x='date', y='srnumber', color='requesttype') ###Output _____no_output_____ ###Markdown Adapt to Redshift Below is a similar example, but using Redshift. You can see the code is nearly the same. ###Code cat = intake_civis.open_redshift_catalog() list(cat) # create an Ibis object for a specific table inside redshift db_tbl = cat.public.import311.to_ibis() # assemble an ibis query and display it using the graphviz query view street = db_tbl[db_tbl.requesttype.isin(street_repair)] street = street[cols] street = street.mutate(date=street.createddate.cast('date')) street = street[street.createddate >= '2018-01-01'] street # execute the SQL on Redshift %time df = street.execute() # do a small groupby grouped = df.set_index('date').groupby([pd.Grouper(freq='M'), 'requesttype']).count() grouped.head() alt.Chart(grouped.reset_index()).mark_line().encode( x='date', y='srnumber', color='requesttype') ###Output _____no_output_____
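###Markdown One extra hedged sketch (added for illustration; the exact object returned varies with the Ibis version and backend): because the expression above is deferred, you can usually inspect the SQL that Ibis generates before executing it. ###Code compiled = street.compile()   # SQLAlchemy selectable or SQL string, depending on the Ibis version
print(str(compiled))          # printable SQL text in either case ###Output _____no_output_____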
1. Natural Language Processing with Classification and Vector Spaces/Week 4/NLP_C1_W4_lecture_nb_02.ipynb
###Markdown Hash functions and multiplanesIn this lab, we are going to practice the most important concepts related to the hash functions explained in the videos. You will be using these in this week's assignment.A key point for the lookup using hash functions is the calculation of the hash key or bucket id that we assign for a given entry. In this notebook, we will cover:* Basic hash tables* Multiplanes* Random planes Basic Hash tablesHash tables are data structures that allow indexing data to make lookup tasks more efficient. In this part, you will see the implementation of the simplest hash function. ###Code import numpy as np # library for array and matrix manipulation import pprint # utilities for console printing from utils_nb import plot_vectors # helper function to plot vectors import matplotlib.pyplot as plt # visualization library pp = pprint.PrettyPrinter(indent=4) # Instantiate a pretty printer ###Output _____no_output_____ ###Markdown In the next cell, we will define a straightforward hash function for integer numbers. The function will receive a list of integer numbers and the desired amount of buckets. The function will produce a hash table stored as a dictionary, where keys contain the hash keys, and the values will provide the hashed elements of the input list. The hash function is just the remainder of the integer division between each element and the desired number of buckets. ###Code def basic_hash_table(value_l, n_buckets): def hash_function(value, n_buckets): return int(value) % n_buckets hash_table = {i:[] for i in range(n_buckets)} # Initialize all the buckets in the hash table as empty lists for value in value_l: hash_value = hash_function(value,n_buckets) # Get the hash key for the given value hash_table[hash_value].append(value) # Add the element to the corresponding bucket return hash_table ###Output _____no_output_____ ###Markdown Now let's see the hash table function in action. The pretty print function (`pprint()`) will produce a visually appealing output. ###Code value_l = [100, 10, 14, 17, 97] # Set of values to hash hash_table_example = basic_hash_table(value_l, n_buckets=10) pp.pprint(hash_table_example) ###Output { 0: [100, 10], 1: [], 2: [], 3: [], 4: [14], 5: [], 6: [], 7: [17, 97], 8: [], 9: []} ###Markdown In this case, the bucket key must be the rightmost digit of each number. PlanesMultiplanes hash functions are other types of hash functions. Multiplanes hash functions are based on the idea of numbering every single region that is formed by the intersection of n planes. In the following code, we show the most basic forms of the multiplanes principle. First, with a single plane: ###Code P = np.array([[1, 1]]) # Define a single plane. fig, ax1 = plt.subplots(figsize=(8, 8)) # Create a plot plot_vectors([P], axes=[2, 2], ax=ax1) # Plot the plane P as a vector # Plot random points. for i in range(0, 10): v1 = np.array(np.random.uniform(-2, 2, 2)) # Get a pair of random numbers between -4 and 4 side_of_plane = np.sign(np.dot(P, v1.T)) # Color the points depending on the sign of the result of np.dot(P, point.T) if side_of_plane == 1: ax1.plot([v1[0]], [v1[1]], 'bo') # Plot blue points else: ax1.plot([v1[0]], [v1[1]], 'ro') # Plot red points plt.show() ###Output _____no_output_____ ###Markdown The first thing to note is that the vector that defines the plane does not mark the boundary between the two sides of the plane. It marks the direction in which you find the 'positive' side of the plane. 
Not intuitive at all!If we want to plot the separation plane, we need to plot a line that is perpendicular to our vector `P`. We can get such a line using a $90^o$ rotation matrix.Feel free to change the direction of the plane `P`. ###Code P = np.array([[1, 2]]) # Define a single plane. You may change the direction # Get a new plane perpendicular to P. We use a rotation matrix PT = np.dot([[0, 1], [-1, 0]], P.T).T fig, ax1 = plt.subplots(figsize=(8, 8)) # Create a plot with custom size plot_vectors([P], colors=['b'], axes=[2, 2], ax=ax1) # Plot the plane P as a vector # Plot the plane P as a 2 vectors. # We scale by 2 just to get the arrows outside the current box plot_vectors([PT * 4, PT * -4], colors=['k', 'k'], axes=[4, 4], ax=ax1) # Plot 20 random points. for i in range(0, 20): v1 = np.array(np.random.uniform(-4, 4, 2)) # Get a pair of random numbers between -4 and 4 side_of_plane = np.sign(np.dot(P, v1.T)) # Get the sign of the dot product with P # Color the points depending on the sign of the result of np.dot(P, point.T) if side_of_plane == 1: ax1.plot([v1[0]], [v1[1]], 'bo') # Plot a blue point else: ax1.plot([v1[0]], [v1[1]], 'ro') # Plot a red point plt.show() ###Output _____no_output_____ ###Markdown Now, let us see what is inside the code that color the points. ###Code P = np.array([[1, 1]]) # Single plane v1 = np.array([[1, 2]]) # Sample point 1 v2 = np.array([[-1, 1]]) # Sample point 2 v3 = np.array([[-2, -1]]) # Sample point 3 np.dot(P, v1.T) np.dot(P, v2.T) np.dot(P, v3.T) ###Output _____no_output_____ ###Markdown The function below checks in which side of the plane P is located the vector `v` ###Code def side_of_plane(P, v): dotproduct = np.dot(P, v.T) # Get the dot product P * v' sign_of_dot_product = np.sign(dotproduct) # The sign of the elements of the dotproduct matrix sign_of_dot_product_scalar = sign_of_dot_product.item() # The value of the first item return sign_of_dot_product_scalar side_of_plane(P, v1) # In which side is [1, 2] side_of_plane(P, v2) # In which side is [-1, 1] side_of_plane(P, v3) # In which side is [-2, -1] ###Output _____no_output_____ ###Markdown Hash Function with multiple planesIn the following section, we are going to define a hash function with a list of three custom planes in 2D. ###Code P1 = np.array([[1, 1]]) # First plane 2D P2 = np.array([[-1, 1]]) # Second plane 2D P3 = np.array([[-1, -1]]) # Third plane 2D P_l = [P1, P2, P3] # List of arrays. It is the multi plane # Vector to search v = np.array([[2, 2]]) ###Output _____no_output_____ ###Markdown The next function creates a hash value based on a set of planes. The output value is a combination of the side of the plane where the vector is localized with respect to the collection of planes.We can think of this list of planes as a set of basic hash functions, each of which can produce only 1 or 0 as output. 
###Code def hash_multi_plane(P_l, v): hash_value = 0 for i, P in enumerate(P_l): sign = side_of_plane(P,v) hash_i = 1 if sign >=0 else 0 hash_value += 2**i * hash_i return hash_value hash_multi_plane(P_l, v) # Find the number of the plane that containes this value ###Output _____no_output_____ ###Markdown Random PlanesIn the cell below, we create a set of three random planes ###Code np.random.seed(0) num_dimensions = 2 # is 300 in assignment num_planes = 3 # is 10 in assignment random_planes_matrix = np.random.normal( size=(num_planes, num_dimensions)) print(random_planes_matrix) v = np.array([[2, 2]]) ###Output _____no_output_____ ###Markdown The next function is similar to the `side_of_plane()` function, but it evaluates more than a plane each time. The result is an array with the side of the plane of `v`, for the set of planes `P` ###Code # Side of the plane function. The result is a matrix def side_of_plane_matrix(P, v): dotproduct = np.dot(P, v.T) sign_of_dot_product = np.sign(dotproduct) # Get a boolean value telling if the value in the cell is positive or negative return sign_of_dot_product ###Output _____no_output_____ ###Markdown Get the side of the plane of the vector `[2, 2]` for the set of random planes. ###Code sides_l = side_of_plane_matrix( random_planes_matrix, v) sides_l ###Output _____no_output_____ ###Markdown Now, let us use the former function to define our multiplane hash function ###Code def hash_multi_plane_matrix(P, v, num_planes): sides_matrix = side_of_plane_matrix(P, v) # Get the side of planes for P and v hash_value = 0 for i in range(num_planes): sign = sides_matrix[i].item() # Get the value inside the matrix cell hash_i = 1 if sign >=0 else 0 hash_value += 2**i * hash_i # sum 2^i * hash_i return hash_value ###Output _____no_output_____ ###Markdown Print the bucket hash for the vector `v = [2, 2]`. ###Code hash_multi_plane_matrix(random_planes_matrix, v, num_planes) ###Output _____no_output_____ ###Markdown NoteThis showed you how to make one set of random planes. You will make multiple sets of random planes in order to make the approximate nearest neighbors more accurate. Document vectorsBefore we finish this lab, remember that you can represent a document as a vector by adding up the word vectors for the words inside the document. In this example, our embedding contains only three words, each represented by a 3D array. ###Code word_embedding = {"I": np.array([1,0,1]), "love": np.array([-1,0,1]), "learning": np.array([1,0,1]) } words_in_document = ['I', 'love', 'learning', 'not_a_word'] document_embedding = np.array([0,0,0]) for word in words_in_document: document_embedding += word_embedding.get(word,0) print(document_embedding) ###Output _____no_output_____
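###Markdown A small follow-up sketch (an addition for illustration, not part of the original lab): the document embedding above lives in 3 dimensions, so we can bucket it with a fresh set of random planes of matching dimensionality, reusing the `hash_multi_plane_matrix` helper defined earlier. The plane values here are arbitrary random numbers chosen only for the example. ###Code np.random.seed(0)
planes_3d = np.random.normal(size=(num_planes, 3))           # 3 random planes in 3D
doc_vector = document_embedding.reshape(1, -1)               # shape (1, 3), as the helpers expect
hash_multi_plane_matrix(planes_3d, doc_vector, num_planes)   # bucket id for this document ###Output _____no_output_____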
Chapter12_NN/LogisticRegression/LogisticRegressionSklearn.ipynb
###Markdown Logistic Regression - Binary Classification Computes the probability that a sample is class 1 (positive). To get the probability of class 0 (negative), you can compute $P(y=\mathrm{class}_0) = 1 - P(y=\mathrm{class}_1)$ ###Code clf = LogisticRegression( max_iter=10_000, fit_intercept=True ) clf.fit(x, y) preds = clf.predict(x) score = clf.score(x, y) print(f"Score: {score}") plot(clf, x, y) ###Output _____no_output_____
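###Markdown A minimal sketch (added for illustration, assuming the fitted `clf` and the feature matrix `x` from the cell above): `predict_proba` returns one column per class, so the class-0 probability is exactly one minus the class-1 probability, matching the formula above. ###Code proba = clf.predict_proba(x)   # shape (n_samples, 2): columns are P(class 0) and P(class 1)
p_class1 = proba[:, 1]
p_class0 = 1.0 - p_class1      # identical to proba[:, 0]
p_class0[:5], p_class1[:5] ###Output _____no_output_____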
categorical-features/sklearn-ordinal-encoding-mixedtype-df.ipynb
###Markdown Ordinal Encoding in Scikit-Learn Defining some toy data - We start by defining some toy data here: ###Code import pandas as pd feature_1 = [ 1.1, 2.1, 3.1, 4.2, 5.1, 6.1, 7.1, 8.1, 1.2, 2.1, 3.1, 4.1 ] feature_2 = [ 'b', 'b', 'b', 'b', 'a', 'a', 'a', 'a', 'c', 'c', 'c', 'c' ] df = pd.DataFrame({'numerical': feature_1, 'categorical': feature_2}) df ###Output _____no_output_____ ###Markdown Ordinal Encoding - Usually, we use onehot encoding if we have categorical data without ordering information, so-called nominal data.- An example of such data is blood type (A, B, AB, or O) - Ordinal encoding is typically used if we have categorical data with ordering information.- One example of such data is T-shirt sizes (XS, S, M, L, or XL)- Now, assume that the "categorical" column above has ordered features; we can use the `OrdinalEncoder` to encode that: ###Code data = df['categorical'].values.reshape(-1, 1) data from sklearn.preprocessing import OrdinalEncoder ode = OrdinalEncoder( categories= [['a', 'b', 'c']] ) ode.fit_transform(data) ###Output _____no_output_____ ###Markdown - Notice that based on the alphabetical ordering the ordinal encoder assumes that `'a: 0 < b: 1 < c: 2'`. - If we want to change that and have an ordering assumption like `'b: 0 < a: 1 < c: 2'`, we can override the feature ordering via the `categories` attribute as follows: ###Code ode = OrdinalEncoder( categories= [['b', 'a', 'c']] ) ode.fit_transform(data) ###Output _____no_output_____ ###Markdown Using the `OrdinalEncoder` when other columns are present - Below is an example using a `ColumnTransformer` to transform only specific columns via the `OrdinalEncoder` when multiple columns are present.- For instance, considering the toy dataset at the top, assume we only want to transform the "categorical" column but not the "numerical" column: ###Code import sklearn from sklearn.compose import ColumnTransformer ohe = OrdinalEncoder() X = df.values categorical_features = [1] col_transformer = ColumnTransformer( transformers=[ ('cat', ohe, categorical_features)], remainder='passthrough' ) col_transformer.fit(df) X_t = col_transformer.transform(df) ###Output _____no_output_____ ###Markdown - Note that there are a few extra workaround like the `FloatTransformer()`, which are explained [here](sklearn-onehot-encoding-mixedtype-df.ipynb). ###Code X_t %load_ext watermark %watermark --iversions ###Output sklearn: 1.0.2 pandas : 1.4.0
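###Markdown As a quick illustration of the T-shirt-size example mentioned earlier (a toy sketch added here; the size data is made up): passing the categories in their natural order makes the encoder map XS to 0 and XL to 4. ###Code shirt_sizes = [['M'], ['XL'], ['XS'], ['S'], ['L'], ['M']]   # one categorical feature, 2D as sklearn expects
size_encoder = OrdinalEncoder(categories=[['XS', 'S', 'M', 'L', 'XL']])
size_encoder.fit_transform(shirt_sizes) ###Output _____no_output_____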
AudioNotebook/FFT Suite.ipynb
###Markdown FFT Transformations, Discretization, and Representation by Russel MendesA Fourier transform decomposes a function of time into its constituent frequencies. This is useful in analyzing an audio file for its frequencies, the power of those frequencies, and the time of those frequencies. This details the process of Fourier Transform on each unit of the song. We can then represent the data from these transformations for an artistic rendering. This notebook will help show the underlying process. Collecting DataSince FFT are a long and computationally heavy task. It is often prudent to save the data. Step 1) Load Dependencies and Song ###Code """ Author: Russel Mendes """ import numpy as np from numpy import fft as fft from scipy.io import wavfile import pickle pathToSong = 'CastleOfGlass.wav' samplingFrequency, signalData = wavfile.read(pathToSong) #samplingFrequency is the number of time the song has been sampled a second #signalData is the data that holds the audio data of the song channel1=signalData[:,0] #Some songs may have more than 1 channel ###Output _____no_output_____ ###Markdown Step 2) PreProcessing ###Code #Before the data of the song is sampled, we can premodify it depending our needs #For Example, setting the dilationFactor to 100. The amount of data points will increase by a factor #of 100. The REFRESH variable denotes how fast the data needs to replaced in seconds to match the #original speed of the song #dilationFactor = 1 is a default per second analysis dilationFactor = 100 n = int(samplingFrequency/dilationFactor) REFRESH = 1/dilationFactor extended = [channel1[i * n:(i + 1) * n] for i in range((len(channel1) + n - 1) // n )] print("Number of Data Sets in Data: " + str(len(extended))) print("Number of Data Points in Set: " + str(len(extended[0]))) print("RefreshRate: " + str(REFRESH)) print("EstimatedTime: " + str((len(extended))/(1/REFRESH))) ###Output Number of Data Sets in Data: 20551 Number of Data Points in Set: 441 RefreshRate: 0.01 EstimatedTime: 205.51 ###Markdown Step 3) Processing and Pickling Data ###Code pickledFourier = [] #For every data set in the extended set. We are conducting a FFT, which we are storing for later #pickling for data in extended: fourier=fft.fft(data) pickledFourier.append(fourier) name = 'CastleOfGlass_Of_%s_DataSsets_With_%s_DataPoints.p' % (len(extended),len(extended[0])) pickle.dump(pickledFourier, open( name, "wb" ) ) ###Output _____no_output_____ ###Markdown Plotting FFT Step 1) Import and Load Relevant Dependencies ###Code import matplotlib.pyplot as plt import numpy as np import math import pickle infile = open("CastleOfGlass_Of_206_DataSsets_With_44100_DataPoints.p",'rb') FFTrepo = pickle.load(infile) infile.close() ###Output _____no_output_____ ###Markdown Step 2) Plotting Continous FFTFor a given FFT in a the FFTrepo, this section will show how to plot continous FFT ###Code fourier = FFTrepo[200] #an arbitray data set samplingFrequency = 44100 #sampling Frequency from the Earlier Secion X = samplingFrequency fourier = fourier[0:math.floor(X/2)] #Reducing Redundency in the Dataset # scale by the number of points so that the magnitude does not depend on the length fourier = fourier / float(X) freqArray = np.arange(0, (X/2), 1.0) * (samplingFrequency*1.0/X) #Modify The DataSets so it becomes readible powerDB = 10*np.log10(fourier) freqArray = freqArray/1000 #Plotting the data plt.plot(freqArray, powerDB, color='#ff7f00', linewidth=0.02) plt.xlabel('Frequency (kHz)') plt.ylabel('Power (dB)') #Keep in mind. 
The data for each sound file is unique as compression plays a #factor in audio quality ###Output C:\Users\Russel\Anaconda3\lib\site-packages\numpy\core\numeric.py:538: ComplexWarning: Casting complex values to real discards the imaginary part return array(a, dtype, copy=False, order=order) ###Markdown Plotting Discrete FFTSince FFT is often continous and many discrete data points. Sometimes, reducing the data into smaller chunks is advisable. In otherwise, making the data even more discrete. This section will use the same values from above ###Code def ChunkAverage(ChunkyArray): """ Take an array of with subelements of samples, and average them into a discrete list. Params: ChunkyArray - an array with elements of arrays that contains data Returns: normArray - an average of all data elements ChunkyArray per element """ num_Chunks = len(ChunkyArray) normArray = [] for data in ChunkyArray: avgData = np.average(data) normArray.append(avgData) return normArray Partition = 100 #Reducing the data to 100 discrete data points dataPerChunk = len(freqArray) / Partition #FreqArray and powerDB are the same size n = int(dataPerChunk) freqArrayChunks = [freqArray[i * n:(i + 1) * n] for i in range((len(freqArray) + n - 1) // n )] powerDBChunk = [powerDB[i * n:(i + 1) * n] for i in range((len(powerDB) + n - 1) // n )] normFreqChunks = ChunkAverage(freqArrayChunks) normpowerDBChunk = ChunkAverage(powerDBChunk) plt.scatter(normFreqChunks,normpowerDBChunk ) ###Output C:\Users\Russel\Anaconda3\lib\site-packages\numpy\core\numeric.py:591: ComplexWarning: Casting complex values to real discards the imaginary part return array(a, dtype, copy=False, order=order, subok=True) ###Markdown Making a GIF representation of FFTSince the data from each second of a FFT can be transcribed. Many manipulations can be done with this data. For this program, a gif will be made about the change in frequencies per unit of time. But any manipulation of data can be done. Step 1) Import Dependencies ###Code import matplotlib.pyplot as plt import numpy as np import math import pickle from PIL import Image infile = open("CastleOfGlass_Of_206_DataSsets_With_44100_DataPoints.p",'rb') FFTrepo = pickle.load(infile) infile.close() ###Output _____no_output_____ ###Markdown Step 2) Load Helper Functions ###Code #This helps load the data into a useable per chunk format def createFreqArrayPowerDB(FFTrepo, index): """ Take a FFT repository of data and access a specific element at the given index Params: FFTrepo - an array of FFT data index - an integer to access an element Returns: freqArray - an array of frequencies powerDB - an array of DB points corresponding to a frequency point """ fourier = FFTrepo[index] #an arbitray data set samplingFrequency = 44100 #sampling Frequency from the Earlier Secion X = samplingFrequency fourier = fourier[0:math.floor(X/2)] #Reducing Redundency in the Dataset # scale by the number of points so that the magnitude does not depend on the length fourier = fourier / float(X) freqArray = np.arange(0, (X/2), 1.0) * (samplingFrequency*1.0/X) #Modify The DataSets so it becomes readible powerDB = 10*np.log10(fourier) freqArray = freqArray/1000 return freqArray, powerDB def ChunkAverage(ChunkyArray): """ Take an array of with subelements of samples, and average them into a discrete list. 
Params: ChunkyArray - an array with elements of arrays that contains data Returns: normArray - an average of all data elements ChunkyArray per element """ normArray = [] for data in ChunkyArray: avgData = np.average(data) normArray.append(avgData) return normArray #This function creates an average array with x amount of partitions def createNormFreqPower(freqArray, powerDB, Partition = 100): dataPerChunk = len(freqArray) / Partition #FreqArray and powerDB are the same size n = int(dataPerChunk) freqArrayChunks = [freqArray[i * n:(i + 1) * n] for i in range((len(freqArray) + n - 1) // n )] powerDBChunk = [powerDB[i * n:(i + 1) * n] for i in range((len(powerDB) + n - 1) // n )] normFreqChunks = ChunkAverage(freqArrayChunks) normPowerDBChunks = ChunkAverage(powerDBChunk) return normFreqChunks, normPowerDBChunks def RaiseValue(array): """ This function converts a complex array into simply a real array by removing the complex portion. It then takes the absolute value of the data Params: array - an array of data Returns: array - the arguement array but modified """ for i in range(len(array)): array[i] = np.abs(np.real(array[i])) return array def give_RGB(row, col, powerDB, Adjust): """ For a given pixel, adjustment factor, and DB value, return a RGB value based on a formula Params: row - row of the pixel col - col of the pixel powerDB - an array of discrete DB values Adjust - a scalar that adjusts the maximum height for the image Returns: R - an integer between 0-255 G - an integer between 0-255 B - an integer between 0-255 """ R = 0 G = 0 B = 0 if col <= len(powerDB): #Check if the col is valid under the powerDB array try: maxHeight = powerDB[col] pixelHeight = maxHeight * Adjust if(row < pixelHeight): R = math.sin(row * col*3.14) * 255 G = 0 B = 0 except: spacer = "" # print(col) return (int(R),int(G),int(B)) def createFrames(raisedFreqChunks, raisedPowerDBChunks, XDIM = 100, YDIM = 100, floor = False): """ This function takes an array of frequency and it corresponding power and few other parameters and creates an image. 
Params: raisedFreqChunks - an array of discrete frequencies raisedPowerChunks - an array of corresponding DB XDIM - x dimension of the image YDIM - y dimension of the image floor - flooring the image DEPRACTED Returns: new_image = a PIL image """ DATAPOINTS = len(raisedFreqChunks) THICKNESS = int(np.round(XDIM/DATAPOINTS)) img_array = [] img_array = np.empty((YDIM,XDIM,3), dtype=np.uint8) for row in range(YDIM): for col in range(XDIM): if floor: Width = math.floor(col/THICKNESS) else: Width = int(col/THICKNESS) img_array[row][col] = give_RGB(row, Width, raisedPowerDBChunks, 3) new_image = Image.fromarray(img_array) new_image = new_image.rotate(180) new_image = new_image.transpose(Image.FLIP_LEFT_RIGHT) return new_image def createGIF(pickleFile, showStep = False): """ Creates a GIF based on a pickleFile that contains FFT information of a song Saves the GIF locally to the file location of this project Params: pickleFile - a pickle file with FFT information showStep - true/false on whether to show the progress of the gif creation """ splitName = pickleFile.split('_') frames = [] infile = open(pickleFile,'rb') FFTrepo = pickle.load(infile) infile.close() for i in range(len(FFTrepo)): if showStep: print("STEP: " + str(i)) freqArray, powerDB = createFreqArrayPowerDB(FFTrepo, i) normFreqChunks, normPowerDBChunks = createNormFreqPower(freqArray, powerDB, 150) raisedFreqChunks = RaiseValue(normFreqChunks) raisedPowerDBChunks = RaiseValue(normPowerDBChunks) frames.append(createFrames(raisedFreqChunks, raisedPowerDBChunks, XDIM = 500, floor = True)) name = splitName[0]+".gif" frames[0].save(name, format='GIF', append_images=frames[1:], save_all=True, duration=205, loop=0) pickleFile = "CastleOfGlass_Of_206_DataSsets_With_44100_DataPoints.p" createGIF(pickleFile) ###Output _____no_output_____
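###Markdown One more hedged sketch of the kind of manipulation mentioned above (added for illustration; it assumes `FFTrepo` is loaded as in the plotting section and that each frame holds 44100 samples, as the pickle filename indicates): find the dominant frequency of a single analysis frame. ###Code fs = 44100                               # sampling rate used throughout this notebook
frame = np.abs(FFTrepo[100])             # magnitude spectrum of one frame
half = frame[: len(frame) // 2]          # keep the non-redundant positive half
peak_bin = int(np.argmax(half[1:])) + 1  # skip the DC component at index 0
peak_hz = peak_bin * fs / len(frame)     # bin index -> frequency in Hz
print(peak_hz) ###Output _____no_output_____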
report_and_src_codes/HW1/hw1_main_code_report.ipynb
###Markdown Utils functions ###Code def loadData(pathToDataset): dataset = arff.loadarff(pathToDataset) dataset = pd.DataFrame(dataset[0]) return dataset def loadColumn(dataset, colNum): column = dataset[colNum] column_NO = list(column[dataset[6] == 'Normal']) column_AB = list(column[dataset[6] == 'Abnormal']) return (column_NO, column_AB) def scatterPlotMatPlot(col_NO, col_AB): fig = plt.figure() ax1 = fig.subplots() x_NO = list(range(len(col_NO))) x_AB = list(range(len(col_AB))) ax1.scatter(x_NO, col_NO, c='b', marker="s", label='first') ax1.scatter(x_AB, col_AB, c='r', marker="o", label='second') plt.legend(loc='upper left'); plt.show() ax1.legend() ax1.grid(True) plt.grid() plt.show() def scatterPlotSNS(dataframe): g = sns.pairplot(dataframe, hue = 'class', palette = {b'Abnormal': 'b', b'Normal': 'g'}) def computeError(y_actual, y_pred): return (np.sum(abs(y_actual - y_pred))/len(y_actual)) def plotErrors(x, y, title, xlabel, ylabel): plt.plot(x, y) plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) ###Output _____no_output_____ ###Markdown Q1 a: Vertebral Column Data Set ###Code pathToCol2C_weka = '../Homework1_Data/vertebral_column_data/column_2C_weka.arff' dataset_2C = loadData(pathToCol2C_weka) dataset_2C.head() ###Output _____no_output_____ ###Markdown Q1 b i: Scatter Plots of the 6 Independent variables in the dataset**class 1- Abnormal: blue class 0 - Normal: green ** ###Code scatterPlotSNS(dataset_2C) ###Output _____no_output_____ ###Markdown Q1 b i: Box Plots of the 6 Independent variables in the dataset**class 1- Abnormal: blue class 0 - Normal: green ** ###Code sns.set(style="whitegrid", ) for x in range(0,6): ax = sns.boxplot(x = dataset_2C[dataset_2C.columns[6]] ,y =dataset_2C[dataset_2C.columns[x]]) plt.show() ###Output _____no_output_____ ###Markdown Q1 b iii: first 70 rows of Class 0 and the first 140 rows of Class 1 as the training set and the rest of the data as the test set ###Code class_zero = dataset_2C[dataset_2C['class'] == b'Normal'] class_one = dataset_2C[dataset_2C['class'] == b'Abnormal'] #Binarizing the dependent variable class_zero['class'] = label_binarize(list(class_zero['class']), classes = [b'Normal', b'Abnormal']) class_one['class'] = label_binarize(list(class_one['class']), classes = [b'Normal', b'Abnormal']) first70_class_zero = class_zero[0:70] first140_class_one = class_one[0:140] training_data = pd.concat((first70_class_zero, first140_class_one)) testing_data = pd.concat((class_zero[70:], class_one[140:])) testing_data.head() training_data.head() ###Output _____no_output_____ ###Markdown First 6 columns are independent variables, last column (class) is the dependent variable Split into X_Train, X_test, Y_train, Y_test ###Code X_train = training_data.drop(['class'], axis =1) X_test = testing_data.drop(['class'], axis =1) Y_train = training_data['class'].values Y_test_Actual = testing_data['class'].values ###Output _____no_output_____ ###Markdown Q1c i: Code for KNN with Euclidean metric (p = 2) and Nearest neighbors = 3 ###Code class knnclassifier(object): """This class performs KNN classifier and returns predicted value, errors""" def __init__(self, x_train, y_train, x_test, y_test, k_val, p_val): super(knnclassifier, self).__init__() self.k_val = k_val self.p_val = p_val self.x_train = x_train self.x_test = x_test self.y_train = y_train self.y_test = y_test self.knn = KNeighborsClassifier(n_neighbors = self.k_val, p = self.p_val) self.knn.fit(x_train, y_train) self.y_test_predicted = self.knn.predict(self.x_test) self.y_train_predicted = 
self.knn.predict(self.x_train) def get_y_test_pred(self): return self.y_test_predicted def get_y_train_pred(self): return self.y_train_predicted def computeErrorTest(self): return (np.sum(abs(self.y_test - self.y_test_predicted))/len(self.y_test)) def computeErrorTrain(self): return np.float128(np.sum(abs(self.y_train - self.y_train_predicted))/len(self.y_train)) ###Output _____no_output_____ ###Markdown Q1c ii: Plot train and test errors in terms of k for k ∈ {208, 205, . . . , 7, 4, 1, } ###Code listK = [] train_errors = [] test_errors = [] for k in range(208, 0, -3): knn1 = knnclassifier(X_train, Y_train, X_test, Y_test_Actual, k, 2) train_errors.append(knn1.computeErrorTrain()) test_errors.append(knn1.computeErrorTest()) listK.append(k) print("Best training error: {}".format(np.amin(train_errors))) plotErrors(listK, test_errors, 'Test Errors vs No. of Nearest neighbours', 'no. of neighbours' ,'Test Errors') ###Output _____no_output_____ ###Markdown The test error after a few fluctuations initially, in general increases with number of neighbours and remains constant after no. of neighbors goes above 140. Min test error is observed at around k = 4 ###Code plotErrors(listK, train_errors, 'Train Errors vs No. of Nearest neighbours', 'no. of neighbours' ,'Train Errors') ###Output _____no_output_____ ###Markdown The train error has less fluctuations in comparison to test error, and in general increases with number of neighbours and remains constant after no. of neighbors goes above 140. Min test error is observed at around k = 4 Q1c ii: k* = most suitable k = k with minimum error for k ∈ {208, 205, . . . , 7, 4, 1, } ###Code kStar = listK[np.where(np.array(test_errors) == np.amin(np.array(test_errors)))[0][0]] print("The best k = k* is: {}".format(kStar)) ###Output The best k = k* is: 4 ###Markdown Q1c ii: Print out the True Positive rate, True Negative rate, precision and F-1 score for k* = 4 ###Code knn = knnclassifier(X_train, Y_train, X_test, Y_test_Actual, 4, 2) Y_test_Predicted = knn.get_y_test_pred() print("Train error for k_star is: {}".format(knn.computeErrorTrain())) pom = performanceMeasure(Y_test_Actual, Y_test_Predicted) TP, TN, FP, FN = pom.get_performance_measure() F1_score = pom.get_F1score() PPV = pom.PPV errorRate = pom.get_errorRate() print("The confusion matrix is: {}".format(pom.get_confusion_matrix())) print("The True Positive rate is: {}".format(TP)) print("The True Negative rate is: {}".format(TN)) print("The precision is: {}".format(PPV)) print("The F-1 score is: {}".format(F1_score)) ###Output Train error for k_star is: 0.14285714285714285 The confusion matrix is: [[69, 5], [1, 25]] The True Positive rate is: 69 The True Negative rate is: 25 The precision is: 0.9324324324324325 The F-1 score is: 0.9583333333333333 ###Markdown * True positives are the correctly predicted positive values i.e. actual class value is 1 and predicted value is 1 as well* True negatives are the correctly predicted negative values i.e. actual class value is 0 and predicted values is 0.* False positives are when actual class is 0 and predicted value is 1* Precision is the ratio of correctly predicted positive observations to the total predicted positive observations. Here a high precision of 0.93 idicates low positive false value* F1 is usually more useful than accuracy, especially when we have an uneven class distribution. 
Here a F1 score of 0.95 indicates low False positives and False negatives Q1c iii: Plot of best test error rates, which is obtained by some value of k, against the size of training set, when the size of training set is N ∈ {10, 20, 30, . . . , 210} ###Code list_n = [] best_error_rate = [] for x in range(10, 220, 10): firstN_class_0 = class_zero[0:int(x/3)] firstN_3class_1 = class_one[0:int(2*x/3)] training_set = pd.concat((firstN_class_0, firstN_3class_1)) testing_set = pd.concat((class_zero[int(x/3):], class_one[int(2*x/3):])) X_train = training_set.drop(['class'], axis =1) X_test = testing_set.drop(['class'], axis =1) Y_train = training_set['class'].values Y_test_Actual = testing_set['class'].values listK = [] train_errors = [] test_errors = [] for k in range(1, x+1-5, 5): knn = knnclassifier(X_train, Y_train, X_test, Y_test_Actual, k, 2) Y_test_Predicted = knn.get_y_test_pred() test_errors.append(knn.computeErrorTest()) best_error_rate.append(np.amin(np.array(test_errors))) list_n.append(x) plotErrors(list_n, best_error_rate, 'Learning curve', 'Training data sample size' ,'Best Train Errors') ###Output _____no_output_____ ###Markdown Training curve is decreasing with increase in training sample size because higher samples and higher number of samples available to group together. ###Code X_train = training_data.drop(['class'], axis =1) X_test = testing_data.drop(['class'], axis =1) Y_train = training_data['class'].values Y_test_Actual = testing_data['class'].values ###Output _____no_output_____ ###Markdown Q1d i A: Best k for Manhattan distance (p = 1) ###Code listK = [] train_errors = [] test_errors = [] k_dict = {} for k in range(1, 197, 5): knn = knnclassifier(X_train, Y_train, X_test, Y_test_Actual, k, 1) Y_test_Predicted = knn.get_y_test_pred() k_dict[k] = np.float128(knn.computeErrorTest()) train_errors.append(knn.computeErrorTrain()) print("Best training error: {}".format(np.amin(train_errors))) best_k_manhattan = min(k_dict.keys(), key = (lambda k: k_dict[k])) print("Best k for logp Distance: {}".format(best_k_manhattan)) print("Test error when k = k* is: {}".format(k_dict[best_k_manhattan])) ###Output Best k for logp Distance: 1 Test error when k = k* is: 0.11 ###Markdown Q1d i B: Best k for log(p) ∈ {0.1, 0.2, 0.3, . . . 
, 1} ###Code powers = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.8, 0.9, 1] best_train_errors = [] k_list = [6] best_p = [] log_p_dict = {} best_p_dict = {} for k in k_list: train_errors = [] for p_dash in powers: knn = knnclassifier(X_train, Y_train, X_test, Y_test_Actual, k, 10**p_dash) Y_test_Predicted = knn.get_y_test_pred() log_p_dict[p_dash] = np.float128(knn.computeErrorTest()) train_errors.append(knn.computeErrorTrain()) best_log_p = min(log_p_dict.keys(), key = (lambda k: log_p_dict[k])) best_p_dict[k] = log_p_dict[best_log_p] best_p.append(best_log_p) best_train_errors.append(train_errors) print("Best training error: {}".format(np.amin(best_train_errors))) best_log_p = min(best_p_dict.keys(), key = (lambda k: best_p_dict[k])) print("Best k for logp Distance: {}".format(best_log_p)) print("Test error when k = k* is: {}".format(k_dict[best_log_p])) ###Output Best k for logp Distance: 6 Test error when k = k* is: 0.11 ###Markdown Q1d i C: Best k for Chebyshev Distance with p → ∞ ###Code listK = [] best_train_errors = [] test_errors = [] k_dict = {} x = 10 for k in range(1, 197, 5): x = 10 train_errors = [] for p_dash in range(10,1000000,100000): knn = knnclassifier(X_train, Y_train, X_test, Y_test_Actual, k, p_dash) test_errors.append(knn.computeErrorTest()) train_errors.append(knn.computeErrorTrain()) k_dict[k] = np.amin(np.array(test_errors)) best_train_errors.append(np.amin(train_errors)) print("Best training error: {}".format(np.amin(best_train_errors))) best_k_cheby = min(k_dict.keys(), key = (lambda k: k_dict[k])) print("Best k for Chebyshev Distance: {}".format(best_k_cheby)) print("Test error when k = k* is: {}".format(k_dict[best_k_cheby])) ###Output Best k for Chebyshev Distance: 6 Test error when k = k* is: 0.09 ###Markdown Q1d ii: Best k for Mahalanobis Distance ###Code listK = [] train_errors = [] covTrain = np.cov(X_train, rowvar = False) k_dict = {} x = 10 for k in range(1, 197, 5): x = 10 knn = KNeighborsClassifier(n_neighbors = k, metric = 'mahalanobis', metric_params = {'VI': covTrain}) knn.fit(X_train, Y_train) Y_test_Predicted = knn.predict(X_test) y_train_predicted = knn.predict(X_train) k_dict[k] = computeError(Y_test_Predicted, Y_test_Actual) train_errors.append(computeError(y_train_predicted, Y_train)) print("Best training error: {}".format(np.amin(train_errors))) best_k_mahala = min(k_dict.keys(), key = (lambda k: k_dict[k])) print("Best k for Mahalanobis Distance: {}".format(best_k_mahala)) print("Test error when k = k* is: {}".format(k_dict[best_k_mahala])) ###Output Best k for Mahalanobis Distance: 6 Test error when k = k* is: 0.08 ###Markdown Final Table summarizing test errors | Method Type | K = K* | Test Error | | :--- | :----: | ---: | | Manhattan Distance | 1 | 0.11 | | | | | | logp for Manhattan | 6 | 0.11 | | Chebyshev Distance | 6 | 0.09 | | Mahalanobis Distance | 6 | 0.08 | We observe least error in Mahalanobis distance because it measures the distance between a point and a distribution. It is effectively a multivariate equivalent of the Euclidean distance. It is a more robust distance metric that is an accurate representation of how distant a point is from a distribution. 
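For reference (an added note with the standard definition): with $S$ the covariance matrix estimated from the training features, the Mahalanobis distance between two points is $d(x, y) = \sqrt{(x - y)^{T} S^{-1} (x - y)}$; when $S$ is the identity matrix this reduces to the ordinary Euclidean distance, which is why it can be read as a covariance-aware generalization of it.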
Q1 e: Best k for Euclidean when weight of each point in voting is inversely proportional to its distance from the query/test data point ###Code listK = [] train_errors = [] test_errors = [] k_dict = {} for k in range(1, 197, 5): knn = KNeighborsClassifier(n_neighbors = k, p = 2,weights = 'distance') knn.fit(X_train, Y_train) Y_test_Predicted = knn.predict(X_test) y_train_predicted = knn.predict(X_train) k_dict[k] = np.float128(computeError(Y_test_Predicted, Y_test_Actual)) train_errors.append(computeError(y_train_predicted, Y_train)) print("Best training error: {}".format(np.amin(train_errors))) best_k_euc = min(k_dict.keys(), key = (lambda k: k_dict[k])) print("Best k for Euclidean with inversely proportional voting: {}".format(best_k_euc)) print("Test error when k = k* is: {}".format(k_dict[best_k_euc])) ###Output Best k for Euclidean with inversely proportional voting: 6 Test error when k = k* is: 0.1 ###Markdown Q1 e: Best k for Manhattan when weight of each point in voting is inversely proportional to its distance from the query/test data point ###Code listK = [] train_errors = [] test_errors = [] k_dict = {} for k in range(1, 197, 5): knn = KNeighborsClassifier(n_neighbors = k, p = 1,weights = 'distance') knn.fit(X_train, Y_train) Y_test_Predicted = knn.predict(X_test) y_train_predicted = knn.predict(X_train) k_dict[k] = np.float128(computeError(Y_test_Predicted, Y_test_Actual)) train_errors.append(computeError(y_train_predicted, Y_train)) print("Best training error: {}".format(np.amin(train_errors))) best_k_manhattan = min(k_dict.keys(), key = (lambda k: k_dict[k])) print("Best k for Manhattan with inversely proportional voting: {}".format(best_k_manhattan)) print("Test error when k = k* is: {}".format(k_dict[best_k_manhattan])) ###Output Best k for Manhattan with inversely proportional voting: 26 Test error when k = k* is: 0.1 ###Markdown Q1 e: Best k for Chebyshev when weight of each point in voting is inversely proportional to its distance from the query/test data point ###Code listK = [] best_train_errors = [] test_errors = [] k_dict = {} x = 10 for k in range(1, 197, 5): x = 10 train_errors = [] for p_dash in range(10,1000000,100000): knn = KNeighborsClassifier(n_neighbors = k, p = p_dash, weights = 'distance') knn.fit(X_train, Y_train) Y_test_Predicted = knn.predict(X_test) y_train_predicted = knn.predict(X_train) test_errors.append(computeError(Y_test_Predicted, Y_test_Actual)) train_errors.append(computeError(y_train_predicted, Y_train)) k_dict[k] = np.amin(np.array(test_errors)) best_train_errors.append(np.amin(train_errors)) print("Best training error: {}".format(np.amin(best_train_errors))) best_k_cheby = min(k_dict.keys(), key = (lambda k: k_dict[k])) print("Best k for Chebyshev with inversely proportional voting: {}".format(best_k_cheby)) print("Test error when k = k* is: {}".format(k_dict[best_k_cheby])) ###Output Best k for Chebyshev with inversely proportional voting: 1 Test error when k = k* is: 0.11
Python-Programming/Python-3-Bootcamp/02-Python Statements/.ipynb_checkpoints/05-Useful-Operators-checkpoint.ipynb
###Markdown Useful OperatorsThere are a few built-in functions and "operators" in Python that don't fit well into any category, so we will go over them in this lecture, let's begin! rangeThe range function allows you to quickly *generate* a list of integers, this comes in handy a lot, so take note of how to use it! There are 3 parameters you can pass, a start, a stop, and a step size. Let's see some examples: ###Code range(0,11) ###Output _____no_output_____ ###Markdown Note that this is a **generator** function, so to actually get a list out of it, we need to cast it to a list with **list()**. What is a generator? Its a special type of function that will generate information and not need to save it to memory. We haven't talked about functions or generators yet, so just keep this in your notes for now, we will discuss this in much more detail in later on in your training! ###Code # Notice how 11 is not included, up to but not including 11, just like slice notation! list(range(0,11)) list(range(0,12)) # Third parameter is step size! # step size just means how big of a jump/leap/step you # take from the starting number to get to the next number. list(range(0,11,2)) list(range(0,101,10)) ###Output _____no_output_____ ###Markdown enumerateenumerate is a very useful function to use with for loops. Let's imagine the following situation: ###Code index_count = 0 for letter in 'abcde': print("At index {} the letter is {}".format(index_count,letter)) index_count += 1 ###Output At index 0 the letter is a At index 1 the letter is b At index 2 the letter is c At index 3 the letter is d At index 4 the letter is e ###Markdown Keeping track of how many loops you've gone through is so common, that enumerate was created so you don't need to worry about creating and updating this index_count or loop_count variable ###Code # Notice the tuple unpacking! for i,letter in enumerate('abcde'): print("At index {} the letter is {}".format(i,letter)) ###Output At index 0 the letter is a At index 1 the letter is b At index 2 the letter is c At index 3 the letter is d At index 4 the letter is e ###Markdown zipNotice the format enumerate actually returns, let's take a look by transforming it to a list() ###Code list(enumerate('abcde')) ###Output _____no_output_____ ###Markdown It was a list of tuples, meaning we could use tuple unpacking during our for loop. This data structure is actually very common in Python , especially when working with outside libraries. You can use the **zip()** function to quickly create a list of tuples by "zipping" up together two lists. ###Code mylist1 = [1,2,3,4,5] mylist2 = ['a','b','c','d','e'] # This one is also a generator! 
We will explain this later, but for now let's transform it to a list zip(mylist1,mylist2) list(zip(mylist1,mylist2)) ###Output _____no_output_____ ###Markdown To use the generator, we could just use a for loop ###Code for item1, item2 in zip(mylist1,mylist2): print('For this tuple, first item was {} and second item was {}'.format(item1,item2)) ###Output For this tuple, first item was 1 and second item was a For this tuple, first item was 2 and second item was b For this tuple, first item was 3 and second item was c For this tuple, first item was 4 and second item was d For this tuple, first item was 5 and second item was e ###Markdown in operatorWe've already seen the **in** keyword during the for loop, but we can also use it to quickly check if an object is in a list ###Code 'x' in ['x','y','z'] 'x' in [1,2,3] ###Output _____no_output_____ ###Markdown min and maxQuickly check the minimum or maximum of a list with these functions. ###Code mylist = [10,20,30,40,100] min(mylist) max(mylist) ###Output _____no_output_____ ###Markdown randomPython comes with a built-in random library. There are a lot of functions included in this random library, so we will only show you two useful functions for now. ###Code from random import shuffle # This shuffles the list "in-place" meaning it won't return # anything, instead it will affect the list passed shuffle(mylist) mylist from random import randint # Return random integer in range [a, b], including both end points. randint(0,100) # Return random integer in range [a, b], including both end points. randint(0,100) ###Output _____no_output_____ ###Markdown input ###Code input('Enter Something into this box: ') ###Output Enter Something into this box: great job!
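A small follow-up sketch (not in the original notebook) that combines enumerate and zip from the sections above to walk two lists in parallel with an index.

names = ['a', 'b', 'c']
scores = [10, 20, 30]
for i, (name, score) in enumerate(zip(names, scores)):
    # prints the position, then the paired items from both lists
    print('index {}: {} -> {}'.format(i, name, score))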
04_busquedas_con_xpath.ipynb
###Markdown [![img/pythonista.png](img/pythonista.png)](https://www.pythonista.io) Searching for elements with *XPath*. It is possible to identify elements inside an *HTML* document by running *XPath* commands from *Javascript*. The *XPath* specification. [*XPath*](https://www.w3.org/TR/xpath/all/) is a *W3C* specification that applies to *XML* and *HTML* documents. It can identify elements inside such documents through a language able to traverse a structure along paths whose origin is a "root node", from which the other nodes branch out much like the branches of a tree from its trunk. Before *HTML5*, the *HTML* specification was defined as a derivative of *XML*, and even though *HTML5* no longer conforms fully to the *XML* specification, it keeps many of its characteristics and features. Components of a structure according to *XPath*. The *XPath* specification can identify the following nodes in an *XML* document:* Elements.* Attributes.* Text.* Comments.* Document nodes.* Namespaces.* Processing instructions. In the case of *HTML*, the ones used primarily are:* Elements.* Attributes.* Text.* Comments.* Document nodes. https://www.w3schools.com/xml/xpath_syntax.asp *XPath* path expressions. *XPath* search paths can be built from the following expressions.* ```nodename```: searches for the nodes with the specified name starting from the current position.* ```/``` searches from the root. * ```//``` searches all nodes from the root. *XPath* in the *Firefox* console. ###Code %%javascript document.evaluate('//@id', document, null, XPathResult.ANY_TYPE, null); $x(".//header") ###Output _____no_output_____
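A hedged Python sketch (not part of the notebook, which runs its XPath in the browser) showing the same kinds of queries with lxml, assuming the lxml package is available; the HTML snippet is made up for illustration.

from lxml import html

doc = html.fromstring("<html><body><header id='top'>Hello</header></body></html>")
print(doc.xpath('//@id'))            # all id attributes -> ['top']
print(doc.xpath('//header/text()'))  # text of every header element -> ['Hello']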
Chapter2_Type.ipynb
###Markdown ![alt text for screen readers](UTCCLogo.png "University of the Thai Chamber of Commerce") Chapter 2 : Python type of data Variable Name* ตัวอักษรแรกต้องเป็นตัวอักขระ ห้ามเป็นตัวเลข* สามารถใช้ _ เป็นอักษรตัวแรก* ห้ามมีอักขระพิเศษ ! @ $* อักษรตัวใหญ่-ตัวเล็กมีผลต่อชื่อตัวแปร* ใช้ภาษาไทยได้ ( แต่ไม่แนะนำ )* ไม่ใช่คำสงวนของภาษา Python เช่นคำว่า lambda, return, continue**ตัวอย่าง** Variable Name ที่ถูกต้องในภาษา Python* ให้ X เป็นตัวแปรประเภทตัวเลข เก็บค่าจำนวนเต็ม 10,000* ให้ Com_Eng เป็นตัวแปรประเภทตรรกะ เก็บค่าความจริง True* ให้ _Name เป็นตัวแปรประเภทข้อความเก็บชื่อผู้ใช้งาน* ให้ _Last_Name_ เป็นตัวแปรประเภทข้อความเก็บนามสกุลผู้ใช้งาน* ให้ Lambda เป็นตัวแปรประเภทตัวเลข เก็บค่าทศนิยม 1.45* ให้ Return เป็นตัวแปรประเภทตัวเลข เก็บค่าทศนิยม 10.15 ###Code # กำหนดค่าให้แก่ตัวแประแต่ละตัว X = 10000 Comp_Eng = True _Name = "Akara" _Last_Name_ = "Akara Kijkarncharoensin" ชื่อ_นามสกุล = _Name + _Last_Name_ Lambda = 1.45 # คำสงวนของภาษา Python ใช้ lambda เป็นอักษรตัวเล็กท้งหมด Return = 10.15 # คำสงวนของภาษา Python ใช้ return เป็นอักษรตัวเล็กทั้งหมด ###Output _____no_output_____ ###Markdown **ตัวอย่าง** Variable Name ที่ไม่ถูกหลักเกณฑ์ของภาษา Pythonหมายเหตุ : Code ในส่วนนี้ถูกทำให้เป็น Block comment ไว้ หาก run โดยไม่เปลี่ยนให้เป็น Comment จะเกิด Error ขึ้น ###Code # ตัวอย่างขอชื่อตัวแปรที่ไม่เป็นไปตามหลักเกณฑ์ ''' @LineID = "Python101" # มีอักขระพิเศษ @ 1stYear = "2021" # ขึ้นต้นด้วยตัวเลข #Name = "Akara" # ใช้อักขระพิเศษ # จึงกลายเป็น Comment Line Last Name = "Kijkarncharoensin" # มีการเว้นวรรค ชื่อ-นามสกุล = #Name + Last Name # ใช้อักขระพิเศษ - lambda = 1.45 # คำสงวนของภาษา Python return = 10.15 # คำสงวนของภาษา Python ''' ###Output _____no_output_____ ###Markdown **ตัวอย่าง** ทดสอบว่าชื่อตัวแปรที่สร้างถูกต้องตามภาษา Python หรือไม่ ? ###Code print( "X = ", X) print("Comp_Eng = ", Comp_Eng) print("_Name =", _Name) print("ชื่อ_นามสกุล =", ชื่อ_นามสกุล) print("Lambda =", Lambda) print("Return = ", Return) ###Output _____no_output_____ ###Markdown Numeric Data Type* สามารถกำหนดตัวเลขให้แก่ตัวแปร - ไม่สามารถใช้เครื่องหมาย , เช่น X = 1,000 - สามารถใช้เครื่องหมาย _ แทนได้ เช่น X = 1_000 **ตัวอย่าง** ตัวแปรประเภทตัวเลขในภาษา Python* ให้ X เป็นจำนวนเต็มเท่ากับ 123* ให้ Price เป็นข้อมูลตัวเลขทศนิยมมีค่าเท่ากับ 100.50* ให้ Tax เป็นข้อมูลตัวเลขทศนิยมมีเค่าเท่ากับ 12.%* ให้ Total เป็นข้อมูลตัวเลขทศนิยมมีค่าเท่ากับผลรวมของ Price และ Tax ###Code # ตัวอย่างตัวแปรประเภท Numeric X = 123 Price = 100.5 Tax = 12.5/100 Total = Price + Tax print(X) print(Price) print(Tax) print(Total) ###Output _____no_output_____ ###Markdown * ฟังก์ชั่นสำคัญของตัวแปรประเภท Numeric - abs( ข้อมูล ) : หาค่า Absolute ของข้อมูลตัวเลข - float( ข้อมูล ) : แปลงข้อมูลตัวเลขให้เป็นตัวเลขทศนิยม - int( ข้อมูล ) : แปลงข้อมูลตัวเลขให้เป็นเลขจำนวนเต็ม - max( ชุดข้อมูล ) : หาค่าสูงสุดของชุดตัวเลขที่กำหนดให้ - min( ชุดข้อมูล ) : หาค่าต่ำสุดของชุดตัวเลขที่กำหนดให้ **ตัวอย่าง** การใช้ฟังก์ชั่นกับตัวแปรแบบตัวเลข ###Code numInt = -123 Xabs = abs(numInt) Xfloat = float(Xabs) Xint = int(Xfloat) numMax = max( numInt, Xabs, Xfloat ) numMin = min( numInt, Xabs, Xfloat ) print( 'Abosolute value = ', str(Xabs)) print('Floating value = ', str(Xfloat)) print('Integer value = ', str(Xint)) print('Maximum value = ', str(numMax)) print('Minimum value = ',str(numMin)) ###Output _____no_output_____ ###Markdown * การจัด Format สำหรับแสดงผล - , : แสดงเครื่องหมายจุลภาค ( , ) ในหลักพัน - .?f : ให้แสดงผลเป็นเลขทศนิยมจำนวนเท่ากับ ? - .?% : ให้แสดงผลเป็นเปอร์เซ็นต์จำนวนทศนิยมเท่ากับ ? 
**ตัวอย่าง** กำหนดรูปแบบการแสดงผลของตัวเลข* ให้ TotalPrice แทนราคาสินค้าที่รวมภาษีแล้วมีค่าเท่ากับ 2500.1234 ###Code # กำหนดรูปแบบการแสดงผลของข้อมูลตัวเลข TotalPrice = 2500.1234 # แสดงเครื่องหมายจุลภาคในหลักพัน print( 'TotalPrice = ' , format( TotalPrice , ',' ) ) # แสดงผลเฉพาะทศนิยมสองตำแหน่ง print('TotalPrice = ', format(TotalPrice,'.2f') ) # แสดงผลด้วยทศนิยมสองตำแหน่งและใช้จุลภาคที่หลักพัน print('TotalPrice = ', format(TotalPrice,',.2f') ) # แสดงผลรูปแบบเปอร์เซ็นต์ print('TotalPrice = ', format(TotalPrice,',.2%') ) ###Output _____no_output_____ ###Markdown String Data Type* เป็นการเอาอักขระหลายๆตัวมาต่อกันเป็นข้อความ* อยู่ภายใต้เครื่องหมาย Single quotes ('…') หรือ Double quotes ("…")* การเชื่อมต่อ String สามารถทำได้ด้วยเครื่องหมาย " + "**ตัวอย่างเช่น** * Name = 'fired eggs'* Message = "Don’t running"* Text = "Yes, they do"* Hello = 'You said "Hello World ! สวัสดีชาวโลก!" '* Sentence = Message + ". " + Text ###Code # สร้างตัวแปรประเภท String สำหรับ ภาษา Python Name = 'fired eggs' Message = "Don’t running" # วาง Single Quote อยู่ในภายใน Double Quote สองอัน Text = "Yes, they do" Hello = 'You said "Hello World ! สวัสดีชาวโลก!" ' # วาง Double Quote ทั้งหมดให้อยู่ภายใต้เครื่องหมาย Single Quote สองอัน Sentence = Message + ". " + Text # เชื่อมต่อ String สองตัว print(Name) print(Message) print(Text) print(Hello) print(Sentence) ###Output _____no_output_____ ###Markdown List Data Type* รวบรวมข้อมูลต่างๆ รวมเข้าเป็นกลุ่ม* ชนิดของข้อมูลสามารถแตกต่างกันได้* นำข้อมูลทั้งหมดบรรจุภายใต้เครื่องหมาย [ ] แบ่งแยกแต่ละข้อมูลด้วยเครื่องหมาย ,* การอ้างถึงข้อมูลทำได้โดยใช้ Index ( เริ่มต้นที่ 0 )**ตัวอย่างเช่น*** NumList = [ 1, 2, 3 ]* DataList = [10.0, “Name”, True ]* print( NumList[0] ,NumList[1], NumList[2] ) ผลลัพธ์คือ 1 2 3 ###Code NumList = [ 1, 2, 3 ] DataList = [10.0, "Name", True ] print( NumList[0] ,NumList[1], NumList[2] ) # ผลลัพธ์คือ 1 2 3 ###Output _____no_output_____ ###Markdown * Method สำคัญของตัวแปรประเภท List - .append( ข้อมูล ) : เพิ่มสมาชิกใหม่ 1 รายการต่อท้าย List - .count( ข้อมูล ) : นับจำนวนสมาชิกทั้งหมดที่มีค่าตรงกับข้อมูลที่ระบุ - .index( ข้อมูล ) : คืนค่า Index ของสมาชิกที่ใน List ซึ่งมีค่าตรงกับข้อมูลที่ระบุ - .remove( ข้อมูล ) : นำข้อมูลที่ระบุออกจาก List - .clear() : ลบสมาชิกทั้งหมดของ List - .sort() : เรียงลำดับสมาชิกจากน้อยไปมาก ###Code # List เริ่มต้น NumList = [ 1, 2, 3 ] DataList = [10.0, "Name", True ] print('Original NumList = ', NumList) print('Original DataList = ', DataList) # เพิ่มสมาชิกต่อท้าย List NumList.append( 1 ) print('After appen 1 member = ', NumList) # นับจำนวนสมาชิกที่่มีค่าเท่ากับ 1 print('Number of 1 = ', NumList.count(1)) # หมายเลข Index ของสมาชิกที่มีค่าเท่ากับ 1 2 3 print('Index of 1 = ', NumList.index(1)) print('Index of 2 = ', NumList.index(2)) print('Index of 3 = ', NumList.index(3)) print('2nd Index of 1 = ', NumList.index(1,2)) # หมายเลข Index ของสมาชิกที่เท่ากับ 1 แต่เป็นตัวที่ 2 ###Output _____no_output_____ ###Markdown * ฟังก์ชั่นสำคัญของตัวแปรประเภท List - len() : นับจำนวนสมาชิกทั้งหมดใน List - max() : สมาชิกที่มีค่ามากที่สุด - min() : สมาชิกที่มีค่าน้อยที่สุด ###Code # List เริ่มต้น NumList = [ 1, 2, 3 ] print('The List Data :', NumList) print( 'Length = ', len(NumList) ) print('Maximum = ',max(NumList) ) print('Minimum = ',min(NumList) ) print("") # กรณีที่ List เก็บ Data ที่หลากหลาย DataList = [10.0, "Name", True ] print('The List Data :', DataList) print( 'Length = ', len(DataList) ) print("") # print('Maximum = ',max(DataList) ) # เกิด Error เพราะ มี String บนอยุ่ในข้อมูล DataList2 = [10.0, False, True, 0 ] print('The List Data :', 
DataList2) print( 'Length = ', len(DataList2) ) print('Maximum = ',max(DataList2) ) print('Minimum = ',min(DataList2) ) print('Flase = 0 ? :', False == 0) # ค่า False ของภาษา Python มีค่าเท่ากับ 0 ###Output _____no_output_____ ###Markdown Tuple Data Type* ใช้สำหรับเก็บข้อมูลหลากหลายชนิดไว้ในตัวแปร เช่นเดียวกับ List* จัดเก็บข้อมูลไว้ในตัวแปรแบบ Tuple ว้าภายใต้เครื่องหมาย ( ) และใช้ index ในการอ้างถึง* หลังจากสร้างแล้ว ไม่สามารถเปลี่ยนค่าสมาชิก หรือ ลำดับของแต่ละสมาชิกได้อีก**ตัวอย่าง*** Holiday = (‘Sunday’, ‘Saturday’)* Location = (‘Europe’, ’Japan’, ’USA’)* Money = (20, 50, 100, 500, 1000)* Note = tuple( [ 20, 50, 100, 500, 1000 ] )* print( Note(0), Note(4) ) ผลลัพธ์คือ 20 1000 ###Code # ตัวอย่างการสร้างตัวแปรประเภท Tuple Holiday = ('Sunday','Saturday') Location = ('Europe', 'Japan', 'USA') Money = (20, 50, 100, 500, 1000) Note = tuple( [ 20, 50, 100, 500, 1000 ] ) # ฟังก์ชั่น tuple สร้างตัวแปรประเภท tuple จากตัวแปร List ได้ print( Note[0], Note[4] ) # ผลลัพธ์คือ 20 1000 # เปลี่ยนข้อมูล List เป็นข้อมูลแบบ Tuple ColorList = ['Red','Green','Blue'] ColorTuple = tuple( ColorList ) print(ColorList) print(ColorTuple) # เปลี่ยนข้อมูล Tuple เป็นข้อมูลแบบ List ด้วยฟังก์ชั่น list() print( list(ColorTuple) ) ###Output _____no_output_____ ###Markdown * เนื่องจากสมาชิกของ Tuple ไม่สามารถเปลี่ยนแปลงได้ ดังนั้นฟังกชั่นที่เคยใช้จัดการกับสมาชิกอย่างที่ใช้กับตัวแปรประเภท List จะไม่สามารถใช้กับ Tuple ได้* Method สำคัญของตัวแปรประเภท Tuple - .count( ข้อมูล ) : นับจำนวนสมาชิกทั้งหมดที่มีค่าตรงกับข้อมูลที่ระบุ - .index( ข้อมูล ) : คืนค่า Index ของสมาชิกที่ใน Tuple ซึ่งมีค่าตรงกับข้อมูลที่ระบุ ###Code # ตัวอย่างการใช้ Method ของตัวแปรประเภท Tuple DataTuple = (1,2,1,2,3,1,2,3,4) print('DataTyple.count( 1 ) = ',DataTuple.count(1) ) print('DataTyple.index( 1 ) = ',DataTuple.index(1) ) print('DataTyple.index( 1,2 ) = ',DataTuple.index(1,2) ) print('DataTyple.index( 1,3 ) = ',DataTuple.index(1,3) ) ###Output _____no_output_____ ###Markdown * ฟังก์ชั่นสำคัญของตัวแปรประเภท Set - len() : นับจำนวนสมาชิกทั้งหมดใน Set - max() : สมาชิกที่มีค่ามากที่สุด - min() : สมาชิกที่มีค่าน้อยที่สุด ###Code # ตัวอย่างการใช้ฟังก์ชั่นกับตัวแปรประเภท Tuple DataTuple = (1,2,1,2,3,1,2,3,4) print('len( DataTuple ) = ',len( DataTuple) ) print('max( DataTuple ) = ',max( DataTuple) ) print('min( DataTuple ) = ',min( DataTuple) ) ###Output _____no_output_____ ###Markdown Set Data Type* ใช้เก็บข้อมูลหลากหลายประเภทแบบไม่ซ้ำกันภายใต้เครื่องหมาย { }* ถ้าข้อมูลซ้ำ จะนำมาเป็นสมาชิกแค่ครั้งเดียว* ไม่มีลำดับของข้อมูล**ตัวอย่างเช่น*** Fruit = { ‘apple’, ‘orange’, ‘banana’ }* Color = {‘Red’, ‘Green’, ‘Blue’}* print( ‘orange’ in Fruit ) ได้ค่า True เพราะ orange เป็นสมาชิกของ Fruit* print( Color[0], Color[1], Color[2]) เกิด Error เพราะตัวแปรประเภท Set ไม่มีลำดับ ###Code # ตัวอย่างการสร้างข้อมูลประเภท set Fruit = { 'apple', 'orange', 'banana' } Color = {'Red', 'Green', 'Blue'} # ตัวอย่างการใช้งานข้อมูล set print( 'orange' in Fruit ) # ได้ค่า True เพราะ orange เป็นสมาชิกของ Fruit # ข้อมูล set ไม่สามารถเข้าถึงด้วย index print( Color[0], Color[1], Color[2]) # เกิด Error เพราะตัวแปรประเภท Set ไม่มีลำดับ ###Output _____no_output_____ ###Markdown * Method สำคัญของตัวแปรประเภท Set - .add( ข้อมูล ) : เพิ่มสมาชิกใหม่ 1 รายการ - .remove( ข้อมูล ) : นำข้อมูลที่ระบุออกจาก Set - .clear() : ลบสมาชิกทั้งหมดของ Set ###Code # ตัวอย่างการใช้ Method ของข้อมูลประเภท set Fruit = { 'apple', 'orange', 'banana' } print(Fruit) # เพิ่มสมาชิกใหม่ให้แก่ตัวแปร Set Fruit.add('mango') # เพิ่ม mango เข้าไปใน set print(Fruit) # ลบสมาชิกหนึ่งตัวออกจากตัวแปร Set Fruit.remove('apple') # ลบ 
apple ออกจาก set print(Fruit) # ล้างสมาชิกทั้งหมด Fruit.clear() # ได้ empty set print(Fruit) ###Output _____no_output_____ ###Markdown * ฟังก์ชั่นสำคัญของตัวแปรประเภท Set - len() : นับจำนวนสมาชิกทั้งหมดใน Set - max() : สมาชิกที่มีค่ามากที่สุด - min() : สมาชิกที่มีค่าน้อยที่สุด ###Code # ตัวอย่างการใช้ฟังก์ชั่นกับตัวแปรประเภท Set DataTuple = (1,2,1,2,3,1,2,3,4) # สร้างข้อมูลประเภท tuple DataSet = set( DataTuple ) # สามารถเปลี่ยนข้อมูลประเภท tuple มาเป็นข้อมูลแบบ set ได้ print(DataSet) # ฟังก์ชั่น len เพื่อนับจำนวนสมาชิก print( 'len(DataSet) = ', len(DataSet)) # ฟังก์ชั่น max เพื่อหาค่าสูงสุดของ set print( 'max(DataSet) = ', max(DataSet)) # ฟังก์ชั่น min เพื่อหาค่าต่ำสุดของ set print( 'min(DataSet) = ', min(DataSet)) ###Output _____no_output_____ ###Markdown Dictionary Data Type* เป็นการจัดเก็บข้อมูลรูปแบบหนึ่งที่แต่ละสมาชิกจะมีองค์ประกอบสองอย่างคือ Key และ Value* Key : ใช้อ้างถึงสมาชิกแต่ละตัวของ Dictionary สมาชิกแต่ละตัวห้ามมี Key ซ้ำกัน* Value : เป็นค่าของสมาชิกใน Dictionary ซึ่งถูกอ้างถึงผ่าน KeyValue = ตัวแปรชนิด Dictionary[ Key ] **ตัวอย่าง*** Teddy = { ‘Color’: ‘ขาว’ , ‘Size’: ‘s’, ‘Price’: 100 }* print( Teddy[‘Color’],Teddy[‘Size’],Teddy[‘Price’] ) ผลลัพธ์คือ ขาว s 100 ###Code # ตัวอย่างการประยุกต์ใช้ข้อมูลประเภท Dictionary Teddy = { 'Color': 'ขาว' , 'Size': 's', 'Price': 100 } # เรียกดูค่าที่บันทึกไว้ผ่านค่า Key print( Teddy['Color'],Teddy['Size'],Teddy['Price'] ) # สามารถใช้ Method .get() เพื่อผ่านค่า Value จาก Key ที่กำหนดให้ print( Teddy.get('Color') ) ###Output _____no_output_____ ###Markdown * Method สำคัญของตัวแปรประเภท Dictionary - .keys() : อ่าน Key ทั้งหมดของสมาชิกในตัวแปร Dictionary - .values() : อ่าน Value ทั้งหมดของสมาชิกในตัวแปร Dictionary - .items() : รายการทั้งหมดในตัวแปร Dictionary คืนค่าออกมาในรูป tuple( key, value ) - .clear() : ลบสมาชิกทั้งหมดออกจากตัวแปร Dictionary ###Code # ตัวอย่างการประยุกต์ใช้ Method ของข้อมูลประเภท Dictionary Teddy = { 'Color': 'ขาว' , 'Size': 's', 'Price': 100 } # ข้อูมูลเริ่มต้น print('Teddy.keys() = ', Teddy.keys() ) print('Teddy.values() = ', Teddy.values() ) print('Teddy.items() = ', Teddy.items() ) print('Teddy.clear() = ', Teddy.clear() ) ###Output _____no_output_____ ###Markdown * ฟังก์ชั่นที่สำคัญของตัวแปรประเภท Dictionary - len() : นับจำนวนสมาชิกทั้งหมดใน Dictionary - max() : สมาชิกที่มีค่ามากที่สุด - min() : สมาชิกที่มีค่าน้อยที่สุด ###Code # ตัวอย่างการประยุกต์ใช้ function กับข้อมูลประเภท Dictionary Teddy = { 'Color': 'ขาว' , 'Size': 's', 'Price': 100 } # ข้อูมูลเริ่มต้น print( len(Teddy), max(Teddy), min(Teddy) ) ###Output _____no_output_____
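A brief illustrative sketch (not from the notebook above) contrasting the container types it covers (list, tuple, set, and dictionary) with made-up values.

items_list = [1, 2, 2, 3]                 # ordered, mutable, allows duplicates
items_tuple = tuple(items_list)           # ordered, immutable
items_set = set(items_list)               # unordered, duplicates removed: {1, 2, 3}
items_dict = {'count': len(items_list), 'max': max(items_list)}  # key -> value pairs
print(items_list, items_tuple, items_set, items_dict)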
production/ptn_1_ab-test/ab_test_kornlp.ipynb
###Markdown A/B Test using Production Variants--- Introduction---프로덕션 ML 워크플로에서 데이터 과학자와 머신 러닝 엔지니어는 데이터/모델/컨셉 드리프트에 따른 재훈련, 하이퍼파라메터 튜닝, 피쳐 선택 등과 같은 다양한 방법들을 통해 모델을 개선합니다. 이 때 이전 모델과 신규 모델 간의 A/B 테스트를 수행함으로써, 신규 모델에 대한 검증을 충분히 해야겠죠. 그렇다면 A/B 테스트를 위해 엔드포인트를 재배포하거나 2개의 엔드포인트를 배포해야 할까요? 그렇지 않습니다. 프로덕션 Variant 기능을 사용하면, 각 variant에 대해 동일한 엔드포인트 뒤에서 여러 모델 또는 모델 버전을 테스트할 수 있습니다. Production Variants프로덕션 Variant로 단일 SageMaker Endpoint에서 신규 모델을 테스트하고 배포할 수 있습니다. 예를 들어, 카나리 롤아웃(canary rollout) 및 블루/그린 배포(blue/green deployment)를 위해 엔드포인트의 모델 간에 트래픽을 이동할 수 있습니다. 물론, 초당 요청 수(requests per second)과 같은 지표를 기반으로 엔드포인트를 자동으로 확장하거나 축소하도록 오토스케일링 policy를 구성할 수도 있습니다.본 실습에서는 아래와 같은 기능들을 체험해 봅니다.- 2개의 프로덕션 variant들을 배포 (Variant1: CPU, Variant2: GPU)- 트래픽 분포 변경 (50:50 -> 80:20 -> 100:0)- Variant2 삭제 References- A/B Testing ML models in production using Amazon SageMaker: https://aws.amazon.com/ko/blogs/machine-learning/a-b-testing-ml-models-in-production-using-amazon-sagemaker/- Example: https://sagemaker-examples.readthedocs.io/en/latest/sagemaker_endpoints/a_b_testing/a_b_testing.html ###Code import os import json import sys import logging import boto3 import sagemaker import time from datetime import datetime, timedelta from sagemaker.huggingface import HuggingFaceModel from sagemaker import session from transformers import ElectraConfig from transformers import ( ElectraModel, ElectraTokenizer, ElectraForSequenceClassification ) logging.basicConfig( level=logging.INFO, format='[{%(filename)s:%(lineno)d} %(levelname)s - %(message)s', handlers=[ logging.FileHandler(filename='tmp.log'), logging.StreamHandler(sys.stdout) ] ) logger = logging.getLogger(__name__) role = sagemaker.get_execution_role() region = boto3.Session().region_name sess = sagemaker.Session() sm = boto3.Session().client("sagemaker") sm_runtime = boto3.Session().client("sagemaker-runtime") ###Output _____no_output_____ ###Markdown 1. Deploy Models---사전 훈련된 한국어 자연어 처리 모델(네이버 감성 분류 긍정/부정 판별)을 배포합니다. 편의상 동일한 모델을 2개의 프로덕션 변형에 배포하지만, 다른 모델(예: 다른 하이퍼파라메터로 훈련된 모델)을 배포할 수 있습니다. ###Code model_dir = 'model' # Define the model repo tokenizer_id = 'daekeun-ml/koelectra-small-v3-nsmc' model_id = "daekeun-ml/koelectra-small-v3-nsmc" # Download model and tokenizer model = ElectraForSequenceClassification.from_pretrained(model_id) tokenizer = ElectraTokenizer.from_pretrained(tokenizer_id) os.makedirs(model_dir, exist_ok=True) model.save_pretrained(model_dir) tokenizer.save_pretrained(model_dir) ###Output _____no_output_____ ###Markdown 모델 파라메터 및 토크나이저를 `model.tar.gz`으로 압축합니다. 압축 파일명은 자유롭게 지정할 수 있으나, 반드시 `tar.gz`로 압축해야 합니다. ###Code model_artifact_name = 'model.tar.gz' !cd model && tar -czvf {model_artifact_name} *.* ###Output _____no_output_____ ###Markdown 압축한 모델 아티팩트를 Amazon S3로 복사합니다. 
###Code s3_prefix = 'ab-test/models/nsmc' s3_model_path = f's3://{sess.default_bucket()}/{s3_prefix}' s3_model_url = f'{s3_model_path}/{model_artifact_name}' !aws s3 cp {model_dir}/{model_artifact_name} {s3_model_path}/{model_artifact_name} ###Output _____no_output_____ ###Markdown Create Models ###Code ecr_uri_cpu = f'763104351884.dkr.ecr.{region}.amazonaws.com/huggingface-pytorch-inference:1.9.1-transformers4.12.3-cpu-py38-ubuntu20.04' ecr_uri_gpu = f'763104351884.dkr.ecr.{region}.amazonaws.com/huggingface-pytorch-inference:1.9.1-transformers4.12.3-gpu-py38-cu111-ubuntu20.04' model_name1 = f"model-kornlp-nsmc-cpu-{datetime.now():%Y-%m-%d-%H-%M-%S}" model_name2 = f"model-kornlp-nsmc-gpu-{datetime.now():%Y-%m-%d-%H-%M-%S}" sess.create_model( name=model_name1, role=role, container_defs={"Image": ecr_uri_cpu, "ModelDataUrl": s3_model_url} ) sess.create_model( name=model_name2, role=role, container_defs={"Image": ecr_uri_gpu, "ModelDataUrl": s3_model_url} ) ###Output _____no_output_____ ###Markdown Create Variants엔드포인트 설정에서 프로덕션 variant를 여러 개 생성할 수 있습니다. 우선 각 variant에 대해 `initial_weight`를 1로 설정합니다. 즉, 클라이언트 요청의 50%가 Variant1로 이동하고 나머지 50%가 Variant로 이동됨을 의미합니다.본 예제에서는 최적의 레이턴시&비용 절충안을 찾기 위해 Variant1을 CPU 인스턴스로 설정하고 Variant2를 GPU 인스턴스로 설정했습니다. ###Code from sagemaker.session import production_variant variant1 = production_variant( model_name=model_name1, instance_type="ml.c5.xlarge", initial_instance_count=1, variant_name="Variant1", initial_weight=1, ) variant2 = production_variant( model_name=model_name2, instance_type="ml.g4dn.xlarge", initial_instance_count=1, variant_name="Variant2", initial_weight=1, ) (variant1, variant2) ###Output _____no_output_____ ###Markdown Create Production Variants단일 엔드포인트에 2개의 프로덕션 Variant들을 생성합니다. ###Code endpoint_name = f"endpoint-kornlp-nsmc-{datetime.now():%Y-%m-%d-%H-%M-%S}" print(f"EndpointName={endpoint_name}") sess.endpoint_from_production_variants( name=endpoint_name, production_variants=[variant1, variant2], wait=False ) ###Output _____no_output_____ ###Markdown Wait for the endpoint jobs to complete엔드포인트가 생성될 때까지 기다립니다. 약 5-10분의 시간이 소요됩니다. 아래 코드 셀에서 출력되는 AWS 콘솔 링크로 접속해서 엔드포인트 배포 상태를 확인할 수 있습니다. ###Code from IPython.core.display import display, HTML def make_endpoint_link(region, endpoint_name, endpoint_task): endpoint_link = f'<b><a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={region}#/endpoints/{endpoint_name}">{endpoint_task} Review Endpoint</a></b>' return endpoint_link endpoint_link = make_endpoint_link(region, endpoint_name, '[Deploy model from S3]') display(HTML(endpoint_link)) sess.wait_for_endpoint(endpoint_name, poll=5) ###Output _____no_output_____ ###Markdown 2. Invoke Endpoint----엔드포인트가 배포되었습니다. 샘플 데이터로 직접 추론을 수행해 봅니다. ###Code def invoke_endpoint(payload, endpoint_name, target_variant=None): start = time.time() if target_variant is not None: response = sm_runtime.invoke_endpoint( EndpointName=endpoint_name, ContentType="application/json", TargetVariant=target_variant, Body=payload, ) else: response = sm_runtime.invoke_endpoint( EndpointName=endpoint_name, ContentType="application/json", Body=payload, ) latency = (time.time() - start) * 1000 variant = response["InvokedProductionVariant"] logger.info(f'[{variant}] Latency: {latency:.3f} ms') output = json.loads(response['Body'].read().decode()) return output payload = '{"inputs": ["불후의 명작입니다. 눈물이 앞을 가려요", "저런...5점 만점에 1점 주기도 힘들어요."]}' invoke_endpoint(payload, endpoint_name) ###Output _____no_output_____ ###Markdown Variant1와 Variant2가 고르게 호출됨을 확인할 수 있습니다. 
###Code for i in range(10): invoke_endpoint(payload, endpoint_name) { variant["VariantName"]: variant["CurrentWeight"] for variant in sm.describe_endpoint(EndpointName=endpoint_name)["ProductionVariants"] } ###Output _____no_output_____ ###Markdown `VariantName`으로 Target Variant를 고정적으로 지정할 수도 있습니다. ###Code invoke_endpoint(payload, endpoint_name, variant1['VariantName']) invoke_endpoint(payload, endpoint_name, variant2['VariantName']) ###Output _____no_output_____ ###Markdown 3. Update Variant Traffic (Canary Rollouts and A/B Testing)---카나리 롤아웃은 신규 모델을 안전하게 배포하기 위해 사용되는 전략 중 하나입니다. 대분의 트래픽이 기존 모델로 이동하고 카나리 모델의 클러스터에 할당되는 트래픽은 상대적으로 작기 때문에 사용자 경험에 영향을 거의 주지 않습니다. SageMaker에서는 이를 위한 기능을 API로 제공하고 있으며, A/B 테스트 결과에 따라 트래픽을 특정 variant에 더 할당할 경우 굳이 호스팅 엔드포인트를 재배포하실 필요가 없습니다. `UpdateEndpointWeightsAndCapacities`를 사용하면 엔드포인트 중단 없이 각 variant에 할당된 가중치를 쉽게 수정할 수 있기 때문입니다. ###Code import pandas as pd cw = boto3.Session().client("cloudwatch") def get_metrics_for_endpoint_variant( endpoint_name, variant_name, metric_name, statistic, start_time, end_time ): dimensions = [ {"Name": "EndpointName", "Value": endpoint_name}, {"Name": "VariantName", "Value": variant_name}, ] metrics = cw.get_metric_statistics( Namespace="AWS/SageMaker", MetricName="Invocations", StartTime=start_time, EndTime=end_time, Period=60, Statistics=[statistic], Dimensions=dimensions ) return ( pd.DataFrame(metrics["Datapoints"]) .sort_values("Timestamp") .set_index("Timestamp") .drop("Unit", axis=1) .rename(columns={statistic: variant_name}) ) def plot_endpoint_metrics(start_time=None): start_time = start_time or datetime.now() - timedelta(minutes=60) end_time = datetime.now() metric_name = "Invocations" statistic = 'Sum' metrics_variant1 = get_metrics_for_endpoint_variant( endpoint_name, variant1["VariantName"], metric_name, statistic, start_time, end_time ) metrics_variant2 = get_metrics_for_endpoint_variant( endpoint_name, variant2["VariantName"], metric_name, statistic, start_time, end_time ) metrics_variants = metrics_variant1.join(metrics_variant2, how="outer") metrics_variants.plot() return metrics_variants ###Output _____no_output_____ ###Markdown Variant 트래픽 테스트약 2분여간 추론 요청들을 수행하면서 각 variant의 트래픽 분포를 확인해 봅니다. 현재는 50:50 가중치이므로 트래픽 분포가 고르게 이루어지고 있다는 것을 알 수 있습니다. ###Code from datetime import datetime, timedelta def invoke_endpoint_many(payload, endpoint_name, num_requests=250, sleep_secs=0.5): for i in range(num_requests): print(".", end="", flush=True) response = sm_runtime.invoke_endpoint( EndpointName=endpoint_name, ContentType="application/json", Body=payload, ) output = json.loads(response['Body'].read().decode()) time.sleep(sleep_secs) payload = '{"inputs": ["불후의 명작입니다. 눈물이 앞을 가려요", "저런...5점 만점에 1점 주기도 힘들어요."]}' invocation_start_time = datetime.now() invoke_endpoint_many(payload, endpoint_name) time.sleep(20) # give metrics time to catch up plot_endpoint_metrics(invocation_start_time) ###Output _____no_output_____ ###Markdown Variant 가중치 변경 (80:20)이제 `UpdateEndpointWeightsAndCapacities`를 사용하여 각 variant의 가중치를 변경합니다. 트래픽의 80%를 variant1로 이동하고 나머지 트래픽을 variant2로 이동합니다. Variant 가중치 수정 후 곧바로 2분 정도 추론 요청을 연속적으로 수행해 보겠습니다. 
###Code sm.update_endpoint_weights_and_capacities( EndpointName=endpoint_name, DesiredWeightsAndCapacities=[ {"DesiredWeight": 80, "VariantName": variant1["VariantName"]}, {"DesiredWeight": 20, "VariantName": variant2["VariantName"]}, ], ) print("Waiting for update to complete") while True: status = sm.describe_endpoint(EndpointName=endpoint_name)["EndpointStatus"] if status in ["InService", "Failed"]: print("Done") break print(".", end="", flush=True) time.sleep(1) { variant["VariantName"]: variant["CurrentWeight"] for variant in sm.describe_endpoint(EndpointName=endpoint_name)["ProductionVariants"] } ###Output _____no_output_____ ###Markdown 대부분의 추론 요청이 Variant1에서 처리되고 있으며, Variant2에서 처리된 추론 요청이 적다는 것을 볼 수 있습니다. ###Code invoke_endpoint_many(payload, endpoint_name) time.sleep(20) # give metrics time to catch up plot_endpoint_metrics(invocation_start_time) ###Output _____no_output_____ ###Markdown Variant 가중치 변경 (100:0)Variant1의 퍼포먼스가 만족스럽다면 트래픽의 100%를 모두 variant1로 보내도록 라우팅할 수 있습니다. variant 가중치 수정 후 곧바로 2분 정도 추론 요청을 연속적으로 수행해 보겠습니다. ###Code sm.update_endpoint_weights_and_capacities( EndpointName=endpoint_name, DesiredWeightsAndCapacities=[ {"DesiredWeight": 1, "VariantName": variant1["VariantName"]}, {"DesiredWeight": 0, "VariantName": variant2["VariantName"]}, ], ) print("Waiting for update to complete") while True: status = sm.describe_endpoint(EndpointName=endpoint_name)["EndpointStatus"] if status in ["InService", "Failed"]: print("Done") break print(".", end="", flush=True) time.sleep(1) { variant["VariantName"]: variant["CurrentWeight"] for variant in sm.describe_endpoint(EndpointName=endpoint_name)["ProductionVariants"] } ###Output _____no_output_____ ###Markdown 모든 추론 요청이 Variant1에서 처리되고 있으며, Variant2에서 처리된 추론 요청이 없다는 것을 볼 수 있습니다. ###Code invoke_endpoint_many(payload, endpoint_name) time.sleep(20) # give metrics time to catch up plot_endpoint_metrics(invocation_start_time) ###Output _____no_output_____ ###Markdown 이슈가 없다면 곧바로 엔드포인트에서 Variant2를 삭제할 수 있습니다. 바로 아래 섹션에서 Variant2를 삭제해 보겠습니다. 물론, 프로덕션에서 새로운 테스트 환경이 필요할 때에는 엔드포인트에 신규 variant를 추가하고 신규 모델을 계속 테스트할 수 있습니다. ###Code endpoint_config_name = sm.describe_endpoint(EndpointName=endpoint_name)['EndpointConfigName'] ###Output _____no_output_____ ###Markdown 4. Delete Variant---Variant를 여러 개 띄운다는 것은 모델 호스팅 클러스터를 여러 개 띄운다는 의미입니다. 이제, 불필요한 과금을 피하기 위해 Variant1만 사용하도록 엔드포인트 구성을 업데이트합니다. 엔드포인트 업데이트는 수 분이 소요되지만, 엔드포인트 업데이트 중에도 **다운타임이 발생하지 않는다는 점**을 주목해 주세요. (즉, `invoke_endpoint()`를 계속 수행할 수 있습니다.)**[Tip]** 본 핸즈온에서는 빠른 실습을 위해 곧바로 Variant2의 클러스터를 삭제했지만, 실제 프로덕션에서는 이전 클러스터로 빠르게 롤백해야 하는 경우를 대비하여, Variant2를 일정 시간 동안 유휴 상태로 유지하는 것을 권장드립니다. ###Code updated_endpoint_config_name = f"updated-endpoint-config-kornlp-nsmc-{datetime.now():%Y-%m-%d-%H-%M-%S}" print(updated_endpoint_config_name) updated_endpoint_config = sm.create_endpoint_config( EndpointConfigName=updated_endpoint_config_name, ProductionVariants=[ { 'VariantName': variant1["VariantName"], # Only specify variant1 to remove variant2 'ModelName': model_name1, 'InstanceType':'ml.m5.xlarge', 'InitialInstanceCount': 1, 'InitialVariantWeight': 100 } ]) sm.update_endpoint( EndpointName=endpoint_name, EndpointConfigName=updated_endpoint_config_name ) invoke_endpoint(payload, endpoint_name) ###Output _____no_output_____ ###Markdown AWS 콘솔에서 엔드포인트 상태를 확인합니다. Updating 상태로 수 분의 시간이 경과 후 `InServce`로 변경됩니다. 
###Code endpoint_link = make_endpoint_link(region, endpoint_name, '[Deploy model from S3]') display(HTML(endpoint_link)) sess.wait_for_endpoint(endpoint_name, poll=5) ###Output _____no_output_____ ###Markdown Clean up--- ###Code sess.delete_endpoint(endpoint_name) sess.delete_endpoint_config(endpoint_config_name) sess.delete_endpoint_config(updated_endpoint_config_name) sess.delete_model(model_name1) sess.delete_model(model_name2) !rm -rf {model_dir} ###Output _____no_output_____
courses/aima-book/Chapter 3 - Search.ipynb
###Markdown Problem Class ###Code import utils infinity = float('inf') class Problem(object): def __init__(self, initial_state, goal_state): """Intialise with initial state, and goal state. Note that in the case of maps, state will be string with name of the places. """ self.initial_state = initial_state self.goal_state = goal_state def actions(self, state): """List of actions that can be taken while at this state. """ raise NotImplementedError def result(self, state, action): """The state that results from applying 'action' to 'state'. """ raise NotImplementedError def goal_test(self, state): """Test if the goal has been achieved. """ return self.goal_state == state def path_cost(self, c, state1, state2, action): """Cost of path from root to state 2. ('c' is the cost from root to state1, and state2 resulted by applying action to state1.) """ return c + 1 def value(self, test): raise NotImplementedError ###Output _____no_output_____ ###Markdown Node Class ###Code class Node(object): def __init__(self, state, parent=None, action=None, path_cost=0): """We initialize this node with this node's state (e.g. name of the place), it's parent, the action which brought it here from parent, and the total path cost to reach this node. """ self.state = state self.action = action self.parent = parent self.path_cost = path_cost self.depth = 0 if self.parent: self.depth = self.parent.depth + 1 def __repr__(self): return "Node is {0}".format(self.state) def __lt__(self, node): return self.state < node.state def __eq__(self, other): return isinstance(other, Node) and self.state == other.state def __hash__(self): return hash(self.state) def expand(self, problem): """For all the actions possible from this state, find the resultant node of applying that action. Create a list of all such nodes and return. """ return [self.child_node(problem, actions) for actions in problem.actions(self.state)] def child_node(self, problem, action): "Create and return child node resulting from applying action to current node state." next = problem.result(self.state, action) return Node(next, self, action, problem.path_cost(self.path_cost, self, next, action)) def path(self): "Returns the sequence of nodes from parent to this node" node = self path_back = [] while node: path_back.append(node) node = node.parent return list(reversed(path_back)) def solution(self): """Return list of all actions between this node and root (We don't query the root for action because it didn't result from any action, it was initialised.) """ return [node.action for node in self.path()[1:]] ###Output _____no_output_____ ###Markdown Graph Class ###Code class Graph(object): def __init__(self, dictionary=None, directed=True): self.dictionary = dictionary or {} self.directed = directed if directed == False: self.make_undirected() def make_undirected(self): """If the graph is suppose to be unidirected, make connections the reverse way too """ for node in list(self.dictionary.keys()): for (child, distance) in self.dictionary[node].items(): self.connect_nodes(child, node, distance) def connect_nodes(self, A, B, distance=1): """Connect A to B with distance as 'distance'. Also add them in reverse if graph is undirected. 
""" self.connect_helper(A, B, distance) if self.directed == False: self.connect_helper(B, A, distance) def connect_helper(self, A, B, distance): self.dictionary.setdefault(A, {})[B] = distance def nodes(self): return list(self.dictionary.keys()) def get(self, A, B = None): links = self.dictionary.setdefault(A, {}) if B is None: return links else: return links.get(B) def print(self): for node in self.dictionary: print(node) print(self.dictionary[node]) ###Output _____no_output_____ ###Markdown Test Graph Class ###Code print("======= Directed =======") g = Graph({'a' : {'b' : 1, 'c': 10}, 'c' : {'b': 8} }, True) g.connect_nodes('x', 'y', 19) g.connect_nodes('x', 'a', 7) g.print() print("====== Undirected ======") g = Graph({'a' : {'b' : 1, 'c': 10}, 'c' : {'b': 8} }, False) g.connect_nodes('x', 'y', 19) g.connect_nodes('x', 'a', 7) g.print() ###Output ======= Directed ======= a {'c': 10, 'b': 1} c {'b': 8} x {'y': 19, 'a': 7} ====== Undirected ====== y {'x': 19} x {'y': 19, 'a': 7} a {'x': 7, 'c': 10, 'b': 1} c {'a': 10, 'b': 8} b {'a': 1, 'c': 8} ###Markdown GraphProblem Class ###Code class GraphProblem(Problem): def __init__(self, initial_state, goal_state, graph): Problem.__init__(self, initial_state, goal_state) self.graph = graph def actions(self, state): """List of actions that can be taken while at this state. """ return list(self.graph.get(state).keys()) def result(self, state, action): """The state that results from applying 'action' to 'state'. """ return action def path_cost(self, c, state1, state2, action): """Cost of path from root to state 2. ('c' is the cost from root to state1, and state2 resulted by applying action to state1.) """ return c + (self.graph.get(state1, state2) or infinity) - romania_problem = GraphProblem('Arad', 'Bucharest', romania_map) romania_problem.graph.print() class Fifo(object): def __init__(self): self.l = [] self.back = -1 self.front = 0 def enqueue(self, object): self.back += 1 self.l.append(object) def dequeue(self): self.old_front = self.front self.front += 1 return self.l[self.old_front] def items(self): return self.l[self.front:] def empty(self): return self.back < self.front ###Output _____no_output_____ ###Markdown Breadth first tree search ###Code def tree_search(problem, frontier): iterations = 0 frontier.append(Node(problem.initial_state)) while frontier: node = frontier.pop() iterations += 1 print(node.path()) if problem.goal_test(node.state): return iterations, node nodes = node.expand(problem) for n in nodes: frontier.append(n) # Safety valve if iterations > 200: print('Safety valve goes off!') break return iterations, None i, n = tree_search(romania_problem, utils.FIFOQueue()) print(i) print(n.path()) ###Output [Node is Arad] [Node is Arad, Node is Sibiu] [Node is Arad, Node is Timisoara] [Node is Arad, Node is Zerind] [Node is Arad, Node is Sibiu, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Fagaras] [Node is Arad, Node is Sibiu, Node is Oradea] [Node is Arad, Node is Sibiu, Node is Arad] [Node is Arad, Node is Timisoara, Node is Arad] [Node is Arad, Node is Timisoara, Node is Lugoj] [Node is Arad, Node is Zerind, Node is Oradea] [Node is Arad, Node is Zerind, Node is Arad] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Sibiu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Pitesti] [Node is Arad, Node is Sibiu, Node is Fagaras, Node is Bucharest] 16 [Node is Arad, Node is Sibiu, Node is Fagaras, Node is Bucharest] ###Markdown Breadth First Graph 
Search ###Code def graph_search(problem, frontier): iterations = 0 safety = 0 node = Node(problem.initial_state) print(node.path()) if problem.goal_test(node.state): return iterations, node frontier.append(node) explored = set() while frontier: node = frontier.pop() explored.add(node.state) nodes = node.expand(problem) for n in nodes: safety += 1 if safety > 100: print("Safety valve goes off!") return iterations, None # Why are we checking explored AND frontier as well?? # A - We don't need to explore the already explored and # we don't need to check the nodes in frontier because # they already got tested for goal before being put there. if n.state not in explored and n not in frontier: iterations += 1 print(n.path()) if problem.goal_test(n.state): return iterations, n frontier.append(n) return iterations, None i, n = graph_search(romania_problem, utils.FIFOQueue()) print(i) print(n.path()) ###Output [Node is Arad] [Node is Arad, Node is Sibiu] [Node is Arad, Node is Timisoara] [Node is Arad, Node is Zerind] [Node is Arad, Node is Sibiu, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Fagaras] [Node is Arad, Node is Sibiu, Node is Oradea] [Node is Arad, Node is Timisoara, Node is Lugoj] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Pitesti] [Node is Arad, Node is Sibiu, Node is Fagaras, Node is Bucharest] 10 [Node is Arad, Node is Sibiu, Node is Fagaras, Node is Bucharest] ###Markdown Depth First Tree Search ###Code # Goes into infinite loop #i, n = tree_search(romania_problem, utils.Stack()) #print(i) #print(n.path()) ###Output _____no_output_____ ###Markdown Depth First Graph Search ###Code i, n = graph_search(romania_problem, utils.Stack()) print(i) print(n.path()) ###Output [Node is Arad] [Node is Arad, Node is Sibiu] [Node is Arad, Node is Timisoara] [Node is Arad, Node is Zerind] [Node is Arad, Node is Zerind, Node is Oradea] [Node is Arad, Node is Timisoara, Node is Lugoj] [Node is Arad, Node is Timisoara, Node is Lugoj, Node is Mehadia] [Node is Arad, Node is Timisoara, Node is Lugoj, Node is Mehadia, Node is Drobeta] [Node is Arad, Node is Timisoara, Node is Lugoj, Node is Mehadia, Node is Drobeta, Node is Craiova] [Node is Arad, Node is Timisoara, Node is Lugoj, Node is Mehadia, Node is Drobeta, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Timisoara, Node is Lugoj, Node is Mehadia, Node is Drobeta, Node is Craiova, Node is Pitesti] [Node is Arad, Node is Timisoara, Node is Lugoj, Node is Mehadia, Node is Drobeta, Node is Craiova, Node is Pitesti, Node is Bucharest] 11 [Node is Arad, Node is Timisoara, Node is Lugoj, Node is Mehadia, Node is Drobeta, Node is Craiova, Node is Pitesti, Node is Bucharest] ###Markdown Depth Limiting Search ###Code def recursive_dls(node, problem, limit): print(node.path()) if problem.goal_test(node.state): return node elif limit == 0: return 'cutoff' else: for child in node.expand(problem): result = recursive_dls(child, problem, limit -1) if result == 'cutoff': return result elif result is not None: return result else: return None def depth_limiting_search(problem, limit = 50): return recursive_dls(Node(problem.initial_state), problem, limit) val = depth_limiting_search(romania_problem) if isinstance(val, Node): val.path() ###Output [Node is Arad] [Node is Arad, Node is Sibiu] [Node is Arad, Node is Sibiu, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is 
Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node 
is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is 
Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] [Node is Arad, Node is Sibiu, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, 
Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu, Node is Craiova, Node is Rimnicu] ###Markdown Iterative deepening search ###Code def iterative_deepening_search(problem): """[Figure 3.18]""" for depth in range(sys.maxsize): result = depth_limited_search(problem, depth) if result != 'cutoff': return result ###Output _____no_output_____ ###Markdown Best First Search A* Search Recursive Best First Search Uniform Cost Search ###Code def best_first_graph_search(problem, frontier): iterations = 0 node = Node(problem.initial_state) print(node.path()) if problem.goal_test(node.state): return iterations, node frontier.append(node) explored = set() while frontier: node = frontier.pop() explored.add(node.state) iterations += 1 print(node.path()) if problem.goal_test(node.state): return iterations, node nodes = node.expand(problem) for n in nodes: if n.state not in explored and n not in frontier: frontier.append(n) elif n in frontier: incumbent = frontier[n] if incumbent.path_cost > n.path_cost: print("Savings!") del frontier[incumbent] frontier.append(n) return iterations, None # Uniform Cost Search - f() is nothing but the path cost f = lambda node: node.path_cost frontier = utils.PriorityQueue(min, f) i, n = best_first_graph_search(romania_problem, frontier) print(i) print(n.path()) import numpy as np import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation fig, ax = plt.subplots() xdata, ydata = [], [] ln, = plt.plot([], [], 'ro', animated=True) def init(): ax.set_xlim(0, 2*np.pi) ax.set_ylim(-1, 1) return ln, def update(frame): xdata.append(frame) ydata.append(np.sin(frame)) ln.set_data(xdata, ydata) return ln, ani = FuncAnimation(fig, update, frames=np.linspace(0, 2*np.pi, 128), init_func=init, blit=True) plt.show() ###Output _____no_output_____
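###Markdown The search section above lists A* in its heading, but only the uniform-cost case (f = path cost) is actually wired up. The cell below is a self-contained sketch of A* using heapq; the toy graph edges and straight-line-distance heuristic values are illustrative stand-ins based on the Romania road map, not the notebook's romania_problem or Node objects.
###Code
# Standalone A* sketch: f(n) = g(n) + h(n), frontier kept in a binary heap.
import heapq

def astar(graph, h, start, goal):
    """graph: {node: [(neighbor, step_cost), ...]}, h: {node: heuristic value}."""
    frontier = [(h[start], 0, start, [start])]   # (f, g, node, path)
    best_g = {start: 0}
    while frontier:
        f, g, node, path = heapq.heappop(frontier)
        if node == goal:
            return g, path
        for nbr, cost in graph.get(node, []):
            g2 = g + cost
            if g2 < best_g.get(nbr, float("inf")):
                best_g[nbr] = g2
                heapq.heappush(frontier, (g2 + h[nbr], g2, nbr, path + [nbr]))
    return None

# Illustrative subset of the Romania map; costs and heuristics are approximate.
graph = {"Arad": [("Sibiu", 140), ("Timisoara", 118)],
         "Sibiu": [("Rimnicu", 80), ("Fagaras", 99)],
         "Rimnicu": [("Pitesti", 97)],
         "Fagaras": [("Bucharest", 211)],
         "Pitesti": [("Bucharest", 101)],
         "Timisoara": []}
h = {"Arad": 366, "Sibiu": 253, "Rimnicu": 193, "Fagaras": 176,
     "Pitesti": 100, "Timisoara": 329, "Bucharest": 0}
print(astar(graph, h, "Arad", "Bucharest"))   # (418, ['Arad', 'Sibiu', 'Rimnicu', 'Pitesti', 'Bucharest'])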
Copy_of_AIML_DirectBotFather.ipynb
###Markdown This version is not using the redirect server and is using json file which you need to copy in to left paneThis is a chatbot using botFather you can send a request and get a url relating to it. many thanks for information to Nikolay Gerasimenko of Skillbox ###Code # # These are the libraries needed by this application #!pip install scikit-learn #!pip install nltk #!pip install json # import random import nltk import json from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer from sklearn.linear_model import LogisticRegression, RidgeClassifier, SGDClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split # # load the file into sample_data on the left hand pane # with open('/content/AIML_BOT.json', 'r') as f: BOT_CONFIG = json.load(f) #with open('/content/test.txt', 'w') as f: # f.write('test text') # # ======= This configuration matches what you load into the bot urls =========== # # when complete you will load in the json file automatically generated when you # load up the webserver with the relevant urls and categories from spreadsheet # #BOT_CONFIG = { # # 'threshold': 0.6, # # 'intents': { # 'house': { # 'examples': [ # 'house!', # 'club', # 'garage' # ], # 'responses': [ # '1', # '2', # '3' # ] # }, # 'futuregarage': { # 'examples': [ # 'ukgarage', # 'futuregarage' # ], # 'responses': [ # '6', # '7', # '8' # ] # }, # 'electro': { # 'examples': [ # 'electronic', # 'electro', # 'eighties', # 'pop', # 'newwave' # ], # 'responses': [ # '6', # '7', # '8' # ] # }, # 'rock': { # 'examples': [ # 'heavy', # 'rock', # 'metal' # ], # 'responses': [ # '6', # '7', # '8' # ] # }, # 'cartoon': { # 'examples': [ # 'anime', # 'cartoon', # 'animation', # 'computer art' # ], # 'responses': [ # '1', # '3', # '8' # ] # }, # 'java': { # 'examples': [ # 'javascript', # 'java', # 'flash', # 'webpage' # ], # 'responses': [ # '21', # '23', # '18' # ] # }, # 'list': { # 'examples': [ # 'valid', # 'say', # 'examples' # ], # 'responses': [ # '65536' # ] # } # }, # 'default': [ # 'style not known or found please say again or say list for available options', # 'i do not have it, you could try speaking slower again,, or list for available categories' # ] # #} # # clean the text roman or cyrillic alphabet # def clean(text): return ''.join([simbol for simbol in text.lower() if simbol in 'абвгдеёжзийклмнопрстуфхцчшщъыьэюяabcdefghijklmnopqrstuvwxyz ']) # # use nltk to suggest the word match (close to words) # def match(example, text): return nltk.edit_distance(clean(text), clean(example)) / len(example) < BOT_CONFIG['threshold'] if len(example) > 0 else False # return nltk.edit_distance(clean(text), clean(example)) / len(example) < BOT_CONFIG['threshold'] #def get_intent(text): # for intent, value in BOT_CONFIG['intents'].items(): # print("intent was %s" % intent) # if 'examples' in value and not intent.find(text) == -1: # for example in value['examples']: # if match(example, text): # return intent # else: # return 'not found in examples' # elif 'inc_examples' in value and not intent.find(text): # for example in value['inc_examples']: # if match(example, text): # return intent # else: # return 'not known from inc_examples' # return 'no config found' # clean the text up (remove punctuation) and convert to lower case # def cleaner(text): # clean the text up cleaned_text = '' for ch in text.lower(): if ch in 'абвгдеёжзийклмнопрстуфхцчшщъыьэюяabcdefghijklmnopqrstuvwxyz ': cleaned_text = cleaned_text + ch return cleaned_text #def 
get_intent(text): # get the intent from the json file # for intent in BOT_CONFIG['intents']: # if 'examples' in BOT_CONFIG['intents'][intent]: # for example in BOT_CONFIG['intents'][intent]['examples']: # if match(cleaner(text), cleaner(example)): # return intent # elif 'inc_examples' in BOT_CONFIG['intents'][intent]: # for example in BOT_CONFIG['intents'][intent]['inc_examples']: # if match(cleaner(text), cleaner(example)): # return intent def get_intent(text): for intent in BOT_CONFIG['intents'].keys(): for example in BOT_CONFIG['intents'][intent]['examples']: cleaned_example = clean(example) cleaned_text = clean(text) if nltk.edit_distance(cleaned_example, cleaned_text) / max(len(cleaned_example), len(cleaned_text), 1) < BOT_CONFIG['threshold']: return intent #return 'unknown_intent' #X = [] #y = [] #for intent, value in BOT_CONFIG['intents'].items(): # if 'inc_examples' in value: # examples = list(set([example.lower() for example in value['inc_examples']])) # else: # examples = list(set([example.lower() for example in value['examples']])) # X += examples # y += [intent] * len(examples) X = [] y = [] for intent in BOT_CONFIG['intents']: for example in BOT_CONFIG['intents'][intent]['examples']: X.append(example) y.append(intent) #X = [] #y = [] #for intent in BOT_CONFIG['intents']: # if 'examples' in BOT_CONFIG['intents'][intent]: # X += BOT_CONFIG['intents'][intent]['examples'] # y += [intent for i in range(len(BOT_CONFIG['intents'][intent]['examples']))] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42) # # most models use this TfidVectoriser # vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1,3)) #CountVectorizer(analyzer='char', ngram_range=(1,3), preprocessor=clean) X_train_vectorized = vectorizer.fit_transform(X_train) X_test_vectorized = vectorizer.transform(X_test) # # count vectoriser is used by SGD classification # vectorizer2 = CountVectorizer(analyzer='char', preprocessor=cleaner, ngram_range=(1,3), stop_words=['а', 'и']) vectorizer2.fit(X) X_vect3 = vectorizer2.transform(X) X_train_vect3, X_test_vect3, y_train3, y_test3 = train_test_split(X_vect3, y, test_size=0.3) #sgd = SGDClassifier() # SGD model #sgd.fit(X_vect3, y) # Train the model #sgd.score(X_vect3, y) # check score # ======================================================================================== # different methods gave various answers so we collated the answers and selected variants # ======================================================================================== # # use ridge classifier and TfidVectoriser # clf = RidgeClassifier() # model examples RandomForestClassifier() #RidgeClassifier() #LogisticRegression() clf.fit(X_train_vectorized, y_train) clf.score(X_train_vectorized, y_train), clf.score(X_test_vectorized, y_test) # # use random forrest classifier and TfidVectoriser # clf2 = RandomForestClassifier() clf2.fit(X_train_vectorized, y_train) clf2.score(X_train_vectorized, y_train), clf2.score(X_test_vectorized, y_test) # # this is an alternative method using random forrest with a differnt test/train split but still using the TfidVectoriser # vectorizer2 = TfidfVectorizer() X_transformed2 = vectorizer.fit_transform(X) X_train2, X_test2, y_train2, y_test2 = train_test_split(X_transformed2, y, test_size=0.2, random_state=42) classifier = RandomForestClassifier() classifier.fit(X_train2, y_train2) # # SGDClassifier with CountVectoriser # sgd = SGDClassifier() # stochastic gradient descent sgd.fit(X_vect3, y) sgd.score(X_vect3, y) # We look at 
the quality of the classification # # test the bot reader and nltk here # question = input() answer = get_intent(question) print(answer) # get intent using ridge classifier model1 # def get_intent_by_rc_model1(text): vectorized_text = vectorizer.transform([text]) return clf.predict(vectorized_text)[0] # get intent using random forrest model 1 # def get_intent_by_rf_model1(text): vectorized_text = vectorizer.transform([text]) return clf2.predict(vectorized_text)[0] # get intent using random forrest model 2 # def get_intent_by_rf_model2(text): return classifier.predict(vectorizer.transform([text]))[0] # get intent using sgd classification and countVectoriser # def get_intent_by_sgd_model1(text): # Функция определяющая интент текста с помощью ML-модели return sgd.predict(vectorizer.transform([text]))[0] # # use the ridge classifier model 1 # def bot_rc_ml1(text): intent = get_intent(text) # 1. try to understand the intention by comparison according to Levinstein if intent is None: intent = get_intent_by_rc_model1(text) # use ridge classifier if 'inc_response' in BOT_CONFIG['intents'][intent]: return random.choice(BOT_CONFIG['intents'][intent]['inc_response']) else: return random.choice(BOT_CONFIG['intents'][intent]['responses']) # # use the random forrest model 1 # def bot_rf_ml1(text): intent = get_intent(text) # 1. try to understand the intention by comparison according to Levinstein if intent is None: intent = get_intent_by_rf_model1(text) # 2. use random forrest 1 if 'inc_response' in BOT_CONFIG['intents'][intent]: return random.choice(BOT_CONFIG['intents'][intent]['inc_response']) else: return random.choice(BOT_CONFIG['intents'][intent]['responses']) # # use the random forrest model 2 # def bot_rf_ml2(text): intent = get_intent(text) # 1. try to understand the intention by comparison according to Levinstein if intent is None: intent = get_intent_by_rf_model2(text) # 2. use random forrest 2 if 'inc_response' in BOT_CONFIG['intents'][intent]: return random.choice(BOT_CONFIG['intents'][intent]['inc_response']) else: return random.choice(BOT_CONFIG['intents'][intent]['responses']) # # use the sgd model 1 # def bot_sgd_ml1(text): intent = get_intent(text) # 1. try to understand the intention by comparison according to Levinstein # when multiple phrases are found in many sections i might reverse this if intent is None: intent = get_intent_by_sgd_model1(text) # 2. 
use the SGD model if 'inc_response' in BOT_CONFIG['intents'][intent]: return random.choice(BOT_CONFIG['intents'][intent]['inc_response']) else: return random.choice(BOT_CONFIG['intents'][intent]['responses']) # # this is test we run each ml model in alternating sequence # so we can look and analyse how it parsed the json file # choiceVar = 0 question = '' while question != 'exit': question = input() if question.find(cleaner('exit')) == -1: if choiceVar == 0: print("bot1 ridgeclassifier ml1") response = bot_rc_ml1(question) elif choiceVar == 1: print("bot2 random forrest ml1") response = bot_rf_ml1(question) elif choiceVar == 2: print("bot3 random forrest ml2") response = bot_rf_ml2(question) else: print("bot4 sgd ml1") response = bot_sgd_ml1(question) choiceVar = choiceVar + 1 choiceVar = choiceVar % 4 print(response) # # This is the bot which will run the AIML for the messanger # we run all the machine learning models we developed and make a choice from there # results # def bot(question): if question.find(cleaner('exit')) == -1: response = [] print("bot1 rcml1") try: response.append( bot_rc_ml1(question) ) # ridge classifier model except: print("bot1 rcml1 fail") print("bot2 rfml1") try: response.append( bot_rf_ml1(question) ) # random forrest model1 except: print("bot2 rfml1 fail") print("bot3 rfml2") try: response.append( bot_rf_ml2(question) ) # random forrest model2 except: print("bot3 rfml2 fail") print("bot4 sgdml1") try: response.append( bot_sgd_ml1(question) ) # sgd stochastic gradient desent except: print("bot4 sgdm1 fail") return random.choice(response) # make a random chocie from the machine learning models results (some always look at various sections so this way we see them all) #return response # if you want to look at the result list else: return 'exit' # # test it here # question = '' while question != 'exit': question = input() print(bot(question)) pip install python-telegram-bot --upgrade import logging from telegram import Update, ForceReply from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext # Enable logging logging.basicConfig( format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO ) logger = logging.getLogger(__name__) questions = [] # Define a few command handlers. These usually take the two arguments update and # context. def start(update: Update, _: CallbackContext) -> None: """Send a message when the command /start is issued.""" user = update.effective_user update.message.reply_markdown_v2( fr'Hi {user.mention_markdown_v2()}\!', reply_markup=ForceReply(selective=True), ) def help_command(update: Update, _: CallbackContext) -> None: """Send a message when the command /help is issued.""" reply = 'list :: house, garage, future garage, cartoon, anime, hip hop etc..' update.message.reply_text(reply) def echo(update: Update, _: CallbackContext) -> None: """Echo the user message.""" question = update.message.text questions.append(question) if question.find(cleaner('exit')) == -1: response = bot(question) if not response.find("65536") == -1: # we did find in list reply = "list :: house, garage, future garage, cartoon, anime, hip hop etc.. \n no suitable category found try a single word " else: reply = response #print("the response %s" % reply) else: reply = 'exit' # to send a file ..... 
reply = 'https://colab.research.google.com/drive/1Azj_M62pXee_7DlknRx7nEqEB2j7SoFp#scrollTo=zuaT9rmGkjZw' if not reply.find("list") == -1 or not reply.find("reply") == -1: update.message.reply_text(reply) else: replyurl = reply update.message.reply_text(replyurl) def main() -> None: """Start the bot.""" # Create the Updater and pass it your bot's token. updater = Updater("1778043544:AAE7NZYNTOqLMWmSFB0jGIv2-vFuNxgBz68") # Get the dispatcher to register handlers dispatcher = updater.dispatcher # on different commands - answer in Telegram dispatcher.add_handler(CommandHandler("start", start)) dispatcher.add_handler(CommandHandler("help", help_command)) # on non command i.e message - echo the message on Telegram dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, echo)) # Start the Bot updater.start_polling() # Run the bot until you press Ctrl-C or the process receives SIGINT, # SIGTERM or SIGABRT. This should be used most of the time, since # start_polling() is non-blocking and will stop the bot gracefully. updater.idle() # # run communication to the botFather # main() ###Output 2021-07-17 19:19:45,788 - apscheduler.scheduler - INFO - Scheduler started
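###Markdown The bot() function above picks one of the four model responses at random. As a possible refinement (not part of the original notebook), the sketch below combines the predicted intents by majority vote before choosing a response; the helper name and the example intents are illustrative.
###Code
# Sketch: majority vote over the per-model intent predictions, ties broken at random.
from collections import Counter
import random

def combine_intents(predictions):
    """predictions: list of intent names (or None) returned by the individual models."""
    counts = Counter(p for p in predictions if p is not None)
    if not counts:
        return None
    best_count = counts.most_common(1)[0][1]
    winners = [intent for intent, c in counts.items() if c == best_count]
    return random.choice(winners)

print(combine_intents(["house", "rock", "house", "house"]))   # -> house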
4_Transformer.ipynb
###Markdown Hyperparameters ###Code BEST_PATH = './models/Transformer.h5' TRAINING_EPOCHS = 200 LEARNING_RATE = 0.002 EPSILON = 1e-06 BATCH_SIZE = 16 ###Output _____no_output_____ ###Markdown Data loading ###Code l = np.load('./results/2020_S/modeling_dataset.npz', allow_pickle=True) output_columns = l['output_columns'] input_indices = l['input_indices'] output_indices = l['output_indices'] input_data = l['input_data'] output_label = l['output_label'] INPUT_MAXS = l['INPUT_MAXS'] INPUT_MINS = l['INPUT_MINS'] OUTPUT_MAXS = l['OUTPUT_MAXS'] OUTPUT_MINS = l['OUTPUT_MINS'] input_data = input_data.astype('float32') output_label = output_label.astype('float32') input_indices = input_indices.reshape(input_data.shape[:2]) print(input_data.shape) print(output_label.shape) input_indices, output_indices, input_data, output_label = shuffle(input_indices, output_indices, input_data, output_label, random_state=3101) N_TRAIN = int(input_data.shape[0]*.7) train_input = input_data[:N_TRAIN, ...] train_label = output_label[:N_TRAIN, ...] train_indices = output_indices[:N_TRAIN] val_input = input_data[N_TRAIN:, ...] val_label = output_label[N_TRAIN:, ...] val_indices = output_indices[N_TRAIN:] print(f'number of training set: {train_input.shape[0]}') print(f'number of validation set: {val_input.shape[0]}') with strategy.scope(): train_dataset = tf.data.Dataset.from_tensor_slices((train_input, train_label)) train_dataset = train_dataset.cache().shuffle(BATCH_SIZE*10).batch(BATCH_SIZE, drop_remainder=False) val_dataset = tf.data.Dataset.from_tensor_slices((val_input, val_label)) val_dataset = val_dataset.cache().shuffle(BATCH_SIZE*10).batch(BATCH_SIZE, drop_remainder=False) print(train_input.shape) print(train_label.shape) ###Output _____no_output_____ ###Markdown Model construction ###Code class EmbeddingLayer(layers.Layer): def __init__(self, num_nodes): super(EmbeddingLayer, self).__init__() self.n = num_nodes self.dense = layers.Dense(self.n) self.norm = layers.LayerNormalization(epsilon=1e-6) def call(self, inp, is_train=True, **kwargs): inp = self.dense(inp) inp = self.norm(inp, training=is_train) return inp class EncoderBlock(layers.Layer): def __init__(self, num_nodes, num_heads): super(EncoderBlock, self).__init__() self.n = num_nodes self.h = num_heads self.d = self.n // self.h self.wq = layers.Dense(self.n) self.wk = layers.Dense(self.n) self.wv = layers.Dense(self.n) self.dropout = layers.Dropout(0.1) self.norm1 = layers.LayerNormalization(epsilon=1e-6) self.dense1 = layers.Dense(self.n, activation=tf.nn.relu) self.dense2 = layers.Dense(self.n) self.norm2 = layers.LayerNormalization(epsilon=1e-6) def head_maker(self, x, axis_1=2, axis_2=0): x = tf.concat(tf.split(x, self.h, axis=axis_1), axis=axis_2) return x def call(self, inp, is_train=True, **kwargs): Q = self.head_maker(self.wq(inp)) K = self.head_maker(self.wk(inp)) V = self.head_maker(self.wv(inp)) oup = tf.matmul(Q, tf.transpose(K, (0, 2, 1))) oup = oup / tf.math.sqrt(tf.cast(K.shape[-1], tf.float32)) oup = tf.nn.softmax(oup) oup = self.dropout(oup, training=is_train) oup = tf.matmul(oup, V) oup = self.head_maker(oup, 0, 2) oup += inp oup = self.norm1(oup, training=is_train) oup_ffnn = self.dense1(oup) oup_ffnn = self.dense2(oup_ffnn) oup += oup_ffnn oup = self.norm2(oup, training=is_train) return oup class TransformerLike(Model): def __init__(self, num_nodes, num_heads, num_layers): super(TransformerLike, self).__init__() self.n = num_nodes self.h = num_heads self.l = num_layers self.emb = EmbeddingLayer(self.n) self.encs = 
[EncoderBlock(self.n, self.h) for _ in range(self.l)] self.dense1 = layers.Dense(self.n, activation=tf.nn.relu) self.dense2 = layers.Dense(self.n, activation=tf.nn.relu) self.dense3 = layers.Dense(self.n, activation=tf.nn.relu) self.flatten = layers.Flatten() self.outdense = layers.Dense(11) def call(self, inp, is_train=True, **kwargs): inp = self.emb(inp) for i in range(self.l): inp = self.encs[i](inp, training=is_train) inp = self.dense1(inp) inp = self.dense2(inp) inp = self.dense3(inp) inp = self.outdense(self.flatten(inp)) return inp callbacks = tf.keras.callbacks.ReduceLROnPlateau( monitor='val_loss', factor=.5, patience=2, verbose=0, mode='min', min_delta=0.0001, cooldown=0, min_lr=0) save = tf.keras.callbacks.ModelCheckpoint( BEST_PATH, monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='min', save_freq='epoch') early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=20) with strategy.scope(): opt = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE, epsilon=EPSILON) model = TransformerLike(128, 4, 2) model.compile(optimizer=opt, loss='mae') model.fit(train_dataset, epochs=TRAINING_EPOCHS, validation_data=val_dataset, verbose=1, callbacks=[callbacks, save, early_stop]) model.load_weights(BEST_PATH) model.evaluate(val_dataset) pred_output = model.predict(val_input) pred_output = pred_output*(OUTPUT_MAXS - OUTPUT_MINS) + OUTPUT_MINS cal_val_label = val_label*(OUTPUT_MAXS - OUTPUT_MINS) + OUTPUT_MINS fig = plt.figure(figsize=((8.5/2.54*2), (6/2.54*2))) ax0 = plt.subplot() ax0.spines['right'].set_visible(False) ax0.spines['left'].set_position(('outward', 5)) ax0.spines['bottom'].set_position(('outward', 5)) ax0.plot(cal_val_label, pred_output, 'o', ms=5, mec='k', c=cmap[0]) ax0.set_ybound([0, 100]) ax0.set_xbound([0, 100]) fig.tight_layout() label_df = pd.DataFrame(cal_val_label, index=val_indices, columns=output_columns) label_df.index = pd.DatetimeIndex(label_df.index) pred_df = pd.DataFrame(pred_output, index=val_indices, columns=output_columns) pred_df.index = pd.DatetimeIndex(pred_df.index) fig = plt.figure(figsize=((8.5/2.54*2), (6/2.54*2))) ax0 = plt.subplot() ax0.spines['right'].set_visible(False) ax0.spines['left'].set_position(('outward', 5)) ax0.spines['bottom'].set_position(('outward', 5)) ax0.plot(pred_df.index, label_df['leaf_dw'], 'o', ms=5, mec='k', c=cmap[4]) ax0.plot(pred_df.index, pred_df['leaf_dw'], 'o', ms=5, mec='k', c=cmap[0]) fig.tight_layout() ###Output _____no_output_____ ###Markdown 예측을 해야함. 이전 값들을 받아들이는 형태로 개선해야해 ###Code label_df.to_csv('./results/model_output/transformer_label.csv') pred_df.to_csv('./results/model_output/transformer_pred.csv') ###Output _____no_output_____
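###Markdown As a sanity check on the attention arithmetic inside EncoderBlock, namely softmax(Q K^T / sqrt(d_k)) V, here is a NumPy-only sketch with made-up shapes. It mirrors the sequence of ops in the call method (matmul, scale, softmax, matmul) but is independent of the trained model above.
###Code
# Scaled dot-product attention with plain NumPy; shapes and values are illustrative.
import numpy as np

def scaled_dot_product_attention(Q, K, V):
    d_k = K.shape[-1]
    scores = Q @ K.transpose(0, 2, 1) / np.sqrt(d_k)          # (batch, q_len, k_len)
    scores -= scores.max(axis=-1, keepdims=True)              # numerical stability
    weights = np.exp(scores)
    weights /= weights.sum(axis=-1, keepdims=True)            # row-wise softmax
    return weights @ V                                        # (batch, q_len, d_v)

rng = np.random.default_rng(0)
Q = rng.normal(size=(1, 4, 8))    # batch=1, 4 query positions, d_k=8
K = rng.normal(size=(1, 6, 8))    # 6 key positions
V = rng.normal(size=(1, 6, 8))
print(scaled_dot_product_attention(Q, K, V).shape)            # (1, 4, 8)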
CRIM_Classify_Merge_Results.ipynb
###Markdown Merges the output of batch_classifyThis notebook contains csv files displaying batch_classify outputs and how it might be used ###Code from crim_intervals import * import pandas as pd import ast import matplotlib from itertools import tee, combinations import numpy as np from fractions import Fraction import re # Identify Files to be joined and read them file1 = "/Users/rfreedma/Documents/Python_Projects/CRIM-notebooks/CRIM/CRIM_Model_0019_generic_close_real.csv" file2 = "/Users/rfreedma/Documents/Python_Projects/CRIM-notebooks/CRIM/CRIM_Mass_0019_1_generic_close_real.csv" file3 = "/Users/rfreedma/Documents/Python_Projects/CRIM-notebooks/CRIM/CRIM_Mass_0019_2_generic_close_real.csv" file4 = "/Users/rfreedma/Documents/Python_Projects/CRIM-notebooks/CRIM/CRIM_Mass_0019_3_generic_close_real.csv" file5 = "/Users/rfreedma/Documents/Python_Projects/CRIM-notebooks/CRIM/CRIM_Mass_0019_4_generic_close_real.csv" file6 = "/Users/rfreedma/Documents/Python_Projects/CRIM-notebooks/CRIM/CRIM_Mass_0019_5_generic_close_real.csv" df1 = pd.read_csv(file1, skiprows=[1]).rename(columns={"Unnamed: 0": "entry_number"}) df2 = pd.read_csv(file2, skiprows=[1]).rename(columns={"Unnamed: 0": "entry_number"}) df3 = pd.read_csv(file3, skiprows=[1]).rename(columns={"Unnamed: 0": "entry_number"}) df4 = pd.read_csv(file4, skiprows=[1]).rename(columns={"Unnamed: 0": "entry_number"}) df5 = pd.read_csv(file5, skiprows=[1]).rename(columns={"Unnamed: 0": "entry_number"}) df6 = pd.read_csv(file6, skiprows=[1]).rename(columns={"Unnamed: 0": "entry_number"}) # Join them together. Option to Sort by Any Column Needed. # Also option to drop Singletons or filter by any Type # df3 = pd.concat([df1, df2, df3, df4, df5, df6], sort=True)[df1.columns].copy() df3 = df3.fillna("-") df3 = df3.sort_values(['start', "piece_title", 'pattern_generating_match']).copy() df3.drop(df3[df3['predicted_type'] == "Singleton"].index, inplace = True) df3.drop(columns=["entry_number"], inplace = True) df3 = df3.reset_index() df3.drop(columns="index", inplace = True) # df3 = df3[df3["predicted_type"] == "Fuga"] df3.drop_duplicates().copy() # df3.to_csv("merged.csv") df3["pattern_matched"] = df3.pattern_matched.apply(pd.eval).apply(tuple) df3["unique_titles_for_pattern"] = df3.groupby("pattern_matched").piece_title.transform(lambda group: group.nunique()) df4 = df3[df3.unique_titles_for_pattern > 0] df5 = df4.sort_values("pattern_matched") df5.head(50) df5.to_csv("Palestrina_Veni__combined.csv") ###Output _____no_output_____
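###Markdown The merge above lists the six CSV paths by hand. A small variation, sketched below, collects them with glob so that additional movements are picked up automatically; the directory and filename pattern are assumptions based on the paths used above.
###Code
# Sketch: gather all per-movement classifier outputs matching a filename pattern.
import glob
import pandas as pd

csv_dir = "/Users/rfreedma/Documents/Python_Projects/CRIM-notebooks/CRIM"
paths = sorted(glob.glob(f"{csv_dir}/CRIM_*_generic_close_real.csv"))

frames = [pd.read_csv(p, skiprows=[1]).rename(columns={"Unnamed: 0": "entry_number"})
          for p in paths]
merged = pd.concat(frames, sort=True)
print(f"{len(paths)} files, {len(merged)} rows")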
notebooks/galaxy_azel_plot.ipynb
###Markdown Plot the Galactic plane in az/el from a given location on a given date 1/7/22 jpw uses https://docs.astropy.org/en/stable/generated/examples/coordinates/plot_obs-planning.htmlsphx-glr-generated-examples-coordinates-plot-obs-planning-py ###Code import matplotlib.pyplot as plt import numpy as np import os, glob from astropy import units as u from astropy.time import Time from astropy.coordinates import SkyCoord, EarthLocation, AltAz, get_sun from datetime import datetime %matplotlib inline ###Output _____no_output_____ ###Markdown Enter your observing location here ###Code # enter your observing location and time difference relative to UT here # west longitudes are negative here = EarthLocation(lat=21.3036944*u.deg, lon=-157.8116667*u.deg, height=372*u.m) utcoffset = -10*u.hour # relative to Universal Time (= GMT) ###Output _____no_output_____ ###Markdown Set the local time and date here ###Code local_time = '2022-01-07 11:00' local_time = '' # but if you don't set it, e.g. local_time='', then use the time when you run the program if len(local_time) == 0: local_time = datetime.now() savefig = True fileout = 'Galaxy_AzEl_'+str(local_time).replace(' ','_').replace(':','')[:15]+'.png' t = Time(local_time, scale='utc') - utcoffset l = np.arange(-180,181,5) nl = l.size alt = np.zeros(nl) az = np.zeros(nl) for i in range(nl): c = SkyCoord(l[i], 0, frame='galactic', unit='deg') altaz = c.transform_to(AltAz(obstime=t, location=here)) #print(f"Alt/Az = {altaz.alt:.2}, {altaz.az:.2}") alt[i] = altaz.alt / u.deg az[i] = altaz.az / u.deg altaz_sun = get_sun(t).transform_to(AltAz(obstime=t, location=here)) fig = plt.figure(figsize=(12,7)) ax = fig.add_subplot(111) # sort in azimuth to stop wrap around k = np.argsort(az) ax.plot(az[k], alt[k], 'b-', lw=3) ax.plot(altaz_sun.az/u.deg, altaz_sun.alt/u.deg, color='orange', marker='*', ms=20) ax.set_xlim(0, 360) ax.set_ylim(0, 90) ax.set_xlabel('Azimuth (deg)') ax.set_ylabel('Elevation (deg)') ax.text( 3, 2, 'N', fontsize=20, fontweight='bold', ha='left') ax.text( 90, 2, 'E', fontsize=20, fontweight='bold', ha='center') ax.text(180, 2, 'S', fontsize=20, fontweight='bold', ha='center') ax.text(270, 2, 'W', fontsize=20, fontweight='bold', ha='center') ax.text(357, 2, 'N', fontsize=20, fontweight='bold', ha='right') # plot tick marks to show longitude lmarker = np.arange(-180,180,30) for l1 in lmarker: i = np.argwhere(np.abs(l-l1)<1)[0][0] ax.plot(az[i], alt[i], 'bo', ms=7) ax.text(az[i]+3, alt[i], str(l1), fontsize=12, color='blue', clip_on=True) if savefig: plt.savefig(fileout) print('Plot saved to ',fileout) ###Output Plot saved to Galaxy_AzEl_2022-01-21_1413.png
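###Markdown The loop above calls transform_to once per longitude sample. Astropy coordinate transforms also accept arrays, so the same alt/az values can be computed in one call. The sketch below reuses the observing location here and the time t defined in the cells above.
###Code
# Sketch: vectorised alt/az of the Galactic plane (b = 0) for all longitudes at once.
import numpy as np
from astropy.coordinates import SkyCoord, AltAz

l = np.arange(-180, 181, 5)
galactic_plane = SkyCoord(l, np.zeros_like(l), frame='galactic', unit='deg')
altaz = galactic_plane.transform_to(AltAz(obstime=t, location=here))  # t, here from earlier cells
alt = altaz.alt.deg
az = altaz.az.deg
print(az.shape, alt.shape)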
jupyterhub/notebooks/visualization/09-B-Widget Events 2 -- Separating Concerns.ipynb
###Markdown Separating the logic from the widgetsA key principle in designing a graphical user interface is to separate the logic of an application from the graphical widgets the user sees. For example, in the super-simple password generator widget, the basic logic is to construct a sequence of random letters given the length. Let's isolate that logic in a function, without any widgets. This function takes a password length and returns a generated password string. ###Code def calculate_password(length): import string import secrets # Gaenerate a list of random letters of the correct length. password = ''.join(secrets.choice(string.ascii_letters) for _ in range(length)) return password ###Output _____no_output_____ ###Markdown Test out the function a couple times in the cell below with different lengths. Note that unlike our first pass through this, you can test this function without defining any widgets. This means you can write tests for just the logic, use the function as part of a library, etc. ###Code calculate_password(10) ###Output _____no_output_____ ###Markdown The Graphical ControlsThe code to build the graphical user interface widgets is the same as the previous iteration. ###Code helpful_title = widgets.HTML('Generated password is:') password_text = widgets.HTML('No password yet') password_text.layout.margin = '0 0 0 20px' password_length = widgets.IntSlider(description='Length of password', min=8, max=20, style={'description_width': 'initial'}) password_widget = widgets.VBox(children=[helpful_title, password_text, password_length]) password_widget ###Output _____no_output_____ ###Markdown Connecting the logic to the widgetsWhen the slider `password_length` changes, we want to call `calculate_password` to come up with a new password, and set the value of the widget `password` to the return value of the function call.`update_password` takes the change from the `password_length` as its argument and sets the `password_text` with the result of `calculate_password`. ###Code def update_password(change): length = int(change.new) new_password = calculate_password(length) # NOTE THE LINE BELOW: it relies on the password widget already being defined. password_text.value = new_password password_length.observe(update_password, names='value') ###Output _____no_output_____ ###Markdown Now that the connection is made, try moving the slider and you should see the password update. ###Code password_widget ###Output _____no_output_____ ###Markdown Benefits of separating concernsSome advantages of this approach are:+ Changes in `ipywidgets` only affect your controls setup.+ Changes in functional logic only affect your password generation function. If you decide that a password with only letters isn't secure enough and decide to add some numbers and/or special characters, the only code you need to change is in the `calculate_password` function.+ You can write unit tests for your `calculate_password` function -- which is where the important work is being done -- without doing in-browser testing of the graphical controls. Using interactNote that using interact to build this GUI also emphasizes the separation between the logic and the controls. However, interact also is much more opinionated about how the controls are laid out: controls are in a vbox above the output of the function. Often this is great for a quick initial GUI, but is restrictive for more complex GUIs. 
###Code from ipywidgets import interact from IPython.display import display interact(calculate_password, length=(8, 20)); ###Output _____no_output_____ ###Markdown We can make the interact a bit nicer by printing the result, rather than just returning the string. This time we use `interact` as a decorator. ###Code @interact(length=(8, 20)) def print_password(length): print(calculate_password(length)) ###Output _____no_output_____
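###Markdown The benefits list above points out that calculate_password can be unit-tested without any widgets. Here is a minimal sketch of such a test, runnable inside the notebook; it assumes calculate_password from the earlier cell is already defined.
###Code
# Sketch: logic-only tests for calculate_password using the standard unittest module.
import string
import unittest

class TestCalculatePassword(unittest.TestCase):
    def test_length_and_charset(self):
        for length in (8, 12, 20):
            pw = calculate_password(length)
            self.assertEqual(len(pw), length)
            self.assertTrue(all(ch in string.ascii_letters for ch in pw))

# exit=False keeps unittest from trying to shut down the notebook kernel.
unittest.main(argv=['ignored'], exit=False)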
Assignment_4_Palencia.ipynb
###Markdown Linear Algebra for CpE Laboratory 7 : Matrix Operations ###Code import numpy as np import matplotlib.pyplot as plt %matplotlib inline ###Output _____no_output_____ ###Markdown TranspositionOne of the fundamental operations in matrix algebra is Transposition. The transpose of a matrix is done by flipping the values of its elements over its diagonals. With this, the rows and columns from the original matrix will be switched. So for a matrix $A$ its transpose is denoted as $A^T$. So for example: ###Code A = np.array([ [1 ,0, 5], [5, -16, 9], [0, -3, 13] ]) A AT1 = np.transpose(A) AT2 = A.T np.array_equiv(AT1, AT2) B = np.array([ [1,-2,13,14], [11,9,2,10], ]) B.shape np.transpose(B).shape ###Output _____no_output_____ ###Markdown Dot Product / Inner ProductIf you recall the dot product from laboratory activity before, we will try to implement the same operation with matrices. In matrix dot product we are going to get the sum of products of the vectors by row-column pairs. ###Code X = np.array([ [9,6], [0,7] ]) Y = np.array([ [-9,0], [12,9] ]) np.dot(X,Y) X.dot(Y) X @ Y np.matmul(X,Y) ###Output _____no_output_____ ###Markdown Rule 1: The inner dimensions of the two matrices in question must be the same. ###Code A = np.array([ [9, -4], [15, -2], [0, 5] ]) B = np.array([ [10,19], [13,-3], [-14,-2] ]) C = np.array([ [9,11,9], [1,9,-2] ]) print(A.shape) print(B.shape) print(C.shape) A @ C B @ C A @ B.T X = np.array([ [1,12,9,10] ]) Y = np.array([ [12,7,14,-1] ]) print(X.shape) print(Y.shape) Y.T @ X ###Output _____no_output_____ ###Markdown Rule 2: Dot Product has special properties ###Code A = np.array([ [3,22,1], [14,5,11], [1,1,0] ]) B = np.array([ [4,10,6], [14,1,9], [1,-4,8] ]) C = np.array([ [1,1,0], [0,1,1], [1,0,1] ]) A.dot(np.zeros(A.shape)) z_mat = np.zeros(A.shape) z_mat a_dot_z = A.dot(np.zeros(A.shape)) a_dot_z np.array_equal(a_dot_z,z_mat) null_mat = np.empty(A.shape, dtype=float) null = np.array(null_mat,dtype=float) print(null) np.allclose(a_dot_z,null) ###Output [[0. 0. 0.] [0. 0. 0.] [0. 0. 0.]] ###Markdown DeterminantA determinant is a scalar value derived from a square matrix. The determinant is a fundamental and important value used in matrix algebra. ###Code A = np.array([ [1,8], [10,3] ]) np.linalg.det(A) B = np.array([ [1,13,15,6], [9,3,10,3], [3,16,8,2], [5,12,6,3] ]) np.linalg.det(B) ###Output _____no_output_____ ###Markdown InverseThe inverse of a matrix is another fundamental operation in matrix algebra.Inverse matrices provides a related operation that could have the same concept of "dividing" matrices. ###Code M = np.array([ [1,7], [-3, 5] ]) np.array(M @ np.linalg.inv(M), dtype=int) N = np.array([ [18,5,23,1,0,33,5], [0,45,0,11,2,4,2], [5,9,20,10,9,4,3], [1,6,4,4,8,43,11], [8,6,8,7,1,6,11], [-5,15,2,10,10,16,-30], [-2,-5,1,2,1,20,12], ]) N_inv = np.linalg.inv(N) np.array(N @ N_inv,dtype=int) squad = np.array([ [1.0, 1.0, 0.5], [0.7, 0.7, 0.9], [0.3, 0.3, 1.0] ]) weights = np.array([ [0.2, 0.2, 0.6] ]) p_grade = squad @ weights.T p_grade ###Output _____no_output_____
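###Markdown Two quick numerical checks on the ideas above, using the same matrices from the determinant and inverse sections: the 2x2 determinant formula ad - bc agrees with np.linalg.det, and a matrix times its inverse is the identity up to floating-point error.
###Code
# Sketch: hand-computed determinant vs. np.linalg.det, and M @ inv(M) close to I.
import numpy as np

A = np.array([[1, 8],
              [10, 3]])
by_hand = A[0, 0] * A[1, 1] - A[0, 1] * A[1, 0]      # ad - bc = 3 - 80 = -77
print(by_hand, np.linalg.det(A))                     # -77 and -77.0 (up to rounding)

M = np.array([[1, 7],
              [-3, 5]])
print(np.allclose(M @ np.linalg.inv(M), np.eye(2)))  # True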
PyPoll/Untitled.ipynb
###Markdown In this code a set of pull data will read and analyzed as follows:- The total number of votes cast- complete list of candidates who received votes- The percentage of votes each candidate won- The total number of votes each candidate won- The winner of the election based on popular voteFinally, the results will be exported to text file Import packages and read input file ###Code import os import csv # Path to collect data from the Resources folder Csvpath = os.path.join("Resources/","election_data.csv") ###Output _____no_output_____ ###Markdown Evaluation ###Code # with open as csvfile: with open(Csvpath,'r') as csvfile: csvreader = csv.reader(csvfile, delimiter=',') next(csvreader, None) # skip the header total_vote = 0 # Initialize the variables candidate_list=[] #create the list of candidate candidate_vote=[] #create the list of canditates vote #Loop through each row of data for row in csvreader: total_vote+=1 #caluclate the total number of votes if row[2] in candidate_list: index_candidate=candidate_list.index(row[2]) candidate_vote[index_candidate]+=1 else: candidate_list.append(row[2]) candidate_vote.append(1) winner_index=candidate_vote.index(max(candidate_vote)) sum_list = sum(candidate_vote) ###Output _____no_output_____ ###Markdown write results to console and text file ###Code print("Election Results") print("----------------------------------------------------------") print(f'Total Votes:{total_vote}') print("------------------------------------------------------------") for i in range(4): print(f'{candidate_list[i]}:{candidate_vote[i]/sum_list*100}% ({candidate_vote[i]})') print("------------------------------------------------------------") print(f'winner: {candidate_list[winner_index]}') print("------------------------------------------------------------") output_file=open("PyPoll.txt",'w') output_file.write("Election Results") output_file.write(f'\nTotal Votes:{total_vote}\n') for i in range(4): output_file.write(f'{candidate_list[i]}:{candidate_vote[i]/sum_list*100}% ({candidate_vote[i]})\n') output_file.write(f'winner: {candidate_list[winner_index]}') output_file.close() ###Output Election Results ---------------------------------------------------------- Total Votes:3521001 ------------------------------------------------------------ Khan:63.00001050837531% (2218231) Correy:19.999994319797125% (704200) Li:13.999996023857989% (492940) O'Tooley:2.999999147969569% (105630) ------------------------------------------------------------ winner: Khan ------------------------------------------------------------
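###Markdown The tally above keeps two parallel lists and hard-codes range(4) for the number of candidates. As an alternative sketch (same CSV layout assumed, with the candidate name in column index 2), collections.Counter does the counting and removes the fixed candidate count.
###Code
# Sketch: vote tally with collections.Counter instead of parallel lists.
import os
import csv
from collections import Counter

csvpath = os.path.join("Resources/", "election_data.csv")
votes = Counter()
with open(csvpath, 'r') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    next(reader, None)                      # skip the header row
    for row in reader:
        votes[row[2]] += 1

total = sum(votes.values())
print(f"Total Votes: {total}")
for candidate, count in votes.most_common():
    print(f"{candidate}: {count / total:.3%} ({count})")
print(f"Winner: {votes.most_common(1)[0][0]}")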
updates/cloudml/flights_model.ipynb
###Markdown Developing, Training, and Deploying a TensorFlow model on Google Cloud Platform (completely within Jupyter)In Chapter 9 of [Data Science on the Google Cloud Platform](http://shop.oreilly.com/product/0636920057628.do), I trained a TensorFlow Estimator model to predict flight delays.In this notebook, we'll modernize the workflow:* Use eager mode for TensorFlow development* Use tf.data to write the input pipeline* Run the notebook as-is on Cloud using Deep Learning VM or Kubeflow pipelines* Deploy the trained model to Cloud ML Engine as a web serviceThe combination of eager mode, tf.data and DLVM/KFP makes this workflow a lot easier.We don't need to deal with Python packages or Docker containers. ###Code # change these to try this notebook out # In "production", these will be replaced by the parameters passed to papermill BUCKET = 'cloud-training-demos-ml' PROJECT = 'cloud-training-demos' REGION = 'us-central1' DEVELOP_MODE = True EAGER_MODE = False NBUCKETS = 5 import os os.environ['BUCKET'] = BUCKET os.environ['PROJECT'] = PROJECT os.environ['REGION'] = REGION %%bash gcloud config set project $PROJECT gcloud config set compute/region $REGION ###Output Updated property [core/project]. Updated property [compute/region]. ###Markdown Creating the input data pipeline ###Code DATA_BUCKET = "gs://cloud-training-demos/flights/chapter8/output/" TRAIN_DATA_PATTERN = DATA_BUCKET + "train*" VALID_DATA_PATTERN = DATA_BUCKET + "test*" !gsutil ls $DATA_BUCKET ###Output gs://cloud-training-demos/flights/chapter8/output/delays.csv gs://cloud-training-demos/flights/chapter8/output/testFlights-00000-of-00007.csv gs://cloud-training-demos/flights/chapter8/output/testFlights-00001-of-00007.csv gs://cloud-training-demos/flights/chapter8/output/testFlights-00002-of-00007.csv gs://cloud-training-demos/flights/chapter8/output/testFlights-00003-of-00007.csv gs://cloud-training-demos/flights/chapter8/output/testFlights-00004-of-00007.csv gs://cloud-training-demos/flights/chapter8/output/testFlights-00005-of-00007.csv gs://cloud-training-demos/flights/chapter8/output/testFlights-00006-of-00007.csv gs://cloud-training-demos/flights/chapter8/output/trainFlights-00000-of-00007.csv gs://cloud-training-demos/flights/chapter8/output/trainFlights-00001-of-00007.csv gs://cloud-training-demos/flights/chapter8/output/trainFlights-00002-of-00007.csv gs://cloud-training-demos/flights/chapter8/output/trainFlights-00003-of-00007.csv gs://cloud-training-demos/flights/chapter8/output/trainFlights-00004-of-00007.csv gs://cloud-training-demos/flights/chapter8/output/trainFlights-00005-of-00007.csv gs://cloud-training-demos/flights/chapter8/output/trainFlights-00006-of-00007.csv ###Markdown Use tf.data to read the CSV filesNote the use of Eager mode to develop the code. 
I turn off eager mode once I get to the next section ###Code import os, json, math import numpy as np import tensorflow as tf print("Tensorflow version " + tf.__version__) if DEVELOP_MODE and EAGER_MODE: print("Enabling Eager mode for development only") tf.enable_eager_execution() else: EAGER_MODE = False CSV_COLUMNS = ('ontime,dep_delay,taxiout,distance,avg_dep_delay,avg_arr_delay' + \ ',carrier,dep_lat,dep_lon,arr_lat,arr_lon,origin,dest').split(',') LABEL_COLUMN = 'ontime' DEFAULTS = [[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],\ ['na'],[0.0],[0.0],[0.0],[0.0],['na'],['na']] def decode_csv(line): column_values = tf.decode_csv(line, DEFAULTS) column_names = CSV_COLUMNS decoded_line = dict(zip(column_names, column_values)) # create a dictionary {'column_name': value, ...} for each line return decoded_line def load_dataset(pattern): filenames = tf.data.Dataset.list_files(pattern) dataset = filenames.interleave(tf.data.TextLineDataset, cycle_length=16) dataset = dataset.map(decode_csv) return dataset if EAGER_MODE: dataset = load_dataset(TRAIN_DATA_PATTERN) for n, data in enumerate(dataset): numpy_data = {k: v.numpy() for k, v in data.items()} # .numpy() works only in eager mode print(numpy_data) if n>3: break %%writefile example_input.json {"dep_delay": 14.0, "taxiout": 13.0, "distance": 319.0, "avg_dep_delay": 25.863039, "avg_arr_delay": 27.0, "carrier": "WN", "dep_lat": 32.84722, "dep_lon": -96.85167, "arr_lat": 31.9425, "arr_lon": -102.20194, "origin": "DAL", "dest": "MAF"} {"dep_delay": -9.0, "taxiout": 21.0, "distance": 301.0, "avg_dep_delay": 41.050808, "avg_arr_delay": -7.0, "carrier": "EV", "dep_lat": 29.984444, "dep_lon": -95.34139, "arr_lat": 27.544167, "arr_lon": -99.46167, "origin": "IAH", "dest": "LRD"} def features_and_labels(features): label = features.pop('ontime') # this is what we will train for return features, label def prepare_dataset(pattern, batch_size, truncate=None, mode=tf.estimator.ModeKeys.TRAIN): dataset = load_dataset(pattern) dataset = dataset.map(features_and_labels) dataset = dataset.cache() if mode == tf.estimator.ModeKeys.TRAIN: dataset = dataset.shuffle(1000) dataset = dataset.repeat() dataset = dataset.batch(batch_size) dataset = dataset.prefetch(10) if truncate is not None: dataset = dataset.take(truncate) return dataset if EAGER_MODE: print("Calling prepare") one_item = prepare_dataset(TRAIN_DATA_PATTERN, batch_size=2, truncate=1) print(list(one_item)) # should print one batch of 2 items ###Output _____no_output_____ ###Markdown Create TensorFlow wide-and-deep modelWe'll create feature columns, and do some discretization and feature engineering.See the book for details. 
###Code import tensorflow.feature_column as fc real = { colname : fc.numeric_column(colname) \ for colname in \ ('dep_delay,taxiout,distance,avg_dep_delay,avg_arr_delay' + ',dep_lat,dep_lon,arr_lat,arr_lon').split(',') } sparse = { 'carrier': fc.categorical_column_with_vocabulary_list('carrier', vocabulary_list='AS,VX,F9,UA,US,WN,HA,EV,MQ,DL,OO,B6,NK,AA'.split(',')), 'origin' : fc.categorical_column_with_hash_bucket('origin', hash_bucket_size=1000), 'dest' : fc.categorical_column_with_hash_bucket('dest', hash_bucket_size=1000) } ###Output _____no_output_____ ###Markdown Feature engineering ###Code latbuckets = np.linspace(20.0, 50.0, NBUCKETS).tolist() # USA lonbuckets = np.linspace(-120.0, -70.0, NBUCKETS).tolist() # USA disc = {} disc.update({ 'd_{}'.format(key) : fc.bucketized_column(real[key], latbuckets) \ for key in ['dep_lat', 'arr_lat'] }) disc.update({ 'd_{}'.format(key) : fc.bucketized_column(real[key], lonbuckets) \ for key in ['dep_lon', 'arr_lon'] }) # cross columns that make sense in combination sparse['dep_loc'] = fc.crossed_column([disc['d_dep_lat'], disc['d_dep_lon']], NBUCKETS*NBUCKETS) sparse['arr_loc'] = fc.crossed_column([disc['d_arr_lat'], disc['d_arr_lon']], NBUCKETS*NBUCKETS) sparse['dep_arr'] = fc.crossed_column([sparse['dep_loc'], sparse['arr_loc']], NBUCKETS ** 4) sparse['ori_dest'] = fc.crossed_column(['origin', 'dest'], hash_bucket_size=1000) # embed all the sparse columns embed = { colname : fc.embedding_column(col, 10) \ for colname, col in sparse.items() } real.update(embed) if DEVELOP_MODE: print(sparse.keys()) print(real.keys()) ###Output dict_keys(['carrier', 'origin', 'dest', 'dep_loc', 'arr_loc', 'dep_arr', 'ori_dest']) dict_keys(['dep_delay', 'taxiout', 'distance', 'avg_dep_delay', 'avg_arr_delay', 'dep_lat', 'dep_lon', 'arr_lat', 'arr_lon', 'carrier', 'origin', 'dest', 'dep_loc', 'arr_loc', 'dep_arr', 'ori_dest']) ###Markdown ServingThis serving input function is how the model will be deployed for prediction. 
We require these fields for prediction ###Code def serving_input_fn(): feature_placeholders = { # All the real-valued columns column: tf.placeholder(tf.float32, [None]) \ for column in ('dep_delay,taxiout,distance,avg_dep_delay,avg_arr_delay' + ',dep_lat,dep_lon,arr_lat,arr_lon').split(',') } feature_placeholders.update({ column: tf.placeholder(tf.string, [None]) for column in ['carrier', 'origin', 'dest'] }) features = feature_placeholders # no transformations return tf.estimator.export.ServingInputReceiver(features, feature_placeholders) ###Output _____no_output_____ ###Markdown Train the model and evaluate once in a whileAlso checkpoint ###Code model_dir='gs://{}/flights/trained_model'.format(BUCKET) os.environ['OUTDIR'] = model_dir # needed for deployment print('Writing trained model to {}'.format(model_dir)) !gsutil -m rm -rf $OUTDIR estimator = tf.estimator.DNNLinearCombinedClassifier( model_dir = model_dir, linear_feature_columns = sparse.values(), dnn_feature_columns = real.values(), dnn_hidden_units = [64, 32] ) train_batch_size = 64 train_input_fn = lambda: prepare_dataset(TRAIN_DATA_PATTERN, train_batch_size).make_one_shot_iterator().get_next() eval_batch_size = 100 if DEVELOP_MODE else 10000 eval_input_fn = lambda: prepare_dataset(VALID_DATA_PATTERN, eval_batch_size, eval_batch_size*10, tf.estimator.ModeKeys.EVAL).make_one_shot_iterator().get_next() num_steps = 10 if DEVELOP_MODE else (1000000 // train_batch_size) train_spec = tf.estimator.TrainSpec(train_input_fn, max_steps = num_steps) exporter = tf.estimator.LatestExporter('exporter', serving_input_fn) eval_spec = tf.estimator.EvalSpec(eval_input_fn, steps=10, exporters=exporter) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) ###Output INFO:tensorflow:Using default config. INFO:tensorflow:Using config: {'_model_dir': 'gs://cloud-training-demos-ml/flights/trained_model', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f55912ec3c8>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1} INFO:tensorflow:Running training and evaluation locally (non-distributed). INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after every checkpoint. Checkpoint frequency is determined based on RunConfig arguments: save_checkpoints_steps None or save_checkpoints_secs 600. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Create CheckpointSaverHook. INFO:tensorflow:Graph was finalized. INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Saving checkpoints for 0 into gs://cloud-training-demos-ml/flights/trained_model/model.ckpt. INFO:tensorflow:loss = 5011.7, step = 1 INFO:tensorflow:Saving checkpoints for 10 into gs://cloud-training-demos-ml/flights/trained_model/model.ckpt. INFO:tensorflow:Calling model_fn. WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to "careful_interpolation" instead. WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to "careful_interpolation" instead. 
INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2018-11-30-04:51:33 INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from gs://cloud-training-demos-ml/flights/trained_model/model.ckpt-10 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [1/10] INFO:tensorflow:Evaluation [2/10] INFO:tensorflow:Evaluation [3/10] INFO:tensorflow:Evaluation [4/10] INFO:tensorflow:Evaluation [5/10] INFO:tensorflow:Evaluation [6/10] INFO:tensorflow:Evaluation [7/10] INFO:tensorflow:Evaluation [8/10] INFO:tensorflow:Evaluation [9/10] INFO:tensorflow:Evaluation [10/10] INFO:tensorflow:Finished evaluation at 2018-11-30-04:51:38 INFO:tensorflow:Saving dict for global step 10: accuracy = 0.234, accuracy_baseline = 0.766, auc = 0.5, auc_precision_recall = 0.883, average_loss = 41.49751, global_step = 10, label/mean = 0.766, loss = 4149.751, precision = 0.0, prediction/mean = 7.736658e-13, recall = 0.0 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 10: gs://cloud-training-demos-ml/flights/trained_model/model.ckpt-10 INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Signatures INCLUDED in export for Classify: None INFO:tensorflow:Signatures INCLUDED in export for Regress: None INFO:tensorflow:Signatures INCLUDED in export for Predict: ['predict'] INFO:tensorflow:Signatures INCLUDED in export for Train: None INFO:tensorflow:Signatures INCLUDED in export for Eval: None INFO:tensorflow:Signatures EXCLUDED from export because they cannot be be served via TensorFlow Serving APIs: INFO:tensorflow:'serving_default' : Classification input must be a single string Tensor; got {'dep_delay': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=float32>, 'taxiout': <tf.Tensor 'Placeholder_1:0' shape=(?,) dtype=float32>, 'distance': <tf.Tensor 'Placeholder_2:0' shape=(?,) dtype=float32>, 'avg_dep_delay': <tf.Tensor 'Placeholder_3:0' shape=(?,) dtype=float32>, 'avg_arr_delay': <tf.Tensor 'Placeholder_4:0' shape=(?,) dtype=float32>, 'dep_lat': <tf.Tensor 'Placeholder_5:0' shape=(?,) dtype=float32>, 'dep_lon': <tf.Tensor 'Placeholder_6:0' shape=(?,) dtype=float32>, 'arr_lat': <tf.Tensor 'Placeholder_7:0' shape=(?,) dtype=float32>, 'arr_lon': <tf.Tensor 'Placeholder_8:0' shape=(?,) dtype=float32>, 'carrier': <tf.Tensor 'Placeholder_9:0' shape=(?,) dtype=string>, 'origin': <tf.Tensor 'Placeholder_10:0' shape=(?,) dtype=string>, 'dest': <tf.Tensor 'Placeholder_11:0' shape=(?,) dtype=string>} INFO:tensorflow:'classification' : Classification input must be a single string Tensor; got {'dep_delay': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=float32>, 'taxiout': <tf.Tensor 'Placeholder_1:0' shape=(?,) dtype=float32>, 'distance': <tf.Tensor 'Placeholder_2:0' shape=(?,) dtype=float32>, 'avg_dep_delay': <tf.Tensor 'Placeholder_3:0' shape=(?,) dtype=float32>, 'avg_arr_delay': <tf.Tensor 'Placeholder_4:0' shape=(?,) dtype=float32>, 'dep_lat': <tf.Tensor 'Placeholder_5:0' shape=(?,) dtype=float32>, 'dep_lon': <tf.Tensor 'Placeholder_6:0' shape=(?,) dtype=float32>, 'arr_lat': <tf.Tensor 'Placeholder_7:0' shape=(?,) dtype=float32>, 'arr_lon': <tf.Tensor 'Placeholder_8:0' shape=(?,) dtype=float32>, 'carrier': <tf.Tensor 'Placeholder_9:0' shape=(?,) dtype=string>, 'origin': <tf.Tensor 'Placeholder_10:0' shape=(?,) dtype=string>, 'dest': <tf.Tensor 'Placeholder_11:0' shape=(?,) dtype=string>} INFO:tensorflow:'regression' : Regression input must be a single string Tensor; got {'dep_delay': 
<tf.Tensor 'Placeholder:0' shape=(?,) dtype=float32>, 'taxiout': <tf.Tensor 'Placeholder_1:0' shape=(?,) dtype=float32>, 'distance': <tf.Tensor 'Placeholder_2:0' shape=(?,) dtype=float32>, 'avg_dep_delay': <tf.Tensor 'Placeholder_3:0' shape=(?,) dtype=float32>, 'avg_arr_delay': <tf.Tensor 'Placeholder_4:0' shape=(?,) dtype=float32>, 'dep_lat': <tf.Tensor 'Placeholder_5:0' shape=(?,) dtype=float32>, 'dep_lon': <tf.Tensor 'Placeholder_6:0' shape=(?,) dtype=float32>, 'arr_lat': <tf.Tensor 'Placeholder_7:0' shape=(?,) dtype=float32>, 'arr_lon': <tf.Tensor 'Placeholder_8:0' shape=(?,) dtype=float32>, 'carrier': <tf.Tensor 'Placeholder_9:0' shape=(?,) dtype=string>, 'origin': <tf.Tensor 'Placeholder_10:0' shape=(?,) dtype=string>, 'dest': <tf.Tensor 'Placeholder_11:0' shape=(?,) dtype=string>} WARNING:tensorflow:Export includes no default signature! INFO:tensorflow:Restoring parameters from gs://cloud-training-demos-ml/flights/trained_model/model.ckpt-10 INFO:tensorflow:Assets added to graph. INFO:tensorflow:No assets to write. INFO:tensorflow:SavedModel written to: gs://cloud-training-demos-ml/flights/trained_model/export/exporter/temp-b'1543553501'/saved_model.pb INFO:tensorflow:Loss for final step: 2781.2466. ###Markdown Deploy the trained model ###Code %%bash model_dir=$(gsutil ls ${OUTDIR}/export/exporter | tail -1) echo $model_dir saved_model_cli show --dir ${model_dir} --all %%bash MODEL_NAME="flights" MODEL_VERSION="kfp" TFVERSION="1.10" MODEL_LOCATION=$(gsutil ls ${OUTDIR}/export/exporter | tail -1) echo "Run these commands one-by-one (the very first time, you'll create a model and then create a version)" yes | gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME} #gcloud ml-engine models delete ${MODEL_NAME} #gcloud ml-engine models create ${MODEL_NAME} --regions $REGION gcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version $TFVERSION !gcloud ml-engine predict --model=flights --version=kfp --json-instances=example_input.json ###Output CLASS_IDS CLASSES LOGISTIC LOGITS PROBABILITIES [0] [u'0'] [4.0148590767522937e-17] [-37.753944396972656] [1.0, 4.0148587458800486e-17] [0] [u'0'] [9.958989877342436e-18] [-39.14805603027344] [1.0, 9.958989877342436e-18]
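###Markdown A small arithmetic check on the prediction output above: the LOGISTIC column is simply the sigmoid of LOGITS, and PROBABILITIES holds [1 - p, p]. Reproducing the first row's value:
###Code
# Sketch: sigmoid(logit) reproduces the LOGISTIC value returned by the deployed model.
import math

logit = -37.753944396972656          # LOGITS from the first prediction row above
p_ontime = 1.0 / (1.0 + math.exp(-logit))
print(p_ontime)                      # ~4.01e-17, matching LOGISTIC and PROBABILITIES[1]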
src/main/python/workshops/WorkshopSolutionTwo.ipynb
###Markdown Punto 1 ###Code from sklearn.cluster import KMeans import matplotlib.pyplot as plt import numpy as np def distance(a, b): d = (a[0] - b[0])**2 + (a[1] - b[1])**2 return d C1 = [3, 4] C2 = [7, 2] for i in range(3): cluster_1 = [] cluster_2 = [] cluster_teams = [] X = [[1, 2], [3, 4], [4, 3], [5, 2], [5, 3], [6, 4], [5, 5], [-1, 5]] cluster_centroide = X # asignacion for x in X: if distance(x, C1) < distance(x, C2): cluster_1.append(x) cluster_teams.append(1) else: cluster_2.append(x) cluster_teams.append(2) if i == 0: print("Valores iniciales") else: print("\nActualicación " + str(i)) print("Centroide 1: " + str(C1)) print("Centroide 2: " + str(C2)) cluster_centroide.append(C1) cluster_teams.append(0) cluster_centroide.append(C2) cluster_teams.append(0) fig, ax = plt.subplots() ndarray = np.array(cluster_centroide) ax.scatter(x=ndarray[:,0], y=ndarray[:,1], c=np.array(cluster_teams)) if i == 0: plt.title("Asignación inicial de clusters con los valores de los centroides iniciales") plt.show() # actualizacion C1 = np.mean(cluster_1, axis = 0) C2 = np.mean(cluster_2, axis = 0) ###Output Valores iniciales Centroide 1: [3, 4] Centroide 2: [7, 2] ###Markdown Punto 2 ###Code import numpy as np from sklearn.cluster import DBSCAN from sklearn import metrics from sklearn.datasets import make_blobs from sklearn.preprocessing import StandardScaler centers = [[3, 4], [7, 2]] X = np.array([[1, 2], [3, 4], [4, 3], [5, 2], [5, 3], [6, 4], [5, 5], [-1, 5]]) X = StandardScaler().fit_transform(X) params = [[1, 1], [1, 2], [1.5, 1], [1.5, 2], [3, 1], [3, 2]] for param in params: db = DBSCAN(eps = param[0], min_samples = param[1]).fit(X) core_samples_mask = np.zeros_like(db.labels_, dtype=bool) core_samples_mask[db.core_sample_indices_] = True labels = db.labels_ n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) n_noise_ = list(labels).count(-1) print("\nEps: " + str(param[0]) + " MinPts: " + str(param[1])) print("Número estimado de clusters: %d" % n_clusters_) print("Número estimado de puntos de ruido: %d" % n_noise_) import matplotlib.pyplot as plt unique_labels = set(labels) colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))] for k, col in zip(unique_labels, colors): if k == -1: col = [0, 0, 0, 1] class_member_mask = labels == k xy = X[class_member_mask & core_samples_mask] plt.plot( xy[:, 0], xy[:, 1], "o", markerfacecolor=tuple(col), markeredgecolor="k", markersize=14, ) xy = X[class_member_mask & ~core_samples_mask] plt.plot( xy[:, 0], xy[:, 1], "o", markerfacecolor=tuple(col), markeredgecolor="k", markersize=6, ) plt.title("Número estimado de clusters: %d" % n_clusters_) plt.show() ###Output Eps: 1 MinPts: 1 Número estimado de clusters: 3 Número estimado de puntos de ruido: 0 ###Markdown Punto 3 ###Code import numpy as np from matplotlib import pyplot as plt from scipy.cluster.hierarchy import dendrogram from sklearn.cluster import AgglomerativeClustering def plot_dendrogram(model, **kwargs): counts = np.zeros(model.children_.shape[0]) n_samples = len(model.labels_) for i, merge in enumerate(model.children_): current_count = 0 for child_idx in merge: if child_idx < n_samples: current_count += 1 else: current_count += counts[child_idx - n_samples] counts[i] = current_count linkage_matrix = np.column_stack( [model.children_, model.distances_, counts] ).astype(float) print(linkage_matrix) dendrogram(linkage_matrix, **kwargs) X = np.array([[1, 2], [3, 4], [4, 3], [5, 2], [5, 3], [6, 4], [5, 5], [-1, 5]]) modelComplete = 
AgglomerativeClustering(distance_threshold = 0, n_clusters = None, linkage = "complete") modelSingle = AgglomerativeClustering(distance_threshold = 0, n_clusters = None, linkage = "single") modelComplete = modelComplete.fit(X[1:8,0:2]) modelSingle = modelSingle.fit(X[1:8,0:2]) plt.title("Dendrograma usando la distancia Euclidea con estrategia completa") plot_dendrogram(modelComplete, truncate_mode = "level", p = 3) plt.xlabel("Number of points in node (or index of point if no parenthesis).") plt.show() print("\n") plt.title("Dendrograma usando la distancia Euclidea con estrategia simple") plot_dendrogram(modelSingle, truncate_mode = "level", p = 3) plt.xlabel("Number of points in node (or index of point if no parenthesis).") plt.show() ###Output [[ 1. 3. 1. 2. ] [ 2. 7. 1.41421356 3. ] [ 4. 5. 1.41421356 2. ] [ 0. 8. 2.82842712 4. ] [ 9. 10. 3. 6. ] [ 6. 11. 7.07106781 7. ]]
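###Markdown To complement the dendrograms, a flat clustering can be extracted by cutting the merge tree at a chosen distance. A minimal sketch with SciPy, assuming the same seven points fitted above (`X[1:8]`) and complete linkage; the cut-off of 3 is an arbitrary illustrative value, not a tuned one: ###Code
from scipy.cluster.hierarchy import linkage, fcluster

points = X[1:8, 0:2]                                   # same subset used for the dendrograms
Z = linkage(points, method="complete")                 # complete-linkage merge tree
flat_labels = fcluster(Z, t=3, criterion="distance")   # cut the tree at distance 3
print(flat_labels)
###Output
_____no_output_____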
07___Project_1_Part_6_Final.ipynb
###Markdown ###Code from google.colab import drive drive.mount('/content/drive') import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import StandardScaler, OneHotEncoder from sklearn.impute import SimpleImputer from sklearn.compose import make_column_transformer, make_column_selector from sklearn.pipeline import make_pipeline from sklearn.model_selection import train_test_split from sklearn import set_config from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.metrics import r2_score from sklearn.metrics import mean_squared_error set_config(display='diagram') salesfile = '/content/drive/MyDrive/**Coding Dojo**/02 Week 2: Pandas/Files for Lessons/sales_predictions.csv' df = pd.read_csv(salesfile) df.head() sns.barplot(data=df, x='Item_Outlet_Sales', y='Outlet_Type') plt.title('Sales per Outlet Type') plt.xlabel('Item Outlet Sales') plt.ylabel('Outlet Type') plt.show() ax = sns.regplot(data=df, x='Item_Outlet_Sales', y='Item_Visibility', scatter_kws={'s':1}, line_kws = dict(color='black', ls=':')) ax.set(title='Sales Dependancy on Item Visibility', xlabel='Projected Sales $', ylabel='Item Visibility'); #X = df.drop('Item_Outlet_Sales', axis=1) X = df.drop(['Item_Outlet_Sales', 'Item_Identifier'], axis=1) y = df['Item_Outlet_Sales'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) # replace tiers with numbers Out_Loc_Type = {'Tier 1': 1, 'Tier 2': 2, 'Tier 3': 3} df['Outlet_Location_Type'] = df['Outlet_Location_Type'].replace(Out_Loc_Type) categoricals = ['Item_Fat_Content','Item_Type','Outlet_Identifier','Outlet_Size','Outlet_Location_Type','Outlet_Type'] numericals = ['Item_Weight','Item_MRP','Outlet_Establishment_Year','Item_Visibility'] mean_imputer = SimpleImputer(strategy='mean') freq_imputer = SimpleImputer(strategy='most_frequent') scaler = StandardScaler() scalertwo = StandardScaler() ohe = OneHotEncoder(sparse=False, handle_unknown='ignore') num_pipe = make_pipeline(mean_imputer, scaler) cat_pipe = make_pipeline(freq_imputer, ohe) num_tuple = (num_pipe, numericals) cat_tuple = (cat_pipe, categoricals) preprocessor = make_column_transformer(num_tuple, cat_tuple, remainder='drop') ###Output _____no_output_____ ###Markdown * Final - Part 1 ###Code reg = LinearRegression() reg_pipe = make_pipeline(preprocessor , reg) reg_pipe.fit(X_train , y_train) train_score = reg_pipe.score(X_train, y_train) test_score = reg_pipe.score(X_test, y_test) train_preds = reg_pipe.predict(X_train) test_preds = reg_pipe.predict(X_test) r2_train = r2_score(y_train, train_preds) r2_test = r2_score(y_test, test_preds) print(r2_train) print(r2_test) rmse_train = np.sqrt(mean_squared_error(y_train, train_preds)) rmse_test = np.sqrt(mean_squared_error(y_test, test_preds)) print(rmse_train) print(rmse_test) ###Output 1138.9543788442363 1093.0107702137034 ###Markdown * Final - Part 2 ###Code dec_tree = DecisionTreeRegressor(random_state = 42) dec_tree_pipe = make_pipeline(preprocessor, dec_tree) dec_tree_pipe.fit(X_train, y_train) train_preds_dec = dec_tree_pipe.predict(X_train) test_preds_dec = dec_tree_pipe.predict(X_test) train_dec_score = dec_tree_pipe.score(X_train, y_train) test_dec_score = dec_tree_pipe.score(X_test, y_test) print(train_dec_score) print(test_dec_score) r2_train = r2_score(y_train, train_preds_dec) r2_test = r2_score(y_test, test_preds_dec) print(r2_train) print(r2_test) rmse_train_dec = np.sqrt(mean_squared_error(y_train, train_preds_dec)) 
rmse_test_dec = np.sqrt(mean_squared_error(y_test, test_preds_dec)) print(rmse_train_dec) print(rmse_test_dec) ###Output 4.925864104892086e-15 1497.7376579778304 ###Markdown * Final Step 3 Judging from the two models, I would choose the Linear Regression model. Its bias and variance are consistent between the training and testing data (train and test RMSE are close, roughly 1139 vs. 1093), so I know the model behaves the same way on data it has not seen. The Decision Tree, by contrast, has a near-zero training RMSE but a test RMSE of about 1498, which indicates it is overfitting the training data. ###Code ###Output _____no_output_____
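###Markdown One way to act on that conclusion is to constrain the Decision Tree so it cannot memorize the training set and then compare the train/test gap again. A minimal sketch reusing the `preprocessor` and split defined above; `max_depth=5` is an arbitrary illustrative value, not a tuned one: ###Code
limited_tree = DecisionTreeRegressor(max_depth=5, random_state=42)
limited_tree_pipe = make_pipeline(preprocessor, limited_tree)
limited_tree_pipe.fit(X_train, y_train)

# Compare train vs. test RMSE to see whether the overfitting gap shrinks
train_rmse = np.sqrt(mean_squared_error(y_train, limited_tree_pipe.predict(X_train)))
test_rmse = np.sqrt(mean_squared_error(y_test, limited_tree_pipe.predict(X_test)))
print(train_rmse, test_rmse)
###Output
_____no_output_____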
source/getting_started/source_code.ipynb
###Markdown .. meta:: :description: A guide which introduces the most important steps to get started with pymoo, an open-source multi-objective optimization framework in Python. .. meta:: :keywords: Multi-objective Optimization, Python, Evolutionary Computation, Optimization Test Problem, Hypervolume ###Code .. _nb_getting_started_source_code: ###Output _____no_output_____ ###Markdown Source Code In this guide, we have provided a couple of options for defining your problem and how to run the optimization. You might have already copied the code into your IDE. However, if not, the following code snippets cover the problem definition, algorithm initializing, solving the optimization problem, and visualization of the non-dominated set of solutions altogether. ###Code import numpy as np from pymoo.algorithms.moo.nsga2 import NSGA2 from pymoo.core.problem import ElementwiseProblem from pymoo.optimize import minimize from pymoo.visualization.scatter import Scatter class MyProblem(ElementwiseProblem): def __init__(self): super().__init__(n_var=2, n_obj=2, n_constr=2, xl=np.array([-2, -2]), xu=np.array([2, 2])) def _evaluate(self, x, out, *args, **kwargs): f1 = 100 * (x[0] ** 2 + x[1] ** 2) f2 = (x[0] - 1) ** 2 + x[1] ** 2 g1 = 2 * (x[0] - 0.1) * (x[0] - 0.9) / 0.18 g2 = - 20 * (x[0] - 0.4) * (x[0] - 0.6) / 4.8 out["F"] = [f1, f2] out["G"] = [g1, g2] problem = MyProblem() algorithm = NSGA2(pop_size=100) res = minimize(problem, algorithm, ("n_gen", 100), verbose=False, seed=1) plot = Scatter() plot.add(res.F, color="red") plot.show() ###Output _____no_output_____
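###Markdown As a small follow-up -- assuming the run above completed -- the `Result` object returned by `minimize` exposes the decision variables and objective values of the obtained non-dominated set, which can be inspected directly: ###Code
# Decision variables and objective vectors of the non-dominated solutions
print("X shape:", res.X.shape)
print("F shape:", res.F.shape)
print(res.F[:5])  # first few objective vectors
###Output
_____no_output_____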
mtab/mtab_to_metrics.ipynb
###Markdown Setup parameter ###Code input_file_path ='./t2dv2-dev-input' json_save_path = './mtab_json_dev' csv_save_path = './mtab_csv_dev' gt_file_path = './round_1' gt_label_path = './mtab_gt_dev' metrics_save_path = './mtab_metrics_dev' total_metrics = 'dev_results.csv' files_to_join = ['./dev_predictions_metrics/metrics_1.csv'] tags = ['tl1'] limit = 1000 !mkdir -p $json_save_path !mkdir -p $csv_save_path !mkdir -p $gt_label_path !mkdir -p $metrics_save_path json_save_path ###Output _____no_output_____ ###Markdown Get MTab created json file ###Code ls = glob.glob(input_file_path + "/*.csv") for file in ls: json_name = json_save_path + file[len(input_file_path):len(file)-4] + '.json' !curl -X POST -F file=@$file https://mtab.app/api/v1/mtab?limit=$limit -o $json_name ###Output % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 228k 100 212k 100 16493 1644 124 0:02:13 0:02:12 0:00:01 4875016493 0 0 100 16493 0 4586 0:00:03 0:00:03 --:--:-- 4586 0 312 0:00:52 0:00:52 --:--:-- 09 --:--:-- 01:07 0:01:06 0:00:01 0 0 149 0:01:50 0:01:49 0:00:01 0 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 24538 100 22269 100 2269 1980 201 0:00:11 0:00:11 --:--:-- 4665 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 30358 100 27133 100 3225 2983 354 0:00:09 0:00:09 --:--:-- 7489:08 --:--:-- 06 3509 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 65846 100 60816 100 5030 1296 107 0:00:47 0:00:46 0:00:01 1839000:34 0:00:34 --:--:-- 000:40 0:00:40 --:--:-- 0 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 67798 100 62090 100 5708 1720 158 0:00:36 0:00:36 --:--:-- 17770 0 0 100 5708 0 495 0:00:11 0:00:11 --:--:-- 0 159 0:00:35 0:00:35 --:--:-- 0 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 21034 100 19506 100 1528 2470 193 0:00:07 0:00:07 --:--:-- 4400 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 203k 100 194k 100 9394 1190 56 0:02:47 0:02:47 --:--:-- 523370 9394 0 77 0:02:02 0:02:01 0:00:01 02:22 0:02:20 0:00:02 0 0 58 0:02:41 0:02:41 --:--:-- 0 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 285k 100 256k 100 29829 2176 247 0:02:00 0:02:00 --:--:-- 76453:01 0:01:01 --:--:-- 029 0 474 0:01:02 0:01:02 --:--:-- 01:16 --:--:-- 0 271 0:01:50 0:01:49 0:00:01 0 0:01:59 0:00:01 0 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 75682 100 68244 100 7438 734 80 0:01:32 0:01:32 --:--:-- 16821 269 0:00:27 0:00:27 --:--:-- 0 0 178 0:00:41 0:00:41 --:--:-- 02 0:01:31 0:00:01 0 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 29102 100 27017 100 2085 2512 193 0:00:10 0:00:10 --:--:-- 8201 ###Markdown Transform json to table-linker format csv ###Code ls = glob.glob( json_save_path+ "/*.json") for file in ls: if os.path.getsize(file) == 0: continue f = open(file) data = json.load(f) # get data and input from json file to arrays target = data['tables'][0]['semantic']['cea'] inp = data['tables'][0]['table_cells'] #load the data we want annotation = [] desc= [] kg_id = [] kg_label = [] wikipedia = [] target_ls = [] tar = [] col = [] for i in range(0, len(target)): if 
'dbpedia' in target[i]['annotation']: annotation.append(target[i]['annotation']['dbpedia'][target[i]['annotation']['dbpedia'].find('resource/')+9:]) else: annotation.append('') desc.append(target[i]['annotation']['desc']) kg_label.append(target[i]['annotation']['label']) kg_id.append(target[i]['annotation']['wikidata'][target[i]['annotation']['wikidata'].find('/entity/')+8:]) if 'wikipedia' in target[i]['annotation']: wikipedia.append(target[i]['annotation']['wikipedia'][target[i]['annotation']['wikipedia'].find('/wiki/')+6:]) else: wikipedia.append('') tar.append(target[i]['target']) target_ls.append(inp[target[i]['target'][0]][target[i]['target'][1]]) col = [] row= [] for i in tar: row.append(i[0]-1) col.append(i[1]) import pandas as pd df = pd.DataFrame(list(zip(col, row, target_ls, kg_id, kg_label, desc, annotation,wikipedia)), columns =['column', 'row', 'label', 'kg_id', 'kg_labels','kg_descriptions', 'dbpedia', 'wikipedia']) df.to_csv(csv_save_path + file[len(json_save_path):len(file)-5] +'.csv', index = False) ###Output _____no_output_____ ###Markdown Label the results with ground truth ###Code ls = glob.glob(csv_save_path+ "/*.csv") for file in ls: if os.path.getsize(file) == 0: continue gt_file = gt_file_path + file[len(csv_save_path):] output_path = gt_label_path + file[len(csv_save_path):] !tl ground-truth-labeler -f $gt_file < $file > $output_path ###Output ground-truth-labeler Time: 0.012091875076293945s ground-truth-labeler Time: 0.010023117065429688s ground-truth-labeler Time: 0.01080179214477539s ground-truth-labeler Time: 0.011776924133300781s ground-truth-labeler Time: 0.010269880294799805s ground-truth-labeler Time: 0.011457204818725586s ground-truth-labeler Time: 0.011809825897216797s ground-truth-labeler Time: 0.01980900764465332s ground-truth-labeler Time: 0.011887073516845703s ground-truth-labeler Time: 0.01222085952758789s ###Markdown Get Metrics and output to file ###Code ls = glob.glob( gt_label_path+ "/*.csv") df_list = [] for file in ls: if os.path.getsize(file) == 0: continue output_path = metrics_save_path + file[len(gt_label_path):] out = file[len(gt_label_path)+1:] !tl metrics $file -k 1 -c "evaluation_label" --tag $out > $output_path if os.path.getsize(output_path) > 1: df = pd.read_csv(output_path) df_list.append(df) #pd.concat(df_list).to_csv(total_metrics, index = False) ###Output metrics Time: 0.3990919589996338s metrics Time: 0.11082911491394043s metrics Time: 0.12528109550476074s metrics Time: 0.3603677749633789s metrics Time: 0.09476995468139648s metrics Time: 0.37699031829833984s metrics Time: 0.4293069839477539s metrics Time: 1.0290112495422363s metrics Time: 0.03458094596862793s metrics Time: 0.34078001976013184s ###Markdown Join with other files ###Code df = pd.concat(df_list) df.columns =['mtab_k', 'mtab_f1','mtab_precision', 'mtab_recall', 'tag'] df = df.reset_index(drop = True) for index in range (0, len(files_to_join)): file = files_to_join[index] df.insert(0, tags[index] + '_precision', '') df.insert(0, tags[index] + '_recall', '') df.insert(0, tags[index] + '_f1', '') df.insert(0, tags[index] + '_k', '') new_df = pd.read_csv(file) for i in range(0, len(df)): for j in range(0, len(new_df)): if df.tag[i] == new_df.tag[j]: df[tags[index] + '_k'][i] = new_df['k'][j] df[tags[index] + '_f1'][i] = new_df['f1'][j] df[tags[index] + '_precision'][i] = new_df['precision'][j] df[tags[index] + '_recall'][i] = new_df['recall'][j] continue df.to_csv(total_metrics, index = False) df ###Output _____no_output_____
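###Markdown The join cell above fills the extra columns with chained indexing (`df[col][i] = ...`), which pandas may flag with a `SettingWithCopyWarning`. A sketch of an alternative, assuming each file in `files_to_join` has the same `tag`, `k`, `f1`, `precision`, `recall` columns used above, is to merge on `tag` instead: ###Code
merged = pd.concat(df_list)
merged.columns = ['mtab_k', 'mtab_f1', 'mtab_precision', 'mtab_recall', 'tag']
merged = merged.reset_index(drop=True)

for idx, path in enumerate(files_to_join):
    other = pd.read_csv(path)
    # Prefix the metric columns so each joined file keeps its own tag name
    other = other.rename(columns={c: tags[idx] + '_' + c for c in ['k', 'f1', 'precision', 'recall']})
    merged = merged.merge(other, on='tag', how='left')

merged.to_csv(total_metrics, index=False)
merged
###Output
_____no_output_____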
Oanda v20 REST-oandapyV20/05.00 Trade Management.ipynb
###Markdown Trade Management [OANDA REST-V20 API Wrapper Doc on Trade](http://oanda-api-v20.readthedocs.io/en/latest/endpoints/trades/opentrades.html)[OANDA API Getting Started](http://developer.oanda.com/rest-live-v20/introduction/)[OANDA API Trade](http://developer.oanda.com/rest-live-v20/trades-ep/) ###Code import pandas as pd import oandapyV20 import oandapyV20.endpoints.trades as trades import configparser config = configparser.ConfigParser() config.read('../config/config_v20.ini') accountID = config['oanda']['account_id'] access_token = config['oanda']['api_key'] ###Output _____no_output_____ ###Markdown Get a list of trades for an Account. ###Code client = oandapyV20.API(access_token=access_token) params ={ "instrument": "DE30_EUR,EUR_USD" } r = trades.TradesList(accountID=accountID, params=params) client.request(r) print(r.response) ###Output {'lastTransactionID': '63', 'trades': [{'state': 'OPEN', 'currentUnits': '100', 'realizedPL': '0.0000', 'price': '0.71532', 'openTime': '2017-01-20T15:59:08.362429413Z', 'unrealizedPL': '1.4736', 'initialUnits': '100', 'instrument': 'NZD_USD', 'id': '35', 'financing': '0.0090'}, {'state': 'OPEN', 'currentUnits': '100', 'realizedPL': '0.0000', 'price': '0.75473', 'openTime': '2017-01-20T15:58:58.618457963Z', 'unrealizedPL': '-0.0458', 'initialUnits': '100', 'instrument': 'AUD_USD', 'id': '33', 'financing': '0.0078'}, {'state': 'OPEN', 'currentUnits': '100', 'realizedPL': '0.0000', 'price': '0.75489', 'openTime': '2017-01-20T15:58:23.903964257Z', 'unrealizedPL': '-0.0688', 'initialUnits': '100', 'instrument': 'AUD_USD', 'id': '31', 'financing': '0.0078'}]} ###Markdown Get the list of open Trades for an Account. ###Code r = trades.OpenTrades(accountID) client.request(r) r.response trade_id = r.response['trades'][0]['id'] ###Output _____no_output_____ ###Markdown Get the details of a specific Trade in an Account. ###Code r = trades.TradeDetails(accountID, tradeID=trade_id) client.request(r) print(r.response) ###Output {'lastTransactionID': '63', 'trade': {'state': 'OPEN', 'currentUnits': '100', 'realizedPL': '0.0000', 'price': '0.71532', 'openTime': '2017-01-20T15:59:08.362429413Z', 'unrealizedPL': '1.4736', 'initialUnits': '100', 'instrument': 'NZD_USD', 'id': '35', 'financing': '0.0090'}} ###Markdown Close (partially or fully) a specific open Trade in an Account. ###Code data = { "units": 100 } r = trades.TradeClose(accountID, tradeID=trade_id) client.request(r) ###Output _____no_output_____
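###Markdown One detail worth noting in the close example above: the `data` dict with the units to close is built but never handed to the request, so the call presumably falls back to the API default of closing the whole trade. If a partial close is intended, the payload would need to be passed in -- a sketch (units given as a string, which is how the v20 API documents them): ###Code
data = {
    "units": "100"   # number of units to close; the API default is a full close
}
r = trades.TradeClose(accountID, tradeID=trade_id, data=data)
client.request(r)
r.response
###Output
_____no_output_____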
notebooks/.ipynb_checkpoints/fgsm-heuristic-checkpoint.ipynb
###Markdown Load Data ###Code word2vec = Word2Vec.load('/home/david/projects/university/astnn/data/train/embedding/node_w2v_128').wv vocab = word2vec.vocab ast_data = pd.read_pickle(root+'test/test_.pkl') block_data = pd.read_pickle(root+'test/blocks.pkl') ###Output _____no_output_____ ###Markdown Allowed var names ###Code leaf_embed = nn.Sequential( model._modules['encoder']._modules['embedding'], model._modules['encoder']._modules['W_c'] ) # words we wont allow as variable names reserved_words = [ 'auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do', 'int', 'long', 'register', 'return', 'short', 'sizeof', 'static', 'struct', 'switch', 'typedef', 'union', 'unsigned', 'void', 'volatile', 'while', 'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if', 'printf', 'scanf', 'cos', 'malloc' ] def allowed_variable(var): pattern = re.compile("([a-z]|[A-Z]|_)+([a-z]|[A-Z]|[0-9]|_)*$") if (var not in reserved_words) and pattern.match(var): return True else: return False allowed_variable('scanf') embedding_map = {} for index in range(len(vocab)): if allowed_variable(word2vec.index2word[index]): embedding_map[index] = leaf_embed(torch.tensor(index)).detach().numpy() ###Output _____no_output_____ ###Markdown Var replace functions ###Code def replace_index(node, old_i, new_i): i = node[0] if i == old_i: result = [new_i] else: result = [i] children = node[1:] for child in children: result.append(replace_index(child, old_i, new_i)) return result def replace_var(x, old_i, new_i): mod_blocks = [] for block in x: mod_blocks.append(replace_index(block, old_i, new_i)) return mod_blocks ###Output _____no_output_____ ###Markdown Closest Var functions ###Code def l2_norm(a, b): return np.linalg.norm(a-b) def cos_sim(a, b): return np.inner(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)) def closest_index(embedding, embedding_map, metric): embedding = embedding.detach().numpy() closest_i = list(embedding_map.keys())[0] closest_dist = metric(embedding_map[closest_i], embedding) for i, e in embedding_map.items(): d = metric(embedding_map[i], embedding) if d < closest_dist: closest_dist = d closest_i = i return closest_i def normalize(v): norm = np.linalg.norm(v) if norm == 0: return v return v / norm ###Output _____no_output_____ ###Markdown Grad locating functions ###Code def get_embedding(indices, node_list): ''' get the embeddings at the index positions in postorder traversal. 
''' res = [] c = 0 for i in range(node_list.size(0)): if not np.all(node_list[i].detach().numpy() == 0): if c in indices: res.append(node_list[i]) c += 1 return res def post_order_loc(node, var, res, counter): ''' ''' index = node[0] children = node[1:] for child in children: res, counter = post_order_loc(child, var, res, counter) if var == index and (not children): res.append(counter) # print(counter, word2vec.index2word[index]) counter += 1 return res, counter def get_grad(x, var_index, node_list): grads = [] for i, block in enumerate(x): indices, _ = post_order_loc(block, var_index, [], 0) grads += get_embedding(indices, node_list.grad[:, i, :]) try: node_embedding = get_embedding(indices, node_list[:, i, :])[0] except: pass if len(grads) < 1: return None, None grad = torch.stack(grads).sum(dim=0) return grad, node_embedding ###Output _____no_output_____ ###Markdown Var name finder ###Code class declarationFinder(c_ast.NodeVisitor): def __init__(self): self.names = set() def visit_Decl(self, node): if type(node.type) in [TypeDecl, ArrayDecl] : self.names.add(node.name) def get_var_names(ast): declaration_finder = declarationFinder() declaration_finder.visit(ast) return declaration_finder.names # get_var_names(x) ###Output _____no_output_____ ###Markdown FGSMwith vars ordered and early exit ###Code # def gradient_method(x, n_list, var, epsilon, metric): # orig_index = vocab[var].index if var in vocab else MAX_TOKEN # grad, node_embedding = get_grad(x, orig_index, n_list) # if grad is None: # # print("no leaf occurences") # return None # v = node_embedding.detach().numpy() # g = torch.sign(grad).detach().numpy() # v = v + epsilon * g # # get the closest emebedding from our map # i = closest_index(v, sampled_embedding_map, metric) # # print("orig name:", word2vec.index2word[orig_index], "; new name:", word2vec.index2word[i]) # if i != orig_index: # return replace_var(x, orig_index, i) # else: # return x MAX_TOKEN = word2vec.vectors.shape[0] import time import datetime success_stats = [] def evaluate(epsilon, limit = None, sort_vars = True): ast_count = 0 var_count = 0 ast_total = 0 var_total = 0 start = time.time() for code_id in block_data['id'].tolist(): # print(code_id) x, ast = block_data['code'][code_id], ast_data['code'][code_id] _, orig_pred = torch.max(model([x]).data, 1) orig_pred = orig_pred.item() # get the grad loss_function = torch.nn.CrossEntropyLoss() labels = torch.LongTensor([orig_pred]) output = model([x]) loss = loss_function(output, Variable(labels)) loss.backward() n_list = model._modules['encoder'].node_list var_names = get_var_names(ast) success = False var_weighted = [] for var in list(var_names): orig_index = vocab[var].index if var in vocab else MAX_TOKEN grad, node_embedding = get_grad(x, orig_index, n_list) if grad is not None: # l1 h = abs((grad @ torch.sign(grad)).item()) # l2 var_weighted.append( (h, grad, node_embedding) ) if sort_vars: var_weighted = sorted(var_weighted, key=lambda x: x[0], reverse = True) var_count=0 depth = 2 var_index = 0 for h, grad, node_embedding in var_weighted: v = node_embedding g = torch.sign(grad) v = v + epsilon * g # get the closest emebedding from our map i = closest_index(v, sampled_embedding_map, l2_norm) if i != orig_index: new_x_l2 = replace_var(x, orig_index, i) else: new_x_l2 = x if new_x_l2: o = model([new_x_l2]) _, predicted_l2 = torch.max(o.data, 1) # print(orig_pred, predicted_l2.item()) var_total += 1 if orig_pred != predicted_l2.item(): var_count += 1 success = True success_stats.append((False, h, var_count)) else: 
success_stats.append((True, h, var_count)) var_count += 1 if success: ast_count += 1 ast_total += 1 if ast_total % 500 == 499: eval_time = time.time() - start eval_time = datetime.timedelta(seconds=eval_time) print(ast_total, ";", eval_time, ";", ast_count / ast_total, ";", var_count / var_total) if limit and limit < ast_total: break return (ast_count / ast_total, var_count / var_total) # sample_rate = 0.2 # sample_count = int(len(embedding_map) * sample_rate) # sampled_embedding_map = {key: embedding_map[key] for key in random.sample(embedding_map.keys(), sample_count)} sampled_embedding_map = embedding_map success_stats = [] evaluate(40) success_df = pd.DataFrame(success_stats, columns=["robustness", "heuristic", "position"]) success_df success_df.hist("heuristic") # success_df.hist("heuristic", by="success", sharex=True) success_df.groupby(pd.cut(success_df["heuristic"], bins=10)).count() filtered = success_df[success_df["heuristic"]<10] binned = filtered.groupby(pd.cut(filtered["heuristic"], bins=10)) print(binned.count()) binned.mean().plot.line("heuristic", "robustness", title="Robustness by Heuristic") filtered = success_df[success_df["heuristic"]<20] binned = filtered.groupby(pd.cut(filtered["heuristic"], bins=20)) print(binned.count()) binned.mean().plot.line("heuristic", "robustness", title="Robustness by Heuristic") filtered = success_df[success_df["heuristic"]<30] binned = filtered.groupby(pd.cut(filtered["heuristic"], bins=30)) print(binned.count()) binned.mean().plot.line("heuristic", "robustness", title="Robustness by Heuristic") filtered = success_df[success_df["heuristic"]<100] binned = filtered.groupby(pd.cut(filtered["heuristic"], bins=1000)) print(binned.count()) binned.mean().plot.line("heuristic", "robustness", title="Robustness by Heuristic") binned.count() success_df.groupby(success_df["position"]).mean() pd.cut(success_df["heuristic"], bins=10) ###Output _____no_output_____
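###Markdown Since the same filter-bin-plot pattern is repeated above for several heuristic cut-offs, it may be worth folding it into a small helper; a sketch, where the cut-off and bin count are just the values experimented with above: ###Code
def robustness_by_heuristic(df, max_h, bins):
    """Bin rows with heuristic below max_h and plot mean robustness per bin."""
    filtered = df[df["heuristic"] < max_h]
    binned = filtered.groupby(pd.cut(filtered["heuristic"], bins=bins))
    binned.mean().plot.line("heuristic", "robustness",
                            title="Robustness by Heuristic (h < %s)" % max_h)
    return binned.count()

robustness_by_heuristic(success_df, 20, 20)
###Output
_____no_output_____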
paper_experiments_work_log/egocom_dataset_creation/manual_trimming_and_aligning_start_time/shrink_video.ipynb
###Markdown This script was used to produce the 720p, 480p, and 240p versions of EGOCOM ###Code from __future__ import print_function, absolute_import, division, unicode_literals, with_statement # Python 2 compatibility import os import subprocess data_loc = "/home/cgn/Downloads/egocom-aligned-final/" write_loc = "/home/cgn/Downloads/egocom-aligned-final-720p/" for fn in sorted([v for v in os.listdir(data_loc) if v[-4:] == ".MP4"]): print(fn) i = data_loc + fn o = write_loc + fn cmd = "ffmpeg -i {i} -s 1280x720 -aspect 1280:720 -vcodec libx264 -crf 20 -threads 12 -crf 20 {o}".format(i=i, o=o) out = subprocess.getoutput(cmd) data_loc = "/home/cgn/Downloads/egocom-aligned-final/" write_loc = "/home/cgn/Downloads/egocom-aligned-final-480p/" for fn in sorted([v for v in os.listdir(data_loc) if v[-4:] == ".MP4"]): print(fn) i = data_loc + fn o = write_loc + fn cmd = "ffmpeg -i {i} -s 640x480 -aspect 640:480 -vcodec libx264 -crf 20 -threads 12 -crf 20 {o}".format(i=i, o=o) out = subprocess.getoutput(cmd) data_loc = "/home/cgn/Downloads/egocom-aligned-final/" write_loc = "/home/cgn/Downloads/egocom-aligned-final-240p/" for fn in sorted([v for v in os.listdir(data_loc) if v[-4:] == ".MP4"]): print(fn) i = data_loc + fn o = write_loc + fn cmd = "ffmpeg -i {i} -s 352x240 -aspect 352:240 -vcodec libx264 -crf 20 -threads 12 -crf 20 {o}".format(i=i, o=o) out = subprocess.getoutput(cmd) ###Output _____no_output_____
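###Markdown The three cells above differ only in the target resolution and output folder, so the same work can be expressed once with a loop; a sketch using the same ffmpeg flags and the same local paths assumed above: ###Code
resolutions = [
    ("1280x720", "1280:720", "/home/cgn/Downloads/egocom-aligned-final-720p/"),
    ("640x480",  "640:480",  "/home/cgn/Downloads/egocom-aligned-final-480p/"),
    ("352x240",  "352:240",  "/home/cgn/Downloads/egocom-aligned-final-240p/"),
]

for size, aspect, out_dir in resolutions:
    for fn in sorted(v for v in os.listdir(data_loc) if v[-4:] == ".MP4"):
        i = data_loc + fn
        o = out_dir + fn
        cmd = ("ffmpeg -i {i} -s {s} -aspect {a} -vcodec libx264 "
               "-crf 20 -threads 12 {o}").format(i=i, s=size, a=aspect, o=o)
        subprocess.getoutput(cmd)
###Output
_____no_output_____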
notebooks/chapter_08/05 - Introduction to GANs.ipynb
###Markdown About GANsSince GANs are quite advanced, diving deeply into the technical details would be out of scope for us. This specific implementation will be a **deep convolutional GAN, or DCGAN**: a GAN where the generator and discriminator are deep convnets.In particular, it leverages a `Conv2DTranspose` layer for image upsampling in the generator.We will train our GAN on images from **CIFAR10**, a dataset of 50,000 32x32 RGB images belong to 10 classes (5,000 images per class). To make things even easier, we will only use images belonging to the class "frog".Schematically, our GAN looks like this:- A generator network maps vectors of shape (latent_dim,) to images of shape (32, 32, 3).- A discriminator network maps images of shape (32, 32, 3) to a binary score estimating the probability that the image is real.- A gan network chains the generator and the discriminator together: `gan(x) = discriminator(generator(x))`. Thus this gan network maps latent space vectors to the discriminator's assessment of the realism of these latent vectors as decoded by the generator.- We train the discriminator using examples of real and fake images along with "real"/"fake" labels, as we would train any regular image classification model.- To train the generator, we use the gradients of the generator's weights with regard to the loss of the gan model. This means that, at every step, we move the weights of the generator in a direction that will make the discriminator more likely to classify as "real" the images decoded by the generator. I.e. we train the generator to fool the discriminator. A bag of tricksHere are a few of the tricks that we leverage in our own implementation of a GAN generator and discriminator below.- We use `tanh` as the last activation in the generator, instead of `sigmoid`, which would be more commonly found in other types of models.- We sample points from the latent space using a normal distribution (Gaussian distribution), not a uniform distribution.- Stochasticity is good to induce robustness. Since GAN training results in a dynamic equilibrium, GANs are likely to get "stuck" in all sorts of ways. Introducing randomness during training helps prevent this. We introduce randomness in two ways: 1. we use dropout in the discriminator; 2. we add some random noise to the labels for the discriminator.- Sparse gradients can hinder GAN training. In deep learning, sparsity is often a desirable property, but not in GANs. There are two things that can induce gradient sparsity: 1. max pooling operations; 2. ReLU activations. Instead of max pooling, we recommend using strided convolutions for downsampling, and we recommend using a LeakyReLU layer instead of a ReLU activation. It is similar to ReLU but it relaxes sparsity constraints by allowing small negative activation values.- In generated images, it is common to see "checkerboard artifacts" caused by unequal coverage of the pixel space in the generator. To fix this, we use a kernel size that is divisible by the stride size, whenever we use a strided Conv2DTranpose or Conv2D in both the generator and discriminator. The generatorFirst, we develop a `generator` model, which turns a vector (from the latent space -- during training it will sampled at random) into a candidate image. One of the many issues that commonly arise with GANs is that the generator gets stuck with generated images that look like noise. A possible solution is to use dropout on both the discriminator and generator. 
###Code import tensorflow as tf import numpy as np from tensorflow.keras import layers latent_dim = 32 height = 32 width = 32 channels = 3 generator_input = tf.keras.Input(shape=(latent_dim,)) # First, transform the input into a 16x16 128-channels feature map x = layers.Dense(128 * 16 * 16)(generator_input) x = layers.LeakyReLU()(x) x = layers.Reshape((16, 16, 128))(x) # Then, add a convolution layer x = layers.Conv2D(256, 5, padding='same')(x) x = layers.LeakyReLU()(x) # Upsample to 32x32 x = layers.Conv2DTranspose(256, 4, strides=2, padding='same')(x) x = layers.LeakyReLU()(x) # Few more conv layers x = layers.Conv2D(256, 5, padding='same')(x) x = layers.LeakyReLU()(x) x = layers.Conv2D(256, 5, padding='same')(x) x = layers.LeakyReLU()(x) # Produce a 32x32 1-channel feature map x = layers.Conv2D(channels, 7, activation='tanh', padding='same')(x) generator = tf.keras.models.Model(generator_input, x) generator.summary() ###Output Model: "model" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 32)] 0 _________________________________________________________________ dense (Dense) (None, 32768) 1081344 _________________________________________________________________ leaky_re_lu (LeakyReLU) (None, 32768) 0 _________________________________________________________________ reshape (Reshape) (None, 16, 16, 128) 0 _________________________________________________________________ conv2d (Conv2D) (None, 16, 16, 256) 819456 _________________________________________________________________ leaky_re_lu_1 (LeakyReLU) (None, 16, 16, 256) 0 _________________________________________________________________ conv2d_transpose (Conv2DTran (None, 32, 32, 256) 1048832 _________________________________________________________________ leaky_re_lu_2 (LeakyReLU) (None, 32, 32, 256) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 32, 32, 256) 1638656 _________________________________________________________________ leaky_re_lu_3 (LeakyReLU) (None, 32, 32, 256) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 32, 32, 256) 1638656 _________________________________________________________________ leaky_re_lu_4 (LeakyReLU) (None, 32, 32, 256) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 32, 32, 3) 37635 ================================================================= Total params: 6,264,579 Trainable params: 6,264,579 Non-trainable params: 0 _________________________________________________________________ ###Markdown The discriminatorThen, we develop a `discriminator` model, that takes as input a candidate image (real or synthetic) and classifies it into one of two classes, either "generated image" or "real image that comes from the training set". ###Code discriminator_input = layers.Input(shape=(height, width, channels)) x = layers.Conv2D(128, 3)(discriminator_input) x = layers.LeakyReLU()(x) x = layers.Conv2D(128, 4, strides=2)(x) x = layers.LeakyReLU()(x) x = layers.Conv2D(128, 4, strides=2)(x) x = layers.LeakyReLU()(x) x = layers.Conv2D(128, 4, strides=2)(x) x = layers.LeakyReLU()(x) x = layers.Flatten()(x) # One dropout layer - important trick! 
x = layers.Dropout(0.4)(x) # Classification layer x = layers.Dense(1, activation='sigmoid')(x) discriminator = tf.keras.models.Model(discriminator_input, x) discriminator.summary() # To stabilize training, we use learning rate decay # and gradient clipping (by value) in the optimizer. discriminator_optimizer = tf.keras.optimizers.RMSprop(lr=0.0008, clipvalue=1.0, decay=1e-8) discriminator.compile(optimizer=discriminator_optimizer, loss='binary_crossentropy') ###Output Model: "model_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_2 (InputLayer) [(None, 32, 32, 3)] 0 _________________________________________________________________ conv2d_4 (Conv2D) (None, 30, 30, 128) 3584 _________________________________________________________________ leaky_re_lu_5 (LeakyReLU) (None, 30, 30, 128) 0 _________________________________________________________________ conv2d_5 (Conv2D) (None, 14, 14, 128) 262272 _________________________________________________________________ leaky_re_lu_6 (LeakyReLU) (None, 14, 14, 128) 0 _________________________________________________________________ conv2d_6 (Conv2D) (None, 6, 6, 128) 262272 _________________________________________________________________ leaky_re_lu_7 (LeakyReLU) (None, 6, 6, 128) 0 _________________________________________________________________ conv2d_7 (Conv2D) (None, 2, 2, 128) 262272 _________________________________________________________________ leaky_re_lu_8 (LeakyReLU) (None, 2, 2, 128) 0 _________________________________________________________________ flatten (Flatten) (None, 512) 0 _________________________________________________________________ dropout (Dropout) (None, 512) 0 _________________________________________________________________ dense_1 (Dense) (None, 1) 513 ================================================================= Total params: 790,913 Trainable params: 790,913 Non-trainable params: 0 _________________________________________________________________ ###Markdown The adversarial networkFinally, we setup the GAN, which chains the generator and the discriminator. This is the model that, when trained, will move the generator in a direction that improves its ability to fool the discriminator. This model turns latent space points into a classification decision, "fake" or "real", and it is meant to be trained with labels that are always "these are real images". So training `gan` will updates the weights of `generator` in a way that makes `discriminator` more likely to predict "real" when looking at fake images. Very importantly, we set the discriminator to be frozen during training (non-trainable): its weights will not be updated when training `gan`. If the discriminator weights could be updated during this process, then we would be training the discriminator to always predict "real", which is not what we want! ###Code # Set discriminator weights to non-trainable # (will only apply to the `gan` model) discriminator.trainable = False gan_input = tf.keras.Input(shape=(latent_dim,)) gan_output = discriminator(generator(gan_input)) gan = tf.keras.models.Model(gan_input, gan_output) gan_optimizer = tf.keras.optimizers.RMSprop(lr=0.0004, clipvalue=1.0, decay=1e-8) gan.compile(optimizer=gan_optimizer, loss='binary_crossentropy') ###Output _____no_output_____ ###Markdown Train the GANNow we can start training. 
To recapitulate, this is schematically what the training loop looks like:for each epoch:- Draw random points in the latent space (random noise).- Generate images with `generator` using this random noise.- Mix the generated images with real ones.- Train `discriminator` using these mixed images, with corresponding targets, either "real" (for the real images) or "fake" (for the generated images).- Draw new random points in the latent space.- Train `gan` using these random vectors, with targets that all say "these are real images". This will update the weights of the generator (only, since discriminator is frozen inside `gan`) to move them towards getting the discriminator to predict "these are real images" for generated images, i.e. this trains the generator to fool the discriminator. ###Code import os from tensorflow.keras.preprocessing import image # Load CIFAR10 data (x_train, y_train), (_, _) = tf.keras.datasets.cifar10.load_data() # Select frog images (class 6) x_train = x_train[y_train.flatten() == 6] # Normalize data x_train = x_train.reshape( (x_train.shape[0],) + (height, width, channels)).astype('float32') / 255. iterations = 10000 batch_size = 20 save_dir = './results/gan' # Start training loop start = 0 for step in range(iterations): # Sample random points in the latent space random_latent_vectors = np.random.normal(size=(batch_size, latent_dim)) # Decode them to fake images generated_images = generator.predict(random_latent_vectors) # Combine them with real images stop = start + batch_size real_images = x_train[start: stop] combined_images = np.concatenate([generated_images, real_images]) # Assemble labels discriminating real from fake images labels = np.concatenate([np.ones((batch_size, 1)), np.zeros((batch_size, 1))]) # Add random noise to the labels - important trick! 
labels += 0.05 * np.random.random(labels.shape) # Train the discriminator d_loss = discriminator.train_on_batch(combined_images, labels) # sample random points in the latent space random_latent_vectors = np.random.normal(size=(batch_size, latent_dim)) # Assemble labels that say "all real images" misleading_targets = np.zeros((batch_size, 1)) # Train the generator (via the gan model, # where the discriminator weights are frozen) a_loss = gan.train_on_batch(random_latent_vectors, misleading_targets) start += batch_size if start > len(x_train) - batch_size: start = 0 # Occasionally save / plot if step % 100 == 0: # Save model weights gan.save_weights('./results/gan/gan.h5') # Print metrics print('discriminator loss at step %s: %s' % (step, d_loss)) print('adversarial loss at step %s: %s' % (step, a_loss)) # Save one generated image img = image.array_to_img(generated_images[0] * 255., scale=False) img.save(os.path.join(save_dir, 'generated_frog' + str(step) + '.png')) # Save one real image, for comparison img = image.array_to_img(real_images[0] * 255., scale=False) img.save(os.path.join(save_dir, 'real_frog' + str(step) + '.png')) ###Output discriminator loss at step 0: 0.6778590083122253 adversarial loss at step 0: 0.6921756267547607 discriminator loss at step 100: 0.6909359097480774 adversarial loss at step 100: 0.7232593297958374 discriminator loss at step 200: 0.7348566055297852 adversarial loss at step 200: 0.7360358834266663 discriminator loss at step 300: 0.6915746927261353 adversarial loss at step 300: 0.7531574368476868 discriminator loss at step 400: 0.6916146874427795 adversarial loss at step 400: 0.7307637929916382 discriminator loss at step 500: 0.6944142580032349 adversarial loss at step 500: 0.7515280842781067 discriminator loss at step 600: 0.7076152563095093 adversarial loss at step 600: 0.7389554381370544 discriminator loss at step 700: 0.6973960995674133 adversarial loss at step 700: 0.7396218180656433 discriminator loss at step 800: 0.7043560743331909 adversarial loss at step 800: 0.7644603848457336 discriminator loss at step 900: 0.6983610391616821 adversarial loss at step 900: 0.7599353790283203 discriminator loss at step 1000: 0.6604564785957336 adversarial loss at step 1000: 1.0948461294174194 discriminator loss at step 1100: 0.6915992498397827 adversarial loss at step 1100: 0.7560330033302307 discriminator loss at step 1200: 0.7402322292327881 adversarial loss at step 1200: 0.781981348991394 discriminator loss at step 1300: 0.6984463930130005 adversarial loss at step 1300: 0.8005400896072388 discriminator loss at step 1400: 0.6942706108093262 adversarial loss at step 1400: 0.7470855712890625 discriminator loss at step 1500: 0.6934496164321899 adversarial loss at step 1500: 0.7374321818351746 discriminator loss at step 1600: 0.7100178599357605 adversarial loss at step 1600: 0.7399117350578308 discriminator loss at step 1700: 0.6666063070297241 adversarial loss at step 1700: 0.8585618734359741 discriminator loss at step 1800: 0.7241682410240173 adversarial loss at step 1800: 0.7761653065681458 discriminator loss at step 1900: 0.6823721528053284 adversarial loss at step 1900: 0.7394989728927612 discriminator loss at step 2000: 0.6899019479751587 adversarial loss at step 2000: 0.760367751121521 discriminator loss at step 2100: 0.7057534456253052 adversarial loss at step 2100: 0.7540478110313416 discriminator loss at step 2200: 0.7046432495117188 adversarial loss at step 2200: 0.7378937602043152 discriminator loss at step 2300: 0.6872519254684448 adversarial loss at step 
2300: 0.7519232034683228 discriminator loss at step 2400: 0.6937800049781799 adversarial loss at step 2400: 0.7615062594413757 discriminator loss at step 2500: 0.6933615803718567 adversarial loss at step 2500: 0.600437581539154 discriminator loss at step 2600: 0.6794348955154419 adversarial loss at step 2600: 0.7602338790893555 discriminator loss at step 2700: 0.6938766837120056 adversarial loss at step 2700: 0.7341712713241577 discriminator loss at step 2800: 0.6820650100708008 adversarial loss at step 2800: 0.7613245248794556 discriminator loss at step 2900: 0.6948167085647583 adversarial loss at step 2900: 0.764464259147644 discriminator loss at step 3000: 0.7136386632919312 adversarial loss at step 3000: 0.7398979663848877 discriminator loss at step 3100: 0.7916512489318848 adversarial loss at step 3100: 0.7699298858642578 discriminator loss at step 3200: 0.6865180134773254 adversarial loss at step 3200: 0.7760629653930664 discriminator loss at step 3300: 0.6743766069412231 adversarial loss at step 3300: 0.6999596953392029 discriminator loss at step 3400: 0.6923761367797852 adversarial loss at step 3400: 0.8001443147659302 discriminator loss at step 3500: 0.7045294046401978 adversarial loss at step 3500: 0.7855002880096436 discriminator loss at step 3600: 0.684561550617218 adversarial loss at step 3600: 0.5478525161743164 discriminator loss at step 3700: 0.7120552062988281 adversarial loss at step 3700: 0.7256332635879517 discriminator loss at step 3800: 0.692017674446106 adversarial loss at step 3800: 0.7316322326660156 discriminator loss at step 3900: 0.7161163091659546 adversarial loss at step 3900: 0.7588232755661011 discriminator loss at step 4000: 0.6953740119934082 adversarial loss at step 4000: 0.7575713992118835 discriminator loss at step 4100: 0.7161322832107544 adversarial loss at step 4100: 0.7404390573501587 discriminator loss at step 4200: 0.6931121349334717 adversarial loss at step 4200: 0.8234656453132629 discriminator loss at step 4300: 0.6933884620666504 adversarial loss at step 4300: 0.7672902345657349 discriminator loss at step 4400: 0.6870468258857727 adversarial loss at step 4400: 0.8066245317459106 discriminator loss at step 4500: 0.7038675546646118 adversarial loss at step 4500: 0.7506153583526611 discriminator loss at step 4600: 0.7378366589546204 adversarial loss at step 4600: 0.793296217918396 discriminator loss at step 4700: 0.704311192035675 adversarial loss at step 4700: 0.6991087198257446 discriminator loss at step 4800: 0.6901472210884094 adversarial loss at step 4800: 0.7494764924049377 discriminator loss at step 4900: 0.7062457799911499 adversarial loss at step 4900: 0.8139276504516602 discriminator loss at step 5000: 0.6785241365432739 adversarial loss at step 5000: 0.7340184450149536 discriminator loss at step 5100: 0.6995220184326172 adversarial loss at step 5100: 0.8298379778862 discriminator loss at step 5200: 0.6959754824638367 adversarial loss at step 5200: 0.706729531288147 discriminator loss at step 5300: 0.682555079460144 adversarial loss at step 5300: 1.4900400638580322 discriminator loss at step 5400: 0.693299412727356 adversarial loss at step 5400: 0.819563090801239 discriminator loss at step 5500: 0.6903590559959412 adversarial loss at step 5500: 0.8526564836502075 discriminator loss at step 5600: 0.7191124558448792 adversarial loss at step 5600: 0.7900956869125366 discriminator loss at step 5700: 0.6815227270126343 adversarial loss at step 5700: 0.7497392892837524 discriminator loss at step 5800: 0.6751655340194702 adversarial loss at 
step 5800: 0.7598960995674133 discriminator loss at step 5900: 0.6923342943191528 adversarial loss at step 5900: 0.8122521638870239 discriminator loss at step 6000: 0.6919266581535339 adversarial loss at step 6000: 0.7355332374572754 discriminator loss at step 6100: 0.7285688519477844 adversarial loss at step 6100: 0.8961042165756226 discriminator loss at step 6200: 0.6937045454978943 adversarial loss at step 6200: 0.8102605938911438 discriminator loss at step 6300: 0.6830167174339294 adversarial loss at step 6300: 0.7191219925880432 discriminator loss at step 6400: 0.6995924711227417 adversarial loss at step 6400: 0.8283789753913879 discriminator loss at step 6500: 0.678104043006897 adversarial loss at step 6500: 0.7392969131469727 discriminator loss at step 6600: 0.6722562909126282 adversarial loss at step 6600: 0.747785747051239 discriminator loss at step 6700: 0.7277047038078308 adversarial loss at step 6700: 0.8066657185554504 discriminator loss at step 6800: 0.6564754247665405 adversarial loss at step 6800: 0.7770302891731262 discriminator loss at step 6900: 0.6471182107925415 adversarial loss at step 6900: 0.7265607714653015 discriminator loss at step 7000: 0.6762427687644958 adversarial loss at step 7000: 1.0510435104370117 discriminator loss at step 7100: 0.6755245327949524 adversarial loss at step 7100: 0.8556197285652161 discriminator loss at step 7200: 0.6616963148117065 adversarial loss at step 7200: 1.1035858392715454 discriminator loss at step 7300: 0.779595673084259 adversarial loss at step 7300: 0.8863322138786316 discriminator loss at step 7400: 0.7418298125267029 adversarial loss at step 7400: 0.8954163789749146 discriminator loss at step 7500: 0.6821805834770203 adversarial loss at step 7500: 0.7995446920394897 discriminator loss at step 7600: 0.6932834386825562 adversarial loss at step 7600: 0.7285876870155334 discriminator loss at step 7700: 0.7006130218505859 adversarial loss at step 7700: 0.7259680032730103 discriminator loss at step 7800: 0.6575964689254761 adversarial loss at step 7800: 0.8425529599189758 discriminator loss at step 7900: 0.6934231519699097 adversarial loss at step 7900: 0.7954859733581543 discriminator loss at step 8000: 0.6591410636901855 adversarial loss at step 8000: 0.8270484805107117 ###Markdown Let's display a few of our fake images: ###Code import matplotlib.pyplot as plt # Sample random points in the latent space random_latent_vectors = np.random.normal(size=(10, latent_dim)) # Decode them to fake images generated_images = generator.predict(random_latent_vectors) for i in range(generated_images.shape[0]): img = image.array_to_img(generated_images[i] * 255., scale=False) plt.figure() plt.imshow(img) plt.show() ###Output _____no_output_____
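###Markdown Since only the `gan` weights are saved during training, it can be handy to also persist the generator on its own, so images can be sampled later without rebuilding the whole adversarial setup; a minimal sketch: ###Code
# Save the generator separately for stand-alone sampling later
generator.save('./results/gan/generator.h5')

# Reload and sample without the discriminator or the gan wrapper
sampler = tf.keras.models.load_model('./results/gan/generator.h5')
noise = np.random.normal(size=(5, latent_dim))
samples = sampler.predict(noise)
print(samples.shape)  # expected: (5, 32, 32, 3)
###Output
_____no_output_____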
wine_data/tipsy_sentiments/drunk_classifier.ipynb
###Markdown Tannic_Sentiments Based on https://colab.research.google.com/drive/1OlQpHdZD7zVyZW56r8vI-L8BYylq_UmmscrollTo=XhAOS8BY0jC2 A Sentiment Classifier that has been triained on wine reviews. ###Code import pandas as pd import numpy as np from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from tensorflow.keras.models import Model from tensorflow.keras.layers import Input, GRU, Dense, Softmax, Embedding, GlobalMaxPool1D, GlobalAvgPool1D, Concatenate, Bidirectional, SpatialDropout1D wine_df = pd.read_csv('wine_data/tipsy_sentiments/ranked_wine.csv').dropna() wine_df.sample(7) # trainable_df = wine_df[['description', 'points']] # trainable_df.head() tokenizer = Tokenizer() tokenizer.fit_on_texts(wine_df.description) tokenizer.texts_to_sequences(['I need a test string, and this will do.']) print(pad_sequences((tokenizer.texts_to_sequences(["I need a test string", "This will do."])), 50)) x = pad_sequences(tokenizer.texts_to_sequences(wine_df.description), 50) y = wine_df.sentiment y = np.asarray(y) x = np.asarray(x) print(x) # y.sample(3) input = Input((50,), name='input') embed = Embedding(len(tokenizer.word_index), 100)(input) embed_dropout = SpatialDropout1D(0.5)(embed) rnn = Bidirectional(GRU(50,return_sequences=True, recurrent_dropout=0.2))(embed_dropout) maxed = GlobalMaxPool1D()(rnn) avged = GlobalAvgPool1D()(rnn) concat = Concatenate()([maxed, avged]) dense = Dense(3, activation='softmax')(concat) model = Model(input, dense) model.compile('adam', 'sparse_categorical_crossentropy', ['sparse_categorical_accuracy']) model.fit(x, y, batch_size=512, validation_split=0.2, epochs=7) test = model.predict(pad_sequences(tokenizer.texts_to_sequences(["Hello", 'This is the worst', 'This has depth', 'There is feeling', 'please work', ]), 50)) test ###Output _____no_output_____
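###Markdown One thing to double-check in the model above: Keras' `Tokenizer` assigns word indices starting at 1 (0 is reserved for padding), so the largest index equals `len(tokenizer.word_index)`. The `Embedding` layer's `input_dim` should therefore be one larger than that, otherwise the highest-indexed word falls out of range. A sketch of the adjusted layer, reusing the `input` tensor defined above: ###Code
vocab_size = len(tokenizer.word_index) + 1   # +1 because index 0 is the padding slot
embed = Embedding(vocab_size, 100)(input)
###Output
_____no_output_____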
5.5_Movie_Recommendation_ItemBased.ipynb
###Markdown However, we want to avoid spurious results that happened from just a handful of users that happened to rate the same pair of movies. In order to restrict our results to movies that lots of people rated together - and also give us more popular results that are more easily recongnizable - we'll use the min_periods argument to throw out results where fewer than 100 users rated a given movie pair: ###Code corrMatrix = userRatings.corr(method='pearson', min_periods=100) corrMatrix.head() ###Output _____no_output_____ ###Markdown Now let's produce some movie recommendations for user ID 0, who I manually added to the data set as a test case. This guy really likes Star Wars and The Empire Strikes Back, but hated Gone with the Wind. I'll extract his ratings from the userRatings DataFrame, and use dropna() to get rid of missing data (leaving me only with a Series of the movies I actually rated:) ###Code myRatings = userRatings.loc[0].dropna() myRatings ###Output _____no_output_____ ###Markdown Now, let's go through each movie I rated one at a time, and build up a list of possible recommendations based on the movies similar to the ones I rated.So for each movie I rated, I'll retrieve the list of similar movies from our correlation matrix. I'll then scale those correlation scores by how well I rated the movie they are similar to, so movies similar to ones I liked count more than movies similar to ones I hated: ###Code simCandidates = pd.Series() for i in range(0, len(myRatings.index)): print ("Adding sims for " + myRatings.index[i] + "...") # Retrieve similar movies to this one that I rated sims = corrMatrix[myRatings.index[i]].dropna() # Now scale its similarity by how well I rated this movie sims = sims.map(lambda x: x * myRatings[i]) # Add the score to the list of similarity candidates simCandidates = simCandidates.append(sims) #Glance at our results so far: print ("sorting...") simCandidates.sort_values(inplace = True, ascending = False) print (simCandidates.head(10)) simCandidates = simCandidates.groupby(simCandidates.index).sum() simCandidates.sort_values(inplace = True, ascending = False) simCandidates.head(10) ###Output _____no_output_____ ###Markdown The last thing we have to do is filter out movies I've already rated, as recommending a movie I've already watched isn't helpful: ###Code filteredSims = simCandidates.drop(myRatings.index) filteredSims.head(10) ###Output _____no_output_____
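###Markdown The whole procedure above -- scale each rated movie's correlation column by my rating, sum duplicate candidates, and drop what I already rated -- can be wrapped into one reusable function. A sketch, assuming `corrMatrix` and a ratings Series like `myRatings` are available; it builds the candidate Series with `pd.concat`, since `Series.append` is deprecated in recent pandas: ###Code
def recommend(my_ratings, corr_matrix, top_n=10):
    # Scale each rated movie's similarity column by how much I liked that movie
    candidates = pd.concat(
        [corr_matrix[title].dropna() * rating for title, rating in my_ratings.items()]
    )
    # Sum scores for movies that show up more than once, then drop ones I already rated
    candidates = candidates.groupby(candidates.index).sum()
    candidates = candidates.drop(my_ratings.index, errors='ignore')
    return candidates.sort_values(ascending=False).head(top_n)

recommend(myRatings, corrMatrix)
###Output
_____no_output_____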
programs/sketch_rnn/sketch_rnn.ipynb
###Markdown Create generated grid at various temperatures from 0.1 to 1.0 ###Code stroke_list = [] for i in range(10): model = reset_model() stroke_list.append([decode(model, z, draw_mode=False, temperature=0.1*i+0.1), [0, i]]) stroke_grid = make_grid_svg(stroke_list) draw_strokes(stroke_grid) ###Output _____no_output_____ ###Markdown Now interpolate between owl $z_0$ and owl $z_1$ ###Code model = reset_model() z0 = z stroke = test_set.random_sample() z1 = encode(model, stroke) _ = decode(model, z1) z_list = [] # interpolate between z0 and z1 N = 10 for t in np.linspace(0, 1, N): z_list.append(lerp(z0, z1, t)) # for every latent vector in z_list, sample a vector image reconstructions = [] for i in range(N): model = reset_model() reconstructions.append([decode(model, z_list[i], draw_mode=False, temperature=0.3), [0, i]]) stroke_grid = make_grid_svg(reconstructions) draw_strokes(stroke_grid) ###Output _____no_output_____
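###Markdown Linear interpolation works, but latent vectors drawn from a Gaussian prior are often interpolated spherically instead, so the intermediate points keep a comparable norm. A minimal slerp sketch, assuming `z0` and `z1` are numpy arrays of the same shape; it falls back to plain lerp when the vectors are nearly parallel: ###Code
def slerp(p0, p1, t):
    """Spherical interpolation between two latent vectors."""
    p0f, p1f = p0.ravel(), p1.ravel()
    omega = np.arccos(np.clip(
        np.dot(p0f / np.linalg.norm(p0f), p1f / np.linalg.norm(p1f)), -1.0, 1.0))
    so = np.sin(omega)
    if so < 1e-6:                      # nearly parallel vectors: lerp is fine
        return (1.0 - t) * p0 + t * p1
    return np.sin((1.0 - t) * omega) / so * p0 + np.sin(t * omega) / so * p1

z_list_slerp = [slerp(z0, z1, t) for t in np.linspace(0, 1, N)]
###Output
_____no_output_____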
notebooks/LogicProgramming.ipynb
###Markdown Logic Programming Logic programming is a programming paradigm based on logic. A program herein is a set of logical expressions stating facts and rules.A **fact** is some an atomic formula (with no logic operations, except negation) that is assumed true.A **rule** is an implication with form$$H \leftarrow B_1, B_2, \ldots, B_n$$where $B_i$ are atomic formulas that need to be true in order to $H$ to be true (sometimes $H$ is called the head and the set of $B_i$ the body).This rule can be seen as+ a logical implication: $B_1$ and $B_2$ and ... implies $H$+ a program: to solve $H$ you must solve $B_1$, then $B_2$, ...To solve a certain problem we need to write it into a collection of rules and facts, that is, we need to translate it into a logical statement.This statement is what is called a **logic program**. A computation over a logic program is the deduction of some of its implications/consequences.**Querying** a logic program is finding if some logic expression is one of the program's consequences. Usually, in answering the query we also get a model, that is, the concrete values that makes the expression a consequence of the program. These queries are computed via a backtracking algoritm called **unification** that attempts to match parts of the query with clauses from the available rules and facts. If the algorithm is able to match the query it succeeds, otherwise it fails.Due to the backtracking nature of unification, the algorithm is able to provide all possible combinations of facts that satisfy a given query.Despite the declarative nature of logic programs, there is also an imperative perspective. Unification, while replacing (or _rewriting_) part of the query with a possible match, can be seen as performing a tree depth-first search over the collection of rules and facts. So, the order of how the rules (and the clauses within rules) are listed on the program determines how the tree search will occur. Different orderings might result in enormous differences in efficiency, and even in program correction due to nontermination (if depth-first becomes locked inside the exploration of an infinite tree branch). This is a serious [leaky abstraction](https://en.wikipedia.org/wiki/Leaky_abstraction) that logic programmers must always consider.Logic languages also have an operator that allows the search termination on some branches of the tree (because the programmer knows that a search there will be useless). Also, the judicious use of this operator (called the cut operator), used for computation eficiency and/or termination, represents a side-effect that contaminates the pure declarative nature of a logic program. --- The most famous example of a Logic Programming language is Prolog. Here we will use the Prolog implemetation, `swipl`, which has an interface to Python, `pyswip`: ###Code # On Windows: # 1. install swipl @ https://www.swi-prolog.org/ # 2. add swipl exe to PATH !sudo apt install swi-prolog # install swipl on Colab !pip install pyswip ###Output Collecting pyswip Downloading https://files.pythonhosted.org/packages/dd/b8/9b79319127d48e41542500dea4181ee19758c2710432a397366ac85b7117/pyswip-0.2.10-py2.py3-none-any.whl Installing collected packages: pyswip Successfully installed pyswip-0.2.10 ###Markdown An object of class `Prolog` gives us a Prolog engine to query the provided rules and facts. 
###Code from pyswip import Prolog # subclass to replace the standard: program = Prolog() # because the clause cache does not reset, so we # need to do stuff like this, everytime we start # program.retractall('father(_,_)'); # program.retractall('grandfather(_,_)'); class LP(Prolog): def __init__(self, terms=[]): super().__init__() for term in terms: name, arity = term.split('/') clause = name + '(' + ','.join('_'*int(arity)) + ')' self.retractall(clause) # check if query is satisfiable def isSat(self, aQuery): return list(program.query(aQuery)) != [] ###Output _____no_output_____ ###Markdown Logic programming consists on the use of three types of statements+ **facts**: a statement that some relation exists, like _alex is the father of michael and michael is the father of gina_+ **rules**: a statement that creates a new abstract relation between other facts and rules, like _if X is father of Y, and Y is father of Z, then X is grandfather of Z_. + **queries**: a statement that asks if some expression is a consequence of the program, like _is alex the gradfather of gina?_ or _is there someone that is father of gina?_ It's usual for the engine to return all models that make the query a consequence of the programThese statements are build from **terms** like+ **constants**, fixed values, like number or strings+ **logical variables**, used by the engine to assign values in order to satisfy queries. By convention, logical variables start in uppercase letters.+ **functors**, which denote relations. Functors have structure `functor-name(term1, term2, ..., termn)`. The number of parameters is the functor's **arity**. Constants are just functors with zero arity Let's see an example of a logical program with these three types of statements, ###Code # list of program's functors program = LP( ['father/2', 'grandfather/2'] ) program.assertz("father(alex,michael)") # facts program.assertz("father(michael,john)") program.assertz("father(michael,gina)") program.assertz("grandfather(X,Y) :- father(X,Z), father(Z,Y)") # rule for soln in program.query("father(X,Y)"): # queries print(soln["X"], "is the father of", soln["Y"]) # soln is the current model print() for soln in program.query("grandfather(X,Y)"): print(soln["X"], "is the grandfather of", soln["Y"]) print() for soln in program.query("father(X,gina)"): print(soln["X"], "is the father of gina") ###Output _____no_output_____ ###Markdown Notice that a rule like$$\text{grandfather}(X,Y) \leftarrow \text{father}(X,Z), \text{father}(Z,X)$$All variables are universally quantified variables, but $Z$ can be seen as existencially quantified. 
The rule can be read as _for all X,Y; X is the grandfather of Y if there exists a Z such that X is father of Z and Z is father of Y_ If the query is not satisfiable, the engine will provide an empty list of models ###Code print( program.isSat('father(X,gina)') ) print( program.isSat('father(alex,gina)') ) ###Output True False ###Markdown Let's add a rule for siblings, ###Code program = LP( ['father/2', 'sibling/2'] ) program.assertz("father(alex,michael)") program.assertz("father(michael,john)") program.assertz("father(michael,gina)") program.assertz("sibling(X,Y) :- father(Z,X), father(Z,Y)") for soln in program.query("sibling(X,Y)"): print(soln["X"], "is a sibling of", soln["Y"]) ###Output michael is a sibling of michael john is a sibling of john john is a sibling of gina gina is a sibling of john gina is a sibling of gina ###Markdown There are some wrong answers, because the engine is matching a person as her own sibling, since everybody trivially shares his father with herself.The meaning of the previous logic program was not what we wanted, ###Code program = LP( ['father/2', 'sibling/2'] ) program.assertz("father(alex,michael)") program.assertz("father(michael,john)") program.assertz("father(michael,gina)") program.assertz("sibling(X,Y) :- father(Z,X), father(Z,Y), X \== Y") for soln in program.query("sibling(X,Y)"): print(soln["X"], "is a sibling of", soln["Y"]) ###Output john is a sibling of gina gina is a sibling of john ###Markdown Terms can be recursive, ###Code program = LP( ['parent/2', 'ancestor/2'] ) program.assertz("parent(alex,michael)") program.assertz("parent(michael,john)") program.assertz("parent(michael,gina)") program.assertz("parent(gina,bob)") program.assertz("ancestor(X,Y) :- parent(X,Y)") program.assertz("ancestor(X,Y) :- father(X,Z), ancestor(Z,Y)") for soln in program.query("ancestor(X,Y)"): print(soln["X"], "is a ancestor of", soln["Y"]) ###Output alex is a ancestor of michael michael is a ancestor of john michael is a ancestor of gina gina is a ancestor of bob alex is a ancestor of john alex is a ancestor of gina alex is a ancestor of bob michael is a ancestor of bob ###Markdown Prolog includes lists, ###Code program = LP( ['member/2', 'double/2'] ) program.assertz("double(L,L2) :- append(L,L,L2)") program.assertz("member(X,[X|Xs])") program.assertz("member(X,[Y|Ys]) :- member(X,Ys)") print(program.isSat("double([1,2,3],L), member(3,L)")) ###Output True ###Markdown The `member` rule is a typical predicate for list membership. But we can use this rule in a different way, ###Code for soln in program.query("member(X,[1,2,3])"): print(soln["X"], "is a list member") ###Output 1 is a list member 2 is a list member 3 is a list member ###Markdown Or even asking how to replace a variable inside the list to satisfy the query, ###Code for soln in program.query("member(3,[1,3,Z])"): print(soln["Z"], "is a list member") ###Output _2174 is a list member 3 is a list member ###Markdown We can make it radical and ask what are the lists where 3 is a member? 
Well, there are infinite answers, so let's just compute the first ones, ###Code iter = program.query("member(3,L)") for _ in range(6): soln = next(iter) print(soln["L"], "is a possible list") ###Output [3] is a possible list [Variable(101), 3] is a possible list [Variable(101), Variable(102), 3] is a possible list [Variable(101), Variable(102), Variable(103), 3] is a possible list [Variable(101), Variable(102), Variable(103), Variable(104), 3] is a possible list [Variable(101), Variable(102), Variable(103), Variable(104), Variable(105), 3] is a possible list ###Markdown The first model states that $Z$ can be any value (here described by a new variable created by the engine) because there's already a 3 in the list. The second model states that $Z=3$ also satisfies the query.This variable creation can be seen in these simple queries, ###Code program = LP() for soln in program.query("X = Y"): print("X=", soln["X"], " Y=", soln["Y"]) for soln in program.query("X \== Y"): print("X=", soln["X"], " Y=", soln["Y"]) ###Output X= _1750 Y= _1750 X= _1752 Y= _1754 ###Markdown A rule with no conditions is universally satisfiable, ###Code program = LP( ['thing/1'] ) program.assertz("thing(X)") # anything is a thing program.isSat("thing(prolog)") ###Output _____no_output_____ ###Markdown + Exercise: create a rule that produces the permutations of a given list, ###Code program = LP( ['remove/3', 'permutation/2'] ) program.assertz("remove(X,[X|Xs],Xs)") program.assertz("remove(X,[Y|Ys],[Y|Zs]) :- remove(X,Ys,Zs)") program.assertz("permutation([],[])") program.assertz("permutation(Xs,[Z|Zs]) :- remove(Z,Xs,Ys), permutation(Ys,Zs)") for soln in program.query("permutation([1,2,3],L)"): print("L=", soln["L"]) ###Output L= [1, 2, 3] L= [1, 3, 2] L= [2, 1, 3] L= [2, 3, 1] L= [3, 1, 2] L= [3, 2, 1] ###Markdown --- Negation as failure Logic programs describe what is considered to be true.However, it is natural to express rules like$$\text{bachelor}(X) \leftarrow \text{male(X)}, \text{not} ~\text{married(X)}$$What is the semantics of the `not` operator? The perspective to deal with negation is to consider it as a failure. A goal $G$ fails (i.e., $\neg G$ succeeds), if $G$ is not a consequence of the program.This is not the same as the negation of first-order logic. We are not checking if $\neg G$ is a consequence of the program. We are just checking if $G$ is a consequence, and it that effort fails, we _assume_ $\neg G$ is true. Otherwise, if $G$ succeeds then the rule that depends on $\neg G$ fails. Arithmetic in Prolog Prolog uses operator `is` to assign a value to a variable. Also, Prolog includes the typical list of arithmetic operators, ###Code program = LP( ['plus/3', 'gcd/3', 'factorial/2'] ) program.assertz("plus(X,Y,Z) :- Z is X+Y") # Greatest Common Divider using Euclid algorithm program.assertz("gcd(I,0,I)") program.assertz("gcd(I,J,G) :- J>0, R is I mod J, gcd(J,R,G)") program.assertz("factorial(0,1)") program.assertz("factorial(N,F) :- N>0, N1 is N-1, factorial(N1,F1), F is N*F1") for soln in program.query("plus(12,18,S)"): print("S =", soln["S"]) for soln in program.query("gcd(12,18,G)"): print("G =", soln["G"]) for soln in program.query("factorial(6,F)"): print("F =", soln["F"]) ###Output S = 30 G = 6 F = 720 ###Markdown Cuts As mentioned earlier, Prolog will depth-first the tree of rules and facts to satisfy a given query.However, if the tree is large, this search will have performance problems. 
Some of these problems can be avoided if the program is allowed to tell Prolog to stop searching some parts of the tree. That's the purpose of the cut operator (denoted as `!`). Consider the task of merging two ordered lists. For each pair of first elements from both lists, $X, Y$, only one of the next expressions will be true: $XY$. So, if one of this is satisfied, we can safely tell Prolog to stop searching the next rules. ###Code program = LP( ['merge/3'] ) program.assertz("merge([X|Xs],[Y|Ys],[X|Zs]) :- X < Y, !, merge( Xs,[Y|Ys],Zs)") program.assertz("merge([X|Xs],[Y|Ys],[X|Zs]) :- X=:=Y, !, merge( Xs, Ys ,Zs)") program.assertz("merge([X|Xs],[Y|Ys],[Y|Zs]) :- X > Y, !, merge([X|Xs], Ys ,Zs)") program.assertz("merge(Xs,[],Xs) :- !") # prevent redundant solution of merge([],[]) program.assertz("merge([],Ys,Ys) :- !") for soln in program.query("merge([1,3,5],[2,4,7,8],M)"): print("M =", soln["M"]) for soln in program.query("merge([],[],M)"): print("M =", soln["M"]) ###Output M = [1, 2, 3, 4, 5, 7, 8] M = [] ###Markdown A cut prunes all same-named rules below. It also prunes branches that could result on evaluating clauses to its left. But Prolog will still evaluate branches of clauses to its right. However if that rule fails after the cut, no more solutions will be searched.With this knowledge, we could have implemented the not operator, not(X) :- X, !, fail not(X)or a different variable operator `≠` ≠(X,X) :- !, fail ≠(X,Y)(`fail` is a Prolog predicate that always fails) A cut that does not alter the program's meaning is denoted _green cut_. Otherwise it's denoted _red cut_.The cuts in the two previous pseudo-codes are red cuts.But most red cuts are not benign... The following rule for computing the minimum is wrong minimum(X,Y,X) :- X <= Y, ! minimum(X,Y,Y)the program would succeed `mimimum(2,5,5)`. The programmer needs to make explicit the unification of the 1st and 3rd argument, minimum(X,Y,Z) :- X <= Y, !, Z=X minimum(X,Y,Y)The use of cuts must be done with extreme care. It is very easy to introduce unwanted and yet subtle behaviors in a logic program. Interaction between Python and Prolog With `pyswip` it's possible to assign atomic formulas to Python functions. The next example inserts a print side-effect: ###Code #Using foreign functions within the logical program from pyswip import Prolog, registerForeign def hello(t): print("Hello,", t) hello.arity = 1 registerForeign(hello) prolog = LP({'father':2, 'grandfather':2}) prolog.assertz("father(michael,john)") prolog.assertz("father(michael,gina)") for sol in prolog.query("father(michael,X), hello(X)"): pass ###Output Hello, john Hello, gina ###Markdown And here's a Python function helping printing the solution of the Hanoi's Towers puzzle, ###Code def notify(t): print("move disk from %s pole to %s pole." % tuple(t)) notify.arity = 1 registerForeign(notify) hanoi = LP({'hanoi':1, 'move':4}) hanoi.assertz("hanoi(N) :- move(N, left, right, center)") hanoi.assertz("move(0, _, _, _) :- !") hanoi.assertz(""" move(N, A, B, C) :- M is N-1, move(M, A, C, B), notify([A,B]), move(M, C, B, A) """) N = 3 list( hanoi.query(f"hanoi({N})") ); ###Output move disk from left pole to right pole. move disk from left pole to center pole. move disk from right pole to center pole. move disk from left pole to right pole. move disk from center pole to left pole. move disk from center pole to right pole. move disk from left pole to right pole. 
###Markdown To run a Prolog program from a text file: ###Code program = """ hanoi(N) :- move(N, left, right, center). move(0, _, _, _) :- !. move(N, A, B, C) :- M is N-1, move(M, A, C, B), notify([A,B]), move(M, C, B, A). """ f = open('hanoi.pl', 'w') f.write(program) f.close() hanoi = LP({'hanoi':1, 'move':4}) registerForeign(notify) prolog.consult("hanoi.pl") N = 4 list( hanoi.query(f"hanoi({N})") ); ###Output move disk from left pole to center pole. move disk from left pole to right pole. move disk from center pole to right pole. move disk from left pole to center pole. move disk from right pole to left pole. move disk from right pole to center pole. move disk from left pole to center pole. move disk from left pole to right pole. move disk from center pole to right pole. move disk from center pole to left pole. move disk from right pole to left pole. move disk from center pole to right pole. move disk from left pole to center pole. move disk from left pole to right pole. move disk from center pole to right pole.
Episode2-Regression/Regression.ipynb
###Markdown This project is part of a course called "Machine Learning With Python" offered by IBM on Coursera. [1] Import Packages ###Code import matplotlib.pyplot as plt import pandas as pd import numpy as np import pylab as pl from sklearn.model_selection import train_test_split from sklearn.metrics import r2_score %matplotlib inline ###Output _____no_output_____ ###Markdown Download Data Data source: [Fuel Consumption Ratings, Government of Canada](https://open.canada.ca/data/en/dataset/98f1a129-f628-4ce4-b24d-6f16bf24dd64) Uploaded on IBM Object Storage. ###Code !wget -O FuelConsumption.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/FuelConsumptionCo2.csv ###Output --2019-09-11 13:15:50-- https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/FuelConsumptionCo2.csv Resolving s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)... 67.228.254.193 Connecting to s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)|67.228.254.193|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 72629 (71K) [text/csv] Saving to: ‘FuelConsumption.csv’ FuelConsumption.csv 100%[===================>] 70.93K --.-KB/s in 0.02s 2019-09-11 13:15:50 (2.87 MB/s) - ‘FuelConsumption.csv’ saved [72629/72629] ###Markdown Understanding Data `FuelConsumption.csv`:We have downloaded a fuel consumption dataset, **`FuelConsumption.csv`**, which contains model-specific fuel consumption ratings and estimated carbon dioxide emissions for new light-duty vehicles for retail sale in Canada. [Dataset source](http://open.canada.ca/data/en/dataset/98f1a129-f628-4ce4-b24d-6f16bf24dd64)- **MODELYEAR** e.g. 2014- **MAKE** e.g. Acura- **MODEL** e.g. ILX- **VEHICLE CLASS** e.g. SUV- **ENGINE SIZE** e.g. 4.7- **CYLINDERS** e.g 6- **TRANSMISSION** e.g. A6- **FUEL CONSUMPTION in CITY(L/100 km)** e.g. 9.9- **FUEL CONSUMPTION in HWY (L/100 km)** e.g. 8.9- **FUEL CONSUMPTION COMB (L/100 km)** e.g. 9.2- **CO2 EMISSIONS (g/km)** e.g. 
182 --> low --> 0 ###Code df = pd.read_csv('FuelConsumption.csv') df.head() ###Output _____no_output_____ ###Markdown Exploratory Data Analysis ###Code df.describe() # Getting a subset of important features cdf = df[['CYLINDERS','ENGINESIZE','FUELCONSUMPTION_COMB','CO2EMISSIONS']] cdf.hist() plt.tight_layout() #adjusts spacing between figures plt.show ###Output _____no_output_____ ###Markdown Observing relationships between CO2 emissions and some features There seems to be a linear relationship between engine size and CO2 emissions (Figure 2) ###Code fig, axes = plt.subplots(1,3, figsize=(13, 3)) fig.tight_layout(w_pad=5) #plotting CYLINDERS Vs Co2 emission axes[0].scatter(cdf.CYLINDERS, cdf.CO2EMISSIONS, color='blue') axes[0].set_xlabel("CYLINDERS") axes[0].set_ylabel("Emission") #plotting engine_size Vs Co2 emission axes[1].scatter(cdf.ENGINESIZE, cdf.CO2EMISSIONS, color='red') axes[1].set_xlabel("ENGINESIZE") axes[1].set_ylabel("Emission") #plotting FUELCONSUMPTION_COMB Vs Co2 emission axes[2].scatter(cdf.FUELCONSUMPTION_COMB, cdf.CO2EMISSIONS, color='black') axes[2].set_xlabel("FUELCONSUMPTION_COMB") axes[2].set_ylabel("Emission") ###Output _____no_output_____ ###Markdown Train/Test Split ###Code X_train, X_test, y_train, y_test = train_test_split(cdf.loc[:, cdf.columns != 'CO2EMISSIONS'],cdf.CO2EMISSIONS, train_size = 0.8) #alternate way to split #msk = np.random.rand(len(df)) < 0.8 #train = cdf[msk] #test = cdf[~msk] #X_train = train['ENGINESIZE'] #y_train = train['CO2EMISSIONS'] #X_test = test['ENGINESIZE'] #y_test = test['CO2EMISSIONS'] ###Output /anaconda3/lib/python3.7/site-packages/sklearn/model_selection/_split.py:2179: FutureWarning: From version 0.21, test_size will always complement train_size unless both are specified. FutureWarning) ###Markdown Modeling: ###Code # changes shape of data from (n,) to (n,1) def transform_data(data): features = np.asanyarray(data) features = features.reshape(-1,1) return features def modeling(regr, features, y_train): y_train = np.asanyarray(y_train) regr.fit(features,y_train) # The coefficients print ('Coefficients: ', regr.coef_) print ('Intercept: ',regr.intercept_) return regr def plotting_best_fit_line(regr,xx, yy): plt.scatter(X_train.ENGINESIZE, y_train, color='blue') plt.plot(xx, yy, '-r') plt.xlabel("Engine size") plt.ylabel("Emission") plt.show() test_scores = list() train_scores = list() ###Output _____no_output_____ ###Markdown Linear Regression ###Code from sklearn import linear_model regr = linear_model.LinearRegression() features = transform_data(X_train.ENGINESIZE) regr = modeling(regr, features, y_train) yy = regr.coef_[0]*X_train.ENGINESIZE + regr.intercept_ plotting_best_fit_line(regr,X_train.ENGINESIZE,yy) ###Output Coefficients: [39.37007679] Intercept: 125.30753624621028 ###Markdown Model Evaluation We compare the actual values and predicted values to calculate the accuracy of a regression model. Evaluation metrics provide a key role in the development of a model, as it provides insight to areas that require improvement.There are different model evaluation metrics, lets use MSE here to calculate the accuracy of our model based on the test set:+ **Mean absolute error**: It is the mean of the absolute value of the errors. This is the easiest of the metrics to understand since it’s just average error.+ **Mean Squared Error (MSE)**: Mean Squared Error (MSE) is the mean of the squared error. It’s more popular than Mean absolute error because the focus is geared more towards large errors. 
This is due to the squared term exponentially increasing larger errors in comparison to smaller ones.+ **Root Mean Squared Error (RMSE)**: This is the square root of the Mean Square Error.+ **R-squared** is not error, but is a popular metric for accuracy of your model. It represents how close the data are to the fitted regression line. The higher the R-squared, the better the model fits your data. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). ###Code def evaluate_model(regr,features,y_test): y_hat = regr.predict(features) print("Mean absolute error: %.2f" % np.mean(np.absolute(y_hat - y_test))) print("Residual sum of squares (MSE): %.2f" % np.mean((y_hat - y_test) ** 2)) print("R2-score: %.2f" % r2_score(y_hat ,y_test) ) return r2_score(y_hat ,y_test) train_scores.append(evaluate_model(regr, features,y_train)) features = transform_data(X_test.ENGINESIZE) test_scores.append(evaluate_model(regr, features,y_test)) ###Output Mean absolute error: 23.21 Residual sum of squares (MSE): 942.24 R2-score: 0.69 Mean absolute error: 24.12 Residual sum of squares (MSE): 966.52 R2-score: 0.71 ###Markdown Polynomial RegressionThe dependent variable can have quadratic, cubic or higher order dependence on the independent variable. In general regression can be represented as: $y = \theta_0 + \sum_{i = 1}^{n} \theta_i x^i$In case of linear regression, n = 1, in case of quadratic, n = 2 and so on. For polynomial regression of degree n, there are n + 1 parameters to be learned i.e. $\theta_0, \theta_1, ..., \theta_n$ "**PloynomialFeatures()** *function in Scikit-learn library, drives a new feature sets from the original feature set. For example, lets say the original feature set has only one feature, ENGINESIZE. Now, if we select the degree of the polynomial to be 2, then it generates 3 features, degree=0, degree=1 and degree=2"* [1] ###Code from sklearn.preprocessing import PolynomialFeatures def polynomial_regression(degree): poly = PolynomialFeatures(degree) features = transform_data(X_train.ENGINESIZE) transformed_input = poly.fit_transform(features) regr = linear_model.LinearRegression() regr = modeling(regr, transformed_input, y_train) train_scores.append(evaluate_model(regr, transformed_input,y_train)) XX = np.arange(0.0, 10.0, 0.1) yy = regr.intercept_ for i in range(degree): yy = yy + regr.coef_[i+1]*np.power(XX, i+1) plotting_best_fit_line(regr,XX,yy) poly = PolynomialFeatures(degree) features = transform_data(X_test.ENGINESIZE) transformed_input = poly.fit_transform(features) test_scores.append(evaluate_model(regr, transformed_input,y_test)) ###Output _____no_output_____ ###Markdown Quadratic (n = 2) : ###Code polynomial_regression(2) ###Output Coefficients: [ 0. 50.50477736 -1.49048584] Intercept: 107.65740232367344 Mean absolute error: 23.26 Residual sum of squares (MSE): 932.89 R2-score: 0.69 ###Markdown Cubic (n = 3) : ###Code polynomial_regression(3) ###Output Coefficients: [ 0. 
32.89953949 3.41939197 -0.41365956] Intercept: 126.23221009188595 Mean absolute error: 23.21 Residual sum of squares (MSE): 930.70 R2-score: 0.70 ###Markdown Quartic (n = 4) : ###Code polynomial_regression(4) ###Output Coefficients: [ 0.00000000e+00 4.03068346e+01 4.04490447e-01 8.35632327e-02 -2.84822566e-02] Intercept: 120.10843004250736 Mean absolute error: 23.20 Residual sum of squares (MSE): 930.66 R2-score: 0.70 ###Markdown Quintic (n = 5) : ###Code polynomial_regression(5) ###Output Coefficients: [ 0.00000000e+00 1.12082224e+02 -3.97456196e+01 1.04284346e+01 -1.26482860e+00 5.52003871e-02] Intercept: 73.09655435577093 Mean absolute error: 23.20 Residual sum of squares (MSE): 930.01 R2-score: 0.70 ###Markdown Comparison ###Code regressions = ('Linear', 'Quadratic', 'Cubic','Quartic','Quintic') y_pos = np.arange(len(regressions)) fig = plt.figure() plt.plot(y_pos,train_scores,marker='o', linestyle='dashed') plt.plot(y_pos,test_scores,marker='o', linestyle='dashed') plt.xticks(y_pos, regressions) plt.xlabel('Degree of Polynomial') plt.ylabel('r2 Score') plt.legend(('Train_score','Test_score')) ###Output _____no_output_____
indiv_articles.ipynb
###Markdown looking into the titles of individual articles with certain features ###Code import logging from gensim.models import ldaseqmodel from gensim.corpora import Dictionary, bleicorpus, textcorpus import numpy as np from gensim.matutils import hellinger import time import pickle import pyLDAvis import matplotlib.pyplot as plt import os from scipy.stats import entropy from IPython.core.debugger import set_trace alldata_new = pickle.load(open('output/dtm_processed_output.p', 'rb')) alldata_new.keys() # load data doc_year=alldata_new['docs_per_year'] doc_ids =[0]+list(np.cumsum(doc_year)) term_topic = alldata_new['term_topic']# term_topic is n_years*n_topics*n_terms terms = alldata_new['terms'] doc_topicyrs = alldata_new['doc_topic'] doc_topic = [] for year in range(len(term_topic)): doc_topic.append(alldata_new['doc_topic'][doc_ids[year]:doc_ids[year+1]])# doc_topic is nyear*n_docs given year*n_topics # rename topics by their top freq word topics = range(term_topic.shape[1]) def topic_label(topic, term_topic, terms): term_freqs = np.sum(term_topic[:,topic,:], axis = 0) max_term = np.argsort(-term_freqs)[0] return(terms[max_term]) topic_labels = [topic_label(topic, term_topic, terms) for topic in topics] print(topic_labels) """with open('output/all_visdtm.p','br') as f: allvisdtm=pickle.load(f) for visdtm in allvisdtm: visdtm[0]['topiclabel']=topic_labels""" #topic_labels alltitles=alldata_new['docnames'] doctitle = [] for year in range(len(doc_year)): doctitle.append(alltitles[doc_ids[year]:doc_ids[year+1]]) with open('topicnames.p','rb') as f: topicnames=pickle.load(f) # feature1: most typical article of each topics """with open('highest_freq.txt','w') as f: for kt in range(len(topic_labels)): f.write('\ntopic '+str(kt)+': '+topic_labels[kt]+'\n') for year in range(len(doc_topic)): topicfreq=np.array(doc_topic[year]).T[kt] idx=np.argmax(topicfreq) title=doctitle[year][idx] f.write(str(year+2000)+': '+title+'\n') """ # we read the titles and give names for each topic. then store it. topicnames=['Sequential learning','Face and emotion perception','Reasoning','Text processing and creativity','Mathematical psychology','Decision making','Language: syntax','Causal reasoning','Knowledge structure','Developmental psychology','Spatial cognition and embodied cognition','Memory','Categorization','Language: semantics','Educational psychology','Neural network','Communication','Probabilistic modeling','Consciousness and identity','Visual attention'] with open('topicnames.p','wb') as f: pickle.dump(topicnames,f) # feature 2: papers connecting 2 fields maxtp = 17 # the leading topic # now search for each secondary topic for kt in range(len(topic_labels)): if kt == maxtp: continue f.write('2ndary topic '+str(kt)+': '+topicnames[kt]+'\n') for year in range(len(doc_topic)): topicfreq=np.array(doc_topic[year]).T[kt] toptidx=np.argmax(topicfreq) # topic kfreq = topicfreq[kt] title=doctitle[year][idx] # feature3: "purest" / "most chaotic paper" maxen=0 with open('maxmin_entropy.txt','w') as f: for year in range(len(doc_topic)): f.writelines('\n'+str(year+2000)+':\n') alldocs = doc_topic[year] allentrop=[] for d in alldocs: allentrop.append(entropy(d)) # rank them maxE = np.argmax(allentrop) minE = np.argmin(allentrop) f.writelines('max:'+doctitle[year][maxE]+'\n') f.writelines('min:'+doctitle[year][minE]+'\n') # max entropy across years? if maxE>maxen: maxen=maxE maxtitle=doctitle[year][maxE] print(maxtitle) # does entropy correlate with length of title? 
-- no, not really alltitlen=[] allentrop=[] for year in range(len(doc_topic)): alldocs = doc_topic[year] for d in alldocs: allentrop.append(entropy(d)) for idx in range(len(alldocs)): alltitlen.append(len(gettitle(year,idx))) plt.plot(alltitlen,allentrop,'.') plt.show() for kt in range(len(topic_labels)): print(len(doc)) ###Output _____no_output_____
machine-learning/K_Means.ipynb
###Markdown ###Code import matplotlib.pyplot as plt import numpy as np ###Output _____no_output_____ ###Markdown 1) 가상 데이터로 실습하기 1.1 가상 데이터 생성 분류용 데이터 생성 메서드를 이용해서 label이 없는 데이터를 생성해보자. make_blobs를 이용해 정규분포(가우시안 분포)를 따르는 클러스터링용 가상데이터를 만들 수 있다.* 주요 parameter: - `n_samples` : data의 수, 기본값은 100 - `n_features` : feature의 수, 기본값은 2 - `centers` : 생성할 클러스터의 수, 기본값은 3 - `cluster_std` : 클러스터의 표준편차, 기본값은 1.0 - `random_state` : 난수* 반환값: - `X` : [n_samples, n_features] 크기의 배열 - `y` : [n_samples] 크기의 배열참고 : [make_blobs 가이드](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html?highlight=make_blobssklearn.datasets.make_blobs) ###Code from sklearn.datasets import make_blobs # 3개의 중심점을 가지는 데이터 200개를 생성 input_feature, _ = make_blobs(n_samples=500, centers=3, cluster_std=0.8, random_state=30) input_feature[:5] plt.scatter(input_feature[:, 0], input_feature[:, 1], c='blue', edgecolor='k', lw=0.5) plt.show() ###Output _____no_output_____ ###Markdown **[잠깐! 상식 - 1]** **plt.scatter 함수에 대해 알아보자** . plt.scatter( A , B , ... ) . scatter 는 산점도를 그려주는 함수다. plt.scatter(A, B)를 하면, 좌표 (A, B) 에 점을 찍어준다는 의미다. 예) plt.scatter(1, 2) 라고 한다면, 좌표 (1, 2) 에 점을 하나 찍는다. . 그럼 동시에 여러개의 점을 찍고 싶다면? 예를 들어, 좌표 (1, 2) , 좌표 (3, 1), 좌표 (4, 3) 세 개의 점을 한번에 찍고 싶다면 아래와 같이 A 자리에 [1, 3, 4], B 자리에 [2, 1, 3]을 작성해주면 된다. . plt.scatter([1, 3, 4], [2, 1, 3]) **[잠깐! 상식 - 2]** **numpy array를 알아보자.** 위에서 input_feature 와 같이 여러 숫자를 array[ ] 형식으로 담고 있는 데이터 형식이 바로 numpy의 array다. . 그럼 예를 들어, A = array([[1, 2], [3, 4], [5, 6]]) 와 같은 array가 있을 때, 1, 3, 5만 지정하고 싶다면 어떻게 해야할까? . 우선 A[0] = [1, 2], A[1] = [3, 4], A[2] = [5, 6] 이다. . 그럼, A[0, 0] = 1, A[1, 0] = 3, A[2, 0] = 5 다. (여기까지를 Indexing 이라고 말한다.) . 자 이제, A[0, 0], A[1, 0], A[2, 0]을 한번에 표현하는 법만 알면된다. . A[:, 0] 이렇게 표현하면 된다.(이를 Slicing이라고 한다.) 1, 3 = A[:2, 0], 3, 5 = A[1:, 0],1, 3, 5 = A[:,0] 으로 표현할 수 있다. 1.2 모델링 KMeans* 주요 parameter: - `init` : 초기 군집 중심점의 좌표를 설정할 방식, 기본값은 'k-means++' - `n_clusters` : 군집화할 개수, 즉 군집 중심점의 개수, 기본값은 8 - `max_iter` : 중심점 이동 최대 반복 횟수, 기본값은 300 max_iter 횟수만큼만 학습을 반복한다 만약, max_iter 이전에 더이상 중심점 이동이 없다면 학습을 중단한다* 주요 Attributes: - `cluster__centers_` : 군집 중심점의 좌표(Coordinates of cluster centers) - `labels_` : 각 데이터 포인트들의 label값(Labels of each point) - `inertia_` : 각 데이터에서 해당 군집의 중심점까지의 거리 제곱합(Sum of squared distances of samples to their closest cluster center.) 
- `n_iter_` : 반복 실행 횟수(Number of iterations run.)참고 : https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.htmlsklearn.cluster.KMeans ![kmeans.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAxoAAAG+CAYAAAFjVUIVAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAEk0AABJNAfOXxKcAAOsaSURBVHhe7J0FnFXFF8ftwEJFRUBsVOzu7sRusbtQ7EKw9W8nCiomohIKiIEI2CIWYHdgAoJiO//9nnvPMnv37fIW32Pf2/19P5/53Hvnds05Z86ZmRmCKCn0QkoMvZASQy+kxNALKTH0QkoMvZASo7gvZLHFQlhooRB++imEv/6qOFvu02277bY2PeCAA2zamCnuC/n44xDWWCOZv/32EI4+OpnPsNFGG6VzIay55pphs802S5cSNtlkk7DwwgunSw0bFVklhl5IiaEXUmJM9xfy77//pnP/nRdeeCGdazjUyx/yzz//pHPTzvPPP5/ONSzqrcj6+++/07m6M2zYsHSu4VGvMuQvbJM6MmTIkHSuYVLvQr0uL+Wpp55K5xou9f5CRFX0QkoMvZASQy+kxCiJF/Lss8+GmWaaKV2quKi0VjjOmxpbbbWVbT/77LOnObXj5yg1SuKq7r///nQuYfLkyTblofXt2zfss88+YfDgwWGppZYKL7/8cujSpYutv/fee20awwtZYIEFbF8SVfr+8H37VVZZpXI9iRrmNm3a2Lr6pmT+kJlnnjldSphxxhntYfkXz3zHjh3theRihx12CJ999pltz5/C9uAP/frrr7dlx/Nnm222ym1LgdK5knqkbdu2YYkllkiX6he9kBJDL6TE0AspMfRCSgy9kBKj3l4Iauqff/6ZLoUw99xz29TV0W+//TZss802ljfrrLOGW2+9Ncw333y2DF9//XV4++23bX6//fazKfsdfvjhNg+vvPKKeShbtWplWtSvv/5qESyxL+aee+4x+wT1FzjGjz/+aPP1Qb3/ITwoquB/+eUXW27ZsmVYiFiuCo455hibTo2ff/7ZpmeccUblcWDs2LHpXEJc1f/MM8/YlBAkroF9F1lkEdt//Pjxtq4+UJH1XyFG4NNP04WUzIeQkxdfDOHNN9OFKeiFFIKOHdOZCiqK14qyMYnadD74IIR27dKFDJ07pzMJeiElRlFeCOE5DT0VC/0hJUbRXwhay0033WTzTZs2tSnMMcccNu3Xr5/V7IqEor2Qo9NId7c1ttxyS5s6//vf/2yKmuuBc6uttppNGzMqskqM6fpC8P4BHkJcrqNHj7YiDQt6jbQdCfOXX365GWfMw/Dhw8M333wTHn/88QoNskKFrGD11Ve3KUXfDz/8YPMNgen2Qv7444/w+++/27xPwYu0eJ3Psw/E22fxbRoKRX0h/iCPP/54c6861CWdf/754YILLrA/5dprr61cz7rffvstvPTSSzY/YcKEcOmll9o6l0sNGcmQEqPeXghfvlf+UTk4ceLE8N1331kwwlFHHRU+/fTTMGbMmDBgwADbxuUJHHnkkelcCNtvv31lsTdixIgqDUfZh2Nna5Zjxo0bZ1Nk1B577FF5nv79+9s+vXv3DhdddFHo06eP5Rcb/SElRr29kI8++qhSU3JefPHFcMIJJ5h/4+STT64isO++++50LoTnnnvOpqNGjQrXXXed/Wl8xfhN0OSoZn/zzTfDiSeeaH8aXHjhheHAAw8MgwYNsuVSRX9IiVFvLwQtauutt7b5W265JbzzzjuV5Tlgc3xMO/cKCKSDI444wqaXXHJJuOqqq2wfyn6gvH/rrbfCOeecY84mcM2Nv+fMM8+08/To0cO25fyFaFpXaPSHlBjT5YU89NBD5t8+77zzzBd+++2329+x4oorhgcffDB069at8u/ALmnRooXNN0b0h5QYeiElhl5IiaEXUmLohZQYeiElhF5GCaGXUULoZZQQehklhF5GCaGXUUIU52U8/XQI3oCGRvdzzhnC++/Tu1iSl4P/0mlZQ6E4L4PG+7vthkM6Cb8nKvG990J48sl0g+rg5dtiiy3CIYccYsveWJ/wUnzp3uimIaNiqoTQyygh9DJKiOn6Mjp06JDO5WbxxRcPO++8s83PNddc4fvvv7f5xsJ0/zNOOumkdG7aKcVAg0JQL8UUMVTTSk2RhA2BepMZBFHXFe+crKFSrwK8Ln+I7IxpoDGMcOO9RBQavYxpQC+jhNDLKCEa7MugtxxwAe0VhDS2qQvsR7O1fIh795kWGuzL4MaynR0vtthilS9l6aWXDmeddVbo3r17GDhwoPV/xTrSOuusY9v4tryMuOUsbTxuvPHGsP/++1fu49sC0esw55xzVsmfGg36ZdBU2eGh0OzMoSmZW9x0erzkkktWPrjslJdRkwpM8zSOk33oGJE0wsnm10aDfRmlAO3Z6X06X/QySgi9jBJCL6OE0MsoIfQyphP5+Eoa3MvAqGOIh3bt2tkDwH6IR+P88MMPbUq/tscdd5y1C6QdIHz11VfWv65Dy1aGnUANdhhiwuHYjNNBu0JU2Lh3BPrORZui3xOg79zlllvO5muiQf4Z9FnLQ8YiXn/99S0Pi5yXEncYQ1NkOOigg2xaE3SHkbWuvfE+uFVPv7ned6733es1AflY5yqmSojKlzFyZDKNGTo0namFr79OpnRm+eqr/Nq2qJcxDVS+jGWXZUDcUFFGVjzJ6FF2706PAUne5puH8O67ITz22JRteGE+v+iiyTYV6GVMA3Uqptq3T2civLPNTL9bRXkZufqWbUhp3nnnTe+2sBT8ZYhpp6gvg05YXAVt3ry5TZ1tt93WptOrE65yoCgvw/uNyrLpppva9P333698GQsvvLBN4yEbGisqpkqIoryM92iLUQHCPG4EQy+g9EfI4Ivevy3gwSNhBROodtlll1lfgt77p9OpU6fKPqpgnnnmMUOOP60hMF3+jLhvW1L8gmgIg8Xt68CncT+6tRFXo5Qz0+Vl4C4FvnTqoUj0LQitW7cOnTt3Dl27drWeNqmWcN82L41+Cr/88ktbvvLKK23aUKl3mZHPl99YqPeXgWpL9IarwNSs0ldhr169rOIPqLUFeu2kNpceQoHaVmpwHY5B76A+kgGRIkSV3HbbbdYLHNDvYVxMInO8H8TXX389rL322lZp6UUrwRH0DkqFZrGp95dRTMqtBW29vwz6o6XfW6oZ3n33XesnnT7TycuFV4Pz1/Cw2Y5QH4aR488YP358GDlyZGXxx19CdTzyKNbgSpEG/WeUGyXxMmjrh5zgq6ZPdWwMnD/+JX/++efmxSNC0CMGsT/wEtIbtGtbyAV6ED3ttNNMFjGlB1HgGPQoysiYaGj8UaU2VIT+jBKiKC8DrQcYVwPOPvtsm9Lf7Z133mlai8e5AnbHnnvuab05s82QIUPsy3atqbGgP
6OE0MsoIfQySgi9jBJCL6OE0MsQogb0cwhRA/o5hKgB/RxC1IB+DiFqQD+HEDVQXj8HXeDny377pTNCTBvl+XOstBLRVCHQTXTajjlccUUITZrQ+iOEN96ouLO635q3SW7WrFlYa621wiabbGKBwoSn0GCcof/pqnrllVe27YBuq7/44ouw1FJL2TL9MomGQfmrVeuum878N2i0TwJvK9K2bdtw7LHHWtSLt8BaYIEFQs+ePcNhhx1my0Ak44ILLmjzjPDcGPrmbQzI5hCiBvRzCFEDDfbnOOWUU9K5aWPcuHFmS4CrVFloEtWlS5d0qe48Ty8xomRp8JKDdoD5dIeXZVG6J0rZbrvt0rmkczE4/PDDrTfrQw891JbrAi25ROnTaNQqBrupz3aZnHv48OHpkigHGp3NQevz6dnXD+1za+qPSJQ2jdYgZxy7Yg4QOGnSpEoVTJQnZfFz0BcJo8UUIzE+eSG7VKKnofnnnz/nuRp7qssAM6VAWVxtY+hetzFQrJ7Oi4V+DjHd0M9RBPRzNAz0cxQB/RwNA/0cRaDYPwcv7dlnn7WgQZyGWTAkMdwdggyLYVzOOOOMNgwuUEmAcc+wVvlQDsaufo4iMD1+jqFDh1b5wJj35XiKI4/e3uM8xnlu1aqVzfNR411nnr6y6TmeMHffFuabbz6b5+OP8fWO/xz07Eg0MPg2O+ywg817P6mef/DBB1ttWcuWLSvz4uNm8xgvjirtXNsUGv0cRaDYP8eOO+5onS/CeuutFy6++GKbd3zsO9h6661tuuqqq9oUaMNBVeX3339v3VjzcQ0bNiycf/75YdSoUWGPPfaw7byjeNqG0HUp3ZJm2XfffW1/wlLootQHLiF8fvXVVw9rrLGGLW+22WYmaTy8/pZbbqn8qNu3b2/zjz76qC3H1+rzHAtos+JOUfZ5/PHHK49TaPRzFAHZHNMHfuSffvopXSo8+jmKgH6OhoF+jiKgn6NhoJ+jCOjnaBjo5ygC+jkaBvo5ikAhf45PPvnEqjkdBuKkBxGH4QjnnnvudCmB2huvwaF6lSGnttlmG1uGOeaYw6pEGfQTHnvsMauC3WqrrfJuaMU1eMMpzrXxxhvbID3UeNV0jJoCJqnxqo0zzjgjnauKt3d56qmnKs/Js6A3ll9++cWW/wv6OYpAIX+ONddc06pRwcdCe+KJJ2zqxD/HK6+8YlOqToEPpW/fvlV+Dn4M9vGfo0mTJuY0rEuVKB86VbNA+w//OXKRa9h5fmrGgKOqN666JXQ+XvZzOHvvvXc6l8APSjrkkENsmZ/k9NNPDz/++KMt/xf0cxQBqVUNA/0cRUA/R8Og2s+x7LLokMl8x47JdPDgZOr4+qOPTqYjRyLyEbXJuvHjQ2AMem9tydjz48Ylnfux/tdfk3xYbLGkw78KtbjyuIy46teVdsznpFuUNvo5GgY5fw5o0yb5OYgjo8eXWPWbb74QBgwIYaaZkmV6taR3S+ADr7AZK3RKxplM8rAn998/mc/+HL16JdO0Z0ujQkUOd96ZzD/8cDJN0c8hphtFU6vS8bYLTVn8HELUB2X/c3iIOUF/tUE1JdGnBNl9+OGHlseyd7LggYdCOGX1c9x8883hrrvuSpdC2G233WxKB9A33XSTzTP4Pr4MEvMxn376qVWxbr/99tZ7OmHf/fr1sx8rruKkI+jmzZunS6Kx0mDUKv85aFvhxL0W7r777hZ6jp/D872NBGTr/4Uoq5/jggsuSOeoqfurSlectH2goc8RRxxhyyuttFKl5GBKF5wjR44MHTt2tP2WX355W8c8bSbuvvtuWwb6m8Khtvnmm9tx4dRTTw1nnnmmzdPHLW0mHM7rcH73UJ944ol2fF/G6fjqq6/atTO0AdfAz0y/vG+++Wb46KOPbDtRGpTVz/Hee++lcwmdOnUKF110UbpUFT5MPmhAxcLOoGEQtgct8byBzy677BKOOeYYU7kIC+EjRX3DM8y8e8i/++67Si+x2ykevvH++++Hd955x+ZjOFeHDh0qwzLgDerZUwhFIfQEOI9+jtKirH4ObAXHQz8c77Hc+6OlhduvUR239z5ISY694Z1Do2qddNJJ4aGHHqr82PkpgBJ9/PjxpobdeuutJi285R3xSXQ2x3kGDhxocVS0AgQkFc1jr7rqKls+8MADbSrKi7L6OY52L2kOnnzySQuY44OGWWaZxRLwcfJD0E4cCUDy+Kl7773X1CcC62jrzShO/nM4DIc2duxYk1KuyhF75M1Q+VH5wQ466CBb5ofi5xkyZIgte1yWKC8ajEE+NXbdddd0LonE/fbbb9OlYKU+Hz1BeAxnRvIfkaA9VCzWIxHYtlevXqaC+Y8CO+20U7j00ksrpRV2Ro8ePWyeiNv99tvP1C+I99trr72sGtlVtXgdx8Auufbaa006cXy3qei8AYmGCujQjhzVbLXVVktzqh4vC/c7YcKEShXT7x2QkkhTCg2kJWoq94/6xzG9LT1Rw0jI8847z2w37tNts3Kn0fwc7ttwkAT8JE7nzp1t2rVrV5s61113nUW2HnXUURaNi2HPzzF69Oh0iylceeWVVVS5e+65J51LxvP44IMPbH6DDTawKVyRhkJ4TyTxh0XkL1LnhhtusJ+AameHn4NeUPiAGciTgXS8Ywh+fK7Z8R8P+Pi5FvBtcoW+8xOedtppVZ6R4xIRvvzyS2t3fskll9h1gv845U6j+Tn4uJ2HH3443HjjjelSYj9QE+YvnWUSNVV8IKzDJ4Lh3rt3b/soMbJZhxoW4x8a6/xDJ/zl6quvrvzQzj333Mr9/Gfi4+ac7EMPKKh7VCSgrlHDBaz3fq2Yp90I2/i9MQYJ4eUck9o2oDcRMW00mp+DEs7xvp689um+++6zj5GO3RyvwsU/gq2ByoAt0b9//0pDO4aKAD7EXKUwH74b64CUconBOSl5kWSoWMyvvfba1l2QqzuMVuvwMwBVwQ7qFlDaY1vxc/ADiv9Go/k5/AMCStdYF0d1ateunUmGp59+2sYZx6nIx4ke76EpH3/8sdkP11xzjS3HLQi33HLLSr8KtgIfPbaCg80AnAt1C6OfvqPYhsZXSC1+QleBuB4iAPbcc0/7iVHpuBbgWPhWfNzD2267rVJKIRW5Pldx/AdzsIPYHrgWVxtFdRrNz0Gvgc4qq6xiEsA7VfNaJ3wS8U/keGnNB4zej3EKqEnenDTW86n2xXbhwwOXQuADbA4aNMimqGczzzyzqUGc238OJAN2SLdu3ew8SDb/OegwDliHpKGamto6wBfE6FVulLNPbEc9+OCD4Y477rB5zoE0FLkpq58jNnCBUV7dkOWjRV3xqlxKWhJQxUuNCwl1hhJ6ww03tHVsw0dC7RTNUxmAH1UqS2wMi8ZB2UkOdHMvXSmhHUpDHG/u0Y7BoH7kkUcqHXiA8w5QT7AhUDVcCnhtEuehTTZ6PF1+En6CFKG2SjR8Go1aJURd0c8hRA3o5xCiBvRzCFED+jmEqAH9HELkQD+GEDnQjyFEDvRjCJED/RhC5EA/hhA50I8hRA70YwiRA/0Y
QuRAP4YQOdCPIUQO9GMIkQP9GELkoLx+jHS447zIdCAtRF0orx9jttmSAQ5XXjmEeeYJ4Zhjpows6gMfMr/ppiG0bZss5wG9EdK9juPjgmfJ1beUaJiU34/h42zwA3jnZHTs3KlTMk8+vPVWMs0Deibxrv7pGIFhBujgeal0+Fw6iaa/J7rB6dOnj+XRDy59R9ElD/P0+cQwavRqIsqf8vox1lyTscuS+YqP1X6Mio8zXH31lHGll1giWV/HkZa8jyjgA19xxRWth5ERI0aE5ZZbzoY7a8kwvCkzVJyLBHTItsIKK1i/UWuttZblifKmvH6MLLUMJ1AX+OBbtWqVLiXd8CAxfEw/hg9YZpllwv3332+dtzH0AKMp+ZBndLlDJ830N4sUEeVPef8YQhQJ/RhC5EA/hhA50I8hRA70YwiRgwb7Y8SD5E8L9JSO04/xNmIYiH/ppZe2MTieeOKJNLfu4BMRpUuDlhg46qaVmrzfDBjJGBTQpk0bm9YV/RSlT4NXpeYhdGQa8SEEgOHCgHE3GIKA8fYYLamu6KcoDxqFjdGkSZN0rn6ZaaaZ0jlR6jQa43vOOedM5+qHGesYoiLql0bzY8Acc8yRzk1fPKZKlA+N7o3NRoTudEQ/RXnSKN/a9Po59FOUL432zc0yyyzpXHHQT1HeNOq3Ny3Vrfmgn6L8KYs3SN0/tUrFSLS1KCQ4FXOdp7Enqsw///zz9CmVPmXxY9x3333pnChnvvzyy3Su9NGPIaYb+jEKjH6MhoF+jAKjH6NhoB+jwOjHaBjoxygw+jEaBvoxCox+jIaBfowCox+jYaAfo8AU+8fwdhKDBw/O2ZCI9Y899li6VPHQZpihKA2OPvvsMzs26fXXX6+TB/23335L52qG433//ffp0vRHP0aBKfaP4R9gTR8iXtt4XYsWLYrSviI+x7hx42q8niyPPPJI2G+//dKl0kU/RoGZHj9GbR8hP4Gv79ixY/j0008rfwzy6bbT19NRQu/evSuXmdIPLt160ifuH3/8YXncU9x7OkGNHCcmPgb88MMPlZ08kMcPQWfUG220kXUZisRr1qyZ9aFLHBgdVHOdnHubbbaxff766y/LQ+JxLNb58Tp37mzTd955x/IKjX6MAjO9fozTTjvNlidMmGDL/hHyIT377LNh5513tvxJkyZZ3i+//GJT+r51dYyS248HPh07dmyYa665bJ687bff3uYdPtQePXqkSwnZY2R/DO9Amp7W99hjD5snn+shZqtbt252fa4+sc5/DH5QzxszZkxlKH7r1q31Y1SgH6MC//B8moUPCVhPtzr+Y3heTPZYPv3222/tx6CzaFhooYXCjz/+aPMwefLkKsfi48we46WXXrIf459//rHlq666yjqS7tu3b9h6660tLz4GcJ38wMC6XD9Gdqofo+I5pNOSptg/xgYbbGBTPhafj+FngI033timfMSe99RTT9nHtM8++9jy8ssvH3bcccew9tpr27JPf/rpJxsugAhTPsxdd93V8mPef/99K7n5gZBa8b6c45NPPgnbbbedGdpImLhndVSnxx9/PIwfP97WLcuAOhVwnVwvrLfeeqZ6kccPAn6O9u3bWxQs5+cnLgb6MQpMsX8MEcKNN95oP5ZLjmKgH6PA6McoPlQVf/DBB+lScdCPUWD0YzQM9GMUGP0YDQP9GAVGP0bDQD9GgdGP0TDQj1Fg9GM0DPRjFBj9GA0D/RgFRj9Gw0A/RoHRj9Ew0I9RYPRj1B8el1UI9GMUmEL9GESdAsOFAaHaX331lc0TvzR69OjKuKKaaNq0qcU85XrJNGY66KCDbJ64qxdeeMHmp8Yqq6wS1l13XZsnFJ1Aw1NOOcWWL7zwQgvTIJw9S02Npb7++ut0LjcEI2bxYx199NE25Zy//vqrBTsuuuiioXnz5pb/X9CPUWAKKTE8Fuibb76x2KD4xyBYL/4xCMgj6C4OquNnqunHINSbEHDaNcCrr75qU+CD54Mleeh4jIekcz6CBP06n3766bDEEktU+TFOPfVUC/jjY1544YXtOvmZJk6caD/3wIED7UdjrEAia3v16hWuvfbadO+q3HvvveH++++vbAvCz81x49Gf+CkKEUOlH6PAFOrHoE0FHx0fqXfo7CHZTvxjDBgwwKZxg6Inn3wybLXVVjVKDKJtkSqkXCUzECUbs8IKK6RzifTgmlxiOFmJ8d5779kHzM9InBPwY/BT0JDKJQAtAWsDqbDqqqumSyG0a9fORqT1/VkGSYwSpBg2BsMSUxKTgI+R5O0l4NZbb7VOn7MdP5977rnhu+++S5em8Nxzz6VzCW+99VY6N3XiH5JroK0F+HXRSMrxthRZ+CHA238jSZznn38+PPPMM5bi6/RnsPjii9syahP3y4i0FAicm59niy22sPX/Bf0YBUbGd8NAP0aB0Y/RMKj2Y1RIMSOyxapRYfeFIUPShf9Ihf0VfvghmX/7bQy6ZJ7hCd58M5lP0Y8hphvVfoxWrdAVQ/DKiPffR/9M5gGVMa3lC4yA5eNrvPhiCL16hTBsWAgjRoQwahT6Z7I/8MNdcUUIca3gLbckUyoRTj8dwzGZhwqVOWy7bTKfoh9DTDeq/Rg77ECb3Ck/xssvUzORzIN/zOAftM9//DFOlin7kud2GPP9+yfzToXtZCCdfAzGtm2TKdsedVQyn6IfQ0w3cv4Y4CX3gQeGcMwxyTzw4c8+ewh9+oSwzDIh3H13CF98kWxPTdyTTyY/huedcALdsSTzTz8dwmuvpQeqIKp5C927J9O0wiG8914IzzyTzKfoxxDTjaIY3zl8QjXy4YfpTEpU05dFP4aYbhTlx3BjusCUxY/RqVMnC69QKt80osJILsqPUSTK4scQYnqjH0OIHDSYHyPu7pIAwSx0cEwCeuEj3CHOEyKmrH8M4os8tuluqvIq+OKLL8Ltt99uem1N0Dky0Pse0Du5EDFl9WMQ3BYPkPLzzz+nc8mPQXQpkaZErxJQF0eWnn/++elcCLvssosF4t1zzz2hrTt5Iq688soqA8WIxkfZ/Bi0JPNI2FzEoeFOrCYdeeSR6dwUiUGX/bSD8EZCzk477WTtGkTjpcHYGHfddVc6N4U4PJsGPbSjoEWa/xgepi1ElgbzYwhRSMrqx/AWdbD33nuH3XffPVx88cW2zPgUBx98sDUuOvPMM63tNSMkPfTQQ7beoXUayVWvQw45xPaDs88+26Zw4IEHWiu6K664InTv3t2aq/p27E+z0csvvzwst9xyoUOHDuGGG26wdex33nnn2TzbHXbYYTaPKogE88qC+BoYbcmb2IrSoKx+DNomx1ArdbpHXKYw2hFj02FgM9oQw3dheHuLOzpCyHZ37yrXMMKYU2ixxj4XXHCB/QCMa4dRDrTLvuOOO2wd29G2mx/ommuusfUMAAPZc9FWPBfvvvuuXSeDyojSoGx/DD5MQkVOPPFEW77ooousFAdKbzoMWIaIzAo+/PDDSt8GPoy4S5jjjz/eRkCCt99+O7yZNlhhTDvmOS6jG/GTHH744bbutttusxqwUaN
GhZtvvtm2i6VNPmDvODQdFaVF2dsYPmQWpT6JZUp0VBZXW+L5LFT/ehVwrn04FrVb8br4XCTyXeqIhkHZ/BioJQ888EC6lAQWHnvssdYxAT1jAD14wJAhQ6zxP/o+xB8tEoIhiZ2TTjrJEqAWubqD/r///vvbPDD4I13ZuF3AOj8+QwUTAfzEE0/YMioWnQjQGcGee+5p4+bF1cWi9CkriRE76fj46BwMFQadHt8DHz2g3rCejxe1iVLf1Sc6MCN56b/55puHzTbbzOaxBzbZZBObh9j5h6EP/mPEx8TQp6sc/0Hd+HYHIz/wEUccYfOiPCibH8N71/OP0Utg9HM6RKM/Jy+x99prLxtQvhVtiivgR8CABmyFuOsbuqVx24GPnR/FOeCAA9K5xIYh5YIB6JE6DEAPdE9DbVjsYNSPUV6UvY0hRDFoFD8GtVT03Ofgu4hx3wZQsrvvgdoo1LWTTz7ZluNOx+iwjeMyXjdT4rO81ozeBAH/BtXH2Bvff/+95Z1wwglh7rnnNpUNbz1qmPelyzVQW4VdQi0a18k1APaM18ABUs4lXVwjxvXjc+natWsYNGiQVVkzvrfjfeuOHDnS1D9fZjvum645CarkOB6Lxtjl+GvAnxXr6FAOm8zVy4YkFRuNxODF8zJ5idkaJD4+77UPtcz7qiUPX0n84eFAhEsuucQCFnE0nnPOOfaB9OzZ09a5P4NB5bt162YOwjvvvNPygLAU4rM4D/bNx/R4kYJT0QfTRxXjmqkaxo56mV40KuD6CbP3n4aKBn4+4KdCpaM6+7jjjrO+dP3H4HjHHHOM/aQrrbSSfdheCPAjcj1EJ1OIcH+cG/WSZ8bU4fzkvfPOO6aWes0gfQFnfU3lSqP4MQhDj/Efw0u6mvCfBQ+646UopT3777bbbiYF+GHiHwPjnv34UKkF8/gsPmDOH/sx/PrcJiG40T9EOprOBT+r20D8NO6n6dKli81zPVwX1+A/Bj8KPwI1eh5F4E5F+qp1G4oPnf1iCVUTXB+ee3jllVds2hCQjSFEDhrFj0E4h6shEPsxUCvOOOOMytouSlCWUTlw/DVp0sTygVKfHtPBS2P3dWADYIvQnT46PutQkwj3wAbBlwGEjsw222xmf4BLIGAoghhq1jguqhpV0FzXOuusE1588UWTFn4twHpqwrxHFdSa+NiibjQaicHPgIEc68qArk71rv8Yw4cPtw+PKSqP68+A3g2oUHz4Mdgw+CtQUbxq2OFn8eMQnkJbD64FR6Qb9pBV7eh5vE+fPubEpJcNbILXXnvNQlBef/11u8YYjGbUOX4I1CAMfzFtNIofwz9ohw8f/ENk8BRa8wEfbNxKkOhd379v376hf9T1IzVSzmWXXRbOOuus0K9fP1tmHvhY43YfRP4SMOiRuujoXrIzH9sUPm4FgZLYHUOHDrVlpBCG/Q7ek18F7ptxO0f8N2RjlCj5tiD0QWNEYWkUPwY6uodrQDZU/brrrquUHnSQ4LVE1ByhguE5h1tuuSXcdNNNNu8QxUuVKLVArkJ5GxFApUE64PcYM2aMHdulyvXXX1+pYjGPjeA1YXEHDZwDm8HPzbbe2QO2BtKOUHtAxcOecr9JPBIT527durUdC+889436J6rTaCQGHzftJvgQPTw9JqvfAyrWtttua8Y7EHbCR024OYYwPwI/DsY1PxvLjHcXg+1BlS3t1d944w3b3z967Jvsed034SMqAedABfProKoWtQ746fGVxG1JuAZ+MH40Oolw9QoVkp8d28YHoRS5aRQ/htcAxfABusFNTVGuH8NxSYB/go8OoxxHHMYtHy15zPuglDH8QBDHTXncVxxxixcbx5r/GOA/GVKMnxSnHfhPgEMO7zYeea+h4kfEX8FPx0/NvrQtAaQDP2VcieASSlRFNoYQOSibH4NmpbH4p6QmvJt4IvRp4nRcvUBlYB71hdI5jorFtxC3mEPf/t///mfzVI0KAWUlMQi/cKjXp/0FqgPh5vw03vEB6gv6OG0yiBvKdpOD2oOaQu/bcfWo/0DYDPxQqDW+3qc42K6++uqw4IILmhNQNEzK5segzh6jOVs9ST5D9BKng/cXNt100yqdEOBg89ig+OcCjFw3dDFw3dbI1WsHPyPHBqY1tc8Q5Y9sDCFyoB9DiBzoxxAiB/oxhMiBfgwhcqAfQ4gc6McQIgf6MYTIgX4MIXKgH0OIHOjHEEIIkTcSGkIIIfJGQkMIIUTeSGgIIYTIGwkNIYQQeSOhIYQQIm8kNIQQQuSNhIYQQoi8kdAQQgiRNxIaQggh8kZCQwghRN5IaAghhMgbCY1iwSD16YCaQgjRUJDQKBZPPx3CbLMl8x9+GMILLzCwfQh33RXCN98k+aNHh/DKKyF8+WUIPXqEMHZsks/27A+Mqfn44yFMnhzCc89VvLGKV3bLLYlQEkKI6YyERrGIhcYFF4TQvHkyP2JEUvBPnBjCsceGsOiiST6Qf911IVx4YQhzz53k/f13kv/GG8ky82+/ncwXmBlnnNFGSb/22mvD3nvvneYmfFghyHbeeed0KT+aNWuWzgkhGgoSGsUiKzRWXDGZHzMmKfixFBAazL/2WmKBuEC4/fZk/vXXQzj//GQ+FhpYJQidIrHIIotUGEAVFlDEiAphd9ppp9n8qaeeGvr162fzSy21VPjrr7/CcccdZ8s//vhjmH/++W2+devWNoUZuO4Un2eKMIINNtigwvCqsLwqmK3iuf1dISybNGliy127dg0ffPCBzQsh6pcpf7KY/iA0KgrLUuGggw4K5557brpUleeeey6cfPLJNn9sxXU/9dRTNo8AoYCnoIdJkyaFOeaYw+ZzCY2PP/44zDLLLDYfC5I111yzUoAcdthh4Y8//qg833nnnRduvfVWmxdC1C8SGvXJhAmo5umCEEKUPhIaQggh8kZCQwghRN5IaAghhMgbCQ0hhBB5I6FRRhCZ1LJly3Spfthjjz3SuRC23HJLm06ePDmss846Np/l7bffDhcQclzBSy+9ZG1A/v3337DNNtuEww8/3PLrg5lmmimMHz8+XRJC5IuERhlyyimnWFsICt/64KyzzgotWrRIlxJqa/hHg0HCawcNGmTLCD/o379/uPrqq21+ejHzzDOH559/Pl0SQtQVCY0ypkOHDmHeeeedrsLj559/Nqvik08+Cfvvv3+aW7vQoA0G0GgQ/vnnn8oGgNMLCQshCoOERgPgpJNOCnPPPbcVxqXAAw88kM7VLwhTqqEkLIQoHBIaDYgTTzzRut4oFeFRX3D/EhZCFAcJjQbI8ccfb1150L1HY4I+sOh0cfjw4WmOEKLQSGg0YBAes88+uxWmDRn6qcLRPnTo0DRHCFEsJDQaAQiPWWedtcEJj99++82ExZAhQ9IcIUSxkdBoRLjw+PPPP9Oc8uSXX34xYTF48OA0RwgxvZDQaISUq+UxceJEExZP+6iGQojpjoRGgaBV9IQJE8om0d7i4IMPLgvhwfUiLHr16mXXnb0XJaVsorV/Y48iLBYSGgXigAMOCD179kyXhBD1ycILL2w9EY
jCI6FRIBAa9913X7okhKhPFlpoIQmNIiGhUSAkNIQoHSQ0ioeERoGQ0BCidJDQKB4SGgVCQkOI0kFCo3hIaBQICQ0hSgcJjeIhoVEgJDSEKB0kNIqHhEaBkNAQonSQ0CgeEhoFQkKjYUODsXiwK7pioWV9x44d05zS45577gkPP/xwutS4kNAoHhIaBaLchQY/Ga2un332WVumW3WW47za8G1JK664Ypo7hXvvvbdyPd2Xlzp0r+7Xy6h/jGm+/vrr2/JTTz1V2VkivQgXihdffDE0a9Ys7L777mnOf4PxVU499dR0qXEhoVE8JDQKREMSGowBzvzSSy+drp06XrjedtttNs8xnPfff9/y9thjD5tmhcb9998fmjZtautmmWWWcPjhh6drQvj666/DhhtuaPmsp9uTo446Kl0bwogRI2zgqdVXXz1cd911YbbZZrPtll122cqxyOGuu+4KLVu2tHUkWgzXNEzuGWecYdvwTGoiKzR+/PHHMNdcc4Ull1zSluGnn36y64nzGCZ3jTXWqLwO7geOOOII29/zGMb37rvvtnV0ncIz8H3atGljz9Rh6Fyefd++fW0clU022cTy55lnHsv3bmLYjmOzHfMcCyH17bff2npnp512qjzXVlttFZZYYgl7P2PGjEm3KH0kNIqHhEaBaChCgwKO6XLLLZeuqQoFUJwc9nFh4MegwKf/H+Yp6Ng+3g4o2MiLe95lGUEAFM4xdNXCehccL7/8si2T/HoeffRRW0YwwBtvvGHLFMTxNdcEBS3bxwVzlqzQ+OGHH2x5gQUWsGXI5nmHi9x/tqAGqpJYv9dee6U5Idx6662Wd+aZZ6Y5wawH8gYOHGjLHI9lto0hj+T37NtxXY5v48/ft4kH8PJt3nnnnTSn9JHQKB4SGgWiIVkan3/+eWVB8eqrr6ZbJFVWDHQ0bNgwS1SnOGzrwsAHRaLQ3m+//Srz0fzj7byLc4Zm7datW7jhhhsq01VXXWXbuIbN8ujRo62gZHmfffax9S40Ym2eApk8NHcH4YUVxFjqrCNlBZKzww472Hq08ZrIZWmwXJvQcPr37x9WW201W0fywaNcaGCROe3bt7e8Aw88sMrzIXkh7gU9zzPGj58VGrwfxy0zhMbrr79u81grMS7YJTQESGgUiIbm06DqxqtzFllkEcurDbZzYQAffPCB5ZG8UMwKDXjwwQctj2qTzTbbzKpDWrdubQU8uNWy5pprhk033dSsBZbrIjQQNBxzl112saoXtyRqEhrAu2QbTwhAClPm6Zo9KzSAeyCPQna99darXHah8d1339ky/pGDDjrIqoZY9kGkvBqPtPbaa9uzAa+6w/rbfvvt7dicwymU0AB/51QHch7muUemb7/9tm1TDkhoFA8JjQJR7kJDiJpAYMSCvhyQ0CgeEhoFQkJDNATcGR8nquvKDQmN4iGhUSAkNIQoHSQ0ioeERoGQ0BCidJDQKB4SGgVCQkOI0kFCo3hIaBQICQ0hSgcJjeIhoVEgJDSEKB0kNIqHhEaBkNAQonSQ0CgeEhoFQkJDiNJBQqN4SGgUCAkNIUoHCY3iIaFRICQ0hCgdJDSKh4RGgShFoTFu3Lh0LulEkL6J3n333bDWWmtZ30Yk+lAaO3asXb9DR3v087Tvvvtav0VsQ/fYWTbffPPK42y00UaWF3fOt91224U999wzHHPMMeHjjz+2cSL23ntv6/02hsGC6KSPbZledtll1j35k08+aevpbJDrobNB+n0CtqP32kJCf1s8s/Hjx6c5wa6Z6+K6f//997DzzjuH888/P7z55pu2nn0YiImxNrxvJp5HPsTnKSR0iOjX7YnnRX9gWbbeeuvKd+iJrtt55sB9rbPOOmHddde1d8z9UiA7PBPeFZ0t8p3QHXyu8VSmNxIaxUNCo0CUotD49ddfrQdZeqSlw78s9CeEUPjqq6+s4zq2oVCgQz+gV1s6rqNgoMDO4gM1QTzt0KGDzYP3ZMv+dLZHZ325fmbOD3TyB0ceeWR4/PHHbZ6ODN977z2bX2mllcI333xjx5pzzjkrO0PMh7ijvlxQ+HH/wHNzyOMeeA4bb7yxdU3OOB6wzDLL2BQQjm+99VbOcUg4BtfKuB4UrvFzZnwL4Poee+wxm//000/DjTfeaAU9z4sOF6+//vrw2Wef2XNnO/Zjnv0YGIo8eiju16+fCbddd921MjG+CJ075gMdFLrQ8Hvn/Mx7V/cOygD3QfcjCFwUjubNm6dr6w8JjeIhoVEgSrV6CguBApnCBOhiHM0zHr8CobHjjjumSyE88MADVnDQuyoDAE2ePLma0GBsCHp+pYCkZ9QWLVrYMbwgdCio2OaSSy4xrbYuQgONGSisuN4vvviicr5Lly7ho48+svVZKETpBZd34omuxemlNu52vCZeeeWVKvdLIY8FhRDG4kG4uNBgHb3VIlyPPvpoy8slNPy+HBcaTH3cDyyPVq1a2TzH23///SutF7pSR5AAhfbll18errjiCptnv8UXX9zWAYNR8U5WWWWVsPLKK9uU1Lt373SLKYwaNcrOgXDiW6H7c5a///57Wz9y5MgqFhHX26dPn3Qp6RWX7vMfeughE6gIjUUXXTRdW39IaBQPCY0CUYpCg4KYsa0BzdNHgiMfbZ0xIDxRrQCMqXDuuedWpnPOOccGUKJL8iwIAjRnBEicwDXSiy++2JYpnLbccsucQsNH+0NT59roHp1l17pzwXHisT4KwQorrGBChTE8EHBAYY/AAawmqmhOOeUUExpYaQyOFD8vxg/h2rPwbWAlMaIhBTm4NUOBT5UXzweLAHgOCO/FFlvMnh1Vhe3atbNCnZENqX7CguB6eeZ+TKCr9QEDBoTBgwfbtSB8WfbqvhgEHsKIbwCh6N+Dc+ihh1bpEj22NBCY3DPVlCgiZ599tgk3rrm+kdAoHhIaBaJULQ0hGiMSGsVDQqNASGgIUTpIaBQPCY0CIaEhROkwVaFBNSDVbGkQgkHk36mnpgt5MmBAcpyK8wUCAJh/5JF0ZQ38+ivj+FK/l2bUEcaX3357IlHSjBxwPW+9FQLVlnHkI75Drpnq1wsvTDMr4N4vuyyZJ/qvadNkPgcSGgVCQkOI0iEvobHzziGsvnoI66yT5FFwduyYzHfqlAiA9dcnzJDIjCQ/hvDvKMrOwIfowohjuX+HIAbyieBr2TKZb9MmhOHDQ5hvvuTcK6yAkwknV7IPwSm77JLMczz2IYwegcA8kXtpVGEVuOZu3dKFCpZbjhj1EBZcMBEYDgELPs4/Qzrff38yv/feIayxRjKfg4ozi0IgoSFE6ZCX0PARCQkQoRBmTHkXGgiK2BJgfRai97wgdzgned99F8Lpp08RGp99NuUYvp+HgM8//xSh9MMPybrXXkuEGgkICvH9ff7fJDy8Ggie559PF1IWXjiE7t3ThZRVVgnhnHOSecbcf+edZP7WW0O44IJkPgfpVYj/ioSGEKXDVIUGhfFxx6ULKauuSkx6Mj92bGKFECrN9Kuvkvwsf/2VWASEaBNhWFEOVPLLLyG0bRvCHHOEc
PbZiaByaAw7yywhDBuWWBnrrZdUCVHgP/BAsg1h8ksumRyb/Vu3TvKBAh/BkcvSQFhtuGG6kLLZZiFEodIG+yOkAGvEq6uo+mK5BiQ0CoSEhhClQ1k5wrFwqBIqJFQ7eXVTLi65ZJp9KhIaBUJCQ4jSoayERpkhoVEgaMS2XoWJme3HR0lJqX7Sd/gVRMGR0BBCCJE3EhpCCCHyRkKjnqB/Izr4ox8f+oXaZJNNrDM4ejLtFsdYR9Ar7dTqaakmw79COvjgg+34sMUWW1R2Qjdp0iTrP4muy+nYj15b6S0VOIcQQtSEhEaRoXtvOnHLQk+qdJBHIY8AYUwJuOWWW6wDu/bt21tng3T+RqeBXj9LgX/DDTeEtm3bhpdeesnyaoOeXWHbbbe1HkhjODa9lMIzzzxjY1XkEhr0GrvUUktZZ4ZCiMaNhEYRoKdTxnuoDQZDmn/++a23UroV97Ejrr322nDHHXfYPGNIIEzonTabiNSqaXwEuhV///33bd676UZoMNYDwodeS309IIhIEI8PkQt6dqW3Vno0FUI0PiQ0igwFNN14Z/nkk09MuGBRxImoD7c6gAJ6kUUWqZIY5IZqp5oGFYqFhrPDDjtUsTSwZuhmmyox59tvv60y+JDDeej+2sdzEEI0XiQ0SoyuXbtWae+xyy672HCb2cRIavFASg7jYjAeAgKCxGhq+CuuueaaSmsCyMNaYGhPBltafvnlbUhT93sIIUQuJDSEEELkjYRGESDCCafx/bU146/giCOOsClRTgy5WhMM6+kwpKbDSHeAZcL8BRdcYCO0UeWEoz0XRGz5+M8Ox2QI06yPhLG5nYEDB6ZzwYaAPe+882wI2UMOOcRG8INOnTrZSG5XX3116Ny5s53L8egsquC6px2nMVodMKwpPPLII2GnnXayIVXhsssuqxxREKjSe+KJJ9KlqvizAO4P3w1WE2NnM8/43jH4nNjOAwUYtW78+PE2BjfnzFbvCSESJDSKBE7sXEKDApfxmOOqpbhApsBisH78C0RWUVCSYOzYsSYQKOwohGNB87///c8KbwpaEuNpA2NDM540DnbyL7rookqhwTkQbm+++aYVusx7hFRclQUevUXh6lAQc6wxY8bYMteJg9/Thx9+aPmAMMO3Q5gvQ5eCCw2GDcVv4j4ano9z880323CzjF/N8KcOw5xyX4x5zn0RdeYgzAgwYHhWrhEB93ym10+EJLjQIFLNnzND2CI0ss9ACCGhUTQoNO+9916bR3hMnjzZ5nPBuNExHTt2NOHiyQvqK6+80gpvCjgSForDeNouUFwoAJbAQQcdZGNdo+XTfiO+FoSIgyDDOQ8IAs7r435jBQDH5niejzaPhQOMXQ0INFJ8HRz7qKOOynlfCAa25XhcW5cuXSz/uuuus3PFiSABfDYIHsavRqDwHPbaay8TPnDnnXfauN5sj7XD9LTTTrN1Rx99dJUAAhcatFVhjO4111zTnpXfkxCiKhIaQggh8kZCowh4nTzVHFlo50BVC5owrcDBLQ2qcGiv4ZFPPXv2DMMZ2SvFtXCv50fbRlvHwsCvceKJJ9o5OZ5r3Wjhxx9/vPkYsGC22mory3dWXXVV8yl4evTRRy2f82NFEH1FuC1VWFmItnIIBXawegALYrfddrP5muCaHarpHnroIZvHLxRX23GPNGr0KiRazp900kk277iPyKdCiMIjoVEknnzyyfDqq6+mS1OgGoY6dgr/WxkhqwJvKHfccceZ8xk/BNUkzONUdihEETQUxkB1k0OhmouPPvqoip+BthkOQolz0MUIjfoQDu78pr0G10p3I+TTMNChagkBRBUUQo3tXGgg0LgWEgLSfSvAdgghqqHwRVBN5CHBrKPaycEXwXYO/hs/Lvn4KFwwAudC6IKEhhDFQ0KjCKy++uph6623DhsxOlcFFM6xH4F6dQpXCkISDu5cIHhiSyML/gkHDd2PhwN3yJAhlt+sWTMrkPEpeIrBQY61wvUhDKj/B4TNBhtsYPNYJ7GlwXYIF7ogYRuuY0lGGMsBXZ3EAg0/BMLBr5UEnB9BgoVBIvpr0KBBtg4QFAgFfBPt2rUzgey+iUsvvdSmDr4TIURxkNAQtUKBj9CJU02wLVaDw7YuMGLBkSXex+f9vLUR7+dWB/t5qgnfL76nqe0D8fmyxNfKcbB8cl1/fD6f5iLeN94e4n1yPbssuc4RhyBn9+M8cfL9/RrivCysqytxYILv7+eJl0VpIKHRgHAfwMorr2zTmG+++cZ+9JqsGjordNjOrZhjjz3W2i9gHZDcnwLk41sALAWsANfyvf+sUaNGWdVWr169bJmorKyvx/0eJ5xwgk0vvPBCixTDQvMIK6BVu9OjR4/KiCgKHe6PbfHhcJ1x2K5D63qI/TojR44MX3/9tc3jL6HqMFsg0tYDPLIMCFfGauM5YXnhbwGWsYo8cS8Ox6W6juvwPsWIgnOIAPMCnJ4BHApMQqKx9oiG83NRRQhEmXHPDAIWgxW37rrrmp+K6/CwaY7H+/aqw8MOO8zupybirmWwAOkaBzwCjndP1WqMt73h3XBeT3yjfDfAdfAOeWYcl+dJJ568Ow+Jdohu8/Y9on6R0Ghg0DGhF7S0XaD6ybVwfuCahMZNN91kAoBqNabPPvusFWALLLCAVXF5WnDBBSv9G7HQ4Byx0MD5DrQ5oXCMhQYFfowLDS9gcXDjy5ma0PCChW5V4PTTT7ewXwrrXEIDQQRxX2AU2lTFUYDSviOr0VLQe9VbLDTwDwEFPcfgWVMtSB9fVJ/RKzBtRVj2whntnsIZIUXPxkwRdsB5WYdQJiFsHY5LgUshTYAC1wkuZHlOXE8sNHhHNLSkLQztd5jyvLCA2P7MM8+0NjOcF0FEvkMe3wrXyJQEPIv4ffB9+H68f9rOLLvsspXXBTTG5Pz01Iwfj+uIFQ/a8tCnGdfPPSP8HN4jz45vNtvORtQfEhoNCBzWe+65Z4097KLJUejA7bffXllFwQ+Ln4VEQcIUCwNOPvlkK4DQyD05FBQUmiQ0RbR0LzAoeCgwvQFf7969bUrhxzWS0DBZ9sJ49OjRFs0FCKOHH364ii+IQpiEwEAIURABBRcRa1gKaOTcI4LA8XYfTtxSnevjnggQYDsvIMEFn0PhCu6H6dChg01x5ntQA8+Owu+NN96w4zJ1KGS5fgQaiXP688KaQVDzHElx2x0K/9dee83y2cetOK4VJcEtTN5/bXir+dtuuy0MHTq08n3SoDMWlvRF5us8Ac+5X79+di2k5557zoQIjSjpJNO/IZJbHrwjlhm3hXtFqeC9OlhCCE4EKMkFChF4NObkWXLPbvmK+kdCQ5gG6U5yr5qJ67nj9dntmKfAYQouiHx9ruNNDS/AmLJ/vvtSeMVCxqGwi6/TrxX8+pya7iv2mWAtMfX1rm37tn4+T3798fZ+X0wRclgRuWAf3x6YxvN1ea5CFAIJjQbE4MGDbbrccsvZNMbrv5l6AZiF6hE0fwoqtOhY646Ju0hnG8YDcaiiADRNrAFaVtOfk3cBQkGHlkr0EwktMhsi
i9YKrm2j3QJ16b4f2q93R0IdP1FmnIdrIyTZq3BgxIgRdiwsJqpohg0blq5JqnbiCLW4bp+6fK6Xe2HAKvCQZGjSpIlV0WEB4G+I/RcxWYuFbk/gxRdfrKyai/sXA4QflgFtYeJqOqwx72mAaiv6+1J3J2J6IqHRwKC+3rV9Chscrr5MAZNLo0XTRZulbp5CkOoWr2t3KDxZR+HFPIIF4gIYqPYAtqVwxI9AH1Huf+A8CBYKSRywhMt6gz6HdiPgQqNv3742pXqD/ag649juEEYwUbBT7YQgYX3s2OeeqSufd9557V6pK+cegEIYYUJ1EVMvnNmHZfqgYsrIhkAXIw5aPgKQ0GgEkzuIgf1pUOn7cT6eL8LAhS7CxqHvsBiCBRC0hEDzHtmPcznU9WNZ9e/fP80RYvogodGAQEPHQsjVToEW6Kyj4MU6oC+pmiwJBIIX/oDWTpsJWqt78rpnCjWiczhenz59zAkMdNjIdggVEvXfFHL4HajnpqDnmryho8N+QGGNgOBacKYihHDesh/CBl8Ejm+sJnwcRNZQgHJO6sNj7ZtGkAge7oF1RC+50MDhjtAiUdfugQL4Lzyf5M72Bx98sLJVOsfhuJyT83u7EiyL+Hl5o0N8R2yHoORcCAXWIRBia8QtK+AZ8Fy92o3oML9PkgbGEtMbCQ2RE9e4ESwUzNTFM81WbfkyhXC8Di2cZaycXPu5pQLxufw8vn+MF5zg1hP4sbx+3wWGH4PkQgJLJxce+urbgc/H1woeoguxcGJ7hCndr2TP4/fFdbMdyzHcG9ZQfH6HKkM/Hvtnrye77Pi9+zv0Z5brHELki4RGA8Lr5nO1zvYwV7R8CpCaQJOlUPHIIKptCCdFI0fLdwuEun+0fyDUFisibplNVQ3VR0RpEUJLxI8XfIRZ0qOsE483Tnw/BTh+CR87BP8Flgq434C2CQ5jcNCavG3btrYcd68CtHnwsFcPZcXKiYmrncCjzCjcXaBQOPt1UyUFWBtYbwgorCaivm688Uarbso1CiLtDXi+WA/A9RCSHBP3x4XFxfPnmDw/nqPfO+fy/rd4Xx7+7NAfGVYnVg/VeT4WCeek52VPVB3KLyLyRUKjgUG4rRfOOMYp3GKtlmqkmiJ1CItlXxcaFJKM6UFBjWBgil/EC2CqeqhaQoBQQMZCAyHCeSnsEFIc04UVQoAwUUJMmXrdPvtQXcM1E4LqoZlU1/g2hABDLDT8eN4Iz53xaNhcF9simKjW4RxA3D9CiSoeCvxYaBCC6uG8sdAAF750j8Kz4Lzsz7kopKlyIrEd2zgcB98Iz5N2C+7r4JngP+IcWFxYKrElA926dats54LD3S0zryZz4rYp3CeCD6HD/VDFSKgw74EqMkJYacPDO6MKjOsXIh8kNBoQWAMU0nEnhw6FAlqqO66xGGJh4o3VgIIFJy7QRoC4frRdkrdHIOyUc5EoPHHcst7BaYu2zXoKJwptr0bBD4JzHO2XbdzhTQHMscgnxe0pEICxszhuHYxVglXj14jG7YIT4ccxEQJYAl4g05iRApPrY54OGyEu6BGKHCcWsggtClkc3/hyuA+gsMcPQgHvKQ4SILgAvwXPlevBH5OF9gxTg/t2K4g2HzR4xMriGWMlxuADYj33yDVj9QGWDeOo8C5Zx/OLB8wSojYkNOqJ+Cd1TZYCimoCNEk0Rea98APqvcnz5Jo72xARxD6uhcZ1/nHhla0y8WO5AImrYmLc8S2EaNxIaBQBr6P2KoVcULfteLQT7Q/QDtFgCZVFO6cbjppYY4010rlg9dbevgE4FhANhLbpmnnsS3DQwGnjgNYc16eDC6G4Cw0sEapksCYQLi6UEDh0NYJVEFfp+HYIOSwahBz16lnBJIQofSQ0igjOW9oBZMEhSZ02hSgRMy40KGixFmjwRqFKgY0VkCXbORzEbQTAu9Gg6gJfhHfDkB2ECX8Bfg5PHv5Kgc5YGm4RxUKDunEKfq6XEFuqYnyMD28Eh7/CIVw1FqA+SFU81KwQojyQ0CgCCAOEghfkaPFo5kA1EA5Z4vuZ4ryljhsojGmDgLZPYuwJr78G8tgWy4BEvbhXQyEUaNRGYju3NHAq41BmyjV5/0kOBThObk8egYNgQ6BgwRDp4y3KgWoyrA1AyOGL8P6YaDfglgeCA+FH2wtfDx6BFftRhBDlgYSGEEKIvJHQEEIIkTcSGkIIIfJGQkMIIUTeSGgIIYTIGwkNIYQQeSOhIYQQIm8kNIQQQuSNhIYQQoi8kdAQQgiRNxIaQggh8kZCQwghRN5IaAghhMgbCQ0hhBB5I6EhhBAibyQ0hBBC5I2EhhBCiLyR0BBCCJE3EhpCCCHyRkJDCCFE3khoCCGEyBsJDSGEEHkjoSGEECJvJDSEEELkjYSGEEKIvJHQEEIIkTcSGkIIIfJGQkMIIUTeSGgIIYTIGwkNIYQQeSOhIYQQIm8kNIQQQuSNhIYQQoi8kdAQQgiRNxIaxaJnzxDeeitdEEKIhoGERrGYoeLRnnhiulAgHnkkhCOPTBeEEGL6I6FRLBAaHTqE8PffITz2WAg//xzCww+HMGhQsv7PP5P8X38NoVevKfkwYEAIY8cm82++GcLLL4fw+echbLBBCG3ahPDEE8k6IYSYzkhoFAsXGhMnJvPLLx/Cttsm81dfHcK4ccl8q1YhtGuXzB911JR9778/md9nnxDWXz+p7pp33hCaNAlh2WWTdQXm5JNPDltvvXV47rnn0pwpPPnkk2HHHXdMl/Ljn3/+SeeEEA2FitJJFAUKfoTGpEnJfLduSf4664Swyy5ThIkLh+uuS5aBKUICEBobbpjM77lnCOuum8wXmGOPPTZccskl4Ysvvqg4fXodEeT9/vvv6dLUefvttysMowrLSAjRoKheOojCQMEbC42hQ5P8bbYJYfvtp+S/9lqSf9ddyTIwffDBZD4WGrvtlgidIvDAAw+Ef//91+bnxaKJuPTSSysuaYawxBJL2PKdd95p86uvvnr4+eefw99//x122mmnsPTSS1cYTRVWUwXzzDNPmH322Stk3LoVtz403HbbbZb/6KOPhrvvvjv07ds3HH300bYdnHvuuWHxxRcPW265Zfjzzz/De++9F9q0aROWWWaZ8Mwzz9g2Qoj6Jy2lRMGh4M9HaOy+ewhffx3CCiuEsMUWyTZNmyZWxVdfhbDAAlOExr77hjDnnCF88EGyXASefvrpsNBCC6VLCQgTtz5+/fXXikuouIYKvvnmm7DccsuFH374IVx77bWWd9RRR4XevXuH0aNHV1x2ct0PVgjATp062fztt98errjiigoZeVfYfPPNLQ+rZL311rN5BMpZZ50VDjvsMBMwv/32WzjvvPNsnRCi/pHQKBaLLRYqSsfE0d269RSLokK7DsceO6V66uSTk+kqq4Tw/ffJNv36hTDXXCG0bRvC8ceH0L59kv/xxyE0b55sXwQ++eSTMOOMM6ZLU8A34UJj3LhxYZZZZgmbbLJJ2HjjjcNmm20Wfvrpp7DoootawY91cOutt4ZRo0aZlQFYMRdeeKHNu9D
o3r176NKli+UhZJpX3BfHRNCcdNJJFfLyKztes2bNQteuXW07IUT9I6FRX4wfnxT+I0akGfULGn0ugQGx0Pjll19CE5zxFYyvuAesgP/973/h5ptvtrzOnTtXExpPPfVUhdxLBN/OO+9cKTTwocDIkSNNYMCwYcPC448/Hi677LIKgyqxqPzcQoj6R39jfVGhnZvQeOWVNKN+2XPPPSuMo8VCq1atLOFTcBAaWBIOmj/btmjRwhznWB8tW7YMK6ywQthrr73CjTfeaEKI5XVSHwwWyJJLLmm+CwQMVVY33HCDrQMitzjGiiuuWGGETQzvvvuu+TgWXnhhs06EEKWBhEZ9gdMZnwXtNYQQokyQ0BBCCJE3EhpCCCHyRkJDCCFE3khoCCGEyBsJDSGEEHkjoSGEECJvJDSEEELkjYSGEEKIvJHQEEIIkTcSGmXE4YcfHh5hyNd6gr6mDjroIJv//vvvresPuOeee8L29Nybg23o1Tcl7l+KrkreZFTCemD//fcPK6+8crokhKgLEhplBP05zTbbbKFPnz5pzvTHhcCy6eiBX3/9tY30R99SuUAw0M05nRZeddVV1o26d2S45ppr2nR6su+++1rX7xpVUIhpQ0KjzPjjjz9scCMGMaoPJk+eHGaeeWYbiMnB6nAhkgusk1VXXdXmKazpKRdqEjTFAoFBV+sSGEJMOxIaZQgj2yE4+jHuxnRm0qRJoWnTpuHKK69Mc6YuNHbddddq1UFUteUai7xYyMIQojBIaJQpDLGK4HjsscfSnOmDV0/hk3BqExqMyoelMWDAgMqBmLp16xbOPPNMm58eSGAIUTgkNMoYCkEEB4MWTQ/wX+CfgO+++67SN8G8C41vv/02rLTSSjYPiyyySDoXbKyM33//3QZVmnXWWafL4EoIDMbk8PHPhRD/DQmNBgCCo3///ulS/UKE1RNPPJEu1S/77LOPBIYQBUZCo4FQSoKjFJCFIURxkNBoQCA48B00dhAYcbWYEKJwSGg0MBAcAwcOTJcaH/vtt58EhhBFREKjAYLgKBW/wvSElt7NmzdPl4QQxUBCo4EyxxxzNCrBccABB4RFF100XRJCFAsJjQYMgmPQoEHpUsPlwAMPlMAQYjohodHAaeiCg4aDLVq0SJeEEMVGQqMR0FAFR/v27UPLli3TJSHE9EBCo5HQ0ATHwQcfXKUrEyHE9EFCoxEx55xzNgjBgcBYbLHF0iUhxPREQqORgeAo56iqQw45JLRu3TpdEkJMbyQ0GiHlKjgOPfTQsPjii6dLQoj6QEKjkYLgKKeW4/Suu8QSS6RLQoj6QkKjANApHr27TpgwoWwSXZQzAl859FXFgE209Ga421z3oqSUTb/++mv69YhCI6FRABhPgrEhmjRpYhp8uSRG4JtvvvnCCy+8kN5J6XH66aeHeeedN8w999w570FJKZtQhnbffff0CxKFRkKjADByHYWvEKL+Yfx8hhgWxUFCowAgNNCGhRD1z6OPPhp22223dEkUGgmNAiChIUTpIKFRXCQ0CoCEhhClg4RGcZHQKAASGkKUDhIaxUVCowBIaAhROkhoFBcJjQIgoSFE6SChUVwkNAqAhIYQpYOERnGR0CgAEhpClA4SGsVFQqMASGgIUTpIaBQXCY0CIKEhROkgoVFcJDQKgISGEKWDhEZxkdAoABIaQpQOEhrFRUKjAEhoCFE6SGgUFwmNAiChIUTpIKFRXCQ0CoCEhhClg4RGcZHQKAASGg2bf/75x0ZmjGGo3JNPPjn06tUrzSkt+CYvvfTSMGnSpDSn8SChUVwkNApAQxAaM800kyWHIWybNWtmefvuu2+am5sPPvggzDjjjLZtq1at0tyqnH/++bae7Tp27JjmljZXXnllWHjhhW1URk+rr766rbvgggts+ZhjjrHlQoGA+vPPP9OlaYfr5PoOPPDANKfxIKFRXCQ0CkBDEBpeKDqMy83yQgstlObUzPvvv1+5P+nrr79O10yBfAQG01NPPTXNLV3WWmutyvuhAN5xxx3DsssuG+aaay5bXwyhscMOO9gxv/322zRn2nnllVds9LpPPvkkzWk8SGgUFwmNAtDQhMYbb7xh84y1jMUxNVxoeEG7/fbbp2sSnn/+ectfZ511bFrqQgNBwHViGX322WdpboILxGIIDb4hjpmtChN1Q0KjuEhoFICGJDQQEkyxCt5+++10be240Nhyyy1t6tq4s+mmm9rxLr/8clsfC41ffvklbL755pZPWmqppcLrr7+erg3h4osvDi1atKiynuowZ/311w9zzz13ePXVV8Nqq61Wud1NN92UbpGA1o0QYB3TUaNGpWuq4xbRN998k+ZUJys09txzT/sG7rvvPlsG7nOeeeYJt9xyS5oTQocOHUKTJk0qr/P444+388w222yV52W8ee7Zue222+weWcd27OOcfvrptv3NN99swpptPvroo7DHHnuEpk2bhoceesi2Y5nt+vfvH3baaafKc2WrHvv162fWJeu49qFDh9pxqKosFyQ0iouERgFoSEJjhRVWsCn1+fniQqNdu3ZWODH/2GOP2bqffvrJlhEcF154oc3HQsMFQqdOncIDDzxghSLLv//+u61v27atbd+nT59w0UUX2ToKXWfFFVe0PNJJJ51kwsGXx40bZ9usueaatkwB37t3b3MQv/XWW7Yuy3fffWfbUqjWRlZoeIF955132jIce+yxlnfttdfaMk5zlpdffnkr2BAGCBGuc5999gmzzjqrrd99993DIYccYvt069bN8lZaaSW7dq/Cuuaaa2z9CSecYMtLLLFEmH322e158D622247y7/33nttu2233daW+U7333//cMopp9gyya2niRMnVgoTzn/aaadVbjPLLLPYNuWAhEZxkdAoAA1JaHjKVRf+999/h7/++qsy4bQFFxposO+9957Nr7rqqrburrvusuXBgwebM5x5FxoIApbbt29vy3D11Vdbnhe+f/zxh00dCkfWe72/C43u3bvbMrggcovEl9Gip8bw4cNt22IIDQQjywiFXPANsf7HH39Mc0KYc845LY9n77CMFQAuNEjxs6pJaGy99da2DKussorlIZhgww03tGWEr+PvSEJDOBIaBaAhCY2uXbvalCqcLM2bN6/cjtS3b1/Lj4UG+HpA+6V6A7JC44wzzrBlqk1atmxpaf7557c8ClgYMGCAOaJdC/fkvgYXGvhhHKwT8hBgcMMNN1Tuh9BhuSY4LtsVQ2hQbcQyiSq8uJoJEASsw9qBn3/+2Za5FqLSeD5M3RoAFxrHHXecLTs1CY34+o444gjLu/76623Zq83cygOvrpTQEI6ERgFoaI5wr6JafPHFbdl57bXXwrBhwyoTVU+QFRq33367LZ944ok2dUsiKzSoj2d55513toI8Ti+99FKlQ36ZZZYJL774Yvjhhx/CyiuvbHmff/65HcOFxptvvmnLkBUaQHUUVUDkk84999x0TXW8UK7N75EVGtwDy9y7c/TRR1ueCw3AijjrrLMsn4S272SFxoQJE2yZgASOGz+fq666yrZxod
GlSxdbdmoSGj169LBlOPLIIy3PhYZbNRIaojYkNApAQxMaVDv5stet10ZWaPA8WHbHM3XlkBUaXsePMzvGq706d+5s6+OqJz9mXYRGHAFGtRbrSDVB1RrrER7PPPNMmpsc58Ybb7T5rNCgoR/LhOY6rrm70Iirj6huYh3JwdnMMs/TobAm7913301zEvyeCik0vOpvv/32s2XAUU6ehIZwJDQKQEMTGkAh5XmDBg1Kc3OTFRrgzuc2bdqkOdWFBnikDlFRVPGst956FikEOH5ZR+GLA5iqK9eG6yI0KIypr6ftyXLLLWfr0PZrgyoztiNxPQsuuKDN19ROg0gz357QY6y01q1b27ILDbYlwotCeYMNNrB1WE4Oz488GhS6IMWqII9qPqLMiFDDR+P+mUIKDaK4WCbheOfZ+vOW0BCOhEYBaAhCg0KMFEPBSF7WEsjyxRdf2DZUNzlULa277rrmCHfuuOMO246C0KGbC6pxXKNGeBBF5RAxxToKa5zptHBeY401Kh3hBxxwgLX/iMNwyUP4uGDBseta/6KLLmphsW7N1AYF7NJLL11ZkFJt9+CDD9o6v5c4ygzh4AUskVHXXXedXUfPnj1tPULQrQmqos4555wq3XwQReVhw0RYOY8//nhYbLHFLJ/nQHWft+UgiorriJ8zIJh5dy7wiZZi+YknnrBluOyyyyzPw3KBNjXeCh6riZBo5vEplQsSGsVFQqMANAShIUQu3DokZLpckNAoLhIaBUBCQzQUttpqK7NOqO7zQAZSOXV8KKFRXCQ0CoCEhmgouJCIU9xCvxyQ0CguEhoFQEJDNBTwlXz44YcWbpztd6tckNAoLhIaBUBCQ4jSQUKjuEhoFAAJDSFKBwmN4iKhUQAkNIQoHSQ0iouERgGQ0BCidJDQKC4SGgVAQkOI0kFCo7hIaBQACQ0hSgcJjeIioVEAJDSEKB0kNIqLhEYBkNAQonSQ0CguEhoFQEJDiNJBQqO4SGgUAAkNIUoHCY3iIqFRACQ0hCgdJDSKi4RGAZDQEKJ0kNAoLhIaBUBCQ4jSQUKjuEhoFAAJDSFKBwmN4iKhUQAkNIQoHSQ0iouERgGQ0BCidJDQKC4SGgVAQkOI0kFCo7hIaBQACQ3RmPnzzz/TudJAQqO4SGgUgFIUGpMmTQp//fVXuhTChAkTbLr55puHtdde29Kqq65qeYsuumj4+++/bR46deoU1l133fDSSy/Z8jbbbBP++OMPm3d++OGHsNZaa9lxmF533XVh0KBB4ZJLLrH17777bthzzz3t52WM6U033TR88MEHYeutt7b1DsfdYYcdwh577GHb77zzzuHrr78Oyy67bLpFCH369AlzzDFH2Hbbbe06ua811lgjXVs4fvnll/D555+H3377zZb/+ecfuyYS17Dyyitb/mKLLWZTYEjUDTfcMBx55JHh33//DV988UVYf/3107U18/vvv1sqBoccckjldXvafvvt07VT+PnnnyvfYZxuuukmW9+tW7ew3nrr2TZMP/744/D000+HSy+91NYjLBgedvbZZ7f3wvPaa6+97P3VJxIaxUVCowCUotB48803Q+vWrW1+7NixYZ555rH5mAUXXNCms846a6XQuOGGG8Lxxx9vBeCcc85peeuss041oRHz3HPPhdNOOy3069cvnH322WluwuWXX24F7korrRTGjBljhVKWnj17hnPOOSf06NEjXHTRRVb4zD///OnaYAIDbr/99nDuuefaer+3QoEgmnHGGcP5558fZpih6m/x7LPPhiuuuMKEK/hzmTx5cphppplsfvDgwSYsECLLL7+85dXGNddcE2688cZ0qXZ4bieddFK6NG1k76kmuFfehcN34PcIAwYMsHcABx10kL2nRRZZJDRt2jRcfPHFpmB8+eWXtr6+kNAoLhIaBaBUq6ewJEaOHGk/9I8//pjmJnTp0iVcdtllNj/bbLOFhRde2ApjLAymgMVBIVKT0Ojdu3c444wzTFhwvMcee8wsgDvuuCPdIphWOtdcc1mhVZPQePDBB03Y3H333XYczr/AAgvYOjT3zTbbzObfeuutsPfee9s8x2vWrJnN5wPHpACsiVdeeSUMGTLE5in4vvrqK5uHgQMHhiuvvLKa0MDaatGihc1jySGE2S+X0OD5d+3aNbzxxhu2jNC4+eab7f34uV599dXwzTffmACn4EOYwv/+97+w+uqrh4cfftiWsYbYl2cDzzzzjBXUCGg44IADwq677lqZsDTyFRqc45ZbbkmXQhg2bJhZEnzjEAsNnifH59gnnnii5fHsZGk0bCQ0CkAp+zRmnnnmcOyxx6ZLIYwbN840Qq7XhcMss8xSaWmcddZZoVevXlYgUFBRVZNLaJBPYY/QYP7XX3+1QhFNPebggw8Or732mgmwuggNCiIKK2jZsqVZIEsuuaQV4FgFiy++uK3LBe+DgitOFMjbbbfdVKuEqCpzC8y5+uqrTTA2b97crsvfNVWAc889t82///77VqVGgZ4VGjxztHUK9l122SVcf/31JjRuvfVWsyD69u1r2x122GFW/cMz41lieVFtx/qNN944jB492s7Dc+AeqSajyo+qP6rOvGCPLYPaeOSRR8K9995r6f7777cpgop53j/VXJyD+YUWWsiECc/fhQbnRiCBV3VSDYbAf+CBB2y5PpDQKC4SGgWgVIXGt99+awXgiiuumOaEsNxyy1lhFBNXT1FAUFAgSCiwIJfQQLOk4KaaiEKd+WOOOaZa9RTVYggDNPK6CI1swf3RRx+ZsKAenWohqtBqAsGHth0nCpEmTZqETz75JN2qZrgfqvQchAD+jqylAdT/8/woVLm2XNVT+Ia8cHVqEhpPPvmkCalWrVrZsXnu+IR23HFH24YCm/fCc8LHQ5XdBhtsYNs4VBmtssoqJkiYkpZZZplqDmvex6hRo8Lbb79t1iCClXmSc99996VzCd99950JL8CfwXVi8fAMAEsDC6w+kdAoLhIaBaBUhQYaH9dGoYG2Dw899FCVOmvo3r27CYuayCU0WD7uuOPSpYQXXnjBnKfOU089Fdq2bWsOVLTwqQkNCkAc6giN2KfBPlSxeeJZL7XUUuna/KAgiwMDshx99NGmOQOFuVfdUXXk1kQuoZEll9AYMWKEWTCARYZAQmiguWOpuVberl07CybwQhdhQhUcQQUuNLASsQjArSac1BT+WRA+CKHa8HfIvXFtWfDzHHrooZWJQAa3NOCdd94xBcMDLRBk8mk0bCQ0CkApCg0KvaOOOipdSnwAaJr4Ic4880yrY4+TQ4EQJ6oo2DcrNND2qT6i4Jo4cWJlIh8oCL2+HxBcuYQGBSR5CBe2QUBh6dTm6Eao1FY9NS1Q0FFAIlC5XwTMe++9F+abb75KgUr1FLjQoL4/flZYOEsssYQ5/WM4FgUrQQbcF05zCnSWhw4dato6lgPnQmicd955Zk1Q1bfFFltYlBNVTp07d7ZjcZ1XXXWVXQfVYQgNCm/gW+zfv7/5Hqjm4ljMk5dLKHCtP/30k63zb4HzOZwrhuvjXgF/Fd8SV
aAXXHCBCRWenYRGw0ZCowCUqqWRC+rMqaqgcIuTV09RDRMnOPDAA6tVbeDHQDslciZOXqXl/hLnhBNOsKqh/fffP82pnY022iidqw4FJ3X8xQCt3sneA9FCCBAEG/BM4meFwMSRjcWQCwr4XLBftkqH48XCnGvBh+JgveWCfXjHOMfjRJ4L9BgswOy3EFelYdHF7xdFAMc8IGji+weqDakWrU8kNIqLhEYBKCehIURDR0KjuEhoFAAJDSFKBwmN4iKhUQAkNIQoHSQ0iouERgGQ0BCidMhLaLzwAvHD6UIFRNaRl/FjTRWCJN58kxaWIYwcmWaWAHQBRBj2sGFpRgXc2/PP00o2hE8/TTMrGDcuyfd7f/HFEFIfZy4kNAqAhIYQpUNeQmOGiqKPhLAAgg5Yrkt/YBSsrVoRYkZEQbJ/2pNBrTz+eAhHHJEuTAN33kkDoXQhB6wnupCgDq7J2/AQTen3yNQDKwYNSpbTPtdsPoqgy1KxVvxXJDSEKB3yFhr0OHDaaclyLqHxyishDB9Oo6Q0I8MBB9CdwpT1FMIcg+g5BAqWi0esERJNIrJstdVCoPNLLBSiztDsEV5YK2lbISvA0f49ahErhlBmuo5p2ZKuFhKhkAvaOHHtQMNezgeEig8YkMwTfegNcbFKuG6/D4TgxInJfA4kNAqAhIYQpUPeQoOW7Uzp+ysrNBAGyyxDQ5YkPxuuTFUO+dttl2akEK5Moe3afNp6PtAr81prhdC1awhNmiSJHp9ffTXZboUVkvXMP/QQ/dIk8x6+TM/KNMrt0IHO4kJYaCG6M07WxdDjAfs5L7+cLB9zTDL1RrxDh07ZbsyYZN4FFEIjCu/OEh1dTCsSGkKUDnkLDQQFBS+9KMdCg6qjuICli5RddknmHSwJtjn99DQj5YwzQphvvkRrZz0FMiAQvDt/OuDccMNkHmuC7caPT5Z32ikEutb/7LMk34UGjV05NlAVlunjrZL776fvnnQhZbnlkmM99VSakUIelg5Ck3n3Y9B3WdruJhcVW4r/ioSGEKVDnYQGUMh26pTkITRovBn3fYZWn46lUgW2z/ZcgC8BPweFLuvTlvqBnhBcaFA1tMEGybwLDYduXVZfPamGIv+bb5J8jutCY+mlE6sjF3Tjk+39mV6nOVamga7lYVFwrQgqF5LbbFO1mi5DdLViWpHQEKJ0qLPQ+OijZJlEYVmxv83jR6BPL6pr0m7pq0BX+mxHYY7/Av8Iy/Tz5pbGCSdMOb4LDfoha9OGgW6S/VhHd/T4M9Dy6ZUaRzT511+fCB7mXWhQZcb9RZ1qVuLVXTEIEvJioeHVX1gXPIdrrpkSPUWL/5r8OBVkji6mBQkNIUqHvIQGhXbayaLRvn0IbdtOKSwpoLE2SNkqqJi7706qi3AyU5i7oxko8PFv4Ijm+Pvsk+QjjLAGON+IEUnhTa/N+FE23zzZBjp2DIHOMvGb7LxzCNdem+Tfc08ITZtWrzJzOJ77UgAhiKXikWLAtXh3N1wz5Zf3Tcax07FaciGhUQAkNIQoHfISGqVCtnqqEJx1VuJkrw2sp7idSh2Q0CgAEhpClA5lJTTiKKZCwiiONNrLRb9+ITzxRLpQdyQ0CoCEhhClQ1kJDfwM06jx1xcSGgVAQkOI0qGshEYZIqFRABAaDIjDCG2MXqekpFR/icGhdt999/TvFIVGQqMAMFYyA/MwAp2SklL9JkYyZNRDURwkNIQQQuSNhIYQQoi8kdAQQgiRNxIaQggh8kZCox554403Qu/evdOlhLcYVSsHv//+e5hUS3fFzp9//lkl/Zt2QvaOd5xWQXabv9LuBXDok4QQoiYkNOqBgQMHhu222y6sttpqYYkllrD5m2++2dbNR7fKOSCMcP/990+Xama//fYLBxxwgKUll1wyfEL/+hUsRn/8Keecc07o2LFjmHHGGcMpp5wSzk4HY+nTp081ISaEEDESGvXA5MmTw48//mjWw6+//homTJgQfk6HV1xkkUVsGnPJJZeEFVdcMbRr1y4ccsgh4Y9aeqCMOemkk8Kn6VjAS9OdcgaExj///BO+++47ExYIj759+6ZrhRCiOhIaRQSBsMsuu4Tu3bunOVPYbLPNwlFHHRVOP/300JbeLlPmmWeesMYaa4QxY8aE9u3b23aDGMM35fXXX7eGS0sttVSaUzMnnnhijUIDoYXQ+Pbbb8MPP/wQnn766dClS5ecQuO8884Lx9MLpxCi0SOhUQTwC6y77rph2223DR/4mL8RX331lVVJOZ06dQrPPfeczS+88MI2hW+++SZ8+eWXVsUUpy+++MKO8bePtJWBcwOWxmeMAFYB3Zzsscce4bfffguPP/54mGuuuczfseiii4az6BWzgn79+uUUGmz36quvhjXXXNOsHSFE40VCo8BQ0M8wwwzm5K6NvfbaK6y//vphyy23NMvCmZv+81MefPDBcM8994S77767SurRo0e48847TQDkYsF01LG9997bhAvgO/H5J554wqqlnLffftumWBtPZYeEzDBgwIAwG2MUCyEaJRIaRYDqnpNPPjm0bNkyjGTAlRqg4M5aCx7tBK+88kq4hRG9Mrz22msmNGrChQbX4BFX2eqp4cOHhzZt2oSmTZua8x1H+VVXXZWurQ5OcqygK664otL/IoRofEhoFJFffvklXHnlleEOxujNcOGFF5rPIk6HH364FeLOs88+G2addVZzjsdp/vnnD9f6KF45cKERs8wyy6RzDFg2IWy44YZVCn+c6wiiiy66KM2ZAv34PPTQQxaeK4Ro3EholBixTwOhkasQHzp0qAmjLPgkttlmG6se23rrrS1hYeDjiB3nWB+rrrpqujQFBNFll12WLgkhRHUkNEoM2lY4L7/8skVW0YNunPCB4NfIgo+DqjEio5iSiOACd3Y7NCLcdNNN7XxUo9E76P3335+uFUKI3EhoCCGEyBsJDSGEEHkjoVEEqBYilDYXRCEdfPDBYYcddrAIJkJYv/7663RtCN26dbP1nohwAsJiaQ2Oz4IW4uDhsRdffLFNaYBHK/NDDz3UlmkvwjFYJh1xxBHWONB54IEHbP1GG21kjnjmHaK6rr/++sp52pLAvvvua050tqXPKtp8ANVgm2++uTUEZFvWjxo1ytbBnnvuadcOBx10UGVwAA0fua4DDzzQosUIWfbGjB9//LENqHP00UdbtRwNEWHw4MFV7ouhPeP2MDj1SUSEEaHGMwF8RDzD+Dl440fCkQmB9rBktUcRIjcSGgWGAva9996zSKjaIFIJhzTO69p8CRS2MXHUVP/+/W261VZbWX9SOLsnTpxordBrgkZ6Mc8884y1BKfleQyRUu4H4Z5OO+00mwdasgMF/fbbb2/zCCh8JDjSCd2lMOfYQBgxx0PAUIgjMG+77TZbd/nll9sU2B/fCu1FoEOHDjZ1jj322HSuKjR4fPPNN9OlEH766ScbgpdIM+Zd2MTgL4KPPvrIpkSS0amj+4C4FiFEdSQ0ikRtQgMNGu0Z6PMJIVMTOLWBwhYNn8Z9vXr1sjwXGptssokV8K1a
tbIC1K0cCsthw4aZFu6JMGCH+bgfK2/TQSHPubAGcJjHQoNjXnPNNZVWg18LDQYRFggNOlckuit21vM8vAddhBztPYB9Ro8eHV566SU7Fpr+Y489Zuto0R6DhQIITtqq+D29++67ldZEDMKtpmfLcwIXGlh0hCLTap77R3jlEjZCNHYkNIpETUKDaqa4fQTaLYWV8+GHH5pQoU8qqk6Yp3BHU/ftvID0ZQbTp4qKaisK7QsuuMDy6YjwjDPOsDw0eqZxIUrDQ6ppqOoZMWKEzY8bN67K9XEuClGuk6kLMebBq9bohJH+sijAPaHtOyuvvLIJC3rXpe+sG264wfKp4vLqJIQTEWBjx461dUyxZGglv+uuu1beL0Lr3HPPNcHEfXHf3KuzxRZbmLBk/88//9x6E45bwHfu3LnSonChsfzyy9uUKirujSq7mlrcC9GYkdAoEjUJDersKfCo9yfRnUis0boFkqtBINtS6J566qlmWXg35lTBIGw8eVUThZ+PlYFmzbwX9kC/VlQVcc7jjjsu7LzzzumaYFVcVPmQECZeVYWGj0AgHyuEAhwolCm4PbHPddddZ+sg7nSRwtyFFwUzfhA/F9vRdXxt+H1RrYegyt6XQ79aufjf//6Xzk0RGhyD5+YCWdVTQuRGQqNIxE7luG+pLI8++qgV3g4OaUDbJ6Fdu5Z8++23m8/C89wJfeaZZ1phR54nYIwO/A6HHXaYafdU78TtNRifI67W8SojoN8qzkWiWglhBfhEsHrIpzrr3nvvtXwsDro8wVrwFOPdnvh9kbhOLBSqm/xcCBOqp7B4uAau2RNCEyFDg0eWqULDYc183NEi/hTy5pxzTnsPjC3C8aj+yvYJ5kKDKj16/cVKwarZeOONLV8IURUJjXqGgjPWktF4vbrGk6+ngI+TF8zsk12XL14dxDTuJiR7PM4BXG92HXCN2XzfB7jW7H052f1c6E0PclkoED8LIcQUJDSEEELkjYRGEaC9BO0ZcnHjjTfaFMfzI488Yu02vG3AMcccY1PHQ1sBRzTRUkQmES5K9Q4O8Bi6P8+CzwLfCc5fqqhi/wnVWieccIJFKXly8BUA1UeciyofBycyUUtEPbHOneTMU6VEp4vum6FdhINTn/VUi5GIwOL+gbYWdJlC9BLgtHewUqjCIzjAo57ckY4z3I9F1dZdd91VOYaIEKLwSGgUmOeff94KWqpYKKSzEO1DPTuRRLTRwJnNoEpAwUy9Po0D8RfEBS7QmA8fCODXQIhQoBKltMoqq5gDmkghHx8DcG4jNGiLQYEcCw0KYYSbF+IkoGrG+65yoRH7aIBzITQQFvH5gPYi+BAQSDvttFOaWx2c5S40wNt8AMIxi/tVgHuJz4uQozEhvhMJDSGKh4RGkcBPkKvgcw2ZKKSHH37YhIZbGmjXFIwrrbSSRUjFUT44iz3kFId4bGkgoBAentwnwLkQGHTDTmLeG9XBrbfeahFDdFhItJBHXbE/EV70hIuvA+EWWxpAZJQ7i+PCm4GjnnzySeuJF9yxD9nrRCC57wArByHkgjb77LAgaMlNxBZ4a3WHZ4IVIqEhRHGR0CgSNYVsYokQ/UMhTngsQoOqFzTlDTbYwKJ36P6CKV1nIBwcrAK0cUJsESAvvvii5buQcHwZKyDbdgJHs4O1Q9cfVGsxRWjQlYc7sGmZ7ZFSsaVB1RPbUPATmeXdhcTOcncwI3Ac2psQiUXEEpFdzHuDwrhNByB0HZ4B+3I+395bdDs8O5DQEKK4SGgUAaplYuIoIkI+77vvvnQpqQqiMAS0c6qgPCFgYhgnY5ZZZrHEPNYKYBHQuM8titlnn93yKbypjvK2ExS8CA4HKwK/CQKDaiT6gQKuF6GE/4Xry1U9RdUVfhmgfYVDdyYIRRLWQtz2g8aEWFcIi7jdBlB9hpWFxUH3IdnuUzgX4b6E0mJhIXAdjkU/XiChIURxkdAoMGjdOHSxFLwOnlbGDtUraNlUSZGwMqimyUXsCM9CwevVUyussIJp6n7MFi1aWD5CA0uDgt9TbJVQdYTvgfPj3Oa6HKrIjjzyyEqfRlw9hQChaor7bNKkiVVl1RS6GgsbhAvVSH6dJKwYoLoJC4rngZXBaIEx+H2wsqimozEgjSSBc3srdaCbFQkNIYqHhIaoUmUlhBC1IaEhagXLJE41WRTke3K82g3i+ZjY8onnIbvs+DniaT7ngvj6/J78PLXtB/G+Mdnr5DhxlaTD/vE5s/tlr82nvp8vx8TLua6vpmuOry/exs8VJ4f5+Fqyzyu7fT7E15E9F8mvra7HFcVDQqOBQNXTPvvsU9k+Igv+D/wguWA/qp884SAHjkkoL85uTx7yC3RVTh9bdM9xzz33WLUSvhOgLyvanXiPtWuttZZNaY9BtRdde2DhxF2X4IgnzJjW4nQLEncDTzUX1XWEGRNkwDb4fChUcKzTyy4wZkbc7xcdF3JM9qWrdro7wR8D5M0333x2bI4TVyOyzLNgXwIQuFb3m9A1y8wzzxz2228/Cxv20GieFx0dUmWGT2fbbbetUnVGT75cGwEOvCeeQQz+God2NcBx6IGX/bhPquYcquu22267yrYrRJhlYft4vBaqBKlCpXdh94nFwQr4wOL3TeIeKLR5HjPNNJO9P/DoOYQH7YGIbqOrFyL/gH14dv5dsZ6uXxzaCRE6TYeT+Pquvvpqy+dd0pUL52bMF3xvcdWpqF8kNBoI3lU5mlu23QTwA+N/yAWFoGt04ONgUAi6z8CT9w4L/PQOPzcFDr4VfCAUdnDiiSfa1IUGwiTGhQaFPQU8jnSgXy0PT4bYMU5X8Nwn0WPcl18bEEyQbSTpUOjHQgMWXHDBdC4Zn93783J4LhTmCI04MIF+rfAH4aD3QpQCmWfH/eOroZCPfTMEItAuhWeCz4v1/twRuPE9+kBbQINIoF3N0ksvbfMENLhPy8OP8ec4BDIQdEBhi5DFt+WWAUEEPCfX3nl3NVlZ5PMdANfKfTveBb8X9g7PwaPbPAiDhB8ubrvEs+Pb4xsDQs5JhGx72yVAQSBYQ76q0kBCo4HBGBYUjGhnFBwUVE5NQgONlx+awo594s7/EAIIA0/uuAYXGkR6IQxcaNCluxdgJ598sk1daFDA8vNTYFLoe4NCzokTnA4VKezQNmsSGlhCDz30kIUHAwEHWEtA40k/ZhYKWhzoHJsCk+gwCn60bsgVJk2hiQBEaNAhowvNWGigEdMqHihgETy0h3Gry0FY8Yx5RszzbnieBBtw/RSWWGzgQhdBRdsXrBoKT9raANt5qLMHTMRCw4UoFosHDXBtXDPdxYMX4FiPLjQQXmzvicKba0Vg+KBY3mofSwoQlPF3gTDj/BwTy4Pr9uSCCoiUI/iB4Aiehb9vvicEIQIWKwgry5+LqH8kNBoQtGGgYAN+XBrgeffpUJPQQMvnJ8VaYXrTTTdZPmG8Cy20UGjWrFllck0XYkuDQtyFBoWSh+9mhQZdrMS4pUG1EwWUR5xNzdKgEPN2KtwjXYhQ4JGXa4Q/rA8KJgSqF6jAMteLdpy1MoC
IL86F0IhHPeS5cBy6U/FCm+dGr7sUpjwnupePLSvOwTUiEChkeV4IEWAZIcMyhTTHdcincOV83ktvz549Ky1Kt+ZiocFzxcrhXIyvwrxfP5YL46z4sbwKEbxwx5Lj2TDPvYMLRicOtaZQ556JuvNqPIQGVXEILt474eA0KHV4HygYVK8hAONwcPbl+fBuuP94HBhRv0hoNCB23HHHdC43sdDwwiqGthIx/LixDyMLBY/D2BYuNChwvCD1kFsXGnGBznYuNNDY0Spdw65NaDBuOFqzCw00ca6TgjVX9RQFEttAXD3FwEuuIWOZLb744jbvYBH5WOFx9RSFNed2P4pXT2GtYS1gObjgjaGQ5h1xj2effbb5ARxGRuT5od3zbnwgLfwUWGWu+XsbIK7bR2707uljoYHlhh/B/RL4CLg+iAtu4Lk5nJtnAVidMQgbrtPTcsstl66ZOoSFZ8Fq49wkrs0tPkAxcPg26GNMlAYSGg0EtDkKOKoOshqh4wU3xJ0Tog3jmMWpypSE1kfBfMopp1ijPE9xozq0S85HAYK2j0aI0AD8ExzP26B4IYBzm31IFBLegSNQuNE4j8KEBntuNQEFI/tQEKK9ogF79RTVJi6cKNjje/Prcih43dLAgY1zGq0eh3YsmFwAOQgNBAVWx5AhQ8yq8yoTL8g5Dxo9zwktnKlXBQHWGHmcj4TwcH8B94BjmntCKLnwRBg/99xzlk9yAQUIXJ6xV/m48MgFg3r5c8BHgwDzd+qOdEAY4Y/wdSQXHuxHAc51MnVHOFVm/t2QUBh4j3wLXC+KAmPKsC5u+Iq1xDUhyEmxH4MqRp4h3yEC34cVFvWPhIYQ0xkEjxDlioSGMG2X5M5Qr8N2fL0n12zZPs6LnZxeMHoelkG++L5MObbP1wbboSVnt/NjUBXm1xpvE1+zV8uQ59v6M/F90J65Fz8m60nMg58vTr6vL2efSU319eznx3Pi682+JyGmBxIaDQSiYqgWoDrAC7oYqn6oXvDqohjq1/FP4MQE2lI4FFrE3bOehPN7gQUWsHX4ETguIZNUodBLLeemmoGQUeroCS3FKe/HxOke14sTsRRfE+vxbVBFRZWKh6xSJcJ1+H7ci4d1cs84WanOomqKKh2HQpZQVargqI7i+nFGO7POOms6F8K6665rUzpqZB/uF98KhTNVNAgG2kbwTHjWOO3pSoVqKu+Vl+3I9+fFtTGOOs+Fa+NZ05YD3PeC49jDnHGWU1VFX2DU8+ObiHs7Zn+HcFUhpjcSGg2EuGPAOKoFKNy9XjpXd+2OOzZpkEX9e03MM8886VwS4kv9NlD/TOFIwYcwoc6awg8/Ar4IoA6bKB5PCIW47Qe+kIEDB1oexyaCCxCK8X7Uc+OPAJzGDPDkobxxvT/HGTx4sDmCERoIpDgIYK655qrU3mkL4OA85znShoHCHSuGaB8XOEy5V6KCuDYi1XJBdFDc6I7jeGNAoqx8Hc8rBh8V/h4c63FAgIe5gvsmhJieSGg0MHAuUzDjyKUgpcB2KMzff//9dGkKdKJI+wivLvE2D0DBiKVAQjgwdQc0GjlOUBp34TClmgVtnOgkCkEKfDR28Gk8nkcu0LoRGlQVUSi6sKHg9KinXGDlED1EW48YGrXhvKdwRQhw/x7a6Y0asQAQHG4BsB1Occ5NUAGWBoKGhFXEPoy6iHWA5YFj3COpEFw8C54pznUPJeUYWEAuMMDDiyEW+oAwxdlOhJQLDeYJicaq4r3gWBZieiOh0YAgCsarZigkiabxag+goVUuECYIDKpDSGjmufAIKApNoAB1PwBQqLrWzrkJkyTChwZ3Hk6J5s48VTCsR6NG+3aoDmIbtzR82FcaemFRIHwoUBFICAjuk+gconIYMRDNH4HlAhALCwuBbYl+oiD20FOmFO4IEqY0KnMo7IliIsoJIcLx4nBUQBghLBB03iWHPxsPfyXUlAIevF0F1hTRQr4t0KYjhughBCxRZC40UAIcriXuQViI6YWERgMBZ2m2iiOGqpfYiRpr5FgQFPKeVl999XRNgo/P4Q32HCwatGXCQ0kU2F4Q0mAMzZhEwZdt1BdXucRguaBJI0goeL16yuG4WBUx3o1Gtv0BUMhTALMNXWpgPXhoJy3hsV68rUAckoy/xPOpiqKQRpD5vlgaWDGON2JEAFOthLUBcXsNLCfuB78K1WRxqGvcXxZWH9VaON1jSyPbV5UQ9YGERgOBApNqFgqvuDUxUDiRT0IoANp/TWQ12ObNm5sF4MkHUKJQw1lNAUqiYHbBROd0tJlAqyZ5IYrlQCd6+AkQEPQ15Y30Yk0a7Z7uRLxfI+r+2Y/WzzjCmae+H6jyoUprwIABdn1UmzkU9BTcWAQkfDVeRcc14Hchj0RbAoeC2vOx3hAaVL3ho+EeER44qNmfe/dCH0GKwz5+Xm5h8Pyx4rCyuA+veuMcPCPAevG2HQgrtqebDeD9cg9+3Li1vxDTCwkNIeoAVU1UdxUSqsKEKBckNEQ14np75knU6TON2wx4XT1kQ3nd34F27vs7cTWZ55PHPhzf2x+wn4Om78SFNtVhXiXmx/Vr5Dh+DRD7ThzfN76XGI86czz8Ftg3fh6cC+sruw/bsY7k15i9B1qp1ySM4qrE7HOOn2VM/Nz93H6vQvwXJDQaCNSB016CcFMvdGPoDoRqnVwRSNTBEzLqoarUqQMFEvlUO+ETYd6rkqgCoqrJq1iyXZ7j5Oa4LVu2tKozHNlO3I8Qzm6gWoft6JrC+4ly5zlVYFyDd/MRh51SbeVdm9A2hK4/vHqKah32o88tzu99S1EIE3rM/fh1eZUQ29MZoYfQMn6Ew7Pj2XBcfCsU9F71xP3jwKeKjOqq+HngfKeajXYkq622moVA+73gRKe6irYeHDfuoZfrpDsOqt/cX+PdizgEAHjkVgzVe1TLUeXGe+J+XOD4+yZRDUdXJELki4RGA4Gus51s7D4N76jbh9raabAd4GsANFMKYBqUUSBTsLnW6wU3jmIKMoSSg5ZL5A918ISdUvhTmDr4BdyhTEHrYAmwjrBVjunXirADHOOsi4UGfRKxnsKehoHg/gEH/wGC1Md/4B6y1oBfDzAYlBMLDcKNAauEaCeirxCmPCeipyikSVyLC50YfDZdu3a17fH5QFbY4ptx/w/wzBE2LqwRIIQ2E+aLksC7RqgSpRZbJGzPc8cvhI+F87rQQGjhpyEhoHnmQuSLhEYDg2gmGptRCBIB5BE+tBUgNDVbWMZ4D7QuNLA0KMip2qAAp7BzZ7W3ZqagQrDEQgNBQgFO5A/OahznHvpLK2muhdBZCj/v9A9LiQgrIrm8LYe3hPb2CDh/aSMSCw0c+xSQCEUKVDR6tzQobMmjkMfZTOtt4NlwblqskzzPWXbZZdO5qkKD54HjnuguoMoMfwTPxx30REZxj1gPWE++HdYVVgrPn4K8ffv2to5IMHoXJjKNgj3ubh7h65FTLlxoXQ48UwQugpSEQz7uqJJ3z3NBmeDeWSbMF+LxUoSoKxIaDQg0YTRVoMqGdgfeWtupTav0xm0uNI
jcIUSUZQpCCkuPukJzR4P16pJYaFBIk6i+YbhYBI9XmVG4Y3UgTCjc0XodrpV1CBzuwwtBF2ZU+9CeIhYanIPjsB/CCtziAPJp14Eg8l5rqedHiBGB5dZDLDTiLuRjoYH/guuiMKfAbtWqlQky4DjkeYpDbRE2nJOqNAQHBbz3N0VB7v4MLBcipqjOcrAkqJoilBe86xS2Q1g5KAqxT8S7Umd/BBNVa+4XQkhx71hEdNXuEXVC5IOERgOBgtn7acpCtxveCtr7V4q1Uqot6DrEHaUuNCgcqVPPB7otdygkqbNHw0Ujpq7ej0kVGAUgBSPJw0kBoYRQ8ULTfRreBsLbesRCAyiEYzg/UB1FgewwfgX35HgjRkKSXWhgUbGN9+sUCw0KaffHIAAQeH585u+77z4rgJli4cTE4bwIEG9Tw/NAuGMRYBnwvFxAAEIZK4OE9UICzoFVgn+CRBsOf8eAgEXAE5aLZYPvA6Hk+PuXP0PUFQmNBgKNxajKoACJ67YdChHqvr3QRBt2EAwuMMDr49mWahSqeTxR5UShzLgY7lDF0erjPDsUiu4Mp77dBQDVSwg39iEhrByc6hR05FPNxDmAa2N/99VkC2S0d7Rxv0b3CVAIU+hyHRwvHn+c68JPg4VEH1ZedZONTlpmmWXSucS6YswSqohoXY9PwK8Ra4hqom7dulnKCluqnrg3hBLPIyvo4jHBa8OrpxAEVIfxXEi8p7h9DgKR1vZ8E7wjBC/C2AMOyGPKu8Aqit+/ELUhoSGEECJvJDTqAer7PUyUenKvm6ZOGm0QbZdpXEeNdk9enDgOsB1aMHls53X7jtdlcy6vuoH4fBDXkceghda0TgjRuJDQKDA4HKmqoIrFx5SOIQQVp/LWW29tbRSoGnFnMFUKxO0vssgiVtftIbBAVRHbEflCYluPPKIenPYQVFFRuHvnd4SeUh1CNQbVENSV4zB1qGriWPgfqOLwMFCEBO0XSPQ7hZ8h171kQVipmkOIho2ERoGJx4Teaqut0rmqULh6AzqEBoU9UAdN4U4UEREu3u9STcQN5mg05iGecW+21KPjYMaaQGjEzlBAINCgjHr/XIP6MEgR+3rUDuDIZdwKIIondiJzTRzTx7pgPSD0vLsM9s+2JRFClAcSGkWAaBxCW+OxLGJw5NLNN20TXGjQ/QSOVRycRNrQaI1IHA9VJZ8wSRzOCBem7lzGqiF6il5cEQpuMXBMd4oTPcP1EMXkXV/gsPUQXYjDTt3B60KDthLexmOGGWaoHDMCAUd0FtFEtEx2weVtHbzFN2GsRPcQ9YOTGEsJx7wQoryQ0CgCXkUTdwnh0O0EBTwRPGjbFLgIDTRxfAwU8hTERPO4jyJL3LYBqKaKt4tbIzOgEi2zKeCzlgbnI6rKk4eTAkINfIhYHy8CGO0OiMjh3DRM45zci+NCAyEBa6+9tk3jqCLvSkMIUT5IaBQY4t+9oPRuK/A5xFDAU5UDhF7iWwAKd3wHhKXi2/D2CY6PZ5HtP4oqLY7jhX/cJgC8VTj9JcVCA8vDw2FJProexCGgtAL36iZwoYF/BkHE9VAthWCh6gn83r1LDh+rQkJDiPJGQqMIYE14wzHwcR+AAhwhggOaKicKefoRArR1qoFowHXUUUdZbH8MDcCoovLk58D3wfY44Ekrr7yy5WNdcG6EElVdtFrGynHox4lW3TjKPQHVWggYrhOrhvYOcX9I7oMBWln7/RHNxX5YSERsUfXl1VXeSptzOrGfRAhRHkhoCCGEyBsJDSGEEHkjoSGEECJvJDSEEELkjYSGEEKIvJHQEEIIkTcSGkIIIfJGQkMIIUTeSGgIIYTIGwkNIYQQeSOhIYQQIm8kNIQQQuSNhIYQQoi8kdAQQgiRNxIaQggh8kZCQwghRN5IaAghhMgbCQ0hhBB5I6EhhBAibyQ0hBBCCCGEEAVHhoYQQgghhBCi4MjQEEIIIYQQQhQcGRpCCCGEEEKIgiNDQwghhBBCCFFwZGgIIYQQQgghCo4MDSGEEEIIIUTBkaEhhBBCCCGEKDgyNIQQQgghhBAFR4aGEEIIIYQQouDI0BBCCCGEEEIUHBkaQgghhBBCiIIjQ0MIIYQQQghRcGRoCCGEEEIIIQqODA0hhBBCCCFEwZGhIYQQQgghhCg4MjSEEEIIIYQQBUeGhhBCCCGEEKLgyNAQQgghhBBCFBwZGkIIIYQQQoiCI0NDCCGEEEIIUXBkaAghhBBCCCEKjgwNIYQQQgghRMGRoSGEEEIIIYQoODI0hBBCCCGEEAVHhoYoL4YODWHmmUNo1SqE885LM0uYH34I4d13Q5g0Kc0QQgghhGgcyNAQ5cXTT1d8tRWf7WyzhdChQ5pZouy1V3Kta60VwptvpplCCCGEEI0DGRqivMhlaPz6awjXXx/CQguFsNNOIdx0Uwh77hnCvPOGMPvsISy1VAinnRbC118n2//8cwgXXphsv+++IdxwQwi77BLC3HOHMMccISyzTAhHHhnC228n28ONN4bQunUIq60WwoABaWYFf/+dXMfCCyfH+vLLELp3D2HFFZNz+7U2b55czzPPpDuWB19W3M8XX3xRcZsV91kH2P77778PP/30U5ojhBBCiMaGDA1RXuQyNAhLuuCCJJ+07bYhvPFGqNCQQ7jqqsSAIP+AA5LtJ0wI4dhjq27/wgshfPVVCE89FcLmmyf5SywRwmOPJftgmJDHsXr2TPIABXyffZJ1G26YGCfjxiXTzTZL8tu2DeH++0N47bUQxo9PdyxdRo8eHdZZZ50w44wzhh122CFsv/32Nr9ahZE1YsSIdKvcjK+4vzZt2oSZZ5457LrrruHUU09N1xSOb775Jpx99tnhyiuvTHOEEEIIUYpUaEFClBFTMzRQ6p99NsmHMWNC2GabZN1224Xw3XfJ9m5orLtuCK+8km6ccu+9ISywQLK+S5ckrzZDA08G6zA0MHCc3XZL8tdeO4S33kozS58VVlghzD777OGXX35Jc0L466+/wvzzzx+aNWuW5lTl33//DR9//HG48cYbK255hrDXXnuZUfLqq6+mWyRejo8++igMHDgw9OnTp+KR5H4mP/74Y3jiiSfCo48+GgYPHhw+//zzdE1iyNx5551hoYUWqnjVbcNLL71k5/iu4r3iPRk6dGj47LPP0q0Txo4dG4YNG2beGfj2229tu68qDEu8NQ888EB48cUXbR1w3yNHjgx9+/a1a2W7LFzH888/Hx555JEwYMAAO4cQQgghqlKhBQlRRkzN0CBkiQbjTmxobL99CN9/X9XQwGtRodBWwY8155xJGBRcfnmSR7rjjiQPUF79+FlDY/fdk/xVVw0VmmuaWX5gRNxyyy0VtzJD2GOPPdLcqmBEPFth4B155JG23YYVz+Kqq64KNxCWVsHLL79sRsoSFc+bvJtuuqni8exe8RpnC5deeqltg6digw02CHPNNVc4/fTTzaDAc4GBs95665kyjzFzbMW7m3feeUPr1q3Nq3HzzTeHd99915R+zn3uuefa8Zzbb7/d8v08HJflBRdcsMIW3M2uB
0Pkjz/+qHjNl5uRtV2FUXpHxXtmHwyvZZdd1oweuLDC6Jxjjjkq7Mt9wz333BPOOussOxaenw8++MC2EUIIIUSFGpROhSgP3NCYZZYQTjghyZs4MemBivysR2P06BC22ipZl8ujgTHRtGkIyy+fGAyLLZbk47mgrYdD7fsmm0zZZ801Q4VWnDT0pq0H+RXKcBWDAi9IkybJOgwgDI7nn09XlgdvVBhOK1Zc+6yzzmpKdW38888/4bnnnqu43RnCySefnOYSqTYhbLTRRqacd+nSJVxzzTXh2muvDZdccomFWS2yyCJh1KhRti2GxG233RYuvvhiU/J79uxp+8xZ8cwxGIBtW7ZsGdbFGxXx4IMP2rk7deqU5iS4oXHFFVfYshsap9FuJwLvRfPmzcPqq69uRhLXiUF09NFH2/n3339/uxeujf0JL7ugwijlvDwn1mGUCSGEECKhQgMSooygfUWFMhsIdfnkkySP8CXaY5CPR4HG3s7kyYmxgZcD70aFMlyljQbGA8d6550QevUK4eGHk1AqjJcs7Msx+vQJ4ZFHQqC9wu+/ox0n56ZdRhRuZBD2M2hQCA89lOxHd7dlAGFIO+64o7XNQHH/888/0zU1U5OhQZgRngq8EM8880wYPny4eRBeeOEFaw+CF4CG44RKse8uu+xi6wlNwtDASGHfW2+91Y73TsW7atGihXk5Ynz/M888M81JvDFXX3215buh0b17d1t2D4dDCBSeCdqlcG1cJyFWr732ml3np59+WhlOxpTrYB8MkuUrDFXCuTBWhBBCCJEgQ0M0PmiQ7YYGXompNHBubHTu3DnMNNNMpowfeOCBoWPHjqFDhw6WaNxNiFMuYkPjBPc2VUA+hgPHxDuCx4J2EYcddljlOQClneX1118/3Hvvvaa0E8qE12O++eaz8C2gnQehWU2bNg3nnHNO6N27t7XjeO+998wbgVcCo+K+++6zMCtCrPCmXHbZZba/Gxp4SmIwpjge6wipuv/++0PXrl3D2muvHZo0aVLp0eHe2IbQKcK1ON6qq64aFlhggdC/f3/bRgghhBAyNERjhO5wURrpAveSS0L49NN0hQDCgIYMGWKJdhdxIi9uJJ4F7wXbffjhh2nOFFDkabjdr1+/8NBDD5l3I9uIeuLEiXaOXr16mRL/yiuvmBFBHgaG82vFO2Td448/bgaKNxjn2mgP8vDDD1uDc/b54Ycf7Jp8Gwwlzp1tNO6wPQYT5+c4nIfriqFBOW02uA88KTR8r+25CCGEEI0RGRpCCCGEEEKIgiNDQwghhBBCCFFwZGgIIYQQQgghCo4MDSGEEEIIIUTBkaEhhBBCCCGEKDgyNIQQQgghhBAFR4aGEEIIIYQQouDI0BBCCCGEEEIUHBkaQgghhBBCiIIjQ0OIIvH999+HL7/8Mvz+++9pjhD1y7777hsee+yx8Ndff6U5QgghRPGQoSFEkTjrrLPCrLPOGhZccMHQp0+fNLfxgDL76KOPhtlmmy08++yzlofRdcstt4R55pknvPXWW5aHMbb00kuHOeaYIzRr1iy0bdvW8vPhl19+CV26dAmLL754ePvtty1v3Lhx4YQTTgirrLJK+Pzzzy1vwIABYckllwzt2rUL1113Xdh4440tjR492tY3dPbbb78w88wzh4UWWigMHTo0/PPPP+kaIYQQonjI0BCiiKBYn3LKKaZsL7DAAqFv377pmsbBv//+G0aMGBHmn3/+cPnll5tRgFGBMeD8/fffYfLkyeGHH34Iu+yyS1h22WXTNfnBOXr06GGGxPXXXx8OP/zwsN1221V6kr799ttw8cUX2/mdUaNGhXXXXTe0b98+zWmYuIGBATd8+HAZGEIIIaYrMjSEmA78+eefoUOHDmH22WcPTZs2Df369TMFubGAt2GGGWYIiy66aI2hZISa7bzzznU2NJz77rvPzrHNNtukObn55JNPwvbbbx822GCD8MUXX6S5DQtCpNyD8fzzz8vAEEIIUS/I0BBiOkLtvRsc8803X4M3OLi31157Lcw555yhW7du4aKLLgoLL7xwmDBhQrrFFP6LodG9e/fQunXrcO+994ZDDz00bLLJJmbcxXDOjh07hpYtW4aePXumuQ2L2MB44YUXZGAIIYSoV2RoCFEPoACefPLJZnDMO++84fHHH29wBgeK/l133WVGBsYG0G7jwQcfDE2aNAnDhg2zPKcmQ2PixInhjjvuCJ07d672jH7++edw5plnhjZt2oSPP/7Y8mi3cc4551jemDFjLI+QNZTvffbZJ3z22Wdh7Nix1jbkp59+KvvnzvW7gYERJwNDCCFEqSBDQ4h6BCXxpJNOMoNj7rnnNoOjoSiJ3AeGwG+//ZbmJHDPv/76qxkQMeRjJLBPlldffTUccMAB6dIUMGbY/o8//khzEvAcTZo0yc7DcbkG2oWMHz/epiSMDM5XrsQGhodIkSeEEEKUCjI0hCgR3OCYa665Qv/+/VUrnYLyjCcE40EkBhwGxkwzzVTpwZCBIYQQohSRoSFEiXHiiSeawUF4Ed2yyuAQwHdAL1JuYMiDIYQQotSRoSFEieIGB20cMDhUo9844b1jYMw444xhkUUWsW5qhRBCiHJAhoYQJQ6Dz2FwMKDdwIEDZXA0EggX23///WVgCCGEKFtkaAhRJhx//PGVBscTTzwhg6OBQgN3Gr77uCPZ3rmEEEKIckGGhhBlBgYHxgZGx6BBg2RwNBDoOUsGhhBCiIaEDA0hyhQ3OGabbTYzOAi1EeUHI6UfeOCBZmC0aNEiDB06NF0jhBBClDcyNIQoczA4aDAug6O8wMA46KCDzMBgtPIhQ4aka4QQQoiGgQwNIRoIxx13nHk4Zp11VhkcJczkyZND+/btZWAIIYRo8MjQEKKB4R4OGRylBaOUH3zwwWZgLLbYYmHw4MHpGiGEEKJhIkNDiAaKt+HA4KCXKnozEtOfSZMmhUMOOcQMjNatW8vAEEII0WiQoSFEA4dxONzDgcEhD8f0YeLEiZUGxuKLLx6eeeaZdI0QQgjROJChIUQjwQ2OWWaZxQb+k8FRHH7++edw2GGHmYGxxBJLhKeffjpdI4QQQjQuZGgI0cg46aSTwlxzzWW9VA0YMEAGR4GYMGFCOPzww83AWGqppeTBEEII0eiRoSFKBkJN7rrrLqsNPuqoo8KRRx6pVITEs6X9RpMmTUwpxsPx2GOPWXerou789NNPlR4M0vrrrx+OPfZYfcNKSkoFS5Qnhx56aOjatWsYN25cWvoIUfrI0BAlw/fff28Dl9GW4Igjjgh33313uOOOO5SmQ7r99tvDPffcE/7555/0bYh8GDNmjBnHCP9cz1VJSUnpv6YePXpY5QWhr7vvvnv48ssv0xJIiNJHhoYoGTA0DjjggDDvvPOG++67L80VQgghGjePPvpoWGihhcJuu+0mQ0OUFTI0RMkgQ0MIIYSojgwNUa7I0BAlgwwNIYQQojoyNES5IkNDlAwyNIQQQojqyNAQ5YoMDVEyyNAQQgghqiNDQ5QrMjREySBDQwghhKiODA1RrsjQECWD
DA0hhBCiOjI0RLkiQ0OUDDI0hBBCiOrI0BDligwNUTLI0BBCCCGqI0NDlCsyNETJIENDCCGEqI4MDVGuyNAQJYMMDSGEEKI6MjREuSJDQ5QMMjSEEEKI6sjQEOWKDA1RMsjQEEIIIaojQ0OUKzI0RMkgQ0MIIYSojgwNUa7I0BAlgwwNIYQQojoyNES5IkNDlAwyNIRo+Pz777/hn3/+qUwsCyFqR4aGKFdkaIiSQYZG/XP77beH66+/vjLVJNBQEO+///5w3XXX2XY+/emnn9It6s748eMrj+Pp2muvDffcc0/4+eef062mznvvvReuuOKKymP4MYcNG5ZuIQoBBsLo0aPtOzj//PPDCSecEA4++ODQvn37cNJJJ4Uzzzwz3HjjjeHpp58OkyZNsn0mT54czj777DDDDDNYmnXWWW0fkT/nnnuulZE8PxTPhx56KPzxxx/pWtFQkaEhyhUZGqJkkKFR/yy44IKVSiDp2WefTddM4auvvgqrrbZa5TYzzjhjWHXVVcNHH32UbjFtvP/++1XO7WmWWWYx5TUf3njjjbDSSivlPM4pp5ySbiX+C88//3zYeeedw0wzzZTzOWfTXHPNFZ588knb97fffgsXXHBB5brZZ589HHPMMbaulOA63Yg69dRTzVAdN25curb+wLjbZZdd7J/zZ3jxxRdXGnKi4SJDQ5QrMjREySBDo/5BkLkCQ4oNDZScO+64o4qCSY30bbfdlm7x34gNjdlmmy20atWqchkDiNrx2vj666/NIGH7Jk2aVLkXFDMURjHtfP7552HttdeufKb+XPfYY4/QvXv3MHz4cFPOP/jgg/Dqq6+Gp556yhR03smLL75oxygXQ+PBBx8Mc8wxR+V1co/fffddurb+wej57LPP5MloRMjQEOWKDA1RMsjQqH9qMjSoMd1yyy0r8zE2ttlmm/Djjz/a+kIQGxoooOutt17YbrvtKvOWWWaZ8Prrr6dbVwUF9n//+1/ltnhcjjzyyMrlfAyNTz/91Iym008/PRx99NGWOnXqFB5//PFaQ8L+/vvv8O6771rtd5cuXSxs6IgjjjAFmuX+/ftbWFguUF4Jfbn55pst8d37M8VzQG01xyGdd955prxzr7UxduzY8MADD9i+XAvPgbAmroVrrEsYmjNq1Kgw99xzV3meHJdQqLowNUOD9TwvnsUtt9wS7rrrLnu2Wdhu6NChZnyy3Z133hnefPPNdG1Vfv/99/DKK69YGN5ZZ51l5+Pd8p5vuOEGM4rgl19+sTCvrl272r1xbX6dq6++erjyyivt+8DYxqDKxZgxY8JNN90UTjvttHDUUUdVfgN4dGp67hhot956q90HhtmIESMs/+2337Zr5t1xTv+GeD5syz5sz39DKKPDetaRuD/Ww4cffmj3xn9w+OGHhw4dOoRu3brV+G3GoNjyjDt27GjPhu+Kc+PFpAICQ9PPyTn+SwilyI0MDVGuyNAQJYMMjfona2jQrqFfv37mIfA8QmH69u2b7lE4YkMDRbZdu3aWF9csb7jhhqY4ZkG58m0WW2wxU/Yuv/zyyryaDI1ff/3VlNoWLVpUbkvCU4NXJc7bYYcdTPmLQUlcY401qmzH84mVVE8ojNla8ZdffjmsuOKKldsstdRSYZ999gnNmzevsm+cmjZtakp2FtqytGzZssq23DcGAlPPQ7GuSwNstm3Tpk2VY6KoTgtTMzR++OGHsO2221auX2CBBUwZzsJ27Bdvd/XVV6drE7744gsrT3wbT7zXOeecs3IZxc2333vvvatsmysRyocC72BAXHLJJaFZs2ZVtuM8fEdx3r777hs+/vjjdM+E4447rso2J598cthkk02qvDO+ezxFsPXWW1fZnvf+559/2jrYaqutqqzn+Msuu2yV42UT7WZyGbBUNCy33HK17otRH98nzyf7n4j/jgwNUa7I0BAlgwyN+idraKy55pqV8ygbm2++udXgF4OsoUE7AOjZs2dlPkrMIYccYvnOyJEjwxJLLGHrUe6oTQYaKMfHyxoahJ7stddeldssuuiipkDG4Shc0/bbb1+5DaFDHgYE77zzjtW6DxkyJEycODHNnQJKNYaH73/ppZdWOX7W0CCtssoqZtzhRaKmmpAwlItYmeO9EMrkoNjFRkbr1q3DN998k65NwGDgWdXVo/HWW2+FmWeeufLYPMuavAdTIx9DI37eGBDUpGdhu2OPPbbKdtT+Ozw7PAm+nvPgwcjy7bffmseEb5qEN4lj47mIPTgYmbT/4V2g5HmbCDwYsWLPd5hV/HnmeOd8my222MI8RA4GaFaRx3vn3paXXnrJDFr/vmIvH+nee+8Nf/31l60DDLXs8fD20baKb4Brx0MS3x8JoyL2jNAxRGxssz3fOh4znhXHee6556p9v/yj/BeisMjQEOWKDA1RMsjQqH+yhgY9NqHE+TIKzI477jjVkBk8BSiCeCV23XXXKom8gw46qFoYVNbQ2GmnnSyf0A56MPJ11BzjsQBqoYmf93UohMD5p2ZoEArj60k0sn3kkUdCjx49KlOvXr3CoYceWkXRvuqqq9IjTIGwGxTKu+++O3Tu3NnundpxDJPY0Nhvv/3smp2soYHnCMUyC2Ev1Cz7dhyT3rUc3ke8nrT88stbmMvDDz9sz2NaQdnNGhoe3lNXppehASjgvp40zzzzWLgfnq6pKcI8M7b3fWtqo0H4UXwOetDim4m/Ib4plMO4bRNtQJysoYH3AgOmJupqaPC95vpf99xzTzMKfDtCodxAwqiZb775KteRMMhyecI4d/ysZGgUBxkaolyRoSFKBhka9U/W0KCWE+UQBSrOR7FAqa4JFJWNNtrIQpJo1B0n8ugZavDgwenWCTUZGoD3AW+Kr0c5J+4eg8PzCGHiGEBta22GxoQJEyzO3NfXJdG9qEPt9ZJLLlllPWFPm222mT2zbBgUy7EnIlfoVC5vATXvbdu2rdwua2gAxg5hRng7YsPAE8r4tDRqRvnM9kbGfUwL+RgasSLNealFz4LnIQ45ymVoUDuPNwZjKzaWPeEhWnnllXOGZuVjaFBeYTjGx8w30ZbByRoafLe5vGNOXQ2NrIfF4bnEHovY0ODfisPLFl544Zwhi4DxEf8DMjSKgwwNUa7I0BAlgwyN+ieXoQEoE88880wVxQRlZoMNNihYt5+1GRpA6AoKD+u5DpQb2mOwjAFDQ2dnaoYGENPv60ne/S33mivF64BGr7SX8P0J1YkVPujTp09YeumlK7eZmqHBPU2roRHDNXIeQrpQkuP3hqGXTwPgGO4jrpHnedI5QF07A5iaocG3FCvvlAWEm2UhDAjPmG+Xy9DIwjeB4cF2hCb5vijGNLSPwSsRhxbVZKDxzfg2JG8n4t9JNvm6mKyhwXeE0VgTdTU08Krk6p2qNkODsjj2xJH4/7LXDnhf4u1kaBQHGRqiXJGhIUoGGRr1T02GhoMngLYT8TY01iYM6b8yNUMDHnvssSrnJlHz6u0ynHwMDRrXxveCgoQSF/fshAKHgknjdxRieqByGBQwVr4JRfGGviiKKG7
ZxtnFMjRoN0IoUe/eve0dxbzwwgth/vnnr9wXD0tdDQ2gnUBcy0/iudJQfPfdd7ceuqipJ9yO3p3o1pbQMYxAnilMzdCg1hwPQ3wOrp1wtE8++cTaBxCKFLd5IGUNDQwWemYi5A5FOFaQWReHPOH9Oeecc9K1CXjb4gbwHB8DgPfAc/d3iPJNw23fDi8J3wHPypV7lHfayxAuRW9kvI+YUjQ04IwzzrB/wtfzn9Fb12uvvWbvAa8Hx8g2eJehURxkaIhyRYaGKBlkaNQ/PPtYacCLkYWQFEJL4u1c4aQtwbSCAhcfE4UpC20NUPCnth2hJ4Q4xdsRKpUFhY4uUmvr5ckT7SfoxtMh7h0lOde2JNpIoFjGbSdofE4YmIOBEBsQeGho65EFxS7eDgUx7vaVXreyRmKsbHpCSeH8uWqm84FGwIMGDQqbbrpptWPXlDCK/DvC0MB74Ou4j2wPVrwTuuaNjxEnvlGMGDoF8DyMkbjtDMZhrneTfSYYQTR6jhtBO4TlxYq2JxRrbyMENK6/6KKLqoWX5UpcO+8qJnudF154oRnKNUFj8nj7bGhUdj2hZ7kMDZ5fbCRcc8011bYbMGBAte8qTuzP8WPPHs8sV5fE4r8hQ0OUKzI0RMkgQ6P+oV98PAueamtETM05xkG8vXfBOS2g5MTHIzwmF9R6c51siwchVy9KKI7E+6Pw+PFqa5vA9nx/eBPo0pfxFAg7omE2eVxLrMzF8ByowWU/asKpsabGG6Wc58f4HFwr10LNdnwcjBVf7/eTKxaeY/l2/pxjpZDr5/7ofYpr5jq4B8bioOE215NLmf4vYKyg8PB8MJg4JwYFzwFPDdeCgRTDPryX+D7wEuSC58Bz5T34cfFOYKxwL+5d4Dh8D9kQPp49+dTAx++Ua2McjHy8Ohg9nJPnyDWwP+FXWa8RcE3cL14Ouh+OvyH2YV02tA54b/6N8o0Qjlbbu+KZ+32zPd9/bDiyPj5edr3D9fD82Y7j8fxqMkDpbYvn6O+Ye+J7BDxNsfFB+V3TccS0I0NDlCsyNETJIENDCCFKB0InMXJrAmMq9uQQhpar8b7478jQEOWKDA1RMsjQEEKI0sEbhGNMMDYLYYAM/kfIGe1r3MAg0clArrA/URhkaIhyRYaGKBlkaAghROmAx4KOFmh8j4GB4UFbpcUXXzysv/764fjjjw8DBw6U4jsdkKEhyhUZGqJkkKEhhBBCVEeGhihXZGiIkkGGhhBCCFEdGRqiXJGhIUoGGRpCCCFEdWRoiHJFhoYoGWRoCCGEENWRoSHKFRkaomSQoSGEEEJUR4aGKFdkaIiSQYaGEEIIUR0ZGqJckaEhSgYZGkIIIUR1ZGiIckWGhigZZGgIIYQQ1ZGhIcoVGRqiZJChIYQQQlRHhoYoV2RoiJJBhoYQQghRHRkaolyRoSFKBhkaQgghRHVkaIhyRYaGKBlkaAghhBDVkaEhyhUZGqJkkKEhhBBCVEeGhihXZGiIkkGGhhBCCFEdGRqiXJGhIUoGGRpCCCFEdWRoiHJFhoYoGWRoCCGEENWRoSHKFRkaomSQoSGEEEJUR4aGKFdkaIiSQYaGEEIIUR0ZGqJckaEhSgYZGoXnn3/+Cb/88kuYNGlSzuSw3c8//xx+/fXXNKc6f/zxR+Wx2I59nH///dfyJ06cmObUjG+bK3F8zgN//fWXXdNvv/1my47fE9fgafLkyeHvv/+2dRzHr48p28bXmgu/t/iYcfr999/tGFwL98i1ZSGPY4wfP94S81xTDMdi/2x+uZJ9TqQ///zT1vn7BJ4b7zLXffM98HzZln14l+Q5PHeeGesaK3xbPJdczzubeI5T+96BZ+zPPJ/k/6XDcq7tPPk18D3w7nnHueDeXnrppXDzzTeHTz75JM1N4Bq5J95/PvfUkJGhIcoVGRqiZJChkR8HH3xwmHvuucMDDzxQTfhff/31YdFFFw0nn3yyKXfvvvtu2GijjcKqq64aVlttNUurr756mH322cMMM8xgQhy++uqrMOuss4Ydd9yxmjKIksD+bL/SSiuFrbbaKjRt2tS279Wrl22DErT22mvbdWWvKQvveeONN65yTaSlllrKruvUU0+17fr162fnPOuss2zZGTlyZFhiiSVs/5VXXjksuOCCoVWrVuGxxx4LH3/8seVtueWWYcyYMWGbbbax66pNMKMAXXfddWGBBRaw8yHM2Ydjssz32KFDh/Dpp5+G4447zu6dc8X06dMnLL744qFly5bh2GOPDUcccYQdZ/755w9PPPGEbYNCxbNju1dffdXySpWPPvoo7LHHHqFJkyb2DOabbz57L+PGjUu3CPad8Kx5DyTeySyzzBIuu+wye6bNmze3bw94fnPOOWcYMWKELTu8L/abaaaZ7LvcbLPN7JwLL7xwePrpp20bnjvfxvLLL2/L/wUU11tvvTUsssgioXv37mluYeB5oLjzTxVaKaY83GSTTao872zi+fEv8L5efvnldM+amTBhgj1v9o3/w2xacskl7Zhnn312lbKhZ8+eYddddw3t2rWzbWacccaw4YYb2jLfzhdffGHPe8CAAbb/Oeeck+6ZGCndunULJ554Yjj++OPtf5t55pnDzjvvHE455ZRw2mmnhf79+9s1br311vYtNXblWoaGKFdkaIiSQYZG/pxxxhkm2BHeXmt82GGHmUC/4YYbaqw9hLFjx5rSh+Li27mhgVBn/U8//WT5KAr77LOPKYKfffaZ5QFKMwbPbLPNZvkYNeuss05ehobDuTkXtZ8waNAgu/7TTz/dlt3QQOmAXPeEUnfxxRebknLooYeaUYIwxsDI19BwHnzwQbsfFCq4++677Zl06dLFllEejzzySDMeYkODZ7fFFluY0vbee++luSG8/fbbphjy/LhP2Hbbbe0ZoWBhFH7++eeWX0oMGzbMvo8999zT3jMwxSDgm8OozMJ3gkHF+7riiivsXWHwYlACymPW0MDrg+HVrFmzKgbMDz/8EHbYYQcz2lA0UViXXnrpOhkaXA/fIbXpzDvMX3PNNfa93HTTTZbHe+X+4u2AfBTrbD553F92n1deeSWsssoqYfvttw8ffvih5WUNDvblumKF3SHPn7efw/dnH2r1MfprSuzDd8U7wENQKAYOHGjPq1OnTjVe97rrrmvfxqWXXprmJnD9bmice+65aW5V2IZ7+/rrrysrPhy+Bf5hGRoyNET5IkNDlAwyNOoGHo155pkn7L333mHFFVc0Y2Do0KHVlBsHxQVlaK655rIa3W+//TZdM8XQaNGiRdhrr72slhElioSg59gofQ4KEV4T9sHDUFdDA+XEDYkLLrjA8rgfaq7vv/9+W6ZGk/XUcHO9GBJZULAwDPBG9O7d266FZ1EXj4aTr6HB99mjRw9Tjl3pPuSQQ8wAeeSRR2wZ7rjjDnse5513nim87I9ivdhii4XBgwebUuWKZSHhXXJ9KG5ZJbmu8M4xCPCMYRBQw53r+yKPe0fZvP322+17wNBAAcVQ4LllDQ2M2TXWWMPebXxM8vfbbz/Lx5vBu8vX0Hj//ffDTj
vtZN8N/wYeMs7Lt+qGqhsaeDY47wknnGD3hiLn8G753rgHFGXAw8K3xbH5BrhXjCG+U74d5lnHsfmOOP9DDz1k+/Ktt2nTxvZhX7ZbYYUVwpNPPmnruef111/fjo9xyr5zzDFHGD58uK2/+uqrbT+MPb7nmhLPG2Nn9OjRtl9t8E1OLfGfcg94KCg7YkODb4M8vBl4Uk466ST7V9dcc03Ld89OTYYG/y4eELxglDd33nmnGbdsixwANzRQsF9//XVbzmXoNgZkaIhyRYaGKBlkaNSdK6+80pQShPPDDz9cTXF95plnTPFiG5QflLWnnnqqmrLohgbbZmstUdg9PAmF6dlnnw1HH320nXPfffe1bRD++RgabEfIBEocCgTHQHklJOmoo44yhZRjbrDBBqZoobi44p8LFFG8MNwXYTijRo2qc+iUk4+hccwxx9g14w1C4LsSCvfcc4+FofD9klC48Hz4s+bdUNu9zDLLVAshmhoo7ptuuqmFbaFs1JZ4VxiGXCeG4wsvvFDtfecLSivKNoo4Hpvnn38+XVMV7g1Fk+f12muv2XumFhqjiufJO+DbiO+ba3ruuefMECAkD0Uer4h70DAOAI9ZPobGW2+9Zco+Cnv8H3A95Pl7jA0NDDGum1Ctvn372nrAuMJDyDePUYiCu8suu5gh/Pjjj9vx2Ybr5b9D6eb8fMv8Q7H3Dw8j74Lafv4t/z/45snnvLxfvimWMVyyXHvttaaQ33LLLeYd++6776yiIFdiPc9/akYmz5T/i/cSJ4whroMKiTifbQkx9G+JComOHTtWuVegLLnkkksqje4XX3wxrLfeeva8Ha6NfM6DF8bBwNtuu+3se8P48mW+K0KyKCOGDBmSbt24kKEhyhUZGqJkkKFRN1DKUQoQ4NQIMn/hhRdWqfFDgUExQAHJhiXEfPPNNybcUfKyhobz448/mlLEu8mGZuRraMRMTRHyUJw4tjuGe0NpZBs8GijhnJ9lFPpCGBrUiKM0Ac8xV+gUcHwUK4wo9iNRQ0s7gDjhtcHQi0OFCg3/EfH61ILH30K+oFTjDcl+B1dddZV9YzTajeE94knjuePZAcL5phY65bA/50Nh5/m88cYb6ZqEfA0Nav+5hvbt26c51eFc+RgavB83NAjpA743vhEMPu6NbwUFmJp6av+5bgwN2jlhADvcO9fFd8PxMIYwAPEEMOW74P/EuMZQxVjOwreFos3zxCDZfPPNra0Px8UzQp4nvneMFa5pamAsEbYU/4uUv8suu2yt5YWz//772zeBwcixsgljgWfGdXpIpMP3xXVS7mBEURnCFC/Xm2++adsodGoKMjREuSJDQ5QMMjTyA0WeRrYI+Ng7QcgRtY4oOhgFDiFFXjOfrxHgoKxke5PJJpQyQmtQeFAWpnYOlF+uBY8FimVdQQHinvx8KNQOShrKWl08Gjw/EqFOKI8ohtC5c2er1SWWH8WZsCzaWKAYZQ0NngNx+ZyfsJVs4vlQ403NLgpmKTcGP//88+3b4v75J7k3aslpN4EiyDcHfBuEs+BRYntqm518DA2+k/g7yiYUYJRV2oxwLN53bdCAnfNhlPB88TpwDt5rbAS5oeEGE++X9YS4AQowij0GAd4nvII8B7xwKHmuBANGCsdCCcSjxrdGOBD36f8B6/hmeD4YVIRw8XyoHMBjRMNtvmm+jZoMDQyBOMHll19u182/4GVAdpupwXXhAYzDIjkuzyYfQ4VnwjVgQOHtyZUwEvhu4tApro93zHPg3HhmMeq5F87r755nzTPl/cvQkKEhyhMZGqJkkKExdVDSMSRQUFCssiCYCRNBKXTlAaXJw1N4vgcddFCNiRr7uCabWmaUiNrScsstZ/uiVORraOAlQEEhHAXFFq9FroR3AS+BgxLC/fAMqEXP4oYGbSFQ8JlOzdBA+UNhbN26tT0jFFoUTKYsk6jFpjEshguhSXHtd75w7d5Ggxj2UgajiHdDrTm1/RhHNP6n5t155513rAadkLEsKJDU1rMeeL58g4QxAcp2165dTXHK9U154l16e422bdvavrWBgkqoEu+PY7MfIV+EOwEKLteL54GaduB7x+Dl+CjetIPAyOIb5X15uBjGFsqyt0ngufCfEQLkEP6FkYtizPdEb2SAh4NwQ/K4LtYTIuidB/Cv8m/SRsEbkjveNgTD3MOYMIC5Bp4xnhLPJ1HZgHIfe1Vqgnvgn8WTmS0L4nTggQda5UDWy4X3CCMT709N/y8hVoTCuacQeE+8W4w53jPPlHKE783fPR4svkHKOu6Ttj6NGRkaolyRoSFKBhkaxQHlAMUvn5RPuMR/BWUPjwu1/ChaU0sNRahy3yhL1HwThiNEfcP/nqscyJVyfbMYX7n+2WyikT5hUGLakaEhyhUZGqJkkKEhhBBCVEeGhihXZGiIkkGGhhBCCFEdGRqiXJGhIUoGGRpCCCFEdWRoiHJFhoYoGWRoCCGEENUpiKGx4oohzDxzheZXofodeyyjJqYrIuixsHnzEGacMYQzz6TnhnRFEaC3tJdfpreIEJZaKrkuEudu0SKEww4LYdiwZLv/Ch0ZjB1Lf9mFOV5d4Tly7u++o8FemlkHuH7GFpp99hA6dgzhwANDaNo0eV4XXUQPC+mGKSNHhkBPfaxv3z55l/PPH8LWWyfXkWXgwOS9s33nzvTakq6oYN11k/NyvqiHunypOKIQpYEMDSGEEKI6BTE0ll02USQ9rbRSCB9/nK5MwdBYYIFkPQptMQwNFG26ca64HzvPnHOGsOuuIdx+ewiDB4dw550h7Ldfks96tsuMr1MnPvgghHXWSY614YbT39CoeHdh8cWT859/fppZB376KYRNN032v/baKe/EDQ/yr7uOwX6SfHqkZNwh8nmu33yT5GNMLLxwCHPMEcLrryd5DgZds2bJPtdcM+VYsPnmifG36KK5jdOpUHFEIUoDGRpCCCFEdQpqaOy5Z5JQOFlmbBzvurg2Q2P8eEY2DWGrrRKlk23mnjuENddMlNOvv043nAoVsr5SQV5kkRBq6rqYmnevted8KMzUtF98cQgLLhjCeuslRoTDc9lpp0RhZoqyjGKOwTLbbMlxZp01uT+u+Zln6Kc7hJVXTo5/5JEh0HX5qqsm2zZpEsIWW4TAeE+ueH/yCQPIJPd9+OGMKpnkw9NPJ9fEuiuvTNbhleHc7kmaa64Q5psvVLzIEEaPTnecCrwr9j3hhKrngyOOCGGWWZL16E08A79+zuFGhsMAtNxX69ZVnzvjA2GEsF/37gx0lK6oAGMFQwPjJes5yYOKIwpRGsjQEEIIIapTUENjhx0ShfGhh5JwGvK6dEmMipoMjZdemmKYoEyjpKOoMvjozjsniijr0rFjagSDBs8F26L0d+iQrsjBL78k69m2Qi8Id9yRKMCMMk/eYouFMGZMunEFGCZrrZWsw5AAjBo8CqutluSvskoIL7yQGBgYLcz7/c4zTwjHHZcYL4xn06lTYhiwbu+9k+PhLeB+yWvXLoRobKHAQK4o8KwjVAkwvq66KoRWrZJ8jBTCxd5/v2p4U
k0MGjTlvTGfC4yNmWZKPD8YYGy7xx7VjQwg9GnppZNtbrlligHFuDseOvXwwyH89VeSDwx2yvF575MmpZn5U3FEIUoDGRpCCCFEdQpuaGBQwOefh8BgmOQTIoPinsvQIHQHYwLFOw6rcXwfvAO1QdgSij/bUsuP56EmCNPZZ59kWxRoFG3OHRsa776bblxBbGissUaaWQHGwcYbJ/nrr1/VS8OAnH7tBx2UZqYQskT7EdZhoDA4J8ZVbGjQ5sKJDY0zzkgzKyDflXsGrqxLG41bb02MQdqsRIODVgPvins2uI/YAMqy+urJdjxH91Bwr5yD/CefrBpehvGFoYF3B+OvjlQcUYjSQIaGEEIIUZ2iGRoObSJQVFG6USrZLjY0aHzs4T+Z0eut5tyV3H33TTNrAWMBpZXtCSs67bTqNeXUvNOegW04NuFDgKJ74YVJPjXwzz6b5AOeCIwP1mUNjU02SfJrMzR22SXNTHnrrRA22ihZt+WWyX1+9VUIe+2V5HEPsaFz772J54V1WUNjmWWS/LoaGr17J+07MPIIzaoJwp0IQ+MctHH58890RQbO3aZNst3//lfVq8I3QYP5bBsWnheemWkc0LfiTEKUBjI0hBBCiOoU1NDYdtvqhgZ07TolVIhE2FKslLdvP8XYQGEnnAhFHCWYhJGRb0NrFGEUXQ/XYX+UdHpK8mvA4GH5rrvSnVJoGO4KPdvuuGMI222XhA7R/oH82NDAaDnvvCQfw4bet44+OjEyXnttiqHBvXE9eFnw7tDWgnzaPAwfnh6sgttum2JYYdjQFoLnQdsQb8AeGxqEJXGN5HNMQpDOOisJn5oahIrxvtgXA6smjwLvzttYdOtWs6GBQYKHhOvHMHPuv39Kexgamcfn8YbspJra09RCxV5ClAYyNIQQQojqFMTQoJ0FbSto+FuTIkqN/XPPJb0/4bnIZTigdCOjb745aShNO4dp6bIV2A+j5/HHQ7jppuSYKPJDhtQepoMBxHWiYJNeeSVp/zFiRFLzjwERw3noYQtF+8YbQ+jVKwnNIhzJDQ3aT9A2oWfPEK6/PoS7767qsYihUXb//sn19uiRNBLHoOF4hB7hRYnh2rhGrvWGG5L3kG97B+6F0C2ukcb4ucDjgOHEufG85HofGBZuNNDo3TsAcGibwfVljVDC6Qjhomeq7D55IENDlAwyNIQQQojqFMTQENWJQ6fw2JQqGIC0CeE6r7ii7mFMGEV4fDA0MDin1TCcBmRoiJJBhoYQQghRHRkaRQKPBZ4MlHg8HaLgyNAQJYMMDSGEEKI6MjREuSJDQ5QMbmjMN998oScxkkIIIYQIffv2DQsvvHDYddddZWiIskKGhigZvvvuu7D//vuHGWaYISy66KJhlVVWCSuttJKSkpKSklKjTcjCli1bmmzEo/E5Y18IUSbI0BAlw19//RU+++yzMGLEiDBy5EglJSUlJSWlNCEbP/nkk/AHXZ4KUSbI0BBCCCGEEEIUHBkaQgghhBBCiIIjQ0MIIYQQQghRcGRoCCGEEEIIIQqODA3RaBkwYEBYbLHFwvnnn5/mVIdudrfccsvw2GOPpTlTZ/jw4WHnnXcOPXr0sAbu/4UOHTqEjTbaKGyyySZV0sYbbxy23nrrsMMOO1hvXc7xxx8fdtlllzB+/Pg0J2HChAlh2223DRtuuKEdb7PNNrN9mbJM4njx9T733HNhzTXXDEOGDElzhBBCCCHyR4aGaDQ8++yzoV27djZWB6l9+/bhqKOOCoceeqh1q0ve7rvvHu6+++7wbzo8/0033WR9l9911122PDVQyumOkG4I55hjjnD11VeHX375JV1bd8aNGxfGjh1bLf3888/hiCOOCLPPPnv49NNP062DGRNLL710+OGHH9KcBO6H65g0aVKYPHmyGUEzzjhjGDRoUJg4cWJlfswTTzxh29B/uxBCCCFEXZGhIcoeFGS6/nvjjTfSnNqhD/LLLrss7LjjjlbDj8cCz8Grr76abjGFm2++Ocw///xmiNxyyy3m4fj2229t3UsvvRQuvvhiM06WXXZZG7X1wAMPNI/GP//8E7744otwySWXhDZt2oRWrVqZJ+Kkk04KDz74oBkQ/5UTTzzRjJl8DI0sRx99tBlD55xzTqUXAyOpbdu2YbnllrNEv+35Ghp///13eP7558O7776b5gghhBCisSNDQ5Qd1MDfe++9FurTvHnzsM4664Qbb7yx0gCoCWr0Tz311LDMMsuEXr16WXjR77//brX5GCoo6YQkvf/+++keiUdjwQUXtPCqt956K4waNcqOg3LOMsYGiv5vv/2W7pEbDA9CnNhn2LBh1UKbpsZHH31khlDsdcBowaNBv+oO94BhU5OhwXXjyWnSpIk9g3nnnddCqBiVPUtdPBp4TDAyTjvttErDCqOL8CshhBBCNE5kaIiSB8X+nnvuMYMCJRYF9tFHHzXPRF3aQDDI0RlnnBEWWGAB8yrEvP3222G99dYLW221VRUPQU2hU1zPyiuvbAr14osvXqeE5wPD5aeffkqPNnUOO+wwMwree++9NCdY6BSGVjxK7Pbbb2/b7brrrmGvvfYKnTt3Dn/++Wd45JFHQosWLawtRp8+fSwPMBBefvnlcPjhh4cll1zSvBoeNoaBgaHB9nWFZ/3OO++Ebt26hW222ca8LLQdkeEhhBBCNB5kaIiSB2PilVdeMWV4iSWWsDYQ5557bvjggw/SLfIHJfrrr7+2EKgzzzwzHHfcceHkk08Ot912m3kr8HDEEA7UqVOnnGFVv/76qxklP/74ox23tuTb430gbMrz8uXII480z0rsbVlllVXM2KHNhoNHA49N1qNBaFMWPEMff/yxhXr1798/vPDCC2ZwueGCF2bMmDHWHqSucD6OR/sXDBjeWZcuXWzkdyGEEEI0DmRoiLICBZ0elFD8MQDwcKCAH3TQQWYoTA2ME8KnCLuiZyhPtNcghChOeAd22mkn86AMHTo0PcIUnnnmGfNQYKxgAHz44Yd2/FwJ5Z1G5rT3uPLKK63Gvy7kMjQIxeJZxEbL1Npo3HfffaF169ZmoGBo3X///eHpp582gwqDo3fv3tYuBW/IfPPNZ/fPOaYGRgnPA68Q+9H25fbbb7d7x8ASQgghRONDhoYoe2gfQU351NpoACFDhCzhBWD72hIhW5deemlo1qxZzl6n6MUKpZ7G4A8//LCFcxGilCvRPS6G0VxzzRX+97//1dnQOPbYYy2EKzY0YmhjQTsVwsLw+uQyNGgfQpsOGsJ76NTUmHvuucNuu+2WLtUMxs6XX35pCaNDCCGEEEKGhhC1gPKOoXHnnXemOVMYPHiw9TZF7f0FF1wQLrzwQjMmcqWLLrrIvC6zzDJL3oYGx6dnqUMOOcRCkEi01fDE8gknnGA9Xz3wwAPWyxVtMGg3kqtxNxAihceBth3ehoN2FLTHwONy/fXXh44dO4bVV189zDTTTGGfffYpSA9ZQgghhGh8yNAQohYIG6JdBe0ZsuBJ+eabb8ybQluNqSXfjmPVtY1GvnBserSa2vHZ7quvvrJ7
w0syevRoa2hO+BfXiUekLg3thRBCCCGyyNAQQgghhBBCFBwZGqJBQ3sBvBLU3tPbFAkvBO00vJ0CHgDCjnKNhcH+hA7F+2cT7T1oz+FeBHquoqE2+zBlvc/7OfEaxGNpcG7G84h7h+K8cS9YjKXBNg7Xxnr2rSnhlcjV4xRw/Pg+aJfCtcZ5NXXByzPlnmO4f7annYbDNfqz49h+fM7FOhLeE95JFq6dfWlbQq9jI0aMMO9LbY3TuSbOzzk4JvfIOXgW9J7FNZLPcR3Wkef37Pv6/bGevJrO6++b83IvNbV/Id/fS67EcQipy8fbxTZ0QsB3VBO89+w5st8DvYyRnwvCBrOePJ6JBmUUQgiRLzI0RFnQr18/67aVdg577LFHtXEwaoNej2izgBKI4sS4EDSuZvwIoBE3I2SjTOYC5d73zyaUNnqtoutWB2URBRfFkgEAr732WjsviqoreUcddZS1l3AYgJCer84777xw3XXX2Sjka6+9tvXiRE9S9P7ESN4M9uegQKPY57ouEte96aabmpLOtllQGtkOg4dtV111VeuNi2Wu34+RBWWVkcUZ9TyGexswYIC18XDckEAhZoBBxg9B4ef4gHLNuyXfoctdGqATyoUSzCCFsbJMd8K0mWE09ywcz6+bBvoMTsg1dO/e3Z4v3HDDDfacnSuuuMLOx32xL++a9+UKP0YE7Wq4Tgejj2+RtjluYJC4NsLRGOOEMVvY1+E+WO/bxolnTnfAtMfBsMrCM+ObYRR7vgcSDfs32GAD6x2NZUae55v2d+3Pys+R63tgZHnuxaGtD9dw8MEH23fN8yNxrzwPDD2+aSGEECIfZGiIssCVPpRPxtOgW9ZpAc8FDazpeclB4WcwORS5p556Ks3Nnz333NO6hM2C0oZiiPLOyOMxxxxzTBVDg4bYp5xyiinZrgTS4Jserbhf73UKZdRBWafbW5TM7bbbrlpCscY4oetaP2YMDclRUjk2I5xjeJBQRBlbhGunIXsM7wHPAj1bMY5HbIhgaAwcONBGB8+Cwk33vCjDgPHHOTGcLr/8cmuUHoPSj+H35ptvWve7vBcS29OOpCbvAkbkCiusYIo+BqkbFHwvKNU8Fxq6Y1w4nH///fdPlxJ4tnTNS5saBhk8++yzq7wvDC26Sa4Nnm38XQwaNMi6SmYAw1zvasUVVzTDMp9umuneuWnTpjZwJM8qX/gWGRvF/6esoQF0U7zZZpuZx8S/D/9+MDRYJ4QQQuSDDA1RVtTV0KB2umvXrqbM0TvUHXfcYTXLMXQ/S+1zHPLj0K0t+6EcklDUfMwNz2M0cXpw8kH9UEhRZDEuUNBIr7/+uvXu5IPh4QGJR8mmxpmabBIjapNee+21yvAdwMsRK4WEH6EI5rruqYFBgJHCs/QerDC0SN67la8jvAbFFKV57733rlzGe8D4ImyPAcd9cu/uaWCbN954wzwleKAwRLgferjiuLwbvD+9evWyARgBA4T3wRgf5GEo5EpnnXWWeSaosY/h2BhQDtfAtXGNjJYOhATFHhQU6/322888LhihnjB+MHQIo8J7w7EdjBCMU2r7MUZiMBT4TvCSxaFHV111lZ07DpmrKzxvvmWuBy8H17Tmmmvacac2Xgn3gZEQh8NlDQ2MOAw03ovDt8oz5VliaLRs2dLeMcaxEEIIURsyNERZMS0eDWq/UXjxBrz44os2jRO15CiHWQOkJlDycnkwYugmltCdmuL1qQ1/8skn06UElFdq75944gmr/Y4T17jGGmuYF8BBseQYhCuxD4YL42gQhkU+eaxD0UZJjImXUf4xgKhtxxOA4hl7KuJtaWOBUUZ4lCc8GD6PgYAR47AvoUTx9iTfh+eAQTFkyJB0j6RHLAxClFsU2jjh9eH9o8gTDpWF2n2unWeDIk4CjDQPnSI0DSPAwQDiGePF4brixDVyXkKnUMKzoJBzvRgPGD14Qjh/Li8D+3Meng9jsHANGKjcB94a8nn/ubw1HgYVh3QxWKWHWfl6vuHsuwaMwsUWW8yMxJhcHg2+W7xVGNNrrbWWGVxu8GJoEH4lhBBC5IMMDVFW1GRoUPNMDTcKVRYULxR+1mUT9OzZ0xTkuIFwDPHpXuMOKJHe1qImrr76ahsBHK8E3gkUPE8o3tSuM4hfDOFcBxxwgCnYKL1xQtml9pz7rw3GyMCwyAdq8Rm9G+U1bgfBPIo63hw8GDGE3XiYEc81TjwXDBsUdAcvB54ePBgQbw+cC8WWtg4OnhraCBBGxBQDKE5cN8eLDRqH9+xhaDURnx+4hltvvdUSZK8RjxHhVdn39V/Bk3P66afnNGDyoUWLFmZA1wTfKB4mQrL4FmPj0cllaDiEzsXXxndyzTXXyNAQQgiRNzI0RFmB4kotcrbGmOWaanOnBgPjUctNaEkuUMZpGIuSiyGQTYSaoNzGihzhPzTgpk3C5ptvXiVtscUWVmNM7XUMoVWEQhHewjbZtP7661sbBA9roQbbe0nyhJGRzcMLwXPLQu07CjkNimng6+dhnnMRo09IVww12hgA2XvyxP433XRTunXyvggbI7wn1/YorbQxicNwuC+MFc7j1xQn9mMfwoWyuHLNs6/pfdGmJr5GjBM8VHiMstdHInSOY3loHN8a1+i9adWUWI+ng2fAPoSKxfswjxGTzcMLk31fhDv5Np541/G+JL5hN4KZ8q3U9k9gSHIMB+Ob/4iEFyOexwilDRFha0IIIUQ+yNAQQgghhBBCFBwZGqJBQ80wtfLUJjvU2hKe4/H79IhETX3cmBbvBJ4AanFzwbasj/fJQi0w4TjZ7ko5P3m1NQrmuukStqbzx3AfhL8MHz7c2mnQ3oGRvrnv2mqzeQaExtSWuMcsXFfcnS/nIA/PAPc0tV6TuF7aTGS9JTwv2tB4bT418lwjbVe4Fu6RKbXq3j0u7RkIR8t6AICaeMLUfP9s8l6lCAlyuH6+hXg7lv374ZicH49GbdAWg4ba/o05nI8UN7bmW8NjwT15irtC5h0wdkU+34IQQghRSsjQECUPDZXpBYgGuUB7DPr6z2csDZR62m7EbQBoN0H3pLQNgN69e1v3pXHvTTTO9dAZ2kbEoTcsE15Ej1Nx42yH49Iz0oUXXmjdwzLl/PR+hXKNYkk7Bxqi1wSNshdZZBFrkItimwsU7H333ddCZgCFP07AeWgY7kprDA2dCZ1CkeW6cqXYkELpJhSMMKiZZprJwphoO4KBwXNCqebd0K4iC0oyDeQ32mgjC0VaaKGFwjrrrGM9dnnXttwP9xzj90FvS8suu6zNY1R4Ps+f9+HLWchn+2wCuunlO6CNjhOfjx64vB2I5/ONXHrppTnbh/CtEYK3zz77WCNqetuiLQ7dw9JOB2grwXNwQwniawSMRN6/G2yMGcJybYPzCSGEEKWIDA1R8qBoZ3t5olci2hZMDWqOUexQqpmn9pq2ETQmph0F0DsT8flLLrlk5XlQzlEYGWwPTwHtODyxjLKJkp/LWKAGnB6O6D6Wmm2US47vY3RgaKBgo3DTyBgFndpvGp1j8NDg2pVOYD1tGLg
etvdeiVz5pTYcJR1PBtdHd60YUx6v79tl4ZnQyJ3n4opuNmX3xetAY3WMCdqLALX8GBreu1cuQyOG90lD5h49eqQ5CbkMDeC4tNlo1qyZvSvA+OQ4GJs8l2mB94ih5AM3OjwzehajYT3tEfx5Q22GRgwGzNJLL20GSwzfIm1E6EqX+80FxhodAHjDfxkaQgghyhUZGqKsQAmkO1Bq02sLW3KobUc5b9eundV+o/jREBjlFE8GZD0aKNecxxMwejg13IT2gK9DGc9C70THH398pbcDw4LGya5Yo7hmPRoolhhONASvKeFFocclwomA+8BbQhhTTQlvAUYZte0xXLdfv98D18D4EjRc9ny2cWODeUYrJ9SMeZ4Z2/OMMZpYxgihV7BccN8YbmxLiBK1/3hIvJesrKFBuBIN0mmI7yFIKOEo6j7gIs8Y71bWIMLLQ/exNGqPnyHnI/ky6+n5Ce8N941Bh7EZhzZhuGEIsp575z1mjRPACOO9c33ujWDQxpVWWqmylzS8cnjYagubowc0evzieCBDQwghRLkiQ0OUDdTSo3TGoS75gBKK0pxVRh2USBTmuPed/wpKKcoqSjiJecKHPLE+ez30AMSo3MD6OAHGEs/AQdnHkCCszLvBjRNeAEYGn1qXuG5QxMYF0yxcx8wzz2zzrMfrQ+hVPqFT3CvbAQYE3qD4XiCXRwMPlI8vkStxDfQGlevdksc1cz+AMYERgRJfE3hn8Drcdttttuzvj8TxaPNx0UUXWTe+uWA97U/4njxkjnkPocP4weuUNfwcvCW8u7iLYhkaQgghyhUZGqLkoSacNg8M0obSiJKGQhgrayjoKN25Gsyi/B5zzDFWu5wvNBTGY8AxL7744iqJUb/xSNB+orZ2FjWBh4PuUrP7tm3b1sYpoJaeAfriREjSggsuWGVgO5R9RmnG+KKdAqFX2cR5vA1HFtYzYB5hSG4EAMo53h1Cq9jG4Xx4K1DUuYfddtvNavZ5D/kYGrwbtkWJxqPE/WOAUXPPNaKMt2rVKt0jP3hWnC+XocEz43rxRuUL3xVeFxR+2q4QnuWJ58S7wHNDSNzU4JowAldbbTXzitQE5yGEje04fhYZGkIIIcoVGRqiwUNbBRR4b8idTRgNhKtke4eqDRoSo5B6I9+6gEJOG4Bs70wo/YTzMFBarkTITxzXj0Fw6KGHWhhZru1JhFuh5HqtfgxKNfdNA22OgUeFxH6EN+Gt8JClLOzrYKTQ0J1QNmr0CdWaFvAo8QxyeVNqgvYshMbl2ofj+SCIud47Rpi3o3G4F8ZN4ZlknyWJcT8wWmtqX0G7H4wzQr4I0/PrwjAj5I8wL8KrpuZlinn++efNuJtaT1dCCCFEqSFDQwghhBBCCFFwZGgIUYIQYkTIGN2gUhtel1p+QnZo20BbiLrshweAsSrwSuRKrIvbsXAeetgibA0PT67EqOBxewOH66JhNSFqHJuxJaix55h4oAhVwmtCaJY3igbW05CakCnaZxBOFN8j1xeHKRGaRVgWXfP6fXBsGtT7ftxDNoyNc48cOdLGJfGxRPAgET7FO+F68abEIWcO2/t98czYnuvGg0U7Ea6JZ8N8jHu6GDPDYZ77/X97d7MqV9FGAdiBn0SjiIi/YHSgiUE0CpEYxaEaEQIq3kGuwZF3II5EdCAONBMdCOJcB7kSr8SPZ5v3+KZSu7vPsYU2WQuKc3p39961q2qfs9b7V8bAWPQ9PwrWh3Go+9N4TByv941JDzXUL3MjT8ZY6o/fq0/Wnippa+unX0vra4P3jBemxg1cu0LQ9K1+9z1jbRxd3zx1GHN94jk0F3Z9V3nMmnPPfW2MsJ762pTLYwz7a9c3NyOce+wLONa9mMbWOtB353Y9v5vrmiver/FcxtVaMsbWnspxvR+//fbbMq67wPPls7O1CK5V88gL69zmw7G6pjWgP7P55qm0VmrcZk2RDOsanNNa6O/56RwV/qev1uTYZ9/1vBiv+o5wTM/wONc+a/zNYV+LvXnPNa2jDq/dl2eLF1LulvM5Zv1qykx3z6V++Ztg3dS9mW/jBu7F+7N1EwR3MyI0guDAgAwI6SmCLuzr2rVrCwHfhs8//3zZn0Iok8TtqlC1C4QSCfcp8tHhmPwBYUgF/1iFCLnmceEfvNKxY9lipFOFJons+q7SV99sUUldYU0FieLCvKrPP/3007KHRQEJFKo0ltLtkHMj36SAOKjYhYAgXsi/alSgVPGvv/66FCRQPWpW+UzVKXM2Quibfhg391cVzBAoYWrC3OSoqOxlnBFsOSbmxO9CtgiYXWD/DvNlXJA05ZRHYQPGWDjYSOKIAOOMXHWBMoNrEAHWD/i86mjExgjXe+CBB5bfnVuYGcJGZFnzxrRgHlRrQ4xn8P1Knt8Gz5Lzm4Miu5ugElnPTyp4rqyHGTyjEv8BUSU0zPWnn356Wzjh1atXb1mTXgvZK7zxxhtHa24Nzu1vwnvvvbeU5lYcwXocn1+EXr+sOaGS7s1Pa848g3LfRNxIyHeB69lXx3yM0J9777335qu/Yb6FIvbnhwC1Vw4DwgjrU4ikZ+Q4IASExo598zfFc+VahKvwSb8TiwwgxsHfYeNU8NzLz1rLefO3S7EH1fRgFq4aBHcjIjSC4MBw/fr1xWJe8A/szJkzS3LzrmBdROi6VXkbEEWkZCQq4JhqSV1o+GeMvCCkErJneRCIcu1X0kFo+CeuwhM4P+LkuHyEmdBAFiVXV0UoYD1EwIuUzYQGMTRWmiIgilQhjl1oIB6OlYVeUrzrIg5yPhCykwgNhF6eBiusXJQSGuDcSt8aXwUHSlQaV3OiL/bm2FVoyLlhCQbECKlDoPrcGm8E0waMfX8WYA1+/vnnF2s7q/AmGEsCUGljKKFRorVf05zef//9y7WRP/fbhUaRNHBeAsZ8Gn/FAqwZZE775JNPluvMvDyeGXNOhCLZ/dnRhy+//HIp5mCddSGlrz/88MNS3EC+kt871oQGMXTlypWlYIXfFT5wHWNKCPlewfNgE8eaHyA6rNPyZm0SGu7n/Pnzy/PFG+e5ULyBkCSalcm2mWg3TKiUZv1VBbRLly79+csvvxw9A9aWviPlx/mbAebR+TqMo+f2o48+WtawsfS6rjcKDUJCQQQiaA365Vzffvvt8tozQxy89NJLy9gTXL29//77y9rx+/ic8pS419q0FVSL019rp551/S6vm0pznlt/c6xNf2N785wRm9aqz/TnKQjuZkRoBMEBwz85/yy7VRcRRhhYShEl1tLRIn0SoYHUExvInGYzO61eV/J0QWiK/lUYzFpDWker+Cg0kEXEioVagjsiOQoN4ss/8i4anNs/9vKMjEIDEVNKF/lC5HzfdVWiUoXMOCKjSEcBae77qiBAr7322kI8jAFycxKhgfAgx+7RTyEYBYS5NkB0buLJffPG2NxPP53XeG4Dsuqzxg+sAWTMONXYSUpHPvu8INCsuEgWbwIiXsRwE4gGlc8qzMQ5XZ+IULVNWFBBXwgN1yihgfSZf8RZMQQeMmOPKCvQ4LzCWIjG+qkJYf
K+OR6rzXlPH9xnNa9tpElcee161hdLNRh/68A1Cs5tTAgeIDxqzQKyXgK9wIpuLN0XEJC94t33339/y27xYN1amzWGa0KDmBCeI9RJ87tn1L3w7rm240Ss8UP4jS1PAGu9MffsuWfPMsIPmzwavGNIe282IC0yb51oBDkg4cbI2BQcM1fuEzw3BA8CX7BOnJOYID7rbwtPh3NZY9ZswRpyH4wEfndfyL379Xs1r11/hLEzbu7BuuDhqTnx99N+SNVf8PeHQPX3oASb5nd/C/TZvI9GjSC42xGhEQQHCAQBGRJedJJqQycRGiP802Rd/zeAAHShUUA81kKnECfj8cUXXyyvoTwWRfR29Wh0jKFT4ubtf1HeA4KuLK3/JHSKRXkWOgWIpf1cANlE2szdcUOnfFeZ3O6FWAudEkJE9CD7Sis//vjjS+iK/iNQSF73RsyAHCKAdmgvlEdjDJ2yJpHnCnUyn8LEjCECPIZOjSAmiIHarHEbkE9rh3Ag6qw3ohLh5U3gJRoFMKJpjemjOejNnBHBPI4dvD7WyNp3hGD1fVeE6fAgdKHJmm6flRK3mzwaxtHzY+xnzVi6r3HujAXxI39kFBRItnVvzEY4Dwt9P99aKF7BGq/y1zxpnuVqlZtVc39S6I/7dU/yYswpb6cwMud3zBib09EQA9YBgUXM98Yz5++OMDtCucN5iB5/U4hi5cWffvrpRcg55m9DF09BEERoBMHBgcX82WefXdz6yCDS6B/ncf6BrQkNlnzk0vsdiIkQDJbsaiylyFM/ppX1GJlADuUzbMKMrLI8In4IvH/4SIHwFveJtCJpSInzl9AAn2F9RGIRSKTTP/jCKDRYQ5E6xBnhcJ0iIKy/+sHi3IWG+xLCQ2SxEiOR8h2QMKIEmdDHNaHBwo246V/dF4v2k08+ufx0fvfXhQavlPFAltxP7STOusuajtSthU4h7F9//fUS7vPjjz/eRp4rdKoTQ585derUUXjYCOtDbP1MqPou4izMhPgZ4X1epJ5gj0B//PHHR3kI5tyYC8cDhPHatWu3CE/Xdv/2swFWaUSvktwLs/XlWSH27NeyBmNlE8wu5N0Xwj9eozALnSJi9Xvt+XS+CvcplFUc6eeBMDY1FrBJaPBCeDaMz9gIEOvWuPlMwVgwXBjn44JHQL7X6DXaBELGGrDG9csareY1TxHRV8+PufXe+LdmbP5G1Xy5hrVd7zmvORXyZ4z6d4zLCH8rHnroocVjqPH4VSM2zNFYJMIzOsvdKXgeiJUgCP5GhEYQBAuJRi57c2ztOCAHP//880Ka+z/p3sb49G1AHhB05BrBIYwqvGMXIG5I93Fw48aNI2K/CQg0Cy1LP0FA7IykfhMIRx4WYyhMBQnaBmFyiJIxMNYn8W6xcCO6M1HkvhF8wpZwuHDhwiKUzFnN83FhDgmxtTAv62aMX0fS3R/SXSAqhCoRvIjfuLYcI+h6eEuH9YN4IrxCcpB54oYHRiMwjW0HAkxYGgvhbb3x9DjevTfAom6dO//ad3bZ4LGDIKvQq+PCs0Ok9OeG4OWlG8ewGnJc3rYR5oZlf/a9aoh6r5Zm7QgzIvTGMalm/ur5IRb9Pv6tWWtr37H2HO/HfGZ2XwwA+mHezNPYGDR6PljBfRrLy5cvL14Z+VC8G4RJD4cLguAvRGgEQRAEQRAEQbB3RGgEwQFCmAvrtxATSYgzi9wMPidciNWNdX8WmzwD6yfruVChaizMlZzKSihkqFfD6hCSwUrfv9+b95yvrOQsj6x/jmtyHnxOnHjlF7Dei8mv77DIs0L2Phgn4Q0VPsNCzXpcr1nPhQfxWgjjGBsLukR7Hgp9mkHoFk9GhRkZY9ZmFnt9c/1eO1/ICguy47NrCsPyc7TqC5PjJRG+w0rPcu7+5Z+43lpojj6wYvueMXT/xrNea+Zu5hlyL6z+s/Xl+j2ESNhWnc979XtZcfXD8R5iY12o1MN6bRzNqSTbmm99tC78bt7Nm+v0kBXnq3Gpa/Zm/bg/3ggw59aI69Rn/G59Vw6En8rB8vaAdcfz8d133922DjxDniV90MydOdYvCcXCdzxv5pwFvcNra9ZnOnxXfxQzCIIguJMRoREEBwZueaEIYpZBSVTVcMYwjxHIjETgCrkQl6xaEjKNfB0XYpGrEpLvS/BF2NaquGwCkv7ggw9Ov0dQPfHEEzdf/Q3kTVhDCQ33J2dDqIJYaSEuxuXcuXNL0rT4aHkKqsgcB4i28K81UUYYuC7CCgSP6wtPQYzFxPcKO9vgPoTa9DAh4UHyTYr4jkC87SnRw1NmMOfWixAouQ3WUImuGQi1//3vf7eJHjCWPc6/4Hz2TSgQD4g40m0PlxI0hJH5GcOGxNKfPn16+V1/JUwj6QSHNSYMRR7RriAazN9YWKCDOJD7UuFq5tSaIVLqOUP+FRrYVACBIJObIxSNMJRjRACtFQdwfyqo+UyHZ0jCMbERBEFwJyNCIwgODAhtkR9AViQ48m5sAhKqxGK3TquqYo+EbmXeBqJA7DsSVSSVQGDxRe4lv84s4JuADKrY0+EcPA3i55FNCay8MXVuAkR8ewkN52AZR0KRVYRdpSb7QCDq+ia5VznOgnNJtFaO0/nlH/SmvKUKOuKr1+5pFBoINGKLyJsbwrALDaRTH8R/E2v9el6L/UZyywIPLN9K6joueZiQMQd+VwXJ+QiTmWgwDsql6pM+9rwR64GA0Rdj279vvdgojqgVk04sdCE4ExqEiTmoMrHmBGkmNqxZZYRLaDj+1FNP3ZZYbe4UKuA54PEgRiTn8yaYb9W5jiM0rFcig9DpQPp5RyrZn2ehoJKX5HoFAcxnjYtkf+J2DebbPFnLrqnvm8odWyM2YeTV6cn4rmmtPvroo8uc9yphQRAEdxIiNILggIHk2hBP6E6BdRT5RERZxiU/j9Z4r1WZQYLXQm46fv/994XcIpwI00hoEVDX6v0oILYIFyJfTclUVnXE2mt9QahqQzbkk2BhaS4gnhLBa88QJJfluIeyILEEV4UlId/CnhBKYSjGq28QiMAje/qA4Apx8jnWaN/x2k8kuQTNCKRUwnQJDaKNsHFPRIyf3WLtekQPEutaQqz8RCbrekJ7Svy5TxWHKtQHcfWzfucRcH7eJecY+0lIIP/mR780VYuIj1ojxIRqSUWEy5rfx9a489LUMd/vVYpUSeq7wFsjhF7tcE2o+U6tRXPFW1Lrzxoydyz89qZQrUyfEHZi2Hl8x/0qIwz6K4zMPRG+CDtxaCM1hN+8ECbWLkGBzBMDvDr2XKgQLH3tIsocKDTQK5qBcxBfa3A+fTFf1pM+dI9G3atQLuKOtwzMv6Rpz6Pyrj7X11QQBMGdigiNIDhAIOKsu0hkWYh3hRKqdupl6T4OWL9V+bGh2KypIoMwzVAx5wV9tonXJpGDqLE0I5YIKItzNXH/qjQJk+oEkahhiWe97p/XeECQPSFGBd8VjsR7QMQQGLwXyr0aH9+p490T0KECEKu0uQD3uil0iiWfuHBu44VU14Z8hIbj7qGLQ9fWSuARFMQkQrwLiBVCA
XFF/nnAiCAkWNOfPo5ANLgPXhNVhXojAlj2xypXyLvvjJ/XlBB2nyVmjIO9Ofzs8Nq+EUKZeEWEWyl1ipgTRsh+FwDGxXrgPTH2yHsJHiKQR2uE3CJeKGuIp4ZwIYKASCC8CJIRPBqeuxn0qfZaIH4IB14J+R0zjwZRYl3O4Lnwec9cEATBnYwIjSA4MBAKjz322ELskUdEBoFci98vIDk2q2K9ZwmX7Oon0lVWcFZj1vY1Ur0GJJWXonZIHsFCf9999918tY7uKUGOhRMh3nIE3F81ZJS3QhnSbsEnCggJZFSoDY/H2JDPgvPyLAjRksgrfwWZtRmXcXHMe67Vw9VASA5xUeVTeQBYpJFDVnkeGfeDlNdme8YVseVZcW65B8g58mrs9cVxfSRu6t5Y9cXsVwgNrwJS3cVBH7sOYub8+fO3JKSPsDM4QdfHkmeAwFs7r1yJMXRK+NxamI97510oYaz/7ql7exxD+p1nJkK9j7TzWBQIQoJkW9nifh/mQCiZ+TaWninzro+8EGfPnl32lujhcvpNwFhbHQSG3A2lWnu+ifXhXLWvijnsQsPzQtRuwtrYB0EQ3CmI0AiCYCsQIiFJayQToRcyJLYeOR+b4yz73cuClPEKIIBI4awhl52MESH2eBAO5HtjY2V2nYqH913kGmGv5pjWj9VndgFyisAi+L5XIVIwnler/m96j4eDB2I2dpoQo6+++mp1wzWihYVcQrwx4C0wRqz6Qp4Ini4ygHDiNZmNu0ZUjd40a0AI0+zzmn4SCwVeG2FaPBjds2F+zBMvA8+JfhM+SP7oBSBIiBUCZba+HPMeQV4wToRghZLxtvBuEHlVtYwYIRLANX2OeN0V1q/vWdM8T37vVad4NHhIxv5W06cxryQIguBOQ4RGEARBEARBEAR7R4RGEBwghMHIeRDXL+m5W4k3QUy98A6hHMJ5lMRlOd8FrNXChKrJ16iwF5Z3/ZFAW3BeoVTClSTf+inZW8iX1xqPhYpCHcJXWNN5JoQTsTyXp0Nuin5363QHyz0vh3OzPvud1dh5HJNPsVYylIVZWM4I1v6eY2EM9Y+HgdVdiI17Mz7CZUCcfyWtF1js9d3nNeFRPUTN/c5Cz+QfuN8eGldzV5Aj00OjWNDt6l1jbwzlWvRQJ2PRw8EqAd8Y1BzLd9DP8nao/tX7weLOE+CzvBL1XXt7gO9J0O4hQwXrg5Vf2JGwIvfvu3Wt8mj1fVG8J5/GfLo/c2we3GcdMze9jwXeE3PiOrU+XPf69es3P/GX50gfypMEvBDWgM9rPEdancOczZ4/c2ut8PoYV3kqwvF4a8p7IyzP89i9OSN4x4RGjvkwa7Be3JMx0T+eRN4s46TPfgqZ7H12jfH57s2z7hlYq7wWBEFwUkRoBMGBQaKqHIAK/0E6lTfthGwGITz2lJD4C2LTL168uOxP0KsL7QoVpiT3AmIm1EN4yQzeV/1JXL9YdpV1ZkAQxdsjSAWEWolZQkCCrhCobeVNhdMgnPJRhN4g4mvXBKRX/+V8jBC/L9Z+BmFCKjqBvukjki3MpyojAdLpvhC9gu8JBaqxV8a3qm51IO+qdpVIQpZ9Vn6Daky+8+KLLy7iZhMkRSO88gyE5tibpJdENs4qZI3hUB2qRK3t1yIxf8yrQEztqTHuw2G8CWQ5GwUigqCqZH1rQe6KMC/iAMyhvB3rA/knVNyDRvQY+0pe3wWEskRzz5BcGQnnWkH+heR2z9sazItytiUyC/pKSBDfhKXzE0OMAwQNEMLmsJ7lApEl18d9OLecFGFkCL8wvF2eV8JKHta1a9eWnBf9HHNqdoXSx3JKxlLEQRAE/xQRGkFwYJCY3K2brNIvv/zy0Q7MxwGBwlJ5EqHB2lwkeyY0EEdWbnH5kl5ZfpFDggP5knTO20DoFBChd9999xZiKp5efD+ihjytCQ3X4x3RD+cc8z3E3yN8+l1Wc0AIVYYiSljs5S50Mj0TGqzByC/yRWy5D3PiuLHQV9cqIJfyDLr3hgXZXg2sybAmNFjp3X/Pf0E6jSniaSyVCp4JDfftOleuXFlKFOsHKz6LvEpZtbM7qNqk3PBJhYZKXWM52DWhAcaLsDOOBBQrvHyN8oqZd6JUgj+YMx4r4sIaIkp4XAgxzZwj9p4D35klkwORg+hbf0SMc7Hys/pbA65ZQKyJQeLGeWt+gXjkXSH8iZHZ9ax9IpEAdU1rgkD0DAOvg/eJo5oLY6E/yH1fp+AaPEz6VHk/IwhsYtLaMP8KD/Ca8Mx4Br1HeHfRYH2b+xJVvVln1pt10z1wQRAE+0CERhAcMJBgJLKLDEQDiUKYJf4i+71UKkspwsPC7jNIz0hotgFZs6t4kd9RaHiNQCOPSA1LOkJ5+fLlpeRnhUR5T2hGEVSCh8Ue2UNsiAIkGyEElt81ocGi/Nlnny3kzvssxZKd33zzzeV+HSMoiIIKa2IJF/6klG6BJR0hkwwMSKJQswKiK/yrjzlRg8zVeJiDvgcCso+s9XKl9sVAOkt8rAkNhFrJVP3/5ptvFnJqd/N+LgQZoSy4NtGEPPd76zCmPdzKHCCbxl0SuPEjSJD4EqJrQsP7BEWv6AWbhAYgv9YS0tvDcsyBMTOudW0/zQlBsakRzvqBZHcQlAg6Ms3S77ojiFEiscREAcG2nq1b69g8WJs+P16nwz24HuHg8xLvrc/yKppvXovytjleXqe1RHHvmRvrsieXg/FxztqDxOc8k9aPY/UMeFatnxpbz4n3ndPfA/elee2+/U6QHffvRBAEwTZEaATBAaIs/0IvNhGdDoQBERvjye1lsGv8N7DiEgw9B2EUGuAYIrtmAe8Yid0aCA3hSjOhASy+SCRhIUyJSLGDNesu0owojiEuCBTxgsD5jJj2agglsdbDoIAQQeTlF4zfQUSJOISxQJwQdZW7AM6NDFaexJrQAGFqm8ZRWAwLeAfvEcGJyJZVGqFEpB03hqMHoiDchsAZieWa0CB0XOf111//848//rh5dLPQsA6FBekPIL3G1VyZC300X3JAqh+INSI85hBoSL3n4Z133lnI9Az6YV54IXzGvBsTPxFtAqsqTYF7sY4IS/NMtLPyW1fmuebe+Jcw7SA8fcfYmGv3SgwTH5rKXURRFwzuxfpx7wi+Makm3Mw5CNRNe6gQmtaXvuufdUXsOCbsbKxOxovlmA05jSNvCQ+X6yl9TJATfTNxFgRB8E8QoREEBwak5+GHH16IGOKASCC8I4GeAeEkCBAjlnGhT4hcWZMRNSE+oxgBxJSQQOJHYeA1AoWUF5xTKIzrbUKdC4mxZwZSVKRdY5VHAllzkUN5Hiy1I5A15E8Izxp4EeyR0YUVAodgriXkCl3yfgcyqA9rIg9R5inpELZEbLgP92Psu1dik9BgCTdPowW7Y5wT+Q/I7WzjOQJD2JIchbJq88IgztbUK6+8soSFGXukmpdD3+65556FgBbkNni/LPT6gFRXqNk2oSF0rObSd5F1G/QJ85KwTMx1UanfyLC+EQTCrHojpIm5tWcBqeeJq9ClDuPAw4bkj3umdLg/
4U77gPXKc9EFH4FBuLkPng73Uo0Y8PzzPHlWZqiSxaOYKHgG5DyVIPQsEBfWNAMC8WMcrHvzfubMmaWfjgldG9dZEATBP0GERhAEO4Gno4cTISS8BQgy0jJrQr8Q/V2BaIt1d60ZEFvXJKSEySD7fiLJmvj3kXgjWkRAWax7q2Msux2su+5r03f04ziQrF+EfYTjSL1QtNk4CssxLqMHgkdJHgOBRowSfYgqYrmp0tEaCJNdPWhgDfjOTCDpq3EleI0Z67u5QISRWoRb3yusCJyHWEB8iVDf680xQolQWYMwQmvOOPAO8FYZH2PYhd8azKuQon3AOpab0YU9AVDemdnacn+8TZvmTzie7xMK1j/hW2LRuXso5XGEQ0RGEAT7RoRGEARBEARBEAR7R4RG8J+A0AMWPBZb1ruTWBxZFVmMWVI7xL6zJrI4F1iYXU8YhdAOIUnivCvZ03Ex0T3MpCCxlCVfX1lSxya0RnnUHt4hTIJF0rnFzwtTEn7jPGXZFILSLf3uR5KtkCTlMVlrva5qM/Id9LtXn+koS281+yGwPrNEs2zKO3CPBceMn9CqaqypFU7lvs3NWshHEARBEAR3FyI0goOHhMlXX331KBlTaAwSL855F0jYlCCq+Y5SlkI2hF8g1RJxJV/2TdsKSL7PireXjyCHQTJy5TycFOLUiYgZzp07t1xjhPr/vXyqZFGf04QG6b8SqEJwhKCI+bb3wSyGfwbVrYQeGReNkJBUuwnCbCQQg0RtpWDHMKSTgtC5dOnSzVdBEARBEPzXEKER/OegIhPPwrYk5BEI+XPPPXfbhnI8C6PQsDmeZFTEHVFX0UmMuWRJ10fIxVGPG3HtComwY0IrQaWiDi8Cgq/6kvjuisu3uVn3ugDRxIugL8TG1atXl/vwHXkGb7/99tJnXo0ef628KO8Ebw2PhIpFcgBKWCl5KlHX8Q7n4LGojeBGoWHjMGVgXa/6Da734YcfLl4ZnhQVrYg13iVeFyJQjgFvj+Pmx/UfeeSRJWZdn/RNpaUPPvjg5ln/gnlyXSDEzK/r8BDZb8H8qn7kWjxXEpm9f9LNzYIgCIIg2A0RGsF/BsKnJMYinioyCW3aBUg9b4AEYuFGREQlEfMKCDnqQgMBlQiMqEo+dR0kWHiRZEtk1XHnQJjHjbzsdyAESfKphrSrACQ0Cdl1DLF2vKojCYliwe/nQuL1rxJJEeZeTUnJShWWejiVajXCrpyH0OARkKyLcHfi75wECPHEs6JP+tyb++x7RYBzEAbl6RA6RgyAawrh8j1eor75l76cOnXqaB8KnzWG+kck8cKoKKT6k8o4QFQ988wzy+8F4/3CCy/cfPUXJBbzEIFE2PPnz98iAI21SkT9WIWvBUEQBEHw7yFCIzh4qHTEut+BONuQTMnObShLPuKLYCrv2IHQstwjux0s80p72tVX7kT95E3Rn015IkRJr7akNv+2vrK0C81SeYYoUnmnGoFy9uzZ265JbBBSBIecju4B4RVQCpY3YQZkX34K78SsEVcEWId8EOMnXwV4Lqrev/PxsMxCp3hvTp8+fRRy5rMEo/ML8yICNGVHa5O5mdAwpur+dxirCrFSwvPixYvL7wX7CigF26EakTKqQRAEQRD8e4jQCA4eBALSryZ/bYaF6KqrXxBa89Zbb91WmtNrVm6iRJ7FhQsXFqLKIs/67ri6+sJxlN0ssOir5Y+4z8Biz0syipOCvSAqkRo5J3Y0v/fWw5l4aVj1hQi5Z16HasAr0kOniA9CyPeQeGFTxkUNffs5GCuW/rUcDZt2ycsgFHg3xsZb0cdTjoxxsweDPBIeGtcRtiQ3w/XMwSah0cuguk9eJfsCOI954C2p+v9EByHi3EK8eJr0h3Awd77Dq6Qfvgu7Cg2iLEIjCIIgCP5dRGgEdz2QV7shj4ScR0BNfyKnNg8ThiQ/48aNG6sbZoHwLISbB2LWhFe5Zg9nQuwdrwTv3oQXCRviwdgVzk1E9Gt0EDDyQmbXq9aTz7eBcHLfa1WugiAIgiC4uxChEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATB3hGhEQRBEARBEATBnvHnn/8HChqD+2V/niQAAAAASUVORK5CYII=) ###Code # KMeans의 학습과정은 아래 그림과 같다. from IPython.display import Image Image(url='https://upload.wikimedia.org/wikipedia/commons/e/ea/K-means_convergence.gif') ###Output _____no_output_____ ###Markdown source: https://en.wikipedia.org/wiki/K-means_clustering/media/File:K-means_convergence.gif 초기값(init) 설정 방식 가장 먼저 random 방식으로 초기값을 설정해보자 ###Code model_r = KMeans(n_clusters=3, init='random', random_state = 0) model_r.fit(input_feature) # kmeans 수행후 군집의 중심점을 확인해보자. center = model_r.cluster_centers_ center c0 = center[0] c1 = center[1] c2 = center[2] ###Output _____no_output_____ ###Markdown **그럼 이제, 데이터가 클러스터링된 결과를 시각화해서 확인해보자** . plt.scatter(cluster_0 에 속한 데이터만, 초록색(g)으로) plt.scatter(cluster_1 에 속한 데이터만, 빨간색(r)으로) plt.scatter(cluster_2 에 속한 데이터만, 파란색(b)으로) . 
plt.scatter(cluster_0 중심값, size는 200으로) 기본값 = 100 plt.scatter(cluster_1 중심값, size는 200으로) 기본값 = 100 plt.scatter(cluster_2 중심값, size는 200으로) 기본값 = 100 ###Code plt.figure(figsize=(8,6)) plt.scatter(input_feature[model_r.labels_ == 0, 0], input_feature[model_r.labels_ == 0, 1], marker='o', c='g', edgecolor='k', lw=0.5) plt.scatter(input_feature[model_r.labels_ == 1, 0], input_feature[model_r.labels_ == 1, 1], marker='o', c='r', edgecolor='k', lw=0.5) plt.scatter(input_feature[model_r.labels_ == 2, 0], input_feature[model_r.labels_ == 2, 1], marker='o', c='b', edgecolor='k', lw=0.5) plt.scatter(c0[0], c0[1], s=200, c="y", edgecolor='k', lw=1) plt.scatter(c1[0], c1[1], s=200, c="y", edgecolor='k', lw=1) plt.scatter(c2[0], c2[1], s=200, c="y", edgecolor='k', lw=1) plt.legend([0,1,2]) plt.show() ###Output _____no_output_____ ###Markdown 이번에는 초기화 방법(init)을 'k-means++'로 수정해보자 ###Code model_pp = KMeans(n_clusters=3, init="k-means++", random_state = 0).fit(input_feature) center = model_pp.cluster_centers_ c0 = center[0] c1 = center[1] c2 = center[2] plt.figure(figsize=(8,6)) plt.scatter(input_feature[model_pp.labels_ == 0, 0], input_feature[model_pp.labels_ == 0, 1], marker='o', c='g', edgecolor='k', lw=0.5) plt.scatter(input_feature[model_pp.labels_ == 1, 0], input_feature[model_pp.labels_ == 1, 1], marker='o', c='r', edgecolor='k', lw=0.5) plt.scatter(input_feature[model_pp.labels_ == 2, 0], input_feature[model_pp.labels_ == 2, 1], marker='o', c='b', edgecolor='k', lw=0.5) plt.scatter(c0[0], c0[1], s=200, c="y", edgecolor='k', lw=1) plt.scatter(c1[0], c1[1], s=200, c="y", edgecolor='k', lw=1) plt.scatter(c2[0], c2[1], s=200, c="y", edgecolor='k', lw=1) plt.legend([0,1,2]) plt.show() ###Output _____no_output_____ ###Markdown 적정 군집 개수(K) K-Means의 적정 K를 판단하는 방법은 없을까?K-Means는 처음에 클러스터의 갯수를 사람이 직접 설정해줘야하는 제약이 있다. 그럼, 가장 적합한 K는 어떻게 판단할 수 있을까? . 물론, 정성적으로 판단해서 정하기도 하지만, Sklearn에서 적정 K를 판단해주는 척도도 제공한다. . 바로, 위에서 언급했던 Attributes 중 inertia_라는 값이다. .inertia = Sum of squared distances of samples to their closest cluster center(자신의 클러스터 중심점과 데이터(samples) 간의 거리의 제곱합) . 즉, 각 클러스터가 데이터와 중심점이 얼마나 가까이 붙어있는가를 의미하는 값이다. . 중심점과 데이터가 가까이 붙어있을수록 클러스터의 응집도가 높다고 볼 수 있다. 이 응집도를 클러스터의 평가 척도로 사용할 수 있다. . 하지만, inertia 값은 본질 상, 클러스터가 커질수록 값이 작아지므로, 클러스터 수에 대한 inertia값의 추세를 보고 적절한 K를 선택해야한다. ###Code # k-means++ 로 초기 중심점을 설정한 모델의 inertia 값을 확인해보자 model_pp.inertia_ ###Output _____no_output_____ ###Markdown 그럼, K 를 바꿔보면서 inertia_값의 변화를 한번 살펴보자. ###Code set_k = [k for k in range(2, 8)] set_inertia = [] for k in set_k: model = KMeans(n_clusters=k, random_state=0) model.fit(input_feature) set_inertia.append(model.inertia_) plt.plot(set_k, set_inertia, '-o') plt.title('Relationship of Num.Cluster and inertia') plt.xlabel('Num.Cluster') plt.ylabel('inertia') plt.xticks(set_k) plt.grid() plt.show() ###Output _____no_output_____ ###Markdown k의 증가에 따른 inertia 값의 변화 추세를 보면 k = 3 일때 가장 많은 inertia값의 감소가 발생했고 이후에는 감소추세가 줄어든 것을 확인할 수 있다. . 군집의 개수(k)를 3보다 큰 수로 설정할 경우 inertia값은 작아지겠지만 군집의 개수가 너무 많아져 유의미한 군집화가 이루어졌다고 할 수 없다. 결국, 위의 데이터에서는 3개의 군집 개수가 가장 많은 inertia 감소폭을 보이는 최적 군집 개수라고 할 수 있다. 2) 붓꽃 데이터로 실습하기 2.1 데이터 로드 및 확인 붓꽃(iris) 꽃잎, 꽃받침의 너비와 길이를 측정한 데이터이며 150개의 샘플로 구성되어 있으며 3개의 레이블 클래스를 가진다.feature * `sepal length` : 꽃받침의 길이* `sepal width` : 꽃받침의 너비* `petal length` : 꽃잎의 길이* `petal width` : 꽃잎의 너비 No machine-readable author provided. 
Dlanglois assumed (based on copyright claims)., CC BY-SA 3.0, via Wikimedia Commons ###Code from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split data_iris = load_iris() # iris 데이터의 4가지 특성 data_iris.feature_names # iris의 데이터의 target data_iris.target_names # 데이터프레임으로 확인해보자 import pandas as pd x_iris = pd.DataFrame(data_iris.data, columns=data_iris.feature_names) y_iris = pd.DataFrame(data_iris.target, columns=['target']) df_iris = pd.concat([x_iris, y_iris], axis=1) df_iris # sepal length와 petal length에 따른 iris 종류를 그래프로 확인해보자. plt.figure(figsize = (10, 5)) plt.scatter(x_iris.iloc[:, 0], x_iris.iloc[:, 2], c = data_iris.target) plt.show() ###Output _____no_output_____ ###Markdown 2.2 모델링 및 평가 ![kmeans_iris.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAxoAAAG+CAYAAAFjVUIVAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAEk0AABJNAfOXxKcAAP+lSURBVHhe7J0H3JXjG8eFEkmUTZnZGdGwIyvKSObfKCOZWWWGbMkmQoqsiIzMZGTTtDJDRmUlI9v9f7/Xea7T/Z73vHXKOb3nvO/v+/k8n2fv57mv+xr3dS8QRFGhF1Jk6IUUGUX1Qp588slkquZS2Bfy1FNlZyg7xW23hfD336npLHz55ZfJVAirr7566NatWzKX4sADDwxrrrlmMle9KfwfssUWyYTIhaIqsoReSNEx319I165dw3bbbRduueWWUKdOHVt2xBFHhD///DPMnDnT5gcOHBj++uuvcNJJJ4WJEyeGnj17hnfffTfsvvvuYcMNNwxff/11OPjgg8ONN95o21cniuoP4UHXdFRkFRnz/YU0aNAgbLTRRja9xx57WFFFMXbppZfaspVWWim88cYbNj1t2jQrmpwtympsFHcwYcKEsOyyy9p0dWK+vpDmzZsnU9n5G12lhjPf/xCEsqicKpEhc/pTajJVJtQ33njjZErEVNkLgdatWydTwinIC1lggQVyHmrVqpXsNXv++eefrPtX1VAoqvQPERXRCyky9EKKDL2QIqMoXsizzz4b2rdvn8yFsNhii9l4boRnp06dwi+//BIWWWSRZMksCimE801RXOnTTz9t4/jBjR49Ol2j2WqrrWwZ06+99ppNw9JLL51MzYIX0rBhQ9t2nXXWMfcw04cddljYfPPNk61Sx2KoXbu2jYuForiS559/vsKXzcP2B8UYY+IDDzyQNjxC/CDXWGONsMEGG5jxkuUMSyyxRLj77rvDUUcdZdvE28fb4Ie55JJLkjVVS/F8GlUIRV0caFGV6IUUGXohRYZeSJGhF1JkVNkL+eqrr8xD6JEm7iOZMWOG1X6o3u600062bKGFFrJxXEuKoYZFLWzrrbcO1113XbJ09rz11ls2/vHHH+2c06dPNwMmtT2EfFVRZS9k+eWXT6bK8/nnn4e11lorbLnllukXsvjii9t44YUXtnFl8EKy0bhx47DCCivY9CuvvGJjf7kXX3yxjbt06WIv848//gjfffedLasKVGQVGXoh/xX+tFNOCWHMmFnz/NnTpoXw778h/PZbCG+/HcIPP6QCzv/8M7XdI4+ktmV9hF5IPuCFODzklVYKoX79ZEEZN91EWH9qOjOy5p57kokUeiFFhl5IkVHwF4Jxb04BcNRsRIqCvRAi153rr78+mZpF/bIylpfVt2/fdKDD+uuvb+OazHwpsnghTZo0Sf8JF154YTj55JNtGnKNPKkJFPSF0MYDeBE4iFD4YNSoUfYHrbbaauHQQw+19iCAxsy2J554os3TttDp3LmzjdkeXn31VfNzLLfccmU1zqTKWQ0o+B9y0UUXJVMhHHvssTb+8MMPzUzBsNtuu4WPP/44vPPOOzbPw/3mm29sO4dGO44XhePGjbNx3bp1w9ixY226OlDQF+JF1AsvvGB/hZs2nnrqKWtmgGmkZcuW4b777ivTocqUqDK+/fZbaxmFiYOWU5hQ2rZta+tefvllG1dn5osMefzxx8P333+fdtOee+65ZlzEfXr77bdbUXXWWWfZOpgyZUo4/PDDwyGHHGLz3bt3t3GPHj1sXJ2ZLy8kEyJEYOrUqTY+8sgjTUZQpBGEjaGPv8eLqr322iv8+uuv4aCDDrL5/fff38bANI1+OAYQ2PDwww9bu0W2vwktuYxPPvnExjQOisHKCyxHzmHpRYbhu99ll13s4+Fj8Y+j0FTJC/GHw58A559/fujXr59NX3311fZwXnzxRWvo6fBCHP4e54orrjAT/imJ+WKHHXYI1157rU3vu+++4a677kq/+PgY/gJjqCS4O8D56KOP5msARJW8kOOPP97GPEw455xzbHz22WeHM888M+y6664mV1Aoe/fubeuQMV6s+QM/77zz7CHTWLRXr15WWzv11FNtHcfkeJMmTbJ5iruffvopPProozZfrFTJC0F+xNx77702phKw9957hzZt2oQHH3zQlsFzzz2XFvrg+/M3EULEn0bREwfb4Vc57rjjkrnUy4vlVLFSJS/kyiuvtLHrGXvuuac92FtvvTUMGDAgvczZeeedbTxixAgb87KQDePHj7ei7qWXXjI5wgtBPj3xxBO2nRdLvAxeOn+W1/z4A6HY2rpXyQv5Ad9AGcOGDbMxXHDBBclUijgzEK1xoX///jYGynVqbxRruGF5IUcffbQ9dF6sw7n4w5AFNBDyY/gfyL7FRMFfyG+//WahohQp/pAJE+UFECLKMrL/xC/Hv96aSMFeSKxt33HHHWlfNlAtBRTFAw44wNJsOLRhJ0UT2ntNpEqKLFE5eiFFhl5IkaEXUmTohRQRehlFhF5GEVGYlzGHyJJsxPapmkphXgY2ptq1U9MzZhAdjY81NZ8BmjvGQWxXKIUjR4605R78TINNnFk1gcIXU4l/Q8wZyYwiQi+jiJivLwP5QEAcLZ7wTyMXCIrDNE4Eyc8//5xsmcLbjyNLAKtwvXr1wn777RdWXnllW1adqJI/Y05xvDUVFVNFxHx/GZMnT04XPziiaAtIIBxtA5dZZpl0CM2mm25qxRjevv/973+2jMaX++yzjzXyrI4xviX9Z5x++unJVPWgZF/GGWeckUxVH0ryZcSB2NWJknsZmREm1YkqfRlzaxz0KMTqSt5fBoHL
dVObN8fd32WWXmaKxwgorhK233trWozzyjngmDl6fXXfd1Y47adKkZGlKEeWdUbHmOFQwqRDnS9G48sorw0ILLWSV93zCvW200UZ2T/H95AMq+PwvsxuA58w7yEXRyJUnn3zSntfZZ59dwQgB/l1jvLjhhhuSpSn4Zx577DG7JpR5h+Wff/65KRNvvPFGGDlypH0/zz//fBgzZoz9R/xX33//vf1bKBo1vXItRUOUKlI0RNEgRWPucMUCS2H79u1NGaBy7JWOTKgQINyxqFPpc4sjoGhgkWY567HEUxlgPZVFKhpx6AMVDirknB8PA+vmRtFgf/dYUIGB++67zyqXt912m827JZTKP5X7gw8+2JbHEFLB/lSAL7jgAtsX6/e8KBr33HOPPYM5KRpYgNmWe0bh4n5R/Lh2KkvOsGHDwnLLLRdOPvlkU6bYn+thf94Zx//qq6+SrfMHlTMqbzyb/wrXjWI6derUsNpqq1lFB+UuE+6N+6WySQXeFQ08GvzXRx11VAVF47vvvgvrrbeebReD8nXYYYeFpZde2r5Rvs1cFQ3O1bt3b/OO8D/wfW644YamgDquaNx44432fffs2dOOH3s4UIKOPfZYO+czzzxjy7gWyifug++S46Nsv/POO2Ho0KGhadOmdv945bh2PH54geDtt98OnTp1Su/LN40CzzcK3DPerjZt2oTDDz/cngnbvfDCC7a+T58+dk94DPAWVDastNJK9q/ERoH/yu233x7WXXddUwa8vAC+jc8++yzstttudl/bbLONPRO8frwzlFS+i2yKBvAdHX/88bYOBe2AAw4Ia621lj0/vGKAgs4/w72hfBA+l0v5Uh2RoiFKFSkaomiQojH3EL5AyATCmkpNprUZSzQWf4Q1lSsqQOedd14FYe0eDRSWTKsl4VhUJFA4qGwR0kIlmXNuu+22tg2VhlwUDdY9++yzZvk8+uij7RiEa3E8vDMoCqyjAkLlhkrHmWeemexdEazdHTp0CGuvvbaF2Hz44YcWdkO4RSEUDb9mKj5Y4m+55RZbRwUI5YHny3NmP7ah0kuFGv5L6BQV9yOOOMIqc9tvv32lA/eNZ4WKLNeJwkVlcF7huVJx5f3zLCtTjPhmuH9XPPkesELzL/ONUAHmO40VDZ4HlVieF6EzvD++VyrVXDsVT+D6c1E0eN9sg2IRv+/nnnsurLjiiun3mqlonHDCCfauUHwdV3ZQFqhgUzbxb6AYEfIDKCOEl6HgoohRESZ8cPfddy8X5sN3zfNDoYmVtIMOOsjuE48BlXLeLfN8d5lcddVVds38Hyh+HIeKfmVDNq9TJq7MzM2At4r/APiWKRNQJByeJ/8R34p7jHj+q6++eujbt6/NA9u98sordkzCpRyeO4oL/xEKM6FTlA9sxzfEv41BoiYiRUOUKlI0RNEgRWPuwOpHhZZQqZ133tmE8R133GEVOOf999+3ihGVuNlVPlzRoNJeWXgEFmus81R4qTTEYT+5KhpcA+ESjz/+uFmQn3rqqTBixIj0gPWYOH6s8UxzT5ltNByuiftlGzwaXD8WVeYJX8mHosE8niOggpUtdApo04AShuWaZ+7TWJZ9GD9+fPjkk0+sUlpIq6x7CvCy8E/NLVSgsSR7hdKhQojFnmeeCffMc+d5A5XhOYVOOVSy8TigXHbt2jX079/fKstOrorGSy+9ZNeQzfPl8M3komhQwXVFA0UACIfCWk+Fl++Nbw2lhu8GaItDRZiKMv+bc+qpp9p14Y2g3QoKKt8Qyg8VapQH3tMWW2wRmjVrZt9NJnyLrVq1MiWVa/L2GhyX47HMB45NGGAu31i2f53yFwUzfgeVwTfGN0GoFspw5kC5QAWZ6+Q5xLCefwsFlPeLssn9MQwYMMC2cY+GQqekaIjSRYqGKBqkaOSOt9HAoo7ABqznVKCosFHRcwjroBKezZORC1TGZjegPBCSRSUpl9ApKjBcK96KwYMHJ0srQiWIbWPFCTg+cdxYvHkGsUUUq/LcejS4B6ACjWKB5Rmw3lKhpD0FUOGloT2VxUxFA0UNRYyGrVjpMweOTYUSLw2VKkKbihX32lDZ4/nzfFD8UDCpVLpFGUWEZ3Luuefa9tttt11amc1V0Yi/o2wDx0PxpAI9J0UD5Y5vkCH2vOCRI6wp06Ph7QnwCnL9scUdKzyN+rHEowCjVKPYc698686JJ55o+/JMPvroI/NoEP7EtTgoCXxHnN//VUAZOOecc0yB4fkSZkYIEaFYucB18z5ij8LcgqLDN4ly6VC28A3E11oZXbp0sftHWSC0K9uAgoWnKzN0ivcL/M/XXXedlQceeujvnmeKh06KhhQNUbpI0RBFgxSN3MDyhzWU0AMX1g5WWayRhGV4jD6KBqE0xIhjOR4yZEilA8IsPiaKABWAygYqEliDqdwxpsIxJ0UDKyeVNizCeGMI+aKilm3gmmgg6qB0cE1UAKloZZ4Lb8ncZp1iHRUmlCTad1BBxirMmHm3IBM6Q0WQdheZWadywUOnOFYxKxqAwrTBBhvY++U9o1xhcScsysFLw3vAyp/pOUHRoBJbWdYpKrGEA1FJzfymGNjWrf5cB2UCXpo5gRJA2wfOzTfPcdZcc00LH3QIZULZQwEErOa9evWyfVA0qRjTgP+4446zSq63kyDLG94KngnfOdfHcVAgHbwktDPgW+Lcd999ty3Hc4ciRvsczsH+eENQYoCKPhVI2nwQPhaD0oM3g/+L5+ED81wL32O8nEo51xArRJVBxZXvm/ft/1tlAyFkmV4unhPKDuUKSmTmwHd+9dVX23t2RQ/wAHJe3hHvm+fCdfNcmOd9c228O+4PL5KyTknREKWJFA1RNEjRKAxUpAgrocI0p4HtMpWXXKACgpeB/TMrI5ngqcBqyzsm7GR2AxU1Kji5wrkJxaFSj0KD4kFISy7W2TnBc6ESSGXb213MDVzb/fffb6FBZFaq7vAeUAS5b9r1vPjii/PUQJ3j0KYlMzWu+O8Q8pStHMg2oFhmlg3sn+2/zRxQVMhmNrfwH/MPe2hWTUaKhihVpGiIokGKhhBCCFERKRqiVJGiIYoGKRpCCCFERaRoiFJFioYoGqRoCCGEEBWRoiFKFSkaomiQoiGEEEJUJC+KxtFHh9C+fQi77hoC2fRmVux8M9COqkwOh912C4FEBxlZ//IKbX4++igE+iM68MAQNt44hJVWCqFZs1B2oyFcfTXp5JKN8wD3Qlr2ObQjLBhkg/z559R9zwu33pp6L/36kWYyhL33DmHHHentlsZsyUYJJIPo1i0EUo6ffz6ZKlLbk/3thx+SjSJoA8d732WXEOjgNG4TdfLJZKEJYZ99SBuZLMwdKRqiaJCiIYQQQlQkL4pG06Zltb6yap8PKB2ZiS2Yb9gwtZ7U4YVqhP/11yG0aZM6z0ILhdCyZUoRot+iY48NYfPNU+tq1Qpho43IIZ7sOA/QaWm7dqnjbbFFYZWnbJBdbv31U+efTQe0lYJi1LFjan/Sr8+YkVreqVNqGQPKgStQpKtu3jy1fIMNQvjkk9RylMsGDU
JYeWV6800tc557jnzXqX2uuKK84rLNNqnlyy8/69xzQdmeQhQHUjSEEEKIiuRV0cBqTUWeCv6yy4YwfvwsK/vsFA06eHzhBXqhTFm4N9wwhLZtUwoCfQvlau3Gq+CVWs41dmyyIgOuq27d1HZ4OqjkktL83ntDOOCAlKU96jMnfPttCOedl1qHxZ9tSUW+886p++Q4Zc8w7LsvnfuQozulhBxzTAh09ImnAMv+YYeFsNlmIWy1VQgnnJC6Z6/EkzHwqqtS3oFrrilf8eY+uCYUgLL3Zeuuuy71rBdfPHV+FA7Of9FFKa9DLnA97Mt9Zb57vAysYxg+PPW+uG7mW7UKIeo81OC+a9dOeY/iZzdmzKxnhOckTh2/++4phW/ttVMer7mk7IhCFAdSNIQQQoiK5FXRIHQKpYD+ZRZbLLVs0KAQ6HCzMkUDCzgWbZavuWYqrIle86mUbrppqiJapw45j1PbVwbKCiFZHIcK73HHJSuyQJgR3g22xRJPj/FUgHv0SC1r3DiE995LNi4DpQEFgXVcE7D+0ktTlWS/dq75oYdCmDYtdb1+v4ssklJKCEVi/SGHpK6RdVT2gVCvDh1Sy1C2pk5NLQeUrSZNUutOPz31PDk+yopX4tl34MCUlyNbCFMmeBrWXTe176OPJgszcM/GKquEsMYaqWn6MJo0KdkgAkVptdVS29CXkCsUH3886/1y/0nnqwZK2IILphQX3slcUnZEIYoDKRpCCCFERfKuaHjIFJ1wrrhiajmVaazsjRql5mNFg7YAVDapjGdr27Hkkql9vIJfGSgayHe2xaNC24zKwHru1vyllkp5CbieXBQNQoccKtFbb51ajnch9tK89NIsRSPzWr7/PoTjj0+twwPw2msh0HFkLooGIU4Oy10BoOPKuWmjcfPNqfex3HKpd1UZeElQ9jgH95pNyXDwRPk1uocCb5B/ByiQvCcHjxXvfvvtU+1M5pKyIwpRHEjREEIIISpSMEUDqDx6RRzr+cILp6ZjRaPsvFbZxMKfaYnHyo03g31orDwnUFRo8M32KC6EF2WCpb1//9Q2VKC33DK1nGulQTPLqRjHHhQUCjwWrKtM0chsoxErGvvtlyxM+PDDWUoF+9PWgWfPs2AZ3o+4Qk9IlytplSkac9tGY8iQVJsKntOzzyYLs4CXCGWEc+AxiT0Smbh3h/AtD3dDsSBUjeeZGQKH8klIGd6ceWhIX3YmIYoDKRpCCCFERfKqaNAwOlY0HCrHVGjZhoH2Bq5o0MCYyj6VfsKtjjwyFZJEWFCZzLbtqcTnEg4EKBKETfm5UG6ozNP2gYbhrrgsumgIZ5+d7FQG3oBRo2bthwfhnHNSXo611pp1/bGiwb1ynSyvXz/VsPr660N4550QXn99lqKBIoW1n/YdKFnrrJNaTkgSFX7g/H36pJYz4CGhwt65c0ohcCUtVjTefTflDWA52/DsUAzwjswJnqfve/nl5dtOxJC5y8OzCDGrrMH7sGGpdipsF/fW//TTqWeJl4mQuthrxffCfaGg0Ih/Lik7kxDFgRQNIYQQoiJ5UTSojFKJHDo0e/gT0CaArEOXXJKajkNogHYNHOfcc1PtJ6jkE95D6M28wPE5z2WXhXDiiSF07ZpSGvByYEWvDMKa2IbGzexHnYFKMNmXqPjT/iAGK/3jj4dw2mmpxt9U2tmecCjCsqh4d+mSasOA1+Goo1Lb0i4iWyN3lAcULUKrUCq4h/ffTz0bFJXnn082TOC53XFH6no5P9eXTdnLxhNPpDwiVPa5h2ygNKA8nX9+CG+9ld3zwDPz7Ffe3sWhgT7vnXt5++3y+3NO2pzwnitTdGaDFA1RNEjREEIIISqSF0VDVCQOnTrkkGRhEUJWKNqJcJ0oFHE7k1wgJIrMXXiKaOsyH1P8StEQRYMUDSGEEKIiUjQKxOjRIbRoEcKqq5YPdypWCN1Cycj0NM2JedknT0jREEWDFA0hhBCiIlI0RKkiRUMUDa5oNGjQINxL9gYhhBBChIceeigsu+yyYc8995SiIUoKKRqiaJg2bVo48MADwwILLGCWm3XXXTesvfbaGjRo0KAhT8Naa62locSGddZZJyy33HImG/FofJ5rj9JCFAFSNETR8Ndff5mlZsKECeHtt9/WoEGDBg0aNCQDsnHy5Mnhz/nYkFeI/4oUDSGEEEIIIUTekaIhhBBCCCGEyDtSNIQQQgghhBB5R4qGEEIIIYQQIu9I0RA1lmeffTa0bt06XEG3+5UwbNiw0LFjx/Dkk08mS+bMm2++GQ466KAwdOjQ8C+d6/wH+vfvH3r06BF69uxZYTjzzDNDr169wvTp05Ot6W+op537xx9/TJakmDFjRjjyyCMtqxcphA899FCbP/jgg22e4fDDDw9/Rx36vPzyy2H77bcPL9FzqhBCCCHEXCJFQ9QYnnrqqdC2bduwxx57WC7ydu3aha222irssMMONs/ynXbaySr3riBcf/31lrt84MCBNj8nJk2aFHbeeWdLQ8h+jz32WLJm3vj000/DO++8U2H45JNPTGmoXbu2ndPh3GussUb49ttvkyUpUCBIicjxyFrSr1+/UKtWLVOG2N+Xx4rR8OHDbRvytwshhBBCzC1SNES14JdffglTp05N5mbPb7/9FkaNGhXOP//80L17d/MMUOH+7rvvki1mgaLRqFEj22b06NFh/Pjx4aeffrJ1bP/cc8+Fyy+/PLRv3z4svfTSYZNNNgmDBw82D8KYMWPCfvvtZ/s3a9YsHHvsseGuu+76f3t3GmNZUYYB2IAgMiAKUTZHBWZEZ0YgyMwwArKoAREHxBHUREVDVNzijlsYDYpIjLjEFTVqUFyCUdwS0QSCcSG4BOPuD2Ncoon+MRr9Ya55avobv6k55/btpkm6h+9NKn373HvOqVNVt/t9v60mv/3tb1sp37uKl770pZP99tuviYTAmNDowfNBDL3jHe/Y2RcenjVr1kyOOuqo1tRtn1VoECh//OMf534rFAqFQqFQKKFRWKH43e9+N3nd617XiPHhhx8+ef7znz/5+c9/PvfuMFj1r7766skRRxzRCDZLvn07WPqFSG3cuLGFSf3pT3+aO+P/QgMxF0okLOof//jH5J///OfkpptuajuY33777a0///73v+fO2hX//e9/G/G/8847m1eFx4T3YCFQN51Ayh6Hl73sZZP73Oc+u3k01q5dO1VovO1tb5vstddeTWjtvffek1e/+tU7xca//vWvne0LX/jCzELDM956663NY/SABzyghaR98IMfbAKwUCgUCoXCPRMlNArLHkg2ki/3YPXq1ZMNGzZMrrzyyrZ5UZ+LMA28DC94wQva7rjf+9735o7uwN/+9rfJM57xjOaR+PGPfzx3dDx0isA45ZRTJuvWrWv9mbXxbBBHV1111eTvf//73NXmx3Of+9zJ/e53v8kvf/nLuSOTJq7soJ53iRUOtu+++04OO+yw5pF42tOe1gTK17/+9bbDLK8LgeF5gWC64YYbmsg64IADmqcnxMyXv/zlRYVOOf/Pf/7z5JZbbplcdtllk
4c97GFtZ9vLL798F1FUKBQKhUJhz0YJjcKyB+s6Moykr1q1qnkdeAYWA6KFkNi+ffvkSU96UrPA8wIIa0LGewv89ddfP9myZcvki1/84tyRHeC90C+k2muCZ1qTsI3U+3x4DLJ3Yj5I3OZZ+dWvfjV3ZDI5/fTT25hkD4xnIWR6j8a0ezr2n//8p/UvQ2gYsUE0LAbG5brrrmvjt//++0/OPffcJhgLhUKhUCjcM1BCo7CiQCiw4MupOOeccyYHHnjg5Ljjjptce+218+YlwB/+8IfJRz7ykVatiVck2lve8pYmPvrmuKpUErB7fOtb35oceeSRk7e//e1zR6aDtyDCthD7hWBIaPzmN79peRG5UtR8ORo/+tGPJieccMLk/ve/fxu/V77yle0ZeW6MoXA01ad4INzv9a9//dyZ00GsyD2RN+IZeU70+bvf/e7kr3/969ynCoVCoVAo3JNQQqOwooFkI9UI9M9+9rO5o+NgZecBiEpL05rrvvnNb26Ee6jqlORp4Ugs9m984xsnb3rTm9rPoUa0KCErJ4JwWajQeOELX9jCpLLQ6PHTn/609Uci95DQ4MmR0+GZCLb5wAPCEyH8aj6YB/kr8mQiWb5QKBQKhcI9GyU0CoUpmFbeltA4+uijW26HfTa++c1vtpCuoaY6FU8GD4wqVbMIDcnYyu1u2rSpeSHkeMil2Lx5c2uOn3baaZOnPvWprXrWbbfdNnnMYx7TciLGvAhEjpK4zn/ve9/bziFQhHfxFHktMV5uBa8Hz0bOASkUCoVCoVCYFSU0CoUpEJok/GcoT0GStVAsIUOzNHtf+CkpfSH5GQuBBHPeDFWgxqDCFCHCa6Ni1re//e3JJz/5yZaH4jWviecqz0ShUCgUCoW7ghIahUKhUCgUCoVCYclRQqOwR4NlP8KC7JkRjUU/wpeUemXd56Ho4XzVl+x7kc/PjfXf3hrhpXAdHhDnyAfxvtc2FIzcCN6NKDEL8iF4EHJit/vmPvE0ZC+DvjlPtaixJiclXzPD/fNz8N5o+djQJobAc+KZMzw/b0reI0QfY+zy9Y2L9zRzYYx68LyYNxsf8ir94Ac/aDkg08oCqxqW51r/Y5ycp48xHwHj5Fico/k9KpCZA8dUDhtCzLe9VP7yl7+MhsU53s9P38zXLN4uz3TppZe2dTsG49evj349yNvxmSEoDuC7k2HOhwojFAqFQqEwhBIahRUBYT1yD57znOe05GTlbmcB0oYIIolIE6L1la98pZWzRVzhxhtvbNWVkOAezkfQ4vy+wfnnn9/2pgggeEHCH/vYx07e9773tT4grggi2M/jq1/9ansNdhO3B4aqT+985zsn73nPeyYnnXRSy8N4/OMf396zk7fKVQHXIjyG+qUhtmeddVZ7zrhvhj4J49Ivr+0homqVa+q/94ZIqOfTfxv/ZSCwSgS/6lWvmjuyY/z0xdi+4hWvaIno7hXjYz6U0FUFLCC87IILLmg/3ctzRCPU9Euolw0Le3g/nl2uifXi2T/2sY+1csYgNyXfT+7Mtm3b2mvnmuv83ASivBr9DBB9z3zmM1vFLkJJn6IRUfY4MQ55t3p98n7MT27WmDC25z3veW1/mB7OIyzOOOOMth40if1ybZ7whCe03+3rYn+YmOv+fn7v14Od5Y1zwM71z372syfPetaz2hzYu0ZTyMA5ntuaLhQKhUJhFpTQKKwIhJXXhnXKryJEiwGr80te8pLJNddcM3dk0gj/1q1bGzmU0L1QXHTRRW0X7B7EQ5BAVvkMVaSy0PjEJz7RSs1mC7Xn1C9kWalYJWPzPhQs9Jdcckm7vvv0Tfla4sQ5Q1ZyCd+SzfVF1S4CAPFn5ZZcbp8OlbQyXAdRlQSvrDACGwihYafxHgj3wQcf3JLXAZG/4447GjFG4gmQDAIDgZecrozwzTff3Np3vvOdVjGs3+8kQDTaRwRhlyR/xRVXtOPWi40MjdXxxx/fxEXAbvFEQwaCbq0QSMbPOBCoAUKLcJoG4lAxgcDXvva1ycUXX9z2bhmaK4n3dlSfxWNg3CTrK1IwSwWxgOfnFYr10AsN+MY3vjE59dRT27rQN+vQfAChYV0UCoVCoTALSmgUVhQWKjSQVSIAwVKGFjFj2c2wJ8drX/vaFh7TgyUc+UMOo7H6a/E7K7NNBMNDgpASMkhyEDqvP/7xjzfyCm9961snt956a3sNRIPyvD7np4bwZ0L9oQ99qIVcBZB3ZHao3/MB6UXGiRiNFdvvdiD3Oh+/884723MgzUTVL37xi/Y7govwEjtCkVi8XTcIuM8QMKplqaAFxAwx4j4RZuQ9JYCBZZ9Y4NnhdRhrb3jDG5qnKIefgV3MCcmAPhFvnuUpT3lKO+Y81wioHsZqz0NiH5BovFS8C8Ki7JWSd0hHznmynNcTdefwHBAnef4IKt6UsRCsWaCv1jKhxCtkvHmihDkZu2kQDma95tCzXmjYm4XHMH9HjIMxNZ+Ehr1jCFHiuFAoFAqFaSihUVhRWIxHg3WaR0HYkXKufuam9CyCjwTPAiFD73//++d+G8a73vWuyWc/+9ndRE0AqVUSNwPJQyR5VcKC77XGkq/ErVyFAK+Az7uOErqejZXbbubOdYx1mhAJwRPIvwuVQsaRZuE5v/71r9uYDYGFn+eFmBhqvBnIb8B9iCPPO/T5l7/85U1UqHYVcG+bKgpvQ2hz83ki5cwzz2yf6cGrwitj3I1PhEARaRE69e53v3unsIHwxLjfUB+JDsLRuPRg6bdukHyiws8+DyLAE8M7Y75uueWW1geknviN4+Yze4kCrmlc8hq1X8pPfvKT9proIzx8rp9r0LfVq1fv5i0Z8mgQVMQhb4Y1RxTHOJZHo1AoFAoLQQmNworCmNBgObdHBKLVA/FC/JDCvgGLOrI75hkQr94T04hxH4O+yLXQL6Eq4aXQJCvzgNx0001zn94BhM6eHAgvYt43BHuI7GYcdNBBuyQ6TwPvxEc/+tFGIhHVGBOvEXV5EMKPAsbRs4jXj99zcy6RkUOnjJPcA3MG+fPg3sY/QpyAp8Y95KfwqHid29Of/vQmiHqhBvrO0k6QTEPcH/SB1yvC3/o+WhfCq/r5uqsgROVxzDenY7ADO6/XGMyHvBIhWYTvkMdjSGgEeGV832IceGKs6RIahUKhUJgVJTQKKwpID6If5CeA0IZwWCh4NIS0SOIdgpAnYTLi+Ica0cAqHQnOIHldfoWcBKFVfWMpZsXOcA2hUJJth86R+Ou64SVB/IQf5SbMpz+mElQ/XsCTgeja5E8icdzHa+FiwmX6MB9el/zZvgnN+cAHPjD36R3zQmwRDUOfR1rlJ3zqU5+aO2NHNSyhbELdxs6RWyIEqoe1QewY+6G50oinfC5xwuPx6Ec/evB+npfgsfM5uIdxIYj6sc7N+7wQsTY9Vz7H6/4afreOeiHLy5E/pw3NtVBB/QPXiNdjINzyuvd5fdV68emn0L8+b6dQKBQKhTGU0CgUCoVCoVAoFApLjhIahRUDVnlWWm3IQj8En2XlZSkOiHNnxY1YeO9JcM6h
Jaz98hEiNr2HmHfv52TfQPTNNVjO+/AW4V2s/NP2g3AN3oOx+wd8znXEzov7l+vAUyKBm8WbFXoM3vfcwmPGmmfsIZ8jhzoZY3kY7qUvQ+VZM1j5eZB4WjJY8uXMuB6wrpun6GP8FGoU+3vwLMhTGFoPxk5Y0NgzOm6Mcz5KJOXH+5pwMd4CsEac0/e9h5wP3qd8bTBOWs7d8Rlr1DU1r3k/Yu7MgdyK+dZCoVAoFArLDSU0CssewjVe9KIXtco6gHSpdKR87HxA6JDaHO7x/e9/v5WyjUpICKF9NHKOhgRsCceaHI0ceuN35T+F76i61MN1VTWyx0K07du3t+RlRJIAcZ1cqhaQ5SDM8jHE4J977rlNRAwRaWJCTkeIgThfC7LuPpLCh0JohE1JdkbmkduhlsUXgi2sS4jRPvvs02L45UIIqzFOCDPSr8JTD+LO3iDGTSjSAx/4wBYKpoJS7EHieQ499ND2ugcB8vCHP3zutx3iBiTJC2saGp/5QOwp4SpXoof7KWErmT7DGrnqqquakOhBQHpGOSQbN25s4XH2orB2FRsASeWS0cc2QgTiRjnjSNwWuicnJSqWFQqFQqGwUlBCo7DsoVqPGPqM6667rhHW+UBoIH+vec1rGtlGnFX2kZysrC0gkyeeeGLbGyKqSSHnCKMkWmVoCQ/H/PS7ylNIfi8WAFEkEBBVn5dAqwRpkFaWexV95G8oU4qgS9pFmAkiFvtMnHlNiCr38/nwxITFm/UbSZdrEh4N+QQEBEI+FqcvaVuSO4KcY/O9jhaEPkAkIc7ERAg1lnZCg3cBeR8SGhm8DIcffnjLSckYExq8DCpdeS/2slCqVVL95z//+TZPiwGRQmxFWeKANSMx374fckWyKJgmNDLM/THHHNPmJIPIUGmKwAvx0SP2MeFVgRIahUKhUFipKKFRWFFAhHkGWMJnKUfL2s6KzLKMWBIZwpmQUztHQ+/RQK6RTc35gFQje64BjiPo+tNb023EJ5HaZ5F1RPnJT37yTg8MoUA0xLUA8WTtDy/KUJMELXk3wsBUSeINQIrH2pVXXtn2Aumrcem35wtx4RlUtiJ08nGvQ2x4Ft4MxNtrnyVUjAXR57VSsFFhqgdBpBoXgUVs2ZfDa0ISeqHBo0LUSMSPEDVWftWy7GsBvFP2/+jngJfHXhsS3WP8JHYbY/eMY94nJniaPJNKW0StZw8ISVNxyjEhVOaxFyfgmcy7/gm3gs997nNtg8DPfOYz7XfPb/31SfYZRLHKWeFNKqFRKBQKhZWKEhqFFQOk8rzzzttpSZ8VSCiS2FvnA0ik0KahfITFIvZWYO3XkFACI5r3e3KsGpJ9MvSTMMgN7AORCS5iTEywfseeD7kh0HYGD8v4GFyHWDBGfmqO9fD+3nvv3V7ro/Ky7uPzSPu00CmI8ScgkPGo4hQY8mi4Ji+Gew01FaaEr/VjCTGOIRoIJBWxek9KhjkToheeLfMUzXV4m+xinvcKySCanEuEWVMhxuS0+J34IXYJuCEQjrw3PDaBEhqFQqFQWKkooVFY9uARsM+F0qmIo/AdBDTnDyCb8iIQwh7Ir1j8sCrPAoQSKeURECqTmx2eeRaER2WvxKwgPOR59GFX69ata6E1xITr5ibB+pBDDtklFIcYsEszb42cDnkrfWPtz+VLMzwDUsuTk0UY0u4coVU+E/AZIUE8SrwTdtomFoz5fELDNYktDRkXPsRTwFNBAAiDQsZtKrcQmHf3GxIaQpByiNwsMDeEBo8J74XE92jGyVwQC0TONOgPAcbDIyxP2N0YXJdHjbcob8gYKKFRKBQKhZWKEhqFPR6qK8nTkAMxRMbtiE3IsKjPCrkIQmBUKFooCCW5FhFeA4gpD4SQMEnmfZPT4b0c18/CLqnde0PnaMKF5G0MeSiINqRZSJZrCOfSnCfpm9AgDIaQLfIItY3/CD9kGDFeDHiULr/88sG+jkE+jByYoXNcjygcm3fHCdAsAjyLUDPj1o+lJgFeeFQkave48cYbm+gThkZIhgAiqHhSCB/vSfCfFcSUfUamJZAXCoVCobAcUUKjUCgUCoVCoVAoLDlKaBQKyxC8A3IZJH5LHM6hTfPBZ4U2CUdaiHdAeBAvC2v7UPOezwRY63l0brvttmZ1H2ryavI5Af2Sh8Az4NpC1TyrazrumDEQqpRD5LwvcVvlL0n3zoscDPDMuSQtr437x300OSvuEWPjGYRxZfA66fvNN988+f3vf9+OuY89MNzfT9fkAekhFIrnyb14XHgizAkvmGd0jvA485vhnkL1sqfLdcylMeAhEZ7WQ86PSl7xfJpQu/gsr5RQr8j1AePm+YRqeXZz5XX0yTVV1hpbd/leWl4bxtU1c8ie8DrjrulbvHaeZ+Mhc3/XyjBW5srYqaamOpt5NxY+Oy2p3rPktWktWTee2+9xjfA6ZRjPPA8BfRYeGDBn5kjflUR2P699JsZfCJ/zMoyRexhjY6WvuR+8bMpJzwLj7rNjeT/mMNY6Dx8Pp/lwLO6p364zNN+8cTFmY01IZ6wv1/Sdye/56RpR+MG8WpN9n91fyKIxjXNuv/329n2zJjPcxxj7jsU67Js5NDb5bwS4v+v5Lt9www0tZNUx1/PT2OhD3gPJ91h/rKN4Nq/zM91xxx27zXWhcE9HCY1CYZkBGZALEVWw7AMiHGeWZHVVjVSAEsp073vfe2eFqlkghAxZGiJejiEjOWfDP1ZVoYQuLRQIonA1eTAZciQ+/elPt7AtfRcalgm5SmGqRgWQFZWkgiApd3vSSSe114BovPjFL27VpMYgjE2+SUD4l71P5HZ4bnk5EQ6mgpnkdHtvmJcsggLbtm1rZZF7CEfTD+Pm+RBSQOSU2ZXnoiSuSljCuhAboVbIMUIkzItAmAXydmIurRthd67RA6nTr6jqFUDsrCNjP/SMGe6hj9YPIJx9VbWAOd1///3ba7lXcpWQMwTP3iHGNEA0yoPqRWDAGrr66qtbKNp8QAbluQiJm4XAq/ylbz0+/OEPt9LWQ3B9eWKA2Ho+4yoPSl5TxtatW3fZB8h85c+cfPLJrfLYNCDpxMETn/jEyZo1a1qBAsKn//4i+ooMyAMSKul7p0CBZzTPIJfL+T0hnwXut9dee7Xn7WHt+DvUw3zLuctri+Bfu3ZtE5Q9jKO/i/m7PwsIensJ9X3zXfD3jFGCcPXs/pYQNd4zDnK1fE99X8H33hiO5bz522V8/Q0mVBYzloXCnogSGoXCMgMSi2QF/JN96EMfutsO49OAhCB08lNmBaLI2tkTFXBMtaQsNPwj9Y8XeUNUiKG+EQpDydhIonwMuTPg+vqMEMiRQIJ6ocFKKrka2QsgjQh4VJIaEhrEUF9pijAJa6rKXVloIB/IZFR+IgLkZSAcRAdishihIReGAESykE8W0YC+EDwEhqpWcW+J7vZi8ezGYlYrt3vFDu2EBlJnPLPFmlXb9QkbnhSekwBijvTpY1hsx+CaclPMDYTQID68l9eTPtz3vvdtY2muracsNFiWwTnOJVa2bNnSxISqXfKYrDnVvJS
o1v9eJIFnk9dCvBGNebd64kppaOQe6c6eHgTRWjFerk9g5jEbExo8EESRPB6vkU73QfxtaplFhKp5l1122S6eEQLUMdZ5mCY0rI/jjjuuiVBeEcJYi/LRvo/6kr/7xs4zGQtzbc6V9Q4SzbBx9tlnN3FljhYC32ViNcOYWXfG2Lwq/uD3uJ/5lu8U3x9zaE5918bgeXw//H0Ac0UcrF+/vpUPV5UvNxUKrR0/+++pNWd+rduANeHvgLlzbQUhfMb6t56IEVX2eEr87XMsN3Pgb54qgM7xjIVCoYRGobCsgShv2rSpkbaAcAv/lJFx/5iRmJ5sLUZoIDrEBjKnIdda/O49pB4QQdeWQC/EYFpjqfTPO6MXGsJ7WHh5DBAoZLIXGqy3rMZZNPiHjuBGOdpeaAhjQBZYQhEAz8ia+6UvfantccHSiySxegZ4Tdw/PEqslCpCRWI5IrkYoYGEXnzxxe06frKeBvwe+7ogosYZUSWSkBv9dN2wQE+DMUfsnA/macOGDe06yDcgf4hUJtnG3t4mSBZhg7zNYpVFHlnVI0HeNfXVWCHl2ZqsL4QGIhpC44c//GEj5D6PDLMkG3t9YJFW2UzoFGLuJ8GtIYvG0PP286Av5t1a0qwbY6Kf1pRjSKH1QDiCsCzCJYe+IPLGhHcIeNv8HmDlVlgge130y1iGYYB4yrvPmwP31e+AdauPUZBiTGj4Lnhm/dGMj3VNICjTTZix1Pt7Qaz6nvpuER8+7zvlGgi2PgQZZigwDkPzbdx4YHKzASkyH7/b68bfCDC3xijGFRwzJ0Hs/T0g8PPfBX8TXMsc8PYZC+eYd+e5BzEesE6Nv+Ou47l4IwgurzXfAessi8UA8WkduCdBpP8RLmdN+ztLjAUICXvsEHp5/yKviRTzzcM2nyeqULinoYRGobAM4R8+8oAUZ0vzrFiM0OiBJPpHenegFxoBhGEsdMqzIP05VAsxQJh4W2BWj0ZGHzrFgyAEJginfgaJuiuhU7wMQ6FTgFgSP4AA8Xh4XkRoIaFTSDlRlL0QY6FTLLOs24gd4qi0MEFCdHl+1vXsjRgCIsd6nve2GQudsibt6B9WayEo+mrOkcE+dAoyQWQ1JsZis8b54Dvk2YkTY20OhdmFQCHaXDPD2Jvv2IcmN94P4iTEWsBzEv3mbOgcAj2TbuuLByELTUKZtyE8WdM8GvrsO+H7MdTMiXWZ5844Oo60m9deUOi/kCXkvYfrEEX5eqrU9espwxpH3omeEI3R/M744Hs69P2ZFfpj3Zgzc6yPjC5HHXVUm1vHiFh/A4bu4/O+U+Y6N/PIwMIbRlRkEGY8tMIxGQ6OPvrodj8C2THCZKxSX6FwT0UJjUJhmYGL/sEPfnAj3Mguq55/pkMhImMYExqIOAuc9zOQPWE5iGw0/2z988zHfCaskMgEcoggTcMQWZWIiYQJK2GVRUgQL89rvxP/7JEm1w+hAcaBBRMpQiKIIdcIDHk0kAmCDQl3n2iSgQkewiULDSRM6WIkA3FAwnwWCSNKWI4lkI4JDZZ5XhaE1n0kkF5//fWTww47rD2bcUO2s9AghEJ8IKGuD0QAazlS57g56IG46S/io7/93CLbwlQyMfQZngVW2iF4rn322afNQY8g/ATVUH4EocGbYMwC+sgyHuTZXFx44YU7PXXENIt+Fp7Wruf3bGAd6Vc/5kPryxol9mwMOQZikdcmlw323TPvY2RxKHTK3Ol3fC96uJ6QuQzHrElrRA6FfmavyDShgewSDUPNe/pn3PLcCanieVmM0YK3yUadvSibBt8ha8B3W79CBGl+93dEDlDMpe+WtZj/1gw158XfNN8jXk6C2XvO5xWV5+RvRj5nSBT5W3HQQQe177fvnu9kNH9//J3U/wyezhw+2sN3NEIIC4XCDpTQKBQK7R+9f9y5sYJq/fGwenoP6fbPN/+Tzs0/8Z5kTQOCwgKJoCN7wlkQnVnh3IXcD5B7YVTzATlHkJEbYR3O60n9NPBYED7OEVvOujsfWL+RdGNgrBdDFIkpnpQhUeS5eXyOPfbY5oXYvHlzC6MRB5+9CQuBOeThGMvtIAz6OUXSPV+28usvMcAzgPj1a8sx3occZ59h/XgP6SYoPJ8QNTvwE0nEQXgQAkipsESf5YnIDcF0PHtvQMiMdT7tHNXLFgK5IRF6tVD47hC2eYyJct7JfgyjseQzbPh+9yAweJGGzouGqIcHEKwdYUa8sv2YRHNe/v70f2fGWnhjrKOh98aO92C00A8ePPPUN+tk6G9JeAJ5PIRk8ujJbyEeeWoKhcKuKKFRKBQKhUKhUCgUlhwlNAqFZQjhAWKMue5Z0FjlZgELIUsdayjr+6zeACEXLMqs7tGE+egDsBIKyxlKRvae0ATv5/Nz855woLAs+ile3HFWZxb32CMhLKOuyXocHhShY55Jgm5AGIaQE30A4RNyKOJ3llXHWJSFqPTNuZI+WdKHrJ4g3IOVODwQ5kI4lLAMfZNka44CQpXE7csbGbpn9CUnSYPrxV4Rxl4YiOcXziMMaSycxzO6pnAmY20ehd3k8TfG/T4E4Fkib6SHXIS8fsT2x/WEC/mpr2F5NxbOySE25lAoCe+DcBZNjHv0T589q5/G0Lz5mYsf8HYYr7H15dlckzcLjIc1YuziM/E6QsViv5XwEKm2xootxr/3Uhl3z+gzmr7IL/Gc1o11IbzRmujPFUZmzfbeK+dap3ktFwqFwp6IEhqFwjKDhGehDBFjjbyJd54v1AZ5ktQbxI94kGyLpC4mDEZIASIJzhcCghAirAu9nhCVAw44YPA8BPHQQw+d++3/QGyFL4TQQODlbAhZEE4T4SqPfOQjWz6JMVNVSDWchUD4kDCOsRwYQsJ9CRIQ0iNkB1lGjIWkxDjNAiRVXkkub6tykIo1wpyGIEzLc0X50zEg8vYm2bhxY8tRQKynzZXP2+dgSIQY51iDGZ7ZvgkBoliugeeRH2PdAYFgfoR/Zfj8qlWr2mv3NY/IOOIt2VquSZ8HMQ3WltCdvrBABuGnaIAwNCAc3YtIiVLSRIFcBrH5YyBG5eZIaie6rRtCcKw4gPkUWpiTwYGAU+ggby5ZKBQKeyJKaBQKywyIWSQ8AqurGHqeimlgCSYysnVafLpkZyRqViCnksAlq2bPALFDBIn/HrP+jwG5ds0M1l9Wf8TSvSRR83JEXwkQSdohNJBE15B8jhzKlZDQSUwRHmLrCZOc2G0sWKkRYImaUe40GgGhzj5yOWTVh15oINAIJmIpkViFJl6EAM+NPrCQX3rppbvcz++SoOVFyAcIuLcYb2RYXwgZYiSSho0963nMR4a1oj8+xyOSrerGk+CwDtwvn+89uQuIvv7yXmVRMiQ0/G4c5TiIvzcHBBrvjPck+IbQQKYlwPcCTn8VKrCGrG1ihNDgWfFaRaOFCA1rSz5AlIYNuC9Pg2tJRM/v85goEkAgWW8xLpL9zc8YCAffAeuBoDPH8m3GhAbx85CHPKQJyfyd9jlVmY488si2psfyWQqFQmGlo4RGobCMISTGPg+IWIB1NIioqj
8IT0/mkDyJjqzts1SLEUKEqCOPSFtPaBFQZDYn6wYQNQnELLTR/H7CCSc0Yu135BS5jp3A9ZdgyVVdEE9hU4g9CIvizQmhAZ4LIUYUfY5l2tgglM4lXPIGgUi3mv7GgjiRrOlzwpSc43dlLr03Jp7ci0hguQbjyXviGHLsWeM9UCWJhV3feCDcjwgknNzPfd0/qhTx1BAxRBMLO++In/GaF8kzETCulccD7MshNMk9iQaNsBKyRIBFRR0kOu6J3CLI+Vr66x5xzPrKyecSYJHpgDWi31Ehi1hyTqxFgkPlqrinNWSsCUXrQH94EAhBe024jvm1T0mUMEbIjYP1TkhFiVnCiwgjFpSsveKKKxqpJ/x4Sawb14/1pa/5WYkuY9tXI3Jf+yqMgWeEaBBCReDwVmSPRjwr7wyPoLkD4t0ceHaCzOd8N0K8FgqFwp6KEhqFwjIEkqaMKxKJfC0E4ueRr2xBnQVEhHAg5w81pHAsphxxygRf/4X6BPEagv6xpCNlxAJSGE2VHLH7NhrLVnaiBmlnjefhyQ0JR/JZ6QPOZYXWb+/zivBA2FcC4XMtx4Wl9fH1AcJLueHYv4HQmBY6JaY/ym4SMAgw0snrwXLtOKGRrd/Gye/xrIiwe/jcLCBWCFB5DAQLDxgy73f5Ce7ZCxShWoiy/rP652YHdHOTS78Ccs+i339eI0RUdYrnCo9GeDgCfudlIZKJT54TXi0eLSLJOHuegPHmeTCmXgt1CyFCBOZyxgGChhBUSYkw9TzuBUSCew+FqU3zaBAWPE7ElHwPwsEY8UAOeTTkcgwJczDfSuVGmGOhUCjsqSihUSgsM7CSH3LIIY1MsZZGMutQLfgMxM6ma8I5nIP8IppIWpBMgoH1eyGhVIAAI8u51n8GErjvvvvO/TaO7ClB9HkaWP31nSU6Gqs4wUE0ZILMI+AYQYFESpjOjZck5z4I6eH54BkSViRfRT6HEB/XdyySs/scGKIE8TSGwAPAIo08IttKkHoeoU5huUaEEVvk07V5CQgbfWa5N36OuxbxE14UFn3hOCHkHDdHWWTlscsw9o94xCOmipJ169Y1T0EeS8SeQBu7LpFnjjJ4EsbEpv7alDCEhbHgzcoizDGCVYjbkIBG1COEKkB08BrZfXka8nMQGoQAQeo4bwkvlOvz8hgPc5u/B9ZdeFkyjIHvIm+fuQvIDSHUCGXXN4dZWPse8sJMw9jYFwqFwp6CEhqFQmFeIEQII1I7BKRRgiySxlrdN8dZ2LOXBSnjtUDGxhoyn8EKTSwheMh+35BIuyyHJV6/kXU/c4v3osVnZoEQIQQ9Eq0Ji6hmlK8ZLTDtPSJBuM/Q2GmIrpr+faWqAJHE84JMGwNiUpgRyz1rPst579EI4TQ07przejFgDQhTGvq8pp+ZvCP2PAlEQr6WkCzzpJ8IvH4LrxPmRiRkWCeEG8/Q0PpyzHvEYsA6sbasE+PAS6LvKkOFoBTKZgzAHBImhOqs0C95HzwdEsP1O+f5ELLER9/faMbXOioUCoU9GSU0CoVCoVAoFAqFwpKjhEahsAwhb0DIDquscJsckjENypUK47Gzs3AOOQE5/GYa+lAkoT4REsT6LkQk76DtuuLdeSpYrP0U9y+vxO+aa7AiZwifkc8hpp0XhGU7rMzyG4SJjSXJ8qiwErs2C/61117b8gZcxzEhVfo5BPH8vAY9WJ5zfokQNaFn+sUSzvLs2cThR3lUYUh92VahNz4j/MZ5QrLyvOlbDr0JCO/xvDnx2hiElwR4duRZBKwPnp0Ye/H+cjpY1wP6n3e+5lHwWaFJMcfGQz/D2yGsLOdlGBtWfp/lrYhzY36cx2PRFyMA68P5ckDME29FTu53TpSJDXhPsrznkQNj/ZhnjUfHM4+FEbqeOTHOsT6uueaaXbxiQu98p7I3iQdGSJ3xco71xUPiGo6Z65x7EXCe7xvvib4JkTOXihhE+JjvrvDHPk8lQyie9eZas4AXxDOZe/01L55TX/XZOJufvPbMg+eOee+bcZbbM1Z5rVAoFBaLEhqFwjKDhFOkJQifJF+VcOYrgWmvBbHn8Tlx/ieffHIj0WMVlaaBQIhYecRs+/btLRF2CEglwnTaaae1+PtMcDOE/lxyySWN3ASIDMnAhACyLowGuZ8GpBLhFHYjbwDhygnEPRBYIT85UTwQITZDkJyMfAMhoKpVVBRC7gLEk+TjTGpVJ0ICg7ydffbZjcT2IE48A4JM2CGmj3vc4xqBR9ARyvXr1zdRNw32zjCuiCairHRqLulqnI39NNKrSlSIvh5nnXXWboTbs9lTw7xlWA8qNDknYA48Q+xzYi1IplZFSigRINtK8XoG+S3WoEIB1pb17bsgRE7y+SwwlvJG7FdhLuQESVgPCOUzx75vYyDa5Yj040Ls6J8kf/NEUBFl7hn5Mog/8d0LI4KauLHu5OaYFz8JL+fO8n01j8LBnCcfxJz73mURNSuIWNcY2xSyUCgUFosSGoXCMoPk7RyLz4quVCxL70KA7NkjAvFZjNBAsIJkDwkNxBEJZsFH/BBLDTFExn0euVOeNCCxFuHOFX8QePHxqhYREGNCw/3kGiD4niknKiP6yChih7h79gArv74oJypen5DLom1IaLAGI7uETJQJRoIj/yCs9AECQalVfQh4bsnQ5hPGhAZPkhK02bJPZDjGsk88EJBD3hDE0PvIu7F3z8hZOOKII3YRGtaRBPDFCg2Vunqr+5jQAPNKdBIRxofVH6EOj5P3kX5eBjC/Es1DXLDKmxvjoxGW8jBUJOM5GPIygOt4bmtV89q60A9zedFFF819ckcOiXnUD9c170HUrSnfOfNq3eTck4B1Txjpr0phvg9yTYgI8MzWELERO5cTWwSJ+c7rFNzD8+tvbCTYg9eG98J46CPxS4A5lxjzHu9E9jLxdhF0vgeKF+Tme+f7R6yPVV4rFAqFxaKERqGwjIHsI5FBxgBRk/DLYm6vBMQikwqhFYiKRGBN1SXkayFAgB71qEftJOS90PC76k5CLpBhBAuJVmrU7txIi2PeE24TSeAED88FCz6Spp/ETFyX5XdMaPBYIEVCswgYBBEJ3LJlS7tXhJA4HntxIIr6kgkyUqzvEYqE2OXQKcSUZTmIIRA1wmAinEkScQ6dQvCJgxAVwNJNaIT4GBMaBIR5JF7MpTEz54hvAJmNZwLhPDw0+jkURgSumTcFDG8Bwi0J21gSJMh8CNExoeF9giInXMM0oQHIr7VkfWaxy1tHPOh7kG3vq0BmfU9rBAgi3wsNz8crJME9j1WG86y73uqPYJt3oYHCu4RtEaxCjpD5MfAoIPHCAK1lgoTwDpFgvvVHOBVYP0IarVfneD83a9ca8BzWTU/8/R0ggH0/IkzR34L4zkTYnrm1fmLMeT2sF2vY94840cyn9e81sbTQvxOFQqEwH0poFArLEEg1CzKL6DSik4GYIEd9PseDHvSg3fZDmAZkZtOmTS10JTDk0XAMMRyzvC4GiKdQlrHQKeSSNwNZJCAQ5hgnlnzH+v7wgug3IhfCKBohR7AhbRmIF
ysvIt6fIzxKmJRzA8Sd68h3COiL5wiRNSY0wFizcPcEOID49uVrkWbP7x7Gw7ghnMZBeJH56svTBnh3CKWeWI4JDaFiSKxd2LM3aJrQsG5Z0ZFeQHpZ3Ik6RFr/WOWJh+iH6xFrRBoSn5vPGftzzjmnibIh8AQadwKb8BQSZUz81H+iIHvYrBXza14Qf/3iSSLSQgxYA0REXw0LiFX3sVZcOwwANkTkOXNv45wFA28DwWfNWmfej0YIEATmJ+fk9OAhIhj1izjRhxD7hErveeL54kW0Q7pzPbe/MYQUg4Jns057D0uhUCjcVZTQKBSWGRCHAw88sBEWr1ktEauxkJYMpE6ZUeQIaUKYkDQEDlhDWTeHxAvih2QhLz3h9budw10v4JoIt/OmIcgLMonQsLh6pmhIPpLm+q4lrr8n/oCsIYU57r+HpFo7qWdhhbyxNA/t2wAs697PQAb1YSw8R7/NTYbwFQQXURSK5XXekG2a0PDsSPe00JV+TuRxGK+cRB5AGnmWWP/Dqq24QHiChGJt27ateYB4XZBwx+91r3vtTHgHpNx6yuV7nYNQw3xCQxiR+Qbn8jDxQPEomRfXDiECCLLQJiFWPD36n5tcC6IsxFsPnowTTzyxXbuHsSVUXHvsfOCBIKyWAoSpsc3En4eP0BBCRVgL7YtGDPiOCnHLXswM82W9jBkPfN+J7xDc7mGNuy+Po++a7xGhbC7l8oSgniZ2C4VCYTEooVEoFGYCEsKDEQjiyNKMkA415JYVe1YgrsJ9cr5ChvcRMHkJLLis1xFuhCghbz3pZeVGUFUs4hnILY4JG8ngCfBc084ZI4JjsFfDmPfHcX1k7R8aRyE9PBq9B0IVLuKKgDEWiCNLub4vJrEXue89YtNgDTgnhGwGgWlc5SMYMyFa5kLYFyGnIcU59IsYsF6ELzlnqCHMkQMxBOLQNRBp4glxZ/3n7Ztl3wprOufa3BVYx0LwsrCXsG1++nUVzfMRStPmj4D1OaFzvgee0WvfA4I3C+SFCIcSGYVCYalRQqNQKBQKhUKhUCgsOUpoFFYEJFOyTgrZUCllKDRiPrAqsoSzGmaw0gsdyJZ31W+EHgntsE+DkCLJvsIOhKuIgRdiNBTO5D4sssJnhNH0TWiGsBVVggKsqMJa3EuZSfH2wiNcJ6oECTHJHgX38Z7QGNV9WI/9HhZ94RquM2YZFY6V++X5WEdZzVk2JZP6PeAYi7JQmGhyA4RPgcRf+QKepVAoFAqFQqGERmHZQ0z28ccf3zYpA2Qa6VaqchaILxemIXRCkqUEVaELhIXwDlVXxD0L4+iBtBMmEjtVOJLkKRl5Wiz9LJBsrfrMEI499tiWGNpDImcO6RCWYWw0fRJ6I6REaIpQFiEa8hlyKdlpENctmVn8vCYcQ57BNAjRkEAMQm7su7GQUKlpEB4iprxQKBQKhcLKRAmNwoqDpEkehZxEOguUJF29enWrmZ+TQZHqXmjwVIg9R5olESP4YuBV3JGEqUQkgj+UiDsLnN8npPLSnHnmmU3YqCYkGRzZjrj8Cy+8sFWJCfAwEB8q47ie57vgggtaXoRzVJWxgZ4+E2c5/tozSIzlnZGw615yI3g/ouIPjwqBleEaErqj9GovNJStJXjC0xIwTpKheaMkWXtWYk1cOS+VeHP5BTw6RBJvjOMHH3xwyzmQu6Bv5sF1MvTVOADRaFM+95G3QJzyHOmX68jn4JXiVbKOCoVCoVAo3H0ooVFYMUBWlWYU/iQsaayCTw/WfiQdmUVuVdUREoTYIqLKaWahIUwLWUdKiQtkGJn3ecf87rjQKoS5T54lXCTA8qBornv++ec3giupN45L4g2hQgwg8PlaXjseG4UJjcrChgA69dRTd3p6QLUcYVDOJTTsDM5LoVRmLl3pmhKQPQfBIZHamORmfI1dBgFDgISnw1gSA+CeRJzxkficvT76st9++7XkZfBZ4WqbN29u3iUeJ2MjRM0xIKoIwwzlQNeuXTv32w4IL+MhAoJERaU8TsK9VCLKx4R42UitUCgUCoXC3YcSGoVlD5Vi+hAeFX9Y21VYmQ9hyZfTwJqOiGcgtK6F+Gew7AuV4j1hAUewWdwJBmVep+WJqNvPIxJw7nx9JZ5sXheVcgiZaMj7mjVrdqu2w8qvpOfpp5/ehJAcjvCAEExKoOZdxjOQfZ/nNRlqSrgSYBnEivGzPwDYiC32VXA94WxDoVPCuFatWrWzOpHPeibX118iQD95eWKTuSGhYcw3bNgw99sOqLQT4oRg6cOtCCnjk8EbJk+mUCgUCoXC3YcSGoVlD5ZxZUvlSLB4C/NBdLO1nYeA9b7f9wAR9p68DPkZ69evbzkQ9pI477zz2nF19b0mLAJKY0rKHiuHyWshXKcXJ4EzzjijEWlApIkdzeto2cMAQoV4UngqeBwIo2iwdevWXUKnhAIRQkQPb4GNzAimU045pY2VRHaW/rEcDQLBDtRyWHIt/2hKvIY3BeycTBjwGBxzzDHtXsKueGuEbNmLgodlmtDIXg5zQ0SZA9clUoRyuTcQHRLUeaMc97ux4NFyzL0l0LuvuYVZhYbQqhIahUKhUCjcvSihUbjHg3UduZVXkCGPQU4Aj4HwIxuGCd2RDyGMaVqStZAi4UNyGYaa67pnFhuItONyCoYacTO26dwQXFuoElEzBCJC2NjQvaLNsu9AwP08NwFRKBQKhUKhUEKjUCgUCoVCoVAoLDlKaBQKhUKhUCgUCoUlRwmNQqFQKBQKhUKhsOQooVEoFAqFQqFQKBSWHCU0CoVCoVAoFAqFwpKjhEahUCgUCoVCoVBYcpTQKBQKhUKhUCgUCkuOEmlU9LcAAACPSURBVBqFQqFQKBQKhUJhyVFCo1AoFAqFQqFQKCw5SmgUCoVCoVAoFAqFJUcJjUKhUCgUCoVCobDkKKFRKBQKhUKhUCgUlhwlNAqFQqFQKBQKhcKSo4RGoVAoFAqFQqFQWHKU0CgUCoVCoVAoFApLjhIahUKhUCgUCoVCYclRQqNQKBQKhUKhUCgsMSaT/wE1GjVih3ruLwAAAABJRU5ErkJggg==) ###Code from sklearn.cluster import KMeans model_iris = KMeans(n_clusters=3, init='k-means++', random_state = 0) model_iris.fit(x_iris) # 각 데이터가 어떤 클러스터에 포함되는지 확인해보자 print(model_iris.labels_) # pandas 데이터프레임의 groupby 연산을 활용하여 실제 붓꽃 품종 분류값과 얼마나 차이가 나는지 확인해보자 df_iris['cluster'] = model_iris.labels_ print(df_iris.groupby(['target','cluster'])['sepal length (cm)'].count()) ###Output target cluster 0 1 50 1 0 2 2 48 2 0 36 2 14 Name: sepal length (cm), dtype: int64 ###Markdown 
클러스터의 label값은 단순히 클러스터를 구분하기 위해 정해놓은 숫자이므로 target값과 꼭 일치하지 않아도 된다. . 분류 target이 '0'인 데이터는 모두 1번 클러스터으로 잘 군집화되었다. 분류 target이 '1'인 데이터는 2개만 0번 클러스터으로 그루핑되었고, 나머지 48개는 모두 0번 클러스터으로 그루핑되었다. 하지만 분류 target이 '2'인 데이터는 0번 클러스터에 36개, 2번 클러스터에 14개로 분산되어 그루핑되었다. ###Code # 이번에는 시각화를 통해 잘 군집화되었는지 확인해보자 plt.figure(figsize = (10, 5)) # 클러스터 값이 0, 1, 2 인 경우마다 별도의 인덱스를 추출 cluster0_index = df_iris[df_iris['cluster'] == 0].index cluster1_index = df_iris[df_iris['cluster'] == 1].index cluster2_index = df_iris[df_iris['cluster'] == 2].index # 클러스터 0,1,2 에 해당하는 인덱스로 각 군집별 sepal length, petal length값 추출 plt.scatter(df_iris.iloc[cluster0_index, 0], df_iris.iloc[cluster0_index, 2], marker = 'o') plt.scatter(df_iris.iloc[cluster1_index, 0], df_iris.iloc[cluster1_index, 2], marker = 's') plt.scatter(df_iris.iloc[cluster2_index, 0], df_iris.iloc[cluster2_index, 2], marker = '^') plt.xlabel('sepal length') plt.ylabel('petal length') plt.title('3 clusters visualization') plt.show() ###Output _____no_output_____ ###Markdown (심화학습) KMeans pseudocode ```class KMeans(object): 학습 과정 def fit(X_data): 1. 각 군집별로 중심점을 랜덤으로 초기화 2. 반복 학습을 통해 최적 군집 찾기 2-1. 모든 데이터에 대해 가장 가까운 중심점이 속한 군집으로 할당 2-2. 각 군집별로 새로운 중심점(평균값) 계산 2-3. 기존의 중심점이 변경되지 않을 경우 학습 종료 3. 최종 군집화 결과 return```참고 : https://www.python-engineer.com/courses/mlfromscratch/12_kmeans/ ###Code import numpy as np import matplotlib.pyplot as plt np.random.seed(42) def euclidean_distance(x1, x2): return np.sqrt(np.sum((x1 - x2)**2)) class KMeans(): def __init__(self, k=5, max_iters=100, plot_steps=False): """ [hyper_parameter] K: cluster 개수 max_iters: 반복 학습 횟수 plot_steps: 군집 시각화 여부 """ self.K = k self.max_iters = max_iters self.plot_steps = plot_steps # 각 cluster에 속한 데이터를 담는 리스트 self.clusters = [[] for _ in range(self.K)] # 각 cluster의 중심점을 담는 리스트 self.centroids = [] # 학습 과정 def fit(self, X): self.X = X self.n_samples, self.n_features = X.shape # 각 cluster의 중심점을 랜덤으로 초기화 random_sample_idxs = np.random.choice(self.n_samples, self.K, replace=False) self.centroids = [self.X[idx] for idx in random_sample_idxs] # 반복 학습을 통해 최적 군집 찾기 for _ in range(self.max_iters): # 중심점과의 거리를 기반으로 군집 형성 (create clusters) self.clusters = self._create_clusters(self.centroids) if self.plot_steps: self.plot() # 새로 형성된 군집의 중심점 계산 centroids_old = self.centroids self.centroids = self._get_centroids(self.clusters) # 중심점이 더 이상 변경되지 않을 경우 학습 종료 if self._is_converged(centroids_old, self.centroids): break if self.plot_steps: self.plot() # 각 데이터가 속한 군집 index return return self._get_cluster_labels(self.clusters) def _get_cluster_labels(self, clusters): # 각 데이터가 속한 군집의 label을 return 하는 함수 labels = np.empty(self.n_samples) for cluster_idx, cluster in enumerate(clusters): for sample_index in cluster: labels[sample_index] = cluster_idx return labels def _create_clusters(self, centroids): # 모든 데이터에 대해 가장 가까운 중심점이 속한 군집으로 할당 clusters = [[] for _ in range(self.K)] for idx, sample in enumerate(self.X): # 가장 가까운 중심점이 속한 군집 index 도출 centroid_idx = self._closest_centroid(sample, centroids) # 해당 군집에 데이터 할당 clusters[centroid_idx].append(idx) return clusters def _closest_centroid(self, sample, centroids): # 각 중심점에 대해 euclidean 거리 계산 distances = [euclidean_distance(sample, point) for point in centroids] # 가장 짧은 거리의 군집 index closest_index = np.argmin(distances) return closest_index def _get_centroids(self, clusters): centroids = np.zeros((self.K, self.n_features)) # 각 군집의 중심점(평균 vector) 계산 for cluster_idx, cluster in enumerate(clusters): cluster_mean = np.mean(self.X[cluster], axis=0) centroids[cluster_idx] = cluster_mean return 
centroids def _is_converged(self, centroids_old, centroids): # 기존 중심점과 새로운 중심점과의 거리를 계산 distances = [euclidean_distance(centroids_old[i], centroids[i]) for i in range(self.K)] # 하나의 중심점이라도 다른 경우 return False return sum(distances) == 0 def plot(self): # 군집화 결과 시각화 fig, ax = plt.subplots(figsize=(12, 8)) for i, index in enumerate(self.clusters): point = self.X[index].T ax.scatter(*point) for point in self.centroids: ax.scatter(*point, marker="x", color='black', linewidth=2) plt.show() import numpy as np from sklearn.datasets import make_blobs X, y = make_blobs(centers = 4, n_samples = 500, n_features = 2, shuffle = True, random_state = 42) clusters = len(np.unique(y)) model = KMeans(k = clusters, max_iters=150, plot_steps=False) y_pred = model.fit(X) model.plot() ###Output _____no_output_____ ###Markdown 데이터의 개수가 많으면, K-Means의 수행 시간은 어떻게 달라질까? 데이터가 매우 커지지 않는 이상, 큰 차이는 나지 않습니다.**[잠깐! 상식]** 코드 수행 시간을 체크할 때, 보통 time 패키지를 사용합니다. time.time() 은 해당 현재 시각을 반환해주는 함수로서, 이 함수를 수행시간을 체크해볼 코드 앞뒤에 넣어서 시각의 차이를 계산함으로써, 수행시간을 체크합니다. ###Code import time start = time.time() model_r = KMeans(n_clusters=3, init="random", random_state = 0).fit(input_feature) end = time.time() print(end - start) # n_samples를 20만개로 늘려 새로운 가상데이터를 생성해보자 input_expanded, _ = make_blobs(n_samples=200000, centers=3, cluster_std=0.8, random_state=30) plt.scatter(input_expanded[:, 0], input_expanded[:, 1], c='blue', edgecolor='k', lw=0.5) plt.show() start = time.time() model_r = KMeans(n_clusters=3, init="random", random_state = 0).fit(input_expanded) end = time.time() print(end - start) ###Output 0.9078826904296875
notebooks/misc_figures.ipynb
###Markdown Misc figures of AU-emotion mappings projectJust for documentation purposes. See `figures.ipynb` for the visualizatin of results. ###Code import pandas as pd import seaborn as sns import numpy as np import matplotlib import matplotlib.pyplot as plt %matplotlib inline matplotlib.rcParams['font.sans-serif'] = "Arial" matplotlib.rcParams['figure.facecolor'] = "white" ###Output _____no_output_____ ###Markdown Figure 1: overview methodMade in google draw, but used the mapping matrix below: ###Code M_classes = ['Anger', 'Disgust', 'Fear', 'Happiness', 'Surprise', 'Sadness'] AUs = ['AU4', 'AU9', 'AU10', 'AU12', 'AU23'] M_mapping = [ ['AU4', 'AU23', 'AU10'], ['AU9', 'AU10'], ['AU12'] ] M = [[1 if au in mapp else 0 for au in AUs] for mapp in M_mapping] M = np.array(M) plt.imshow(M, cmap='gray', aspect=1) plt.xticks([]) plt.yticks([]) plt.ylabel(r'$Q\ \mathrm{(emo.)}$', fontsize=25) plt.xlabel(r'$P\ \mathrm{(AU)}$', fontsize=25) plt.show() S_ = [ ['AU10', 'AU23'], ['AU9'], ['AU12'], ['AU4', 'AU12'] ] S = [[1 if au in stim else 0 for au in AUs] for stim in S_] E = [0, 1, 2, 1] E = np.array([e == np.arange(3) for e in E]).astype(int) S = np.array(S) plt.imshow(S, cmap='gray', aspect=1) plt.xticks([]) plt.yticks([]) plt.ylabel(r'$N\ \mathrm{(stim.)}$', fontsize=25) plt.xlabel(r'$P\ \mathrm{(AU)}$', fontsize=25) plt.show() norm = np.sqrt((S ** 2).sum(axis=1, keepdims=True) @ (M.T ** 2).sum(axis=0, keepdims=True)) ϕ = (S @ M.T) / norm # Alternatively, using scikit-learn: # from sklearn.metrics.pairwise import cosine_similarity # cosine_similarity(S, M) im = plt.imshow(ϕ, cmap='gray', aspect=1) plt.xticks([]) plt.yticks([]) plt.colorbar(im,fraction=0.05, pad=0.03) plt.text(3.5, 1.5, 'Cosine similarity', rotation=90, va='center', fontsize=20) plt.ylabel(r'$N\ \mathrm{(stim.)}$', fontsize=25) plt.xlabel(r'$Q\ \mathrm{(emo.)}$', fontsize=25) plt.show() def softmax_2d(arr, beta): """ Vectorized softmax implementation including an inverse temperature parameter (beta). It assumes a 2D array with stimuli x features (similarities). """ scaled = beta * arr num = np.exp(scaled - scaled.max(axis=1, keepdims=True)) denom = np.sum(num, axis=1, keepdims=True) return num / denom pE = softmax_2d(ϕ, 1) im = plt.imshow(pE, cmap='gray', aspect=1) plt.xticks([]) plt.yticks([]) plt.colorbar(im,fraction=0.05, pad=0.03) plt.text(3.5, 1.5, r'$p(e | \mathbf{M}, \mathbf{S})$', rotation=90, va='center', fontsize=20) plt.ylabel(r'$N\ \mathrm{(stim.)}$', fontsize=25) plt.xlabel(r'$Q\ \mathrm{(emo.)}$', fontsize=25) plt.show() im = plt.imshow(E, cmap='gray', aspect=1) plt.xticks([]) plt.yticks([]) plt.colorbar(im,fraction=0.05, pad=0.03) plt.text(3.5, 1.5, r'$p(e | \mathbf{M}, \mathbf{S})$', rotation=90, va='center', fontsize=20) plt.ylabel(r'$N\ \mathrm{(stim.)}$', fontsize=25) plt.xlabel(r'$Q\ \mathrm{(emo.)}$', fontsize=25) plt.show() print(pE.round(2)) print(E) from sklearn.metrics import roc_auc_score #mask = E.sum(axis=0) != 0 #roc_auc_score(E[:, mask], softmax_2d(ϕ[:, mask], 1)) score = roc_auc_score(E, pE, average=None) plt.bar([0, 1, 2], score, color=['tab:blue', 'tab:orange', 'darkred']) sns.despine() plt.xlim(-0.5, 2.5) plt.ylim(0.4, 1) plt.axhline(0.5, ls='--', c='k') plt.xticks([0, 1, 2]) plt.gca().set_xticklabels([]) ###Output _____no_output_____
_doc/notebooks/git_dataframes.ipynb
###Markdown Git in DataFramespython + git + dataframe = [git-pandas](http://wdm0006.github.io/git-pandas/) ###Code from jyquickhelper import add_notebook_menu add_notebook_menu() %matplotlib inline ###Output _____no_output_____ ###Markdown Repository ###Code from gitpandas import Repository tries = [".", "../..", "../../.."] err = None for t in tries: try: repo = Repository(working_dir=t, verbose=True) err = None break except Exception as e: err = e continue if err is not None: import os raise Exception("issue in current folder '{0}'".format(os.getcwd())) from err repo.branches() ###Output _____no_output_____ ###Markdown One funny function. No idea if that gives a good estimation. ###Code try: use = repo.hours_estimate() except Exception as e: # Not always reliable. print(e) use = None use if use is not None: workdays = use.hours.sum() / 8 else: workdays = None workdays ###Output _____no_output_____ ###Markdown Not sure what this number reflects. LogsThe following cane take some time depending on you repository size. ###Code try: hist = repo.commit_history() except Exception as e: # Not always reliable. print(e) import pandas hist = pandas.DataFrame() hist.head() try: histf = repo.file_change_history() except Exception as e: # Not always reliable. print(e) import pandas histf = pandas.DataFrame(dict(filename=[""])) histf.head() histf.tail() ###Output _____no_output_____ ###Markdown Check removed files ###Code unique = set(histf.filename) len(unique) import os sorted_unique = list(sorted(unique)) full_path = [os.path.join(repo.repo.working_dir, _) for _ in sorted_unique] import numpy exists = [os.path.exists(f) for f in full_path] sizes = [os.stat(f).st_size if os.path.exists(f) else numpy.nan for f in full_path] import pandas removed = pandas.DataFrame(dict(name=sorted_unique, exists=exists, size=sizes)) removed.sort_values("size").dropna().tail() ###Output _____no_output_____ ###Markdown How many files not exist anymore? ###Code removed[~removed.exists].shape removed[~removed.exists].head() g = repo.repo.git() print(g.execute('git log --log-size --abbrev --follow "build_script.bat"').replace( '@gmail.com', '@').replace("@ensae.fr", "@")) from pyquickhelper.loghelper.repositories.pygit_helper import get_repo_log res = get_repo_log(repo.repo.working_dir) res[0] df = pandas.DataFrame(data=res, columns="owner hash datetime comment full_hash path".split()) df.head() res = get_repo_log(repo.repo.working_dir, file_detail=True) res[0] df = pandas.DataFrame(data=res, columns="owner hash datetime comment full_hash path name net bytes".split()) df.head() df["ext"] = df.name.apply(lambda x: os.path.splitext(x)[-1].strip()) df.head(n=2) gr = df[df.ext.isin((".py", ".ipynb", ".txt", ".zip", ".yml"))].groupby("ext").sum() gr.T gr.plot(kind="bar"); ###Output _____no_output_____
complete_solutions/2018-09-18_reproducible_dataframe.ipynb
###Markdown [How do I provide a reproducible copy of my existing DataFrame?](https://stackoverflow.com/questions/52413246/how-do-i-provide-a-reproducible-copy-of-my-existing-dataframe/5241324752413247) ###Code import pandas as pd from pprint import pprint as pp ###Output _____no_output_____ ###Markdown Create your DataFrame from some file ###Code df = pd.read_csv('data/2018-09-18_flavors_of_cacao.csv') df.head() ###Output _____no_output_____ ###Markdown Easiest Method Using to_clipboard and read_clipboard ###Code df.head(10).to_clipboard(sep=',', index=False) ###Output _____no_output_____ ###Markdown If you have a multi-index DataFrame or an index other than 0...n, use index=True and provide a note in your question as to which column(s) are the index. output of to_clipboard ###Code "Company  (Maker-if known)","Specific Bean Origin or Bar Name",REF,"Review Date","Cocoa Percent","Company Location",Rating,"Bean Type","Broad Bean Origin" A. Morin,Agua Grande,1876,2016,63%,France,3.75, ,Sao Tome A. Morin,Kpime,1676,2015,70%,France,2.75, ,Togo A. Morin,Atsane,1676,2015,70%,France,3.0, ,Togo A. Morin,Akata,1680,2015,70%,France,3.5, ,Togo A. Morin,Quilla,1704,2015,70%,France,3.5, ,Peru A. Morin,Carenero,1315,2014,70%,France,2.75,Criollo,Venezuela A. Morin,Cuba,1315,2014,70%,France,3.5, ,Cuba A. Morin,Sur del Lago,1315,2014,70%,France,3.5,Criollo,Venezuela A. Morin,Puerto Cabello,1319,2014,70%,France,3.75,Criollo,Venezuela A. Morin,Pablino,1319,2014,70%,France,4.0, ,Peru ###Output _____no_output_____ ###Markdown after executing to_clipboard, run pd.read_clipboard ###Code pd.read_clipboard(sep=',') ###Output _____no_output_____ ###Markdown With Lists pretty print the headers ###Code pp(list(df.columns)) ###Output ['Company\xa0\r\n(Maker-if known)', 'Specific Bean Origin\r\nor Bar Name', 'REF', 'Review\r\nDate', 'Cocoa\r\nPercent', 'Company\r\nLocation', 'Rating', 'Bean\r\nType', 'Broad Bean\r\nOrigin'] ###Markdown create a variable, copy the printed headers and assign the copy this can then be copied and pasted into Stack Overflow ###Code sof_headers = ['Company\xa0\n(Maker-if known)', 'Specific Bean Origin\nor Bar Name', 'REF', 'Review\nDate', 'Cocoa\nPercent', 'Company\nLocation', 'Rating', 'Bean\nType', 'Broad Bean\nOrigin'] ###Output _____no_output_____ ###Markdown pretty print some small range of the DataFrame values ###Code pp(df.iloc[0:10].values) ###Output array([['A. Morin', 'Agua Grande', 1876, 2016, '63%', 'France', 3.75, '\xa0', 'Sao Tome'], ['A. Morin', 'Kpime', 1676, 2015, '70%', 'France', 2.75, '\xa0', 'Togo'], ['A. Morin', 'Atsane', 1676, 2015, '70%', 'France', 3.0, '\xa0', 'Togo'], ['A. Morin', 'Akata', 1680, 2015, '70%', 'France', 3.5, '\xa0', 'Togo'], ['A. Morin', 'Quilla', 1704, 2015, '70%', 'France', 3.5, '\xa0', 'Peru'], ['A. Morin', 'Carenero', 1315, 2014, '70%', 'France', 2.75, 'Criollo', 'Venezuela'], ['A. Morin', 'Cuba', 1315, 2014, '70%', 'France', 3.5, '\xa0', 'Cuba'], ['A. Morin', 'Sur del Lago', 1315, 2014, '70%', 'France', 3.5, 'Criollo', 'Venezuela'], ['A. Morin', 'Puerto Cabello', 1319, 2014, '70%', 'France', 3.75, 'Criollo', 'Venezuela'], ['A. Morin', 'Pablino', 1319, 2014, '70%', 'France', 4.0, '\xa0', 'Peru']], dtype=object) ###Markdown create a variable, copy the printed values and assign the copy this can be copied and pasted into Stack Overflow ###Code sof_values = [['A. Morin', 'Agua Grande', 1876, 2016, '63%', 'France', 3.75, '\xa0', 'Sao Tome'], ['A. Morin', 'Kpime', 1676, 2015, '70%', 'France', 2.75, '\xa0', 'Togo'], ['A. 
Morin', 'Atsane', 1676, 2015, '70%', 'France', 3.0, '\xa0', 'Togo'], ['A. Morin', 'Akata', 1680, 2015, '70%', 'France', 3.5, '\xa0', 'Togo'], ['A. Morin', 'Quilla', 1704, 2015, '70%', 'France', 3.5, '\xa0', 'Peru'], ['A. Morin', 'Carenero', 1315, 2014, '70%', 'France', 2.75, 'Criollo', 'Venezuela'], ['A. Morin', 'Cuba', 1315, 2014, '70%', 'France', 3.5, '\xa0', 'Cuba'], ['A. Morin', 'Sur del Lago', 1315, 2014, '70%', 'France', 3.5, 'Criollo', 'Venezuela'], ['A. Morin', 'Puerto Cabello', 1319, 2014, '70%', 'France', 3.75, 'Criollo', 'Venezuela'], ['A. Morin', 'Pablino', 1319, 2014, '70%', 'France', 4.0, '\xa0', 'Peru']] ###Output _____no_output_____ ###Markdown Using sof_values and sof_headers, the Stack Overflow community can easily reproduce your DataFrame and more easily answer your question ###Code sof_df = pd.DataFrame(sof_values, columns=sof_headers) sof_df ###Output _____no_output_____
docs/examples/UserRequests/Live plotting the derivative.ipynb
###Markdown Live plotting the derivativeThis tutorial shows how to add a column-wise derivative to a 2D measurement ###Code # imports import numpy as np import qcodes as qc from qcodes.utils.wrappers import init, do1d, do2d from qcodes.tests.instrument_mocks import DummyInstrument from qcodes.instrument.parameter import ArrayParameter ###Output _____no_output_____ ###Markdown Setting up a mock experiment ###Code # Dummy instruments dac = DummyInstrument('dac', gates=['ch1', 'ch2']) lockin = DummyInstrument('lockin', gates=['X', 'Y']) station = qc.Station(dac, lockin) init('./sandboxdata', 'sandboxsample', station, annotate_image=False) # add a mock non-trivial signal array parameter to the lock-in class Signal(ArrayParameter): def __init__(self, name, instrument): super().__init__(name, shape=(500,), unit='arb. un.', setpoint_names=('Voltage',), setpoint_units=('V',) ) self.setpoints = (tuple(np.linspace(-3, 3, 500)),) self._instrument = instrument self.xpoint = self._xpoint() def reset_signal(self): self.xpoint = self._xpoint() def _xpoint(self): """ A frequency counter """ n = 0 xx = np.linspace(-3, 3, 50) while True: yield xx[(n % len(xx))] n += 1 def get(self): yy = np.array(self.setpoints[0]) x = next(self.xpoint) sig = (1 - x/2 + x**5 + yy**3) * np.exp(-x**2 - yy**2) noise = 0.01*np.random.randn(500) sig += np.convolve(noise, np.hanning(5), mode='same')/np.sum(np.hanning(5)) return sig lockin.add_parameter(name='signal', parameter_class=Signal) # Measure the beautiful signal do1d(dac.ch1, 0, 1, 50, 0.02, lockin.signal) ###Output _____no_output_____ ###Markdown Adding the derivative ###Code # Define a new parameter. Since this will return an array of values, it must # be a subclass of the ArrayParameter class Derivative(ArrayParameter): def __init__(self, name, instrument, antiderivative): """ The antiderivative is the parameter we wish to differentiate """ super().__init__(name, shape=(antiderivative.shape[0] - 1,), # derivative is one shorter setpoint_names=antiderivative.setpoint_names, setpoint_units=antiderivative.setpoint_units) self._instrument = instrument self.ad = antiderivative self.setpoints = (self.ad.setpoints[0][:-1],) def get(self): yy = self.ad.get_latest() xx = np.array(self.ad.setpoints[0]) return np.diff(yy)/np.diff(xx) lockin.add_parameter('deriv', antiderivative=lockin.signal, parameter_class=Derivative) do1d(dac.ch1, 0, 1, 50, 0.2, lockin.signal, lockin.deriv, use_threads=False) ###Output _____no_output_____ ###Markdown Adding the derivative + smoothingDirectly taking the derivative of experimental data with noise usually results in a very noisy derivative signal.Here we add some pre-smoothening of the signal prior to taking the derivative. 
###Code class SmoothDerivative(ArrayParameter): def __init__(self, name, instrument, antiderivative, kernel_size): """ The antiderivative is the parameter we wish to differentiate Some pre-smoothing is added """ super().__init__(name, shape=(antiderivative.shape[0] - 1 - kernel_size,), setpoint_names=antiderivative.setpoint_names, setpoint_units=antiderivative.setpoint_units) self._instrument = instrument self.ad = antiderivative self.ks = kernel_size self.setpoints = (tuple(np.array(self.ad.setpoints[0])[self.ks//2:-self.ks//2-1]),) def change_kernel_size(self, ks): """ Update the kernel size for more/less agressive smoothening """ if (ks % 2) != 0: raise ValueError('Kernel size must be an even integer') self.ks = ks self.setpoints = (tuple(np.array(self.ad.setpoints[0])[self.ks//2:-self.ks//2-1]),) self.shape = (self.ad.shape[0] - 1 - self.ks,) @staticmethod def smoothen(signal, xx, ks): """ Smoothen a signal and reduce the x-axis accordingly """ if (ks % 2) != 0: raise ValueError('Kernel size must be an even integer' ) smooth_sig = np.convolve(signal, np.hanning(ks), mode='same')/np.sum(np.hanning(ks)) smooth_sig = smooth_sig[ks//2:-ks//2] xx = xx[ks//2:-ks//2] return xx, smooth_sig def get(self): yy = self.ad.get_latest() xx = np.array(self.ad.setpoints[0]) sxx, syy = self.smoothen(yy, xx, self.ks) return np.diff(syy)/np.diff(sxx) lockin.add_parameter('smoothderiv', antiderivative=lockin.signal, parameter_class=SmoothDerivative, kernel_size=10) do1d(dac.ch1, 0, 1, 50, 0.2, lockin.signal, lockin.smoothderiv, use_threads=False) ###Output _____no_output_____ ###Markdown Changing kernel sizeYou can change the kernel size easily: ###Code lockin.smoothderiv.change_kernel_size(24) do1d(dac.ch1, 0, 1, 50, 0.2, lockin.signal, lockin.smoothderiv, use_threads=False) ###Output _____no_output_____
notebooks/DeepWalk_Event_Embeddings.ipynb
###Markdown DeepWalk ###Code %reload_ext autoreload %autoreload 2 %matplotlib inline import os os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"; os.environ["CUDA_VISIBLE_DEVICES"]="0"; ###Output _____no_output_____ ###Markdown Baixando e instalando bibliotecas ###Code !git clone https://github.com/shenweichen/GraphEmbedding !pip install tensorflow==1.4.0 !cd GraphEmbedding; python setup.py install !mv GraphEmbedding/* . ###Output _____no_output_____ ###Markdown Carregando as Redes ###Code !gdown --id 1RF_bIo5ndxPhu9SJw-T8HBcuHyaGQGL0 !tar -xzvf datasets.tar.gz from os import listdir from os.path import isfile, join path_datasets = 'datasets_runs/' network_files = [f for f in listdir(path_datasets) if isfile(join(path_datasets, f))] print(network_files) ###Output ['run_1_google_news_5w1h_graph_hin.nx', 'run_6_40er_5w1h_graph_hin.nx', 'run_4_bbc_5w1h_graph_hin.nx', 'run_8_gold_standard_5w1h_graph_hin.nx', 'run_5_bbc_5w1h_graph_hin.nx', 'run_9_google_news_5w1h_graph_hin.nx', 'run_5_gold_standard_5w1h_graph_hin.nx', 'run_2_bbc_5w1h_graph_hin.nx', 'run_9_news_cluster_5w1h_graph_hin.nx', 'run_7_40er_5w1h_graph_hin.nx', 'run_9_gold_standard_5w1h_graph_hin.nx', 'run_8_google_news_5w1h_graph_hin.nx', 'run_10_bbc_5w1h_graph_hin.nx', 'run_8_news_cluster_5w1h_graph_hin.nx', 'run_2_news_cluster_5w1h_graph_hin.nx', 'run_8_40er_5w1h_graph_hin.nx', 'run_6_bbc_5w1h_graph_hin.nx', 'run_4_google_news_5w1h_graph_hin.nx', 'run_2_google_news_5w1h_graph_hin.nx', 'run_7_gold_standard_5w1h_graph_hin.nx', 'run_4_gold_standard_5w1h_graph_hin.nx', 'run_5_40er_5w1h_graph_hin.nx', 'run_3_gold_standard_5w1h_graph_hin.nx', 'run_4_40er_5w1h_graph_hin.nx', 'run_5_google_news_5w1h_graph_hin.nx', 'run_10_news_cluster_5w1h_graph_hin.nx', 'run_10_40er_5w1h_graph_hin.nx', 'run_9_40er_5w1h_graph_hin.nx', 'run_10_google_news_5w1h_graph_hin.nx', 'run_6_google_news_5w1h_graph_hin.nx', 'run_1_news_cluster_5w1h_graph_hin.nx', 'run_3_news_cluster_5w1h_graph_hin.nx', 'run_5_news_cluster_5w1h_graph_hin.nx', 'run_4_news_cluster_5w1h_graph_hin.nx', 'run_7_bbc_5w1h_graph_hin.nx', 'run_1_gold_standard_5w1h_graph_hin.nx', 'run_7_google_news_5w1h_graph_hin.nx', 'run_10_gold_standard_5w1h_graph_hin.nx', 'run_6_gold_standard_5w1h_graph_hin.nx', 'run_3_bbc_5w1h_graph_hin.nx', 'run_1_bbc_5w1h_graph_hin.nx', 'run_2_gold_standard_5w1h_graph_hin.nx', 'run_3_google_news_5w1h_graph_hin.nx', 'run_9_bbc_5w1h_graph_hin.nx', 'run_2_40er_5w1h_graph_hin.nx', 'run_8_bbc_5w1h_graph_hin.nx', 'run_6_news_cluster_5w1h_graph_hin.nx', 'run_1_40er_5w1h_graph_hin.nx', 'run_7_news_cluster_5w1h_graph_hin.nx', 'run_3_40er_5w1h_graph_hin.nx'] ###Markdown Treinando e avaliando o DeepWalk ###Code from ge import DeepWalk import numpy as np import networkx as nx from tqdm.notebook import tqdm from sklearn.metrics import accuracy_score from sklearn.metrics import f1_score from sklearn.svm import LinearSVC experimental_results = [] emb_sizes = [ 64 ] for network_file in tqdm(network_files): for emb_size in emb_sizes: G = nx.read_gpickle(path_datasets+network_file) model = DeepWalk(G,walk_length=10,num_walks=80,workers=1) model.train(window_size=5,iter=3,embed_size=64)# train model embeddings = model.get_embeddings()# get embedding vectors # gerando features e classificando X_train = [] Y_train = [] X_test = [] Y_test = [] for node in G.nodes(): if ':event' in node: if 'train' in G.nodes[node]: X_train.append(embeddings[node]) Y_train.append(G.nodes[node]['label']) if 'test' in G.nodes[node]: X_test.append(embeddings[node]) Y_test.append(G.nodes[node]['label']) X_train = 
np.array(X_train) X_test = np.array(X_test) clf = LinearSVC(tol=1e-5).fit(X_train, Y_train) y_pred = clf.predict(X_test) f1_macro = f1_score(Y_test, y_pred, average='macro') acc = accuracy_score(Y_test, y_pred) print('--->' ,network_file,emb_size,'f1_macro',f1_macro,'acc',acc) experimental_results.append((network_file,emb_size,'f1_macro',f1_macro,'acc',acc,Y_test,y_pred)) import pandas as pd df_results = pd.DataFrame(experimental_results) df_results df_results.to_excel('deepwalk_results_r1.xls') ###Output _____no_output_____
aces-g4 (1).ipynb
###Markdown Exploratory Data Analysis ###Code data.isnull().sum() data.drop(['id', 'Unnamed: 32'], inplace=True, axis=1) data[['diagnosis']].value_counts() ax = data[['diagnosis']].value_counts().plot(kind='bar', figsize=(8, 6), title="Diagnosis Counts") ax.set_xlabel("Benign & Malignant") ax.set_ylabel("Frequency") data['diagnosis'] = data['diagnosis'].map( {'B': 1, 'M': 0} ) plt.figure(figsize=(16,9)) sns.heatmap(data.corr(), annot=True) plt.title("Correlation between Features", fontsize=23) plt.show() data[data.columns[0:]].corr()['diagnosis'][:].sort_values(ascending=False) fig, axs = plt.subplots( figsize=(15,8)) data.hist(ax=axs) plt.tight_layout() ###Output /opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:2: UserWarning: To output multiple subplots, the figure containing the passed axes is being cleared ###Markdown Feature Selection - Dimentionality Reduction ###Code feature_cols = [c for c in data.columns if c not in ['diagnosis']] X = data[feature_cols] y = data['diagnosis'] # to visualise al the columns in the dataframe pd.pandas.set_option('display.max_columns', None) feature_sel_model = SelectFromModel(Lasso(alpha=0.005, random_state=23, max_iter=3000,tol=30.295954819192826)) feature_sel_model.fit(data[feature_cols], data['diagnosis']) feature_sel_model.get_support() #A list of the selected features selected_feat = data[feature_cols].columns[(feature_sel_model.get_support())] print('total features: {}'.format((data[feature_cols].shape[1]))) print('selected features: {}'.format(len(selected_feat))) print(selected_feat) X = X[selected_feat] y = data['diagnosis'] ###Output _____no_output_____ ###Markdown Modelling ###Code X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) ###Output _____no_output_____ ###Markdown Model 1 ###Code first_model = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 42) first_model.fit(X_train, y_train) pred_y = first_model.predict(X_test) preds = first_model.predict(X_train) print("Accuracy:", accuracy_score(y_test, pred_y)) ###Output Accuracy: 0.9415204678362573 ###Markdown Model 2 ###Code second_model = DecisionTreeClassifier(max_depth=3, random_state=42) second_model.fit(X_train, y_train) pred_n = second_model.predict(X_test) print(accuracy_score(y_test, pred_n)) ###Output 0.9298245614035088 ###Markdown Model_3 ###Code gbrt = GradientBoostingClassifier(random_state = 0, max_depth = 1) gbrt.fit(X_train, y_train) print("Accuracy on training set:", gbrt.score(X_train, y_train)) print("Accuracy on test set:", gbrt.score(X_test, y_test)) ###Output Accuracy on training set: 0.9773869346733668 Accuracy on test set: 0.9532163742690059
docs/notebooks/fitting/1D_fitting/plot_2_Na2SiO3.ipynb
###Markdown 17O MAS NMR of crystalline Na2SiO3 In this example, we illustrate the use of the mrsimulator objects to- create a spin system fitting model,- use the fitting model to perform a least-squares fit on the experimental, and- extract the tensor parameters of the spin system model.We will be using the `LMFIT `_ methods toestablish fitting parameters and fit the spectrum. The following example illustratesthe least-squares fitting on a $^{17}\text{O}$ measurement of$\text{Na}_{2}\text{SiO}_{3}$ [f5]_.We will begin by importing relevant modules and presetting figure style and layout. ###Code import csdmpy as cp import matplotlib as mpl import matplotlib.pyplot as plt import mrsimulator.signal_processing as sp import mrsimulator.signal_processing.apodization as apo from lmfit import Minimizer, report_fit from mrsimulator import Simulator, SpinSystem, Site from mrsimulator.methods import BlochDecayCentralTransitionSpectrum from mrsimulator.utils import get_spectral_dimensions from mrsimulator.utils.spectral_fitting import LMFIT_min_function, make_LMFIT_params font = {"size": 9} mpl.rc("font", **font) mpl.rcParams["figure.figsize"] = [4.25, 3.0] mpl.rcParams["grid.linestyle"] = "--" ###Output _____no_output_____ ###Markdown Import the datasetImport the experimental data. In this example, we will import the dataset fileserialized with the CSDM file-format, using the`csdmpy `_ module. ###Code filename = "https://sandbox.zenodo.org/record/687656/files/Na2SiO3_O17.csdf" oxygen_experiment = cp.load(filename) # For spectral fitting, we only focus on the real part of the complex dataset oxygen_experiment = oxygen_experiment.real # Convert the dimension coordinates from Hz to ppm. oxygen_experiment.dimensions[0].to("ppm", "nmr_frequency_ratio") # Normalize the spectrum oxygen_experiment /= oxygen_experiment.max() # plot of the dataset. ax = plt.subplot(projection="csdm") ax.plot(oxygen_experiment, "k", alpha=0.5) ax.set_xlim(-50, 100) ax.invert_xaxis() plt.tight_layout() plt.show() ###Output _____no_output_____ ###Markdown Create a fitting modelNext, we will create a ``simulator`` object that we use to fit the spectrum. We willstart by creating the guess ``SpinSystem`` objects.**Step 1:** Create initial guess sites and spin systems ###Code O17_1 = Site( isotope="17O", isotropic_chemical_shift=60.0, # in ppm, quadrupolar={"Cq": 4.2e6, "eta": 0.5}, # Cq in Hz ) O17_2 = Site( isotope="17O", isotropic_chemical_shift=40.0, # in ppm, quadrupolar={"Cq": 2.4e6, "eta": 0}, # Cq in Hz ) system_object = [SpinSystem(sites=[s], abundance=50) for s in [O17_1, O17_2]] ###Output _____no_output_____ ###Markdown **Step 2:** Create the method object. Note, when performing the least-squares fit, youmust create an appropriate method object which matches the method used in acquiringthe experimental data. The attribute values of this method must match theexact conditions under which the experiment was acquired. This including theacquisition channels, the magnetic flux density, rotor angle, rotor frequency, andthe spectral/spectroscopic dimension. In the following example, we set up a centraltransition selective Bloch decay spectrum method, where we obtain thespectral/spectroscopic information from the metadata of the CSDM dimension. Use the:func:`~mrsimulator.utils.get_spectral_dimensions` utility function for quickextraction of the spectroscopic information, `i.e.`, count, spectral_width, andreference_offset from the CSDM object. The remaining attribute values are set to theexperimental conditions. 
###Code # get the count, spectral_width, and reference_offset information from the experiment. spectral_dims = get_spectral_dimensions(oxygen_experiment) method = BlochDecayCentralTransitionSpectrum( channels=["17O"], magnetic_flux_density=9.4, # in T rotor_frequency=14000, # in Hz spectral_dimensions=spectral_dims, ) ###Output _____no_output_____ ###Markdown Assign the experimental dataset to the ``experiment`` attribute of the above method. ###Code method.experiment = oxygen_experiment ###Output _____no_output_____ ###Markdown **Step 3:** Create the Simulator object and add the method and spin system objects. ###Code sim = Simulator() sim.spin_systems = system_object sim.methods = [method] ###Output _____no_output_____ ###Markdown **Step 4:** Simulate the spectrum. ###Code for iso in sim.spin_systems: # A method object queries every spin system for a list of transition pathways that # are relevant for the given method. Since the method and the number of spin systems # remain the same during the least-squares fit, a one-time query is sufficient. To # avoid querying for the transition pathways at every iteration in a least-squares # fitting, evaluate the transition pathways once and store it as follows iso.transition_pathways = method.get_transition_pathways(iso) # Now simulate as usual. sim.run() ###Output _____no_output_____ ###Markdown **Step 5:** Create the SignalProcessor class object and apply the post-simulationsignal processing operations. ###Code processor = sp.SignalProcessor( operations=[ sp.IFFT(), apo.Gaussian(FWHM="100 Hz"), sp.FFT(), sp.Scale(factor=20000.0), ] ) processed_data = processor.apply_operations(data=sim.methods[0].simulation).real ###Output _____no_output_____ ###Markdown **Step 6:** The plot of initial guess simulation (black) along with the experiment(red) is shown below. ###Code ax = plt.subplot(projection="csdm") ax.plot(oxygen_experiment, "k", alpha=0.5, linewidth=2, label="Experiment") ax.plot(processed_data, "r", label="guess spectrum") ax.set_xlim(-50, 100) ax.invert_xaxis() plt.legend() plt.grid() plt.tight_layout() plt.show() ###Output _____no_output_____ ###Markdown Least-squares minimization with LMFITOnce you have a fitting model, you need to create the list of parameters to use in theleast-squares fitting. For this, you may use the`Parameters `_ class from *LMFIT*,as described in the previous example.Here, we make use of a utility function,:func:`~mrsimulator.utils.spectral_fitting.make_LMFIT_params`, that considerablysimplifies the LMFIT parameters generation process.**Step 7:** Create a list of parameters. ###Code params = make_LMFIT_params(sim, processor) ###Output _____no_output_____ ###Markdown The `make_LMFIT_params` parses the instances of the ``Simulator`` and the``PostSimulator`` objects for parameters and returns an LMFIT `Parameters` object.**Customize the Parameters:**You may customize the parameters list, ``params``, as desired. Here, we remove theabundance of the two spin systems and constrain it to the initial value of 50% each. ###Code params.pop("sys_0_abundance") params.pop("sys_1_abundance") print(params.pretty_print(columns=["value", "min", "max", "vary", "expr"])) ###Output _____no_output_____ ###Markdown **Step 8:** Perform least-squares minimization. For the user's convenience, we alsoprovide a utility function,:func:`~mrsimulator.utils.spectral_fitting.LMFIT_min_function`, for evaluating thedifference vector between the simulation and experiment, based onthe parameters update. 
You may use this function directly as the argument of the LMFIT Minimizer class, as follows: ###Code minner = Minimizer(LMFIT_min_function, params, fcn_args=(sim, processor)) result = minner.minimize() report_fit(result) ###Output _____no_output_____ ###Markdown **Step 9:** The plot of the best fit (red) overlaid on the measurement (black) is shown below. ###Code # Best fit spectrum sim.run() processed_data = processor.apply_operations(data=sim.methods[0].simulation).real ax = plt.subplot(projection="csdm") plt.plot(oxygen_experiment, "k", alpha=0.5, linewidth=2, label="Experiment") plt.plot(processed_data, "r--", label="Best Fit") plt.xlabel("$^{17}$O frequency / ppm") plt.xlim(100, -50) plt.legend() plt.grid() plt.tight_layout() plt.show() ###Output _____no_output_____
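###Markdown An added aside, not part of the original example: once the minimization has finished, the best-fit values and their uncertainties can be read back from the LMFIT result object returned by `minner.minimize()` above. The sketch below only assumes standard LMFIT behaviour — `result.params` is an `lmfit.Parameters` mapping whose entries carry `.value` and `.stderr` — and the parameter names are whatever `make_LMFIT_params` generated (the `sys_0_*`-style names seen earlier). ###Code
# Print each best-fit parameter with its standard error, when one was estimated.
for name, par in result.params.items():
    stderr = f" +/- {par.stderr:.4g}" if par.stderr is not None else ""
    print(f"{name}: {par.value:.6g}{stderr}")

# Overall goodness-of-fit statistic computed by LMFIT.
print("reduced chi-square:", result.redchi)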
Policy Gradients and Actor Critic/Synchronous_A2C_torch.ipynb
###Markdown [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/kinalmehta/Reinforcement-Learning-Notebooks/blob/master/Policy%20Gradients%20and%20Actor%20Critic/Synchronous_A2C_torch.ipynb) Basic Setup step in **Colab** ###Code #remove " > /dev/null 2>&1" to see what is going on under the hood !pip install gym pyvirtualdisplay > /dev/null 2>&1 !apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1 !apt-get update > /dev/null 2>&1 !apt-get install cmake > /dev/null 2>&1 !apt-get install libopenmpi-dev > /dev/null 2>&1 !apt-get install zlib1g-dev > /dev/null 2>&1 !pip install --upgrade setuptools 2>&1 !pip install ez_setup > /dev/null 2>&1 !pip install gym[atari,box2d,classic_control] > /dev/null 2>&1 # change to gym[atari,box2d,classic_control] !pip install stable-baselines[mpi] > /dev/null 2>&1 # %tensorflow_version 2.x %tensorflow_version 1.x ###Output _____no_output_____ ###Markdown Adding a virtual display for rendering ###Code from pyvirtualdisplay import Display display = Display(visible=0, size=(1400, 900)) display.start() ###Output _____no_output_____ ###Markdown Uncomment below to connect to drive to save model and video outputs ###Code # from google.colab import drive # drive.mount('/content/gdrive') # root_path = 'gdrive/My Drive/Colab Notebooks/RL/' # import os # os.chdir(root_path) ###Output _____no_output_____ ###Markdown Standard imports and notebook setup ###Code import gym from gym import logger as gymlogger from gym.wrappers import Monitor gymlogger.set_level(40) #error only import tensorflow as tf import numpy as np import random import matplotlib import matplotlib.pyplot as plt %matplotlib inline import math import glob import io import base64 from IPython.display import HTML from IPython import display as ipythondisplay from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv, VecVideoRecorder """ Utility functions to enable video recording of gym environment and displaying it To enable video, just do "env = wrap_env(env)"" """ def show_video(): mp4list = glob.glob('video/*.mp4') if len(mp4list) > 0: mp4 = mp4list[-1] video = io.open(mp4, 'r+b').read() encoded = base64.b64encode(video) # you can add "loop" after autoplay to keep the video looping after it ends ipythondisplay.display(HTML(data='''<video alt="test" autoplay controls style="height: 400px;"> <source src="data:video/mp4;base64,{0}" type="video/mp4" /> </video>'''.format(encoded.decode('ascii')))) else: print("Could not find video") def wrap_env(env): env = VecVideoRecorder(env, './video') return env ###Output _____no_output_____ ###Markdown Synchronous A2C Algorithm ###Code def make_env(env_id, rank, seed=0): """ Utility function for multiprocessed env. 
:param env_id: (str) the environment ID :param num_env: (int) the number of environment you wish to have in subprocesses :param seed: (int) the inital seed for RNG :param rank: (int) index of the subprocess """ def _init(): env = gym.make(env_id) env.seed(seed + rank) return env return _init num_cpu = 6 # Number of processes to use env_list = ["CartPole-v0", "LunarLander-v2", "MsPacman-ram-v0", "CartPole-v0", "MountainCar-v0", "Breakout-ram-v4", "Acrobot-v1"] env_to_use = env_list[0] # # Create the vectorized environment # env = SubprocVecEnv([make_env(env_to_use, i) for i in range(num_cpu)]) # s0 = env.reset() # print(s0.shape) # actions = [env.action_space.sample() for i in range(num_cpu)] # print(actions) # env.step(actions) # for i in range(1): # actions = [env.action_space.sample() for i in range(num_cpu)] # obs, ret, done, info = env.step(actions) # print(obs, done, info) import torch import torch.nn as nn import torch.nn.functional as F class Actor(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fc1_units (int): Number of nodes in first hidden layer fc2_units (int): Number of nodes in second hidden layer """ super(Actor, self).__init__() torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, fc1_units) self.fc2 = nn.Linear(fc1_units, fc2_units) self.actor = nn.Linear(fc2_units, action_size) def forward(self, state): """Build a network that maps state -> action values.""" x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) return self.actor(x) class Critic(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, seed, fc1_units=64, fc2_units=64): """Initialize parameters and build model. 
Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fc1_units (int): Number of nodes in first hidden layer fc2_units (int): Number of nodes in second hidden layer """ super(Critic, self).__init__() torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, fc1_units) self.fc2 = nn.Linear(fc1_units, fc2_units) self.critic = nn.Linear(fc2_units, 1) def forward(self, state): """Build a network that maps state -> action values.""" x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) return self.critic(x) class PGLoss(nn.Module): def forward(self, policy, act, rew_wt): logp = policy.log_prob(act) return -(logp * rew_wt).mean() import torch import torch.optim as optim from torch.distributions.categorical import Categorical from collections import defaultdict class Agent: def __init__(self, env, device='cpu'): self.env = env self.device = device self.state_size = self.env.observation_space.shape[0] self.action_size = self.env.action_space.n # self.policy_network = ACNet(self.state_size, self.action_size, 4) self.actor_network = Actor(self.state_size, self.action_size, 4).to(self.device) self.critic_network = Critic(self.state_size, 4).to(self.device) self.actor_loss = PGLoss() self.critic_loss = nn.MSELoss() self.train_stats = defaultdict(list) self.gamma=0.9 def get_policy(self, obs): net_op = self.actor_network(torch.as_tensor(obs, dtype=torch.float32).to(self.device)) return Categorical(logits=net_op.cpu()) def get_critic_value(self,obs): return torch.squeeze(self.critic_network(torch.as_tensor(obs, dtype=torch.float32).to(self.device)).cpu()) def get_action(self, policy): return policy.sample() def train(self, epochs): actor_optimizer = optim.Adam(self.actor_network.parameters(), lr=1e-2) critic_optimizer = optim.Adam(self.critic_network.parameters(), lr=1e-2) cur_obs = self.env.reset() cur_policy = self.get_policy(cur_obs) cur_ret = self.get_critic_value(cur_obs) total_rewards = 0 total_episodes = 0 batch_rewards = np.zeros(num_cpu) for i in range(epochs): cur_action = self.get_action(cur_policy) # print("[INFO 1] action/return shape", cur_action.shape, cur_ret.shape) # next_obs, cur_reward, done, _ = self.env.step(list(cur_actions)) # print("a", cur_actions, cur_action) next_obs, cur_reward, done, _ = self.env.step(list(cur_action.numpy())) next_policy = self.get_policy(next_obs) next_ret = self.get_critic_value(next_obs) # print("[INFO 2] reward shape", cur_reward.shape, next_ret.shape, done.shape) target_ret = torch.tensor(cur_reward, dtype=torch.float32) + self.gamma*next_ret.detach()*(1-torch.tensor(done, dtype=torch.float32)) actor_optimizer.zero_grad() critic_optimizer.zero_grad() actor_loss_val = self.actor_loss(cur_policy, cur_action, target_ret-cur_ret.detach()) critic_loss_val = self.critic_loss(cur_ret, target_ret) entropy_loss_val = cur_policy.entropy().mean() actor_loss = actor_loss_val + entropy_loss_val*0.01 # print("[INFO 3]",actor_loss_val, entropy_loss_val, critic_loss_val) actor_loss_val.backward() actor_optimizer.step() critic_loss_val.backward() critic_optimizer.step() cur_policy = next_policy cur_ret = next_ret if np.any(done): indxes = np.squeeze(np.argwhere(done), axis=-1) total_episodes += len(indxes) total_rewards += np.sum(batch_rewards[indxes]) batch_rewards[indxes] = 0 else: batch_rewards += cur_reward if (i+1)%5000==0: self.train_stats["actor_loss"] += [actor_loss_val.item()] self.train_stats["critic_loss"] += [critic_loss_val.item()] self.train_stats["returns"] += 
[total_rewards/(total_episodes+1e-8)] total_episodes = 0 total_rewards = 0 print("Epoch:", i, actor_loss_val.item(), critic_loss_val.item(), entropy_loss_val.item(), self.train_stats["returns"][-1]) def plot_train_stats(self): if len(self.train_stats)==0: print("first train to print train stats") for i in self.train_stats: plt.plot(self.train_stats[i]) plt.xlabel("Epoch") plt.ylabel(i) plt.show() return print("GPU available:", torch.cuda.is_available()) num_cpu = 1 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") env = SubprocVecEnv([make_env(env_to_use, i) for i in range(num_cpu)]) agent = Agent(env, device=device) agent.train(20*5000) agent.plot_train_stats() # watch the trained agent env = wrap_env(gym.make(env_to_use)) state = env.reset() done=False while not done: policy, _ = agent.get_net_op(state) action = agent.get_action(policy) env.render() state, reward, done, _ = env.step(action) if done: break env.close() show_video() ###Output _____no_output_____
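###Markdown To make the update rule inside `Agent.train` above easier to follow, here is a small self-contained sketch — added for illustration, not part of the original notebook — of the one-step advantage actor-critic quantities on dummy tensors: the bootstrapped TD target `r + gamma * V(s') * (1 - done)` that the critic regresses toward, and the advantage `target - V(s)` that weights the log-probabilities in the policy-gradient loss. ###Code
import torch

gamma = 0.9
# A dummy batch from 4 parallel environments: rewards, episode-end flags, critic values.
rewards = torch.tensor([1.0, 0.0, 1.0, 1.0])
dones   = torch.tensor([0.0, 1.0, 0.0, 0.0])
v_s     = torch.tensor([0.5, 0.2, 0.8, 0.1])   # V(s) from the critic
v_next  = torch.tensor([0.6, 0.9, 0.7, 0.2])   # V(s') from the critic

# One-step bootstrapped target: reward plus discounted next value, truncated at episode end.
td_target = rewards + gamma * v_next * (1.0 - dones)

# Advantage used to weight the actor's log-probabilities (in the notebook the critic values
# are detached here, so policy gradients do not flow into the value network).
advantage = td_target - v_s

print("TD target :", td_target)
print("advantage :", advantage)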
_notebooks/2021-10-20-Dropout-and-Batch-Normalization.ipynb
###Markdown "Dropout and Batch Normalization"> "Add functionality using layers not containing any neurons themselves but benefiting a model in various ways."- toc: true- branch: master- badges: true- comments: true- author: Rishiraj Acharya- categories: [deep learning, regularization, normalization]- image: images/nb/dropout.gif- hide: false- search_exclude: false IntroductionThere's so much more to Chicken Biryani than just the chicken. Surely chicken plays the most important role, but the rice, the masala (and the potato if you're a Bengali) are important too. Similarly, in deep learning, there're layers apart from the dense layers that plays different important roles. Let's discuss two of them. ###Code # Setup plotting import matplotlib.pyplot as plt plt.style.use('seaborn-whitegrid') # Set Matplotlib defaults plt.rc('figure', autolayout=True) plt.rc('axes', labelweight='bold', labelsize='large', titleweight='bold', titlesize=18, titlepad=10) plt.rc('animation', html='html5') ###Output _____no_output_____ ###Markdown DropoutDropout is an important technique for regularization that only emerged recently, thanks to Geoffrey Hinton and works amazingly well. In deep learning we have one layer connecting to another layer and the values that go from one layer to the next are called activations. Dropout takes these activations and randomly set half of them to zero for every example we train our network on. Simply put, it iteratively takes half of the data that's flowing through our network and just drops it. Thus, our network can never rely on any given activation to be present because they might be dropped at any given iteration. So it is forced to learn a redundant representation for everything to make sure that at least some of the information remains. This makes the network robust and prevents overfitting all while making it act like taking an agreement of an ensemble of networks. 
Load Dataset and Preprocess ###Code import pandas as pd from sklearn.preprocessing import StandardScaler, OneHotEncoder from sklearn.compose import make_column_transformer from sklearn.model_selection import GroupShuffleSplit from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras import callbacks spotify = pd.read_csv('/content/spotify.csv') X = spotify.copy().dropna() y = X.pop('track_popularity') artists = X['track_artist'] features_num = ['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo', 'duration_ms'] features_cat = ['playlist_genre'] preprocessor = make_column_transformer( (StandardScaler(), features_num), (OneHotEncoder(), features_cat), ) def group_split(X, y, group, train_size=0.75): splitter = GroupShuffleSplit(train_size=train_size) train, test = next(splitter.split(X, y, groups=group)) return (X.iloc[train], X.iloc[test], y.iloc[train], y.iloc[test]) X_train, X_valid, y_train, y_valid = group_split(X, y, artists) X_train = preprocessor.fit_transform(X_train) X_valid = preprocessor.transform(X_valid) y_train = y_train / 100 y_valid = y_valid / 100 input_shape = [X_train.shape[1]] print("Input shape: {}".format(input_shape)) ###Output Input shape: [18] ###Markdown Define and fit the original Model ###Code model = keras.Sequential([ layers.Dense(128, activation='relu', input_shape=input_shape), layers.Dense(64, activation='relu'), layers.Dense(1) ]) model.compile( optimizer='adam', loss='mae', ) history = model.fit( X_train, y_train, validation_data=(X_valid, y_valid), batch_size=512, epochs=50, verbose=0, ) history_df = pd.DataFrame(history.history) history_df.loc[:, ['loss', 'val_loss']].plot() print("Minimum Validation Loss: {:0.4f}".format(history_df['val_loss'].min())) ###Output Minimum Validation Loss: 0.1922 ###Markdown Define and fit the Model with Dropout ###Code # Adding two 30% dropout layers, one after 128 and one after 64 model = keras.Sequential([ layers.Dense(128, activation='relu', input_shape=input_shape), layers.Dropout(0.3), layers.Dense(64, activation='relu'), layers.Dropout(0.3), layers.Dense(1) ]) model.compile( optimizer='adam', loss='mae', ) history = model.fit( X_train, y_train, validation_data=(X_valid, y_valid), batch_size=512, epochs=50, verbose=0, ) history_df = pd.DataFrame(history.history) history_df.loc[:, ['loss', 'val_loss']].plot() print("Minimum Validation Loss: {:0.4f}".format(history_df['val_loss'].min())) ###Output Minimum Validation Loss: 0.1879 ###Markdown From the learning curves, we can see that the validation loss remains near a constant minimum even though the training loss continues to decrease. So we can see that adding dropout did prevent overfitting this time. Moreover, by making it harder for the network to fit spurious patterns, dropout may have encouraged the network to seek out more of the true patterns, possibly improving the validation loss some as well). Batch NormalizationIn deep learning it is a common practice to normalize the data on a similar scale before it goes to the network. We can easily achieve this through the scalers available from [sklearn.preprocessing](https://scikit-learn.org/stable/modules/classes.htmlmodule-sklearn.preprocessing) and has the effect of preventing the network from getting unstable with the weights being shifted in proportion to how large an activation the data produces. 
In a same way Batch Normalization normalizes the data on a similar scale when it's inside the network, just before or after the activation function of each hidden layer. It zero-centers and normalizes each input, then shifts and scales the result using one new parameter vector for shifting and another for scaling for each layer. This makes the network learn the optimal scale and mean of each of the layer's inputs. Load Dataset and Preprocess ###Code import pandas as pd concrete = pd.read_csv('/content/concrete.csv') df = concrete.copy() df_train = df.sample(frac=0.7, random_state=0) df_valid = df.drop(df_train.index) X_train = df_train.drop('CompressiveStrength', axis=1) X_valid = df_valid.drop('CompressiveStrength', axis=1) y_train = df_train['CompressiveStrength'] y_valid = df_valid['CompressiveStrength'] input_shape = [X_train.shape[1]] ###Output _____no_output_____ ###Markdown Define and fit the original Model ###Code model = keras.Sequential([ layers.Dense(512, activation='relu', input_shape=input_shape), layers.Dense(512, activation='relu'), layers.Dense(512, activation='relu'), layers.Dense(1), ]) model.compile( optimizer='sgd', # SGD is more sensitive to differences of scale loss='mae', metrics=['mae'], ) history = model.fit( X_train, y_train, validation_data=(X_valid, y_valid), batch_size=64, epochs=100, verbose=0, ) history_df = pd.DataFrame(history.history) history_df.loc[0:, ['loss', 'val_loss']].plot() print(("Minimum Validation Loss: {:0.4f}").format(history_df['val_loss'].min())) ###Output Minimum Validation Loss: nan ###Markdown Define and fit the Model with Batch Normalization ###Code # Adding a BatchNormalization layer before each Dense layer model = keras.Sequential([ layers.BatchNormalization(input_shape=input_shape), layers.Dense(512, activation='relu'), layers.BatchNormalization(), layers.Dense(512, activation='relu'), layers.BatchNormalization(), layers.Dense(512, activation='relu'), layers.BatchNormalization(), layers.Dense(1), ]) model.compile( optimizer='sgd', loss='mae', metrics=['mae'], ) EPOCHS = 100 history = model.fit( X_train, y_train, validation_data=(X_valid, y_valid), batch_size=64, epochs=EPOCHS, verbose=0, ) history_df = pd.DataFrame(history.history) history_df.loc[0:, ['loss', 'val_loss']].plot() print(("Minimum Validation Loss: {:0.4f}").format(history_df['val_loss'].min())) ###Output Minimum Validation Loss: 3.9774
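###Markdown As a closing aside (not part of the original post), the transformation a batch normalization layer applies during training can be written out in a few lines of NumPy: compute per-feature statistics over the batch, normalize, then apply a learned scale (gamma) and shift (beta). The gamma and beta values below are placeholders standing in for what the layer would actually learn. ###Code
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(loc=5.0, scale=3.0, size=(64, 4))  # a batch of 64 examples with 4 features
eps = 1e-3                                        # small constant for numerical stability

# Per-feature batch statistics.
mu = x.mean(axis=0)
var = x.var(axis=0)

# Learned parameters (identity scale and zero shift at initialization).
gamma = np.ones(4)
beta = np.zeros(4)

x_hat = (x - mu) / np.sqrt(var + eps)  # zero-centered, unit-variance per feature
y = gamma * x_hat + beta               # rescaled and shifted by the learned parameters

print(y.mean(axis=0).round(3))  # ~0 for every feature
print(y.std(axis=0).round(3))   # ~1 for every feature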
Handling_Test_Data.ipynb
###Markdown Handle Test Data set ###Code import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns test_df=pd.read_csv('test.csv') test_df.shape test_df.head() #check null values test_df.isnull().sum() ## Fill Missing Values test_df['LotFrontage']=test_df['LotFrontage'].fillna(test_df['LotFrontage'].mean()) test_df['MSZoning']=test_df['MSZoning'].fillna(test_df['MSZoning'].mode()[0]) test_df.shape test_df.drop(['Alley'],axis=1,inplace=True) test_df.shape test_df['BsmtCond']=test_df['BsmtCond'].fillna(test_df['BsmtCond'].mode()[0]) test_df['BsmtQual']=test_df['BsmtQual'].fillna(test_df['BsmtQual'].mode()[0]) test_df['FireplaceQu']=test_df['FireplaceQu'].fillna(test_df['FireplaceQu'].mode()[0]) test_df['GarageType']=test_df['GarageType'].fillna(test_df['GarageType'].mode()[0]) test_df.drop(['GarageYrBlt'],axis=1,inplace=True) test_df.shape test_df['GarageFinish']=test_df['GarageFinish'].fillna(test_df['GarageFinish'].mode()[0]) test_df['GarageQual']=test_df['GarageQual'].fillna(test_df['GarageQual'].mode()[0]) test_df['GarageCond']=test_df['GarageCond'].fillna(test_df['GarageCond'].mode()[0]) test_df.drop(['PoolQC','Fence','MiscFeature'],axis=1,inplace=True) test_df.shape test_df.drop(['Id'],axis=1,inplace=True) test_df['MasVnrType']=test_df['MasVnrType'].fillna(test_df['MasVnrType'].mode()[0]) test_df['MasVnrArea']=test_df['MasVnrArea'].fillna(test_df['MasVnrArea'].mode()[0]) sns.heatmap(test_df.isnull(),yticklabels=False,cbar=False,cmap='viridis') test_df['BsmtExposure']=test_df['BsmtExposure'].fillna(test_df['BsmtExposure'].mode()[0]) sns.heatmap(test_df.isnull(),yticklabels=False,cbar=False,cmap='viridis') test_df['BsmtFinType2']=test_df['BsmtFinType2'].fillna(test_df['BsmtFinType2'].mode()[0]) test_df.loc[:, test_df.isnull().any()].head() test_df['Utilities']=test_df['Utilities'].fillna(test_df['Utilities'].mode()[0]) test_df['Exterior1st']=test_df['Exterior1st'].fillna(test_df['Exterior1st'].mode()[0]) test_df['Exterior2nd']=test_df['Exterior2nd'].fillna(test_df['Exterior2nd'].mode()[0]) test_df['BsmtFinType1']=test_df['BsmtFinType1'].fillna(test_df['BsmtFinType1'].mode()[0]) test_df['BsmtFinSF1']=test_df['BsmtFinSF1'].fillna(test_df['BsmtFinSF1'].mean()) test_df['BsmtFinSF2']=test_df['BsmtFinSF2'].fillna(test_df['BsmtFinSF2'].mean()) test_df['BsmtUnfSF']=test_df['BsmtUnfSF'].fillna(test_df['BsmtUnfSF'].mean()) test_df['TotalBsmtSF']=test_df['TotalBsmtSF'].fillna(test_df['TotalBsmtSF'].mean()) test_df['BsmtFullBath']=test_df['BsmtFullBath'].fillna(test_df['BsmtFullBath'].mode()[0]) test_df['BsmtHalfBath']=test_df['BsmtHalfBath'].fillna(test_df['BsmtHalfBath'].mode()[0]) test_df['KitchenQual']=test_df['KitchenQual'].fillna(test_df['KitchenQual'].mode()[0]) test_df['Functional']=test_df['Functional'].fillna(test_df['Functional'].mode()[0]) test_df['GarageCars']=test_df['GarageCars'].fillna(test_df['GarageCars'].mean()) test_df['GarageArea']=test_df['GarageArea'].fillna(test_df['GarageArea'].mean()) test_df['SaleType']=test_df['SaleType'].fillna(test_df['SaleType'].mode()[0]) test_df.shape test_df.to_csv('Modified Test.csv',index=False) ###Output _____no_output_____
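###Markdown The column-by-column imputation above can also be written more compactly. A possible refactoring — a sketch, not how the original notebook does it — is to loop over whichever columns still contain missing values and fill numeric columns with their mean and categorical columns with their mode, which is close to (though not identical to) the choices made by hand above. ###Code
import pandas as pd

def fill_missing(df: pd.DataFrame) -> pd.DataFrame:
    """Fill numeric columns with their mean and non-numeric columns with their mode."""
    df = df.copy()
    for col in df.columns[df.isnull().any()]:
        if pd.api.types.is_numeric_dtype(df[col]):
            df[col] = df[col].fillna(df[col].mean())
        else:
            df[col] = df[col].fillna(df[col].mode()[0])
    return df

# Example usage on the frame loaded earlier (after dropping the unwanted columns):
# test_df = fill_missing(test_df)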
src/Classification-NB-Model/.ipynb_checkpoints/Final-checkpoint.ipynb
###Markdown Content-based recommender for kickstarter projects ###Code # run this statement only once to install Rake !pip install rake_nltk !pip install nltk import numpy as np import pandas as pd from rake_nltk import Rake from sklearn.metrics.pairwise import cosine_similarity from sklearn.feature_extraction.text import CountVectorizer import re, nltk, gensim nltk.download('wordnet') from nltk.tokenize import ToktokTokenizer from nltk.stem import wordnet from nltk.corpus import stopwords from string import punctuation ###Output C:\Users\Raymond\anaconda3\lib\site-packages\gensim\similarities\__init__.py:15: UserWarning: The gensim.similarities.levenshtein submodule is disabled, because the optional Levenshtein package <https://pypi.org/project/python-Levenshtein/> is unavailable. Install Levenhstein (e.g. `pip install python-Levenshtein`) to suppress this warning. warnings.warn(msg) [nltk_data] Downloading package wordnet to [nltk_data] C:\Users\Raymond\AppData\Roaming\nltk_data... [nltk_data] Package wordnet is already up-to-date! ###Markdown Step 1: Read in and analyse the data ###Code import pandas as pd import glob path = r'./data/' # use your path all_files = glob.glob(path + "/*.csv") li = [] for filename in all_files: df = pd.read_csv(filename, index_col=None, header=0) li.append(df) frame = pd.concat(li, axis=0, ignore_index=True) # df = pd.read_csv('Kickstarter057.csv') df = frame df.head() def extract_cat(text): text = text.split(",") text = text[2] text = text.replace ("/", " ") text = text.replace ("name", "") text = text.replace ("slug", "") text = text.replace ('"', "") text = text.replace ('{', "") text = text.replace (':', "") text = text.lower() text = re.sub(r"\'\n", " ", text) text = re.sub(r"\'\xa0", " ", text) text = re.sub('\s+', ' ', text) # matches all whitespace characters text = text.strip(' ') return text df['category'] = df['category'].apply(lambda x: extract_cat(x)) df.head() df = df[['name','category','blurb']] df.head() import nltk from nltk.tokenize import RegexpTokenizer from nltk.stem import WordNetLemmatizer,PorterStemmer from nltk.corpus import stopwords import re lemmatizer = WordNetLemmatizer() stemmer = PorterStemmer() def preprocess(sentence): sentence=str(sentence) sentence = sentence.lower() sentence=sentence.replace('{html}',"") cleanr = re.compile('<.*?>') cleantext = re.sub(cleanr, '', sentence) rem_url=re.sub(r'http\S+', '',cleantext) rem_num = re.sub('[0-9]+', '', rem_url) tokenizer = RegexpTokenizer(r'\w+') tokens = tokenizer.tokenize(rem_num) filtered_words = [w for w in tokens if len(w) > 2 if not w in stopwords.words('english')] stem_words=[stemmer.stem(w) for w in filtered_words] lemma_words=[lemmatizer.lemmatize(w) for w in stem_words] return " ".join(filtered_words) df['blurb']=df['blurb'].map(lambda s:preprocess(s)) df.head() #Tokenize everything in the category dfcat = df['category'] dfcat.head num_dfcat = len(dfcat) #print(num_dfcat) from nltk.tokenize import word_tokenize dfcattok = dfcat.apply(word_tokenize) dfcattok.head() #for loop each one and collect the first value. This shows the main categories that we have derived from our own dataset. 
maincat_list = [] for x in dfcattok: if x[0] not in maincat_list: maincat_list.append(x[0]) print(maincat_list) #Load dataset import pandas as pd import glob #Dataset from nltk.corpus import PlaintextCorpusReader art = PlaintextCorpusReader('data/Train/Art', '.+\.txt') tech = PlaintextCorpusReader('data/Train/Tech', '.+\.txt') comics = PlaintextCorpusReader('data/Train/Comics', '.+\.txt') film = PlaintextCorpusReader('data/Train/Film', '.+\.txt') music = PlaintextCorpusReader('data/Train/Music', '.+\.txt') photography = PlaintextCorpusReader('data/Train/Photography', '.+\.txt') publishing = PlaintextCorpusReader('data/Train/Publishing', '.+\.txt') art_docs1 = [art.words(fid) for fid in art.fileids()] tech_docs1 = [tech.words(fid) for fid in tech.fileids()] comics_docs1 = [comics.words(fid) for fid in comics.fileids()] film_docs1 = [film.words(fid) for fid in film.fileids()] music_docs1 = [music.words(fid) for fid in music.fileids()] photography_docs1 = [photography.words(fid) for fid in photography.fileids()] publishing_docs1 = [publishing.words(fid) for fid in publishing.fileids()] print(art_docs1[0][0:20]) print(tech_docs1[0][0:20]) print(comics_docs1[0][0:20]) print(film_docs1[0][0:20]) print(music_docs1[0][0:20]) print(photography_docs1[0][0:20]) print(publishing_docs1[0][0:20]) ###Basically preprocessing date from dataset # Combine the categories of the corpus all_docs1 = art_docs1 + tech_docs1 + comics_docs1 + film_docs1 + music_docs1 + photography_docs1 + publishing_docs1 num_art_docs = len(art_docs1) num_2 = len(art_docs1) + len(tech_docs1) num_3 = num_2 + len(comics_docs1) num_4 = num_3 + len(film_docs1) num_5 = num_4 + len(music_docs1) num_6 = num_5 + len(photography_docs1) #For verifying the whether the output in dictionary is correct print(num_art_docs) print (len(tech_docs1)) print (len(comics_docs1)) print (len(film_docs1)) print (len(music_docs1)) print (len(photography_docs1)) # Processsing for stopwords, alphabetic words, Stemming all_docs2 = [[w.lower() for w in doc] for doc in all_docs1] import re all_docs3 = [[w for w in doc if re.search('^[a-z]+$',w)] for doc in all_docs2] from nltk.corpus import stopwords stop_list = stopwords.words('english') all_docs4 = [[w for w in doc if w not in stop_list] for doc in all_docs3] from nltk.stem.porter import * stemmer = PorterStemmer() all_docs5 = [[stemmer.stem(w) for w in doc] for doc in all_docs4] #Create dictionary from gensim import corpora dictionary = corpora.Dictionary(all_docs5) print(dictionary) # Convert all documents to TF Vectors all_tf_vectors = [dictionary.doc2bow(doc) for doc in all_docs5] #Label the trained data. Since the folder name is the label, I use the same labels. all_data_as_dict = [{id:1 for (id, tf_value) in vec} for vec in all_tf_vectors] print(type(all_data_as_dict)) #print(all_data_as_dict). The labels are generated by our own dataset and used here. art_data = [(d, 'art') for d in all_data_as_dict[0:num_art_docs]] #First document to number of art documents, which is 4. 
Document 0-4 tech_data = [(d, 'tech') for d in all_data_as_dict[num_art_docs:num_2]] comics_data = [(d, 'comics') for d in all_data_as_dict[num_2:num_3]] film_data = [(d, 'film') for d in all_data_as_dict[num_3:num_4]] music_data = [(d, 'music') for d in all_data_as_dict[num_4:num_5]] photography_data = [(d, 'photography') for d in all_data_as_dict[num_5:num_6]] publishing_data = [(d, 'publishing') for d in all_data_as_dict[num_6:]] all_labeled_data = art_data + tech_data + comics_data + film_data + music_data + photography_data + publishing_data #Generate the trained classifier classifier = nltk.NaiveBayesClassifier.train(all_labeled_data) test_doc = all_data_as_dict[200] #print(all_data_as_dict[0]) print(classifier.classify(test_doc)) ### Validate # Read the files in validate folder and preparing the validation corpus art_validation = PlaintextCorpusReader('data/Validate/Art', '.+\.txt') tech_validation = PlaintextCorpusReader('data/Validate/Tech', '.+\.txt') comics_validation = PlaintextCorpusReader('data/Validate/Comics', '.+\.txt') film_validation = PlaintextCorpusReader('data/Validate/Film', '.+\.txt') music_validation = PlaintextCorpusReader('data/Validate/Music', '.+\.txt') photography_validation = PlaintextCorpusReader('data/Validate/Photography', '.+\.txt') publishing_validation = PlaintextCorpusReader('data/Validate/Publishing', '.+\.txt') # Tokenization art_valid_docs1 = [art_validation.words(fid) for fid in art_validation.fileids()] tech_valid_docs1 = [tech_validation.words(fid) for fid in tech_validation.fileids()] comics_valid_docs1 = [comics_validation.words(fid) for fid in comics_validation.fileids()] film_valid_docs1 = [film_validation.words(fid) for fid in film_validation.fileids()] music_valid_docs1 = [music_validation.words(fid) for fid in music_validation.fileids()] photography_valid_docs1 = [photography_validation.words(fid) for fid in photography_validation.fileids()] publishing_valid_docs1 = [publishing_validation.words(fid) for fid in publishing_validation.fileids()] # Combine the two sets of documents for easy processing. all_valid_docs = art_valid_docs1 + tech_valid_docs1 + comics_valid_docs1 + film_valid_docs1 + music_valid_docs1 + photography_valid_docs1 + publishing_valid_docs1 # This number will be used to separate the two sets of documents later. num_art_valid_docs = len(art_valid_docs1) num_valid_2 = num_art_valid_docs + len(tech_valid_docs1) num_valid_3 = num_valid_2 + len(comics_valid_docs1) num_valid_4 = num_valid_3 + len(film_valid_docs1) num_valid_5 = num_valid_4 + len(music_valid_docs1) num_valid_6 = num_valid_5 + len(photography_valid_docs1) # Text pre-processing, including stop word removal, stemming, etc. all_valid_docs2 = [[w.lower() for w in doc] for doc in all_valid_docs] all_valid_docs3 = [[w for w in doc if re.search('^[a-z]+$',w)] for doc in all_valid_docs2] all_valid_docs4 = [[w for w in doc if w not in stop_list] for doc in all_valid_docs3] all_valid_docs5 = [[stemmer.stem(w) for w in doc] for doc in all_valid_docs4] # Note that we're using the dictionary created earlier. all_valid_tf_vectors = [dictionary.doc2bow(doc) for doc in all_valid_docs5] # Convert documents into dict representation. all_valid_data_as_dict = [{id:1 for (id, tf_value) in vec} for vec in all_valid_tf_vectors] # Separate the two sets of documents and add labels. 
art_valid_data_with_labels = [(d, 'art') for d in all_valid_data_as_dict[0:num_art_valid_docs]] tech_valid_data_with_labels = [(d, 'tech') for d in all_valid_data_as_dict[num_art_valid_docs:num_valid_2]] comics_valid_data_with_labels = [(d, 'comics') for d in all_valid_data_as_dict[num_valid_2:num_valid_3]] film_valid_data_with_labels = [(d, 'film') for d in all_valid_data_as_dict[num_valid_3:num_valid_4]] music_valid_data_with_labels = [(d, 'music') for d in all_valid_data_as_dict[num_valid_4:num_valid_5]] photography_valid_data_with_labels = [(d, 'photography') for d in all_valid_data_as_dict[num_valid_5:num_valid_6]] publishing_valid_data_with_labels = [(d, 'publishing') for d in all_valid_data_as_dict[num_valid_6:]] # Combine the labeled documents. all_valid_data_with_labels = art_valid_data_with_labels + tech_valid_data_with_labels + comics_valid_data_with_labels + film_valid_data_with_labels + music_valid_data_with_labels + photography_valid_data_with_labels + publishing_valid_data_with_labels print(nltk.classify.accuracy(classifier, all_valid_data_with_labels)) ###Output 0.9928571428571429 ###Markdown Mode Testing - Predicting labels for other documents ###Code #Read the text files test_corpus = PlaintextCorpusReader('data/Test', '.+\.txt') fids = test_corpus.fileids() # Tokenization test_docs1 = [test_corpus.words(fid) for fid in fids] # Text pre-processing, including stop word removal, stemming, etc. test_docs2 = [[w.lower() for w in doc] for doc in test_docs1] test_docs3 = [[w for w in doc if re.search('^[a-z]+$',w)] for doc in test_docs2] test_docs4 = [[w for w in doc if w not in stop_list] for doc in test_docs3] test_docs5 = [[stemmer.stem(w) for w in doc] for doc in test_docs4] # Note that we're using the dictionary created earlier to create TF vectors test_tf_vectors = [dictionary.doc2bow(doc) for doc in test_docs5] # Convert documents into dict representation. This is document-label representation test_data_as_dict = [{id:1 for (id, tf_value) in vec} for vec in test_tf_vectors] #For each file, classify and print the label. 
for i in range(len(fids)): print(fids[i], '-->', classifier.classify(test_data_as_dict[i])) ###Output art_2_000001.txt --> art art_2_000002.txt --> art art_2_000003.txt --> art art_2_000004.txt --> art art_2_000005.txt --> art art_2_000006.txt --> art art_2_000007.txt --> art art_2_000008.txt --> art art_2_000009.txt --> art art_2_000010.txt --> art comics_10_000001.txt --> comics comics_10_000002.txt --> publishing comics_10_000003.txt --> comics comics_10_000004.txt --> comics comics_10_000005.txt --> comics comics_10_000006.txt --> comics comics_10_000007.txt --> comics comics_10_000008.txt --> comics comics_10_000009.txt --> comics comics_10_000010.txt --> comics film_4_000001.txt --> film film_4_000002.txt --> film film_4_000003.txt --> film film_4_000004.txt --> film film_4_000005.txt --> film film_4_000006.txt --> film film_4_000007.txt --> film film_4_000008.txt --> film film_4_000009.txt --> film film_4_000010.txt --> film music_4_000001.txt --> music music_4_000002.txt --> music music_4_000003.txt --> music music_4_000004.txt --> music music_4_000005.txt --> music music_4_000006.txt --> music music_4_000007.txt --> music music_4_000008.txt --> music music_4_000009.txt --> music music_4_000010.txt --> music photography_9_000001.txt --> photography photography_9_000002.txt --> photography photography_9_000003.txt --> photography photography_9_000004.txt --> photography photography_9_000005.txt --> photography photography_9_000006.txt --> photography photography_9_000007.txt --> photography photography_9_000008.txt --> photography photography_9_000009.txt --> photography photography_9_000010.txt --> photography publishing_5_000001.txt --> publishing publishing_5_000002.txt --> publishing publishing_5_000003.txt --> publishing publishing_5_000004.txt --> publishing publishing_5_000005.txt --> publishing publishing_5_000006.txt --> publishing publishing_5_000007.txt --> publishing publishing_5_000008.txt --> publishing publishing_5_000009.txt --> publishing publishing_5_000010.txt --> publishing tech_4_000001.txt --> tech tech_4_000002.txt --> tech tech_4_000003.txt --> tech tech_4_000004.txt --> tech tech_4_000005.txt --> tech tech_4_000006.txt --> tech tech_4_000007.txt --> tech tech_4_000008.txt --> tech tech_4_000009.txt --> tech tech_4_000010.txt --> tech
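###Markdown One way to go beyond the single accuracy number reported on the validation set — an added illustration that reuses the `classifier` and `all_valid_data_with_labels` objects defined above — is to tabulate a per-class confusion matrix with NLTK, which shows which categories get mistaken for which. ###Code
from nltk.metrics import ConfusionMatrix

# Gold labels versus the classifier's predictions over the labeled validation data.
reference = [label for (_, label) in all_valid_data_with_labels]
predicted = [classifier.classify(features) for (features, _) in all_valid_data_with_labels]

cm = ConfusionMatrix(reference, predicted)
print(cm)  # rows are reference categories, columns are predicted categories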
experiments/life_1D/diffusion/diffusion_1.ipynb
###Markdown **An initial concentration pulse (near the left edge of the system) moving towards equilibrium**The system starts out with a "concentration pulse" in bin 2 (the 3rd bin from the left) - i.e. that bin is initially the only one with a non-zero concentration of the only chemical species.Then the system is left undisturbed, and followed to equilibrium.**OUTPUT (incl. graphics):** overwritten into the .htm file with the same base name. Visualized with heatmaps and line curves.LAST REVISED: June 13, 2022 ###Code import set_path set_path.add_ancestor_dir_to_syspath(3) # The number of levels to go up # to reach the project's home from the folder containing this notebook from life_1D.bio_sim_1d import BioSim1D as bio from experiments.get_notebook_info import get_notebook_basename from modules.chemicals.chemicals import Chemicals as chem from modules.html_log.html_log import HtmlLog as log from modules.visualization.graphic_log import GraphicLog # Initialize the HTML logging log_file = get_notebook_basename() + ".htm" # Use the notebook base filename for the log file GraphicLog.config(filename=log_file, components=["vue_heatmap_11", "vue_curves_3"], home_rel_path="../../..") # relative path is from the location of THE LOG FILE to the project's home # Prepare the initial system chem_data = chem(diffusion_rates=[0.1]) bio.initialize_system(n_bins=10, chem_data=chem_data) log.write("1-D diffusion to equilibrium of a single species, with Diffusion rate 0.1", style=log.h2) bio.set_uniform_concentration(species_index=0, conc=0.) bio.inject_conc_to_bin(bin=2, species_index=0, delta_conc=10.) bio.describe_state(show_diffusion_rates=True) # Set the heatmap parameters heatmap_pars = {"range": [0, 2.5], "outer_width": 850, "outer_height": 150, "margins": {"top": 30, "right": 30, "bottom": 30, "left": 55} } # Set the parameters of the line plots lineplot_pars = {"range": [0, 10], "outer_width": 850, "outer_height": 250, "margins": {"top": 30, "right": 30, "bottom": 30, "left": 55} } log.write("Initial system state at time t=0:", blanks_before=2, style=log.bold) # Output a heatmap to the log file bio.single_species_heatmap(species_index=0, heatmap_pars=heatmap_pars, graphic_component="vue_heatmap_11") # Output a line plot the log file bio.single_species_line_plot(species_index=0, plot_pars=lineplot_pars, graphic_component="vue_curves_3") log.write("Advancing to time t=10, with time steps of 0.1 ... ", blanks_before=2, newline=False) total_time = 0. delta_time = 10. status = bio.diffuse(time_duration=delta_time, time_step=0.1) print("\n", status) total_time += delta_time log.write(f"After delta time {delta_time}. TOTAL TIME {total_time} ({status['steps']} steps taken):") bio.describe_state(concise=True) # Output a heatmap into the log file bio.single_species_heatmap(species_index=0, heatmap_pars=heatmap_pars, graphic_component="vue_heatmap_11") # Output a line plot the log file bio.single_species_line_plot(species_index=0, plot_pars=lineplot_pars, graphic_component="vue_curves_3") ###Output _____no_output_____ ###Markdown _Note: this is still an early stage in the diffusion process; let's advance it more... (Results at selected times plotted to log file)_--- ###Code for i in range(50): status = bio.diffuse(time_duration=delta_time, time_step=0.1) total_time += delta_time print(f"\nAfter Delta time {delta_time}. 
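###Markdown For intuition about what a single diffusion step is doing, here is a standalone sketch of the standard explicit finite-difference update for 1-D diffusion, where each bin's concentration changes in proportion to the discrete Laplacian of its neighbours. This is only an illustration of the general scheme and is not taken from the BioSim1D source, so it may differ in detail from what `bio.diffuse` actually implements. ###Code
import numpy as np

def diffuse_step(conc: np.ndarray, diff_rate: float, dt: float) -> np.ndarray:
    """One explicit Euler step of 1-D diffusion with closed (zero-flux) boundaries."""
    padded = np.pad(conc, 1, mode="edge")            # reflective ends: no mass leaves the system
    laplacian = padded[:-2] - 2 * conc + padded[2:]  # discrete second difference per bin
    return conc + diff_rate * dt * laplacian

c = np.zeros(10)
c[2] = 10.0                      # initial pulse in bin 2, as in the system above
for _ in range(100):             # 100 steps of 0.1 give a total time of 10
    c = diffuse_step(c, diff_rate=0.1, dt=0.1)

print(c.round(3))                # the pulse has started to spread toward the right
print(c.sum())                   # total mass stays at 10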
TOTAL TIME {total_time} ({status['steps']} steps taken):") bio.describe_state(concise=True) if i<2 or i==6 or i>=49: # Output a heatmap to the log file bio.single_species_heatmap(species_index=0, heatmap_pars=heatmap_pars, header=f"Time {total_time} :\n", graphic_component="vue_heatmap_11") # Output a line plot the log file bio.single_species_line_plot(species_index=0, plot_pars=lineplot_pars, graphic_component="vue_curves_3") ###Output After Delta time 10.0. TOTAL TIME 20.0 (100 steps taken): [[1.79154498 2.04604996 2.15752876 1.81408657 1.18572897 0.61493163 0.26031377 0.09234937 0.02835038 0.00911562]] Time 20.0 : After Delta time 10.0. TOTAL TIME 30.0 (100 steps taken): [[1.908894 1.93254508 1.86205856 1.60230147 1.1912129 0.75904212 0.41665574 0.19951697 0.08641213 0.04136102]] Time 30.0 : After Delta time 10.0. TOTAL TIME 40.0 (100 steps taken): [[1.89162641 1.84625985 1.72030668 1.48693078 1.1664388 0.82118645 0.51779486 0.29499154 0.15846579 0.09599884]] After Delta time 10.0. TOTAL TIME 50.0 (100 steps taken): [[1.83433746 1.76930884 1.63070397 1.41626323 1.14422761 0.85269498 0.58491221 0.37309998 0.2318666 0.16258513]] After Delta time 10.0. TOTAL TIME 60.0 (100 steps taken): [[1.76697624 1.69877596 1.56309588 1.36661912 1.12701103 0.87222369 0.63367319 0.43785006 0.30151635 0.23225847]] After Delta time 10.0. TOTAL TIME 70.0 (100 steps taken): [[1.69980096 1.63420894 1.50697719 1.32775575 1.11331789 0.8864918 0.67231694 0.49325804 0.36586375 0.30000873]] After Delta time 10.0. TOTAL TIME 80.0 (100 steps taken): [[1.63637931 1.57533401 1.45817295 1.29515888 1.10189171 0.89806096 0.7048592 0.54188555 0.42468407 0.36357336]] Time 80.0 : After Delta time 10.0. TOTAL TIME 90.0 (100 steps taken): [[1.57781285 1.52179206 1.41478512 1.26674457 1.09200065 0.90798758 0.73325992 0.58522943 0.47821243 0.42217538]] After Delta time 10.0. TOTAL TIME 100.0 (100 steps taken): [[1.5242553 1.47317189 1.37581121 1.2414767 1.08324636 0.91675072 0.75852442 0.6241924 0.52682923 0.47574177]] After Delta time 10.0. TOTAL TIME 110.0 (100 steps taken): [[1.47549769 1.42905472 1.34063054 1.21878187 1.07540452 0.92459475 0.78121841 0.65937036 0.57094556 0.52450158]] After Delta time 10.0. TOTAL TIME 120.0 (100 steps taken): [[1.43120344 1.38903875 1.30879999 1.19829836 1.06833665 0.93166317 0.80170171 0.69120023 0.61096132 0.56879638]] After Delta time 10.0. TOTAL TIME 130.0 (100 steps taken): [[1.39100434 1.35274953 1.27996855 1.17976686 1.06194682 0.93805314 0.82023316 0.72003151 0.64725048 0.60899562]] After Delta time 10.0. TOTAL TIME 140.0 (100 steps taken): [[1.35453928 1.31984299 1.25383973 1.16298212 1.05616129 0.94383869 0.83701788 0.74616029 0.68015701 0.6454607 ]] After Delta time 10.0. TOTAL TIME 150.0 (100 steps taken): [[1.32146906 1.29000514 1.23015414 1.14777111 1.0509191 0.9490809 0.8522289 0.76984587 0.70999486 0.67853094]] After Delta time 10.0. TOTAL TIME 160.0 (100 steps taken): [[1.29148094 1.26295037 1.20868068 1.13398258 1.04616753 0.95383247 0.86601742 0.79131932 0.73704963 0.70851906]] After Delta time 10.0. TOTAL TIME 170.0 (100 steps taken): [[1.26428912 1.23841937 1.18921159 1.1214819 1.04185994 0.95814006 0.8785181 0.81078841 0.76158063 0.73571088]] After Delta time 10.0. TOTAL TIME 180.0 (100 steps taken): [[1.2396335 1.21617681 1.17155929 1.1101481 1.0379545 0.9620455 0.8898519 0.82844071 0.78382319 0.7603665 ]] After Delta time 10.0. 
TOTAL TIME 190.0 (100 steps taken): [[1.21727779 1.19600926 1.15555401 1.09987193 1.03441355 0.96558645 0.90012807 0.84444599 0.80399074 0.78272221]] After Delta time 10.0. TOTAL TIME 200.0 (100 steps taken): [[1.19700758 1.17772317 1.14104198 1.09055457 1.03120299 0.96879701 0.90944543 0.85895802 0.82227683 0.80299242]] After Delta time 10.0. TOTAL TIME 210.0 (100 steps taken): [[1.17862837 1.16114301 1.12788385 1.08210651 1.02829198 0.97170802 0.91789349 0.87211615 0.83885699 0.82137163]] After Delta time 10.0. TOTAL TIME 220.0 (100 steps taken): [[1.16196378 1.14610965 1.11595329 1.0744466 1.02565255 0.97434745 0.9255534 0.88404671 0.85389035 0.83803622]] After Delta time 10.0. TOTAL TIME 230.0 (100 steps taken): [[1.14685385 1.13247878 1.10513576 1.06750132 1.02325937 0.97674063 0.93249868 0.89486424 0.86752122 0.85314615]] After Delta time 10.0. TOTAL TIME 240.0 (100 steps taken): [[1.13315355 1.12011956 1.09532742 1.06120397 1.02108945 0.97891055 0.93879603 0.90467258 0.87988044 0.86684645]] After Delta time 10.0. TOTAL TIME 250.0 (100 steps taken): [[1.12073138 1.10891335 1.08643413 1.05549413 1.01912197 0.98087803 0.94450587 0.91356587 0.89108665 0.87926862]] After Delta time 10.0. TOTAL TIME 260.0 (100 steps taken): [[1.1094681 1.0987526 1.07837051 1.05031696 1.01733804 0.98266196 0.94968304 0.92162949 0.9012474 0.8905319 ]] After Delta time 10.0. TOTAL TIME 270.0 (100 steps taken): [[1.09925559 1.08953976 1.07105916 1.04562279 1.01572054 0.98427946 0.95437721 0.92894084 0.91046024 0.90074441]] After Delta time 10.0. TOTAL TIME 280.0 (100 steps taken): [[1.08999583 1.08118641 1.0644299 1.04136654 1.01425394 0.98574606 0.95863346 0.9355701 0.91881359 0.91000417]] After Delta time 10.0. TOTAL TIME 290.0 (100 steps taken): [[1.08159993 1.07361236 1.0584191 1.03750737 1.01292416 0.98707584 0.96249263 0.9415809 0.92638764 0.91840007]] After Delta time 10.0. TOTAL TIME 300.0 (100 steps taken): [[1.07398731 1.06674491 1.05296906 1.03400823 1.01171844 0.98828156 0.96599177 0.94703094 0.93325509 0.92601269]] After Delta time 10.0. TOTAL TIME 310.0 (100 steps taken): [[1.06708488 1.06051814 1.04802747 1.03083553 1.0106252 0.9893748 0.96916447 0.95197253 0.93948186 0.93291512]] After Delta time 10.0. TOTAL TIME 320.0 (100 steps taken): [[1.06082639 1.05487228 1.04354689 1.02795882 1.00963395 0.99036605 0.97204118 0.95645311 0.94512772 0.93917361]] After Delta time 10.0. TOTAL TIME 330.0 (100 steps taken): [[1.05515177 1.04975313 1.03948431 1.02535049 1.00873518 0.99126482 0.97464951 0.96051569 0.95024687 0.94484823]] After Delta time 10.0. TOTAL TIME 340.0 (100 steps taken): [[1.05000655 1.04511156 1.03580073 1.02298549 1.00792026 0.99207974 0.97701451 0.96419927 0.95488844 0.94999345]] After Delta time 10.0. TOTAL TIME 350.0 (100 steps taken): [[1.04534133 1.04090301 1.03246081 1.02084112 1.00718136 0.99281864 0.97915888 0.96753919 0.95909699 0.95465867]] After Delta time 10.0. TOTAL TIME 360.0 (100 steps taken): [[1.04111134 1.03708708 1.02943247 1.01889681 1.0065114 0.9934886 0.98110319 0.97056753 0.96291292 0.95888866]] After Delta time 10.0. TOTAL TIME 370.0 (100 steps taken): [[1.03727598 1.03362715 1.02668666 1.01713389 1.00590394 0.99409606 0.98286611 0.97331334 0.96637285 0.96272402]] After Delta time 10.0. TOTAL TIME 380.0 (100 steps taken): [[1.03379843 1.03049 1.024197 1.01553543 1.00535315 0.99464685 0.98446457 0.975803 0.96951 0.96620157]] After Delta time 10.0. 
TOTAL TIME 390.0 (100 steps taken): [[1.0306453 1.02764553 1.02193961 1.0140861 1.00485374 0.99514626 0.9859139 0.97806039 0.97235447 0.9693547 ]] After Delta time 10.0. TOTAL TIME 400.0 (100 steps taken): [[1.02778634 1.02506642 1.01989282 1.01277198 1.00440092 0.99559908 0.98722802 0.98010718 0.97493358 0.97221366]] After Delta time 10.0. TOTAL TIME 410.0 (100 steps taken): [[1.02519409 1.02272792 1.01803698 1.01158045 1.00399035 0.99600965 0.98841955 0.98196302 0.97727208 0.97480591]] After Delta time 10.0. TOTAL TIME 420.0 (100 steps taken): [[1.02284368 1.02060758 1.01635427 1.01050009 1.00361808 0.99638192 0.98949991 0.98364573 0.97939242 0.97715632]] After Delta time 10.0. TOTAL TIME 430.0 (100 steps taken): [[1.02071255 1.01868506 1.01482855 1.00952051 1.00328055 0.99671945 0.99047949 0.98517145 0.98131494 0.97928745]] After Delta time 10.0. TOTAL TIME 440.0 (100 steps taken): [[1.01878023 1.01694189 1.01344516 1.00863233 1.0029745 0.9970255 0.99136767 0.98655484 0.98305811 0.98121977]] After Delta time 10.0. TOTAL TIME 450.0 (100 steps taken): [[1.01702819 1.01536135 1.01219083 1.007827 1.002697 0.997303 0.992173 0.98780917 0.98463865 0.98297181]] After Delta time 10.0. TOTAL TIME 460.0 (100 steps taken): [[1.01543959 1.01392826 1.01105353 1.0070968 1.00244539 0.99755461 0.9929032 0.98894647 0.98607174 0.98456041]] After Delta time 10.0. TOTAL TIME 470.0 (100 steps taken): [[1.0139992 1.01262886 1.01002232 1.00643473 1.00221726 0.99778274 0.99356527 0.98997768 0.98737114 0.9860008 ]] After Delta time 10.0. TOTAL TIME 480.0 (100 steps taken): [[1.01269318 1.01145069 1.00908732 1.00583442 1.0020104 0.9979896 0.99416558 0.99091268 0.98854931 0.98730682]] After Delta time 10.0. TOTAL TIME 490.0 (100 steps taken): [[1.01150901 1.01038243 1.00823954 1.00529011 1.00182285 0.99817715 0.99470989 0.99176046 0.98961757 0.98849099]] After Delta time 10.0. TOTAL TIME 500.0 (100 steps taken): [[1.01043531 1.00941383 1.00747086 1.00479659 1.00165279 0.99834721 0.99520341 0.99252914 0.99058617 0.98956469]] After Delta time 10.0. TOTAL TIME 510.0 (100 steps taken): [[1.00946178 1.00853559 1.00677389 1.0043491 1.0014986 0.9985014 0.9956509 0.99322611 0.99146441 0.99053822]] Time 510.0 :
Boda/parallel models/XGB.ipynb
###Markdown Prepare the data ###Code def prepare(df): y = df.label.tolist() X = np.matrix(df.drop(labels = ['label'], axis = 1)).astype(np.float) return X, y ###Output _____no_output_____ ###Markdown XGB ###Code from xgboost import XGBClassifier import xgboost as xgb trainPath = ['/scratch/by8jj/stratified samples/test of test/train/train_11.csv', '/scratch/by8jj/stratified samples/test of test/train/train_12.csv', '/scratch/by8jj/stratified samples/test of test/train/train_13.csv', '/scratch/by8jj/stratified samples/test of test/train/train_14.csv', '/scratch/by8jj/stratified samples/test of test/train/train_15.csv', '/scratch/by8jj/stratified samples/test of test/train/train_16.csv', '/scratch/by8jj/stratified samples/test of test/train/train_17.csv', '/scratch/by8jj/stratified samples/test of test/train/train_18.csv', '/scratch/by8jj/stratified samples/test of test/train/train_19.csv', '/scratch/by8jj/stratified samples/test of test/train/train_20.csv', '/scratch/by8jj/stratified samples/test of test/train/train_21.csv', ] testPath = ['/scratch/by8jj/stratified samples/test of test/test/test_12.csv', '/scratch/by8jj/stratified samples/test of test/test/test_13.csv', '/scratch/by8jj/stratified samples/test of test/test/test_14.csv', '/scratch/by8jj/stratified samples/test of test/test/test_15.csv', '/scratch/by8jj/stratified samples/test of test/test/test_16.csv', '/scratch/by8jj/stratified samples/test of test/test/test_17.csv', '/scratch/by8jj/stratified samples/test of test/test/test_18.csv', '/scratch/by8jj/stratified samples/test of test/test/test_19.csv', '/scratch/by8jj/stratified samples/test of test/test/test_20.csv', '/scratch/by8jj/stratified samples/test of test/test/test_21.csv', '/scratch/by8jj/stratified samples/test of test/test/test_22.csv', ] for train, test in zip(trainPath, testPath): #drop different port numbers df_train = pd.read_csv(train) temp1 = set(df_train.columns) #for test in testPath: df_test = pd.read_csv(test) temp2 = set(df_test.columns) df_train = df_train.drop(list(temp1 - temp2), axis = 1) df_test = df_test.drop(list(temp2 - temp1), axis = 1) X, y = prepare(df_train) model = XGBClassifier(silent=False, scale_pos_weight=1, learning_rate=0.01, colsample_bytree = 0.4, subsample = 0.8, objective='binary:logistic', n_estimators=150, reg_alpha = 0.3, max_depth=5, verbosity = 0, n_jobs = 8) X_A, X_B, Y_A, Y_B = train_test_split(X, y, test_size=0.33) eval_set = [(X_A, Y_A), (X_B, Y_B)] eval_metric = ["auc","error"] model.fit(X_A, Y_A, eval_metric=eval_metric, eval_set=eval_set) X_test, y_test = prepare(df_test) y_pred = model.predict_proba(X_test) temp = [1 if x[1]>0.8 else 0 for x in y_pred] cm= confusion_matrix(y_test, temp) tn, fp, fn, tp = cm.ravel() precision=tp/(tp+fp) recall=tp/(tp+fn) fpr = fp/(fp+ tn) accuracy = (tp + tn)/(tn + tp + fn + fp) F1 = 2 * (precision * recall) / (precision + recall) print("precision:", precision*100) print("recall:", recall*100) print("false positive rate:", fpr*100) print("accuracy", accuracy*100) print("F1-score", F1) pd.DataFrame(y_pred).to_csv('/scratch/by8jj/stratified samples/test of test/xgb/'+ train[-12:-4] + '-' + test[-11:-4] + '.csv', index = False) ###Output [23:20:06] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 36 extra nodes, 0 pruned nodes, max_depth=5 [0] validation_0-auc:0.925686 validation_0-error:0.096602 validation_1-auc:0.925962 validation_1-error:0.096693 [23:20:06] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 40 extra nodes, 0 pruned nodes, max_depth=5 
[1] validation_0-auc:0.934292 validation_0-error:0.081063 validation_1-auc:0.934535 validation_1-error:0.081167 [23:20:07] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 38 extra nodes, 0 pruned nodes, max_depth=5 [2] validation_0-auc:0.938634 validation_0-error:0.095006 validation_1-auc:0.938865 validation_1-error:0.095074 [23:20:07] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 42 extra nodes, 0 pruned nodes, max_depth=5 [3] validation_0-auc:0.9478 validation_0-error:0.095098 validation_1-auc:0.948053 validation_1-error:0.095173 [23:20:08] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 44 extra nodes, 0 pruned nodes, max_depth=5 [4] validation_0-auc:0.967353 validation_0-error:0.076425 validation_1-auc:0.967574 validation_1-error:0.076576 [23:20:09] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 28 extra nodes, 0 pruned nodes, max_depth=5 [5] validation_0-auc:0.967487 validation_0-error:0.090736 validation_1-auc:0.967696 validation_1-error:0.09083 [23:20:09] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 52 extra nodes, 0 pruned nodes, max_depth=5 [6] validation_0-auc:0.976446 validation_0-error:0.075292 validation_1-auc:0.976632 validation_1-error:0.075367 [23:20:10] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 42 extra nodes, 0 pruned nodes, max_depth=5 [7] validation_0-auc:0.974672 validation_0-error:0.076687 validation_1-auc:0.974873 validation_1-error:0.076705 [23:20:10] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 44 extra nodes, 0 pruned nodes, max_depth=5 [8] validation_0-auc:0.974778 validation_0-error:0.053415 validation_1-auc:0.974985 validation_1-error:0.053468 [23:20:11] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 42 extra nodes, 0 pruned nodes, max_depth=5 [9] validation_0-auc:0.975511 validation_0-error:0.053382 validation_1-auc:0.975714 validation_1-error:0.053453 [23:20:11] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 42 extra nodes, 0 pruned nodes, max_depth=5 [10] validation_0-auc:0.975515 validation_0-error:0.053969 validation_1-auc:0.975714 validation_1-error:0.05403 [23:20:12] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 50 extra nodes, 0 pruned nodes, max_depth=5 [11] validation_0-auc:0.97675 validation_0-error:0.053432 validation_1-auc:0.976923 validation_1-error:0.05348 [23:20:12] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 50 extra nodes, 0 pruned nodes, max_depth=5 [12] validation_0-auc:0.976198 validation_0-error:0.056078 validation_1-auc:0.976396 validation_1-error:0.056146 [23:20:13] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 46 extra nodes, 0 pruned nodes, max_depth=5 [13] validation_0-auc:0.976306 validation_0-error:0.055506 validation_1-auc:0.976498 validation_1-error:0.055587 [23:20:13] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 40 extra nodes, 0 pruned nodes, max_depth=5 [14] validation_0-auc:0.976529 validation_0-error:0.052893 validation_1-auc:0.976718 validation_1-error:0.052985 [23:20:14] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 48 extra nodes, 0 pruned nodes, max_depth=5 [15] validation_0-auc:0.97658 validation_0-error:0.055454 validation_1-auc:0.976772 validation_1-error:0.055537 [23:20:14] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 42 extra nodes, 0 pruned nodes, max_depth=5 [16] validation_0-auc:0.976151 
validation_0-error:0.056057 validation_1-auc:0.976335 validation_1-error:0.056148 [23:20:15] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 42 extra nodes, 0 pruned nodes, max_depth=5 [17] validation_0-auc:0.976693 validation_0-error:0.052623 validation_1-auc:0.976884 validation_1-error:0.052653 [23:20:16] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 46 extra nodes, 0 pruned nodes, max_depth=5 [18] validation_0-auc:0.976801 validation_0-error:0.053186 validation_1-auc:0.97699 validation_1-error:0.053224 [23:20:16] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 48 extra nodes, 0 pruned nodes, max_depth=5 [19] validation_0-auc:0.976795 validation_0-error:0.055974 validation_1-auc:0.97699 validation_1-error:0.056001 [23:20:17] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 44 extra nodes, 0 pruned nodes, max_depth=5 [20] validation_0-auc:0.976835 validation_0-error:0.056134 validation_1-auc:0.977024 validation_1-error:0.05619 [23:20:17] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 42 extra nodes, 0 pruned nodes, max_depth=5 [21] validation_0-auc:0.97688 validation_0-error:0.055708 validation_1-auc:0.977071 validation_1-error:0.055738 [23:20:18] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 46 extra nodes, 0 pruned nodes, max_depth=5 [22] validation_0-auc:0.97726 validation_0-error:0.052704 validation_1-auc:0.97744 validation_1-error:0.052734 [23:20:18] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 42 extra nodes, 0 pruned nodes, max_depth=5 [23] validation_0-auc:0.977269 validation_0-error:0.052177 validation_1-auc:0.977448 validation_1-error:0.052165 [23:20:19] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 54 extra nodes, 0 pruned nodes, max_depth=5 [24] validation_0-auc:0.977201 validation_0-error:0.052663 validation_1-auc:0.977384 validation_1-error:0.052686 [23:20:19] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 32 extra nodes, 0 pruned nodes, max_depth=5 [25] validation_0-auc:0.977362 validation_0-error:0.053072 validation_1-auc:0.977538 validation_1-error:0.053014 [23:20:20] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 48 extra nodes, 0 pruned nodes, max_depth=5 [26] validation_0-auc:0.977383 validation_0-error:0.052077 validation_1-auc:0.977558 validation_1-error:0.052058 [23:20:20] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 40 extra nodes, 0 pruned nodes, max_depth=5 [27] validation_0-auc:0.977364 validation_0-error:0.051732 validation_1-auc:0.977539 validation_1-error:0.05171 [23:20:21] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 46 extra nodes, 0 pruned nodes, max_depth=5 [28] validation_0-auc:0.977161 validation_0-error:0.052382 validation_1-auc:0.977333 validation_1-error:0.052364 [23:20:21] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 44 extra nodes, 0 pruned nodes, max_depth=5 [29] validation_0-auc:0.977799 validation_0-error:0.051805 validation_1-auc:0.977976 validation_1-error:0.051752 [23:20:22] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 48 extra nodes, 0 pruned nodes, max_depth=5 [30] validation_0-auc:0.977875 validation_0-error:0.052074 validation_1-auc:0.978052 validation_1-error:0.052122 [23:20:22] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 48 extra nodes, 0 pruned nodes, max_depth=5 [31] validation_0-auc:0.977872 
validation_0-error:0.05197 validation_1-auc:0.978048 validation_1-error:0.051885 [23:20:23] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 40 extra nodes, 0 pruned nodes, max_depth=5 [32] validation_0-auc:0.977886 validation_0-error:0.051541 validation_1-auc:0.97806 validation_1-error:0.051556 [23:20:23] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 36 extra nodes, 0 pruned nodes, max_depth=5 [33] validation_0-auc:0.977537 validation_0-error:0.05149 validation_1-auc:0.97771 validation_1-error:0.051502 [23:20:24] /workspace/src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 50 extra nodes, 0 pruned nodes, max_depth=5 [34] validation_0-auc:0.977582 validation_0-error:0.051667 validation_1-auc:0.977754 validation_1-error:0.051657
research/best_models_tuning.ipynb
###Markdown Cross validation in the context of time series

For time-series data we cannot use the classical cross-validation tools, simply because the ordering of the dataset matters a great deal. A standard cross-validation procedure would create a lot of data leakage, since it would use data from the future to predict the past. This is why we need a specific scheme such as the one above.

Scikit-learn's TimeSeriesSplit class provides train/test indices to split time-series samples. In each split, the test indices must be higher than before, so shuffling inside the cross-validator is inappropriate.

This cross-validation object is a variation of KFold: in the k-th split, it returns the first k folds as the training set and the (k+1)-th fold as the test set.

Note that, unlike standard cross-validation methods, successive training sets are supersets of the ones that precede them. For a TimeSeriesSplit(n_splits=5) instance we therefore get the following cross-validation scheme (the short sketch at the end of this cell reproduces it):
- TRAIN: [0] TEST: [1]
- TRAIN: [0 1] TEST: [2]
- TRAIN: [0 1 2] TEST: [3]
- TRAIN: [0 1 2 3] TEST: [4]
- TRAIN: [0 1 2 3 4] TEST: [5]

Hyperparameter tuning: GS vs RS vs BO

Grid search is not used very often in practice, because the number of models to train grows exponentially with the number of hyperparameters being tuned. This can be very inefficient, both in compute and in time.

Random search differs in that we no longer provide an explicit set of values for each hyperparameter but a statistical distribution for each hyperparameter from which the values are sampled. Essentially, we define a sampling distribution for each hyperparameter in order to perform a random search (exponential, lognormal, normal, uniform, loguniform...).

In both of the previous methods we run individual experiments, building several models with different hyperparameter values, and all of these experiments are independent of one another. Because each experiment is run independently, we cannot use the information from one experiment to improve the next one.

By contrast, Bayesian optimisation is a sequential model-based optimisation algorithm that uses the results of the previous iteration to decide on the next candidate hyperparameter values. Instead of blindly searching the hyperparameter space, this method uses the previous experiments to choose the next set of hyperparameters that should improve the model's performance, and the process is repeated iteratively until it converges to an optimum.

Finally, Tree-structured Parzen Estimator (TPE) optimisation is similar to Bayesian optimisation. Instead of modelling p(y|x), where y is the function to minimise and x is the hyperparameter value, TPE models P(x|y) and P(y). One of the big drawbacks of tree-structured Parzen estimators is that they do not model interactions between the hyperparameters. That said, TPE works extremely well in practice and has been tested in most domains.
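Before moving on, the TimeSeriesSplit behaviour listed above can be checked with a tiny snippet. This is an illustrative sketch and not part of the original notebook; the six-sample toy array is an assumption chosen only so that `n_splits=5` yields exactly the folds shown above.

```python
import numpy as np
from sklearn.model_selection import TimeSeriesSplit

# Six dummy samples so that n_splits=5 gives test folds [1], [2], ..., [5].
X_toy = np.arange(6).reshape(-1, 1)

tscv = TimeSeriesSplit(n_splits=5)
for train_idx, test_idx in tscv.split(X_toy):
    # Each training window is a superset of the previous one and the
    # test fold always lies strictly in the "future".
    print("TRAIN:", train_idx, "TEST:", test_idx)
```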
Here, for reasons of computational budget but also of performance, we use Bayesian optimisation. ###Code
from sklearn.model_selection import TimeSeriesSplit, cross_val_score
from sklearn.model_selection import learning_curve
from xgboost import XGBRegressor
import pandas as pd
import numpy as np
import pickle

train = pd.read_csv('../data/train.csv', index_col='date')
test = pd.read_csv('../data/test.csv', index_col='date')

y_train = train.reel
X_train = train.drop(['reel'], axis=1)
y_test = test.reel
X_test = test.drop(['reel'], axis=1)

X_train.columns

# perform time series split
cv_ts = TimeSeriesSplit(n_splits=2)
###Output _____no_output_____ ###Markdown Optimisation means finding the input value, or set of input values, of an objective function that yields the lowest output value, called the "loss". In machine learning the objective function is generally multidimensional, because it takes a whole set of model hyperparameters as input. For models that take several days to train, we want a way to limit the number of calls to the evaluation function. Random search is actually more efficient than grid search for high-dimensional problems, but it is still an uninformed method in which the search does not use previous results to choose the next input values to try. Bayesian optimisation, also called Sequential Model-Based Optimisation (SMBO), implements this idea of learning from experience by building a probability model of the objective function that maps input values to a probability of loss: p(loss | input values). The probability model, also called the surrogate, is easier to optimise than the true objective function. The idea is to limit the number of evaluations of the objective function by spending more time choosing the next values to try.

Common choices of surrogate include Gaussian processes, Random Forest regression and, the choice made in Hyperopt, the Tree-structured Parzen Estimator (TPE).

Formulating an optimisation problem in Hyperopt requires four parts:
- Objective function: takes an input and returns a loss to minimise.
- Domain space: the range of input values to evaluate.
- Optimisation algorithm: the method used to build the surrogate function and choose the next values to evaluate.
- Results: the score/value pairs that the algorithm uses to build the model. ###Code
# BayesianOptimization is provided by the bayes_opt package
from bayes_opt import BayesianOptimization

# Optimization objective
def cv_score(max_depth, n_estimators, eta, gamma):
    params = {'eval_metric': 'rmse',
              'max_depth': int(max_depth),
              'n_estimators': int(n_estimators),
              'eta': eta,
              'gamma': gamma}
    # scoring and cv are arguments of cross_val_score, not XGBRegressor parameters
    score = cross_val_score(XGBRegressor(**params, n_jobs=-1),
                            X_train, y_train,
                            scoring='neg_mean_squared_error',
                            cv=cv_ts, n_jobs=-1).mean()
    return np.array(score)

optimizer = BayesianOptimization(cv_score, {'max_depth': (3, 7),
                                            'eta': (0, 0.3),
                                            'n_estimators': (50, 500),
                                            'gamma': (0, 1)})

# Use expected improvement acquisition function to handle negative numbers
# needs quite a few more initiation points and number of iterations
optimizer.maximize(init_points=3, n_iter=10, acq='ei')
###Output | iter | target | eta | gamma | max_depth | n_esti...
| ------------------------------------------------------------------------- |  1  |  0.956  |  0.2614  |  0.06776  |  4.863  |  368.8  | |  2  |  0.9637  |  0.06828  |  0.5474  |  4.333  |  195.4  | |  3  |  0.9633  |  0.1491  |  0.8501  |  3.045  |  226.4  | |  4  |  0.9634  |  0.2251  |  0.2347  |  3.065  |  78.04  | |  5  |  0.964  |  0.08854  |  0.8799  |  3.064  |  136.3  | |  6  |  0.9567  |  0.2495  |  0.7831  |  6.992  |  153.4  | |  7  |  0.9499  |  0.2693  |  0.1507  |  5.551  |  468.3  | |  8  |  0.9609  |  0.2052  |  0.4348  |  4.065  |  225.3  | |  9  |  0.961  |  0.2613  |  0.486  |  3.084  |  229.8  | |  10  |  0.9633  |  0.1178  |  0.9844  |  4.545  |  133.8  | |  11  |  0.9621  |  0.06833  |  0.4562  |  6.117  |  137.7  | |  12  |  0.9588  |  0.1339  |  0.03721  |  6.207  |  192.0  | |  13  |  0.9625  |  0.1759  |  0.6659  |  3.274  |  198.1  | ========================================================================= ###Markdown Comparons maintenant l'optimisation d'XGBoost par random search et par bayesian search.La meilleur itération est la suivante : | 5 | 0.964 | 0.08854 | 0.8799 | 3.064 | 136.3 | ###Code xgb = XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1, colsample_bynode=1, colsample_bytree=1.0, gamma=0.8799, gpu_id=-1, importance_type='gain', interaction_constraints='', learning_rate=0.08854, max_delta_step=0, max_depth=3, min_child_weight=4, monotone_constraints='()', n_estimators=136, n_jobs=8, num_parallel_tree=1, random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, subsample=1, tree_method='exact', validate_parameters=1, verbosity=None) xgb.fit(X_train, y_train) from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error def print_metrics(y_true, y_predicted): print('Root Mean Square Error = ' + str(np.sqrt(mean_squared_error(y_true, y_predicted)))) print('Mean Absolute Error = ' + str(mean_absolute_error(y_true, y_predicted))) print('Median Absolute Error = ' + str(median_absolute_error(y_true, y_predicted))) y_pred = xgb.predict(X_test) print_metrics(y_test**2, y_pred**2) y_pred = models['best_xgbr'].predict(X_test) print(models['best_xgbr'], '\n') print_metrics(y_test**2, y_pred**2) y_pred = models['best_lgbr'].predict(X_test) print(models['best_lgbr'], '\n') print_metrics(y_test**2, y_pred**2) ###Output LGBMRegressor(max_depth=5, n_estimators=150, num_leaves=8) Root Mean Square Error = 22.094410468949704 Mean Absolute Error = 15.99914128977054 Median Absolute Error = 12.415289455391928 ###Markdown Les résultats sont globalement identiques par rapport à la randomized search, la technique est cependant plus complexe à prendre en main et davantage de tuning est requis. Pour notre cas ici présent, nous continuerons à privilégier randomized search. 
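For reference, the four-part Hyperopt formulation described above (objective function, domain space, optimisation algorithm, results history) could be sketched as follows. This is an illustrative sketch only: the notebook itself uses the `bayes_opt` package rather than Hyperopt, and the search-space bounds below are assumptions that simply mirror the bounds passed to `BayesianOptimization` earlier.

```python
from hyperopt import fmin, tpe, hp, Trials

# 2) Domain space: same bounds as the BayesianOptimization search above.
space = {'max_depth':    hp.quniform('max_depth', 3, 7, 1),
         'eta':          hp.uniform('eta', 0.0, 0.3),
         'n_estimators': hp.quniform('n_estimators', 50, 500, 1),
         'gamma':        hp.uniform('gamma', 0.0, 1.0)}

# 1) Objective function: hyperopt minimises, so return the negated CV score.
def objective(params):
    return -float(cv_score(params['max_depth'], params['n_estimators'],
                           params['eta'], params['gamma']))

# 4) Results history.
trials = Trials()

# 3) Optimisation algorithm: Tree-structured Parzen Estimator.
best = fmin(fn=objective, space=space, algo=tpe.suggest,
            max_evals=50, trials=trials)
print(best)
```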
Optimisation des autres modèles selectionnéesNous avons déjà optimisé XGBoost et LightGBM avec de bons résultats.Travaillons sur les modèles restants selectionnés par notre package: - ElasticNet et LassoLars- Multilayer perceptron- Decision Tree- Random Forest ###Code # random forest regressor opti from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import RandomizedSearchCV import time params = {'n_estimators': [int(x) for x in np.linspace(start = 100, stop = 500, num = 5)], 'max_features': ['auto', 'sqrt'], 'max_depth': [int(x) for x in np.linspace(5, 20, num = 4)], 'min_samples_split': [2, 5, 10, 20, 50], 'min_samples_leaf': [2, 5, 10]} reg = RandomForestRegressor(random_state=42) n_iter_search = 50 random_search = RandomizedSearchCV(reg, param_distributions=params, verbose = 2, n_iter=n_iter_search, cv=cv_ts, scoring='neg_mean_squared_error', random_state=42) start = time.time() random_search.fit(X_train, y_train) print("RandomizedSearchCV took %.2f seconds for %d candidates" " parameter settings." % ((time.time() - start), n_iter_search)) rfr = RandomForestRegressor(max_depth=5, min_samples_leaf=10, min_samples_split=50, n_estimators=300, random_state=42) rfr.fit(X_train, y_train) y_pred = rfr.predict(X_test) print(models['best_rfr'], '\n') print_metrics(y_test**2, y_pred**2) pickle.dump(rfr, open('models/best_rfr.sav', 'wb')) # decision tree regressor opti from sklearn.tree import DecisionTreeRegressor from sklearn.model_selection import GridSearchCV parameters = {"splitter": ["best", "random"], "max_depth": [3, 5, 7, 10], "min_samples_leaf": [5, 10, 20, 40], "min_weight_fraction_leaf": [0.1, 0.3, 0.5, 0.7, 0.9], "max_leaf_nodes": [None, 10, 20, 50, 70]} dtr = DecisionTreeRegressor(random_state=42) grid_cv = GridSearchCV(dtr, parameters, cv=cv_ts, n_jobs=-1, verbose=2) grid_cv.fit(X_train, y_train) from sklearn.tree import DecisionTreeRegressor dtr = DecisionTreeRegressor(max_depth=5, max_leaf_nodes=10, min_samples_leaf=10, min_weight_fraction_leaf=0.1, random_state=42) dtr.fit(X_train, y_train) y_pred = dtr.predict(X_test) print(models['best_dtr'], '\n') print_metrics(y_test**2, y_pred**2) # results clearly lack behind, it will not be used for ensembling, # only for interpretation purposes pickle.dump(dtr, open('models/best_dtr.sav', 'wb')) # MLP regressor opti from sklearn.neural_network import MLPRegressor from sklearn.model_selection import RandomizedSearchCV import time params = {'hidden_layer_sizes': [(16,), (64,), (100,), (32, 64)], 'activation': ['relu'], 'alpha': [1e-5, 1e-4, 1e-3, 1e-2, 0.1, 1], 'learning_rate': ['invscaling', 'adaptive'], 'tol': [1e-4, 1e-3, 1e-2], 'warm_start': [True, False], 'batch_size': ['auto', 50], 'max_iter': [1000], 'early_stopping': [True, False], 'epsilon': [1e-8, 1e-5] } mlpr = MLPRegressor(random_state=42) n_iter_search = 100 random_search = RandomizedSearchCV(mlpr, param_distributions=params, verbose=2, n_iter=n_iter_search, cv=cv_ts, scoring='neg_mean_squared_error', random_state=42) start = time.time() random_search.fit(X_train, y_train) print("RandomizedSearchCV took %.2f seconds for %d candidates" " parameter settings." 
% ((time.time() - start), n_iter_search)) from sklearn.neural_network import MLPRegressor mlpr = MLPRegressor(alpha=1, batch_size=50, epsilon=1e-05, hidden_layer_sizes=(64,), learning_rate='adaptive', max_iter=1000, random_state=42, warm_start=True) mlpr.fit(X_train, y_train) y_pred = mlpr.predict(X_test) print(models['best_mlpr'], '\n') print_metrics(y_test**2, y_pred**2) pickle.dump(mlpr, open('models/best_mlpr.sav', 'wb')) # elasticnet is a combination of both Ridge and lasso from sklearn.linear_model import ElasticNet from sklearn.model_selection import GridSearchCV elastic = ElasticNet() parameters = {'alpha': [0.001, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], 'l1_ratio': [1e-5, 1e-4, 1e-3, 0.01, 0.1, 0.5, 0.6, 0.7, 0.8, 0.9, 1]} grid = GridSearchCV(elastic, param_grid=parameters, cv=cv_ts, scoring="neg_mean_squared_error", n_jobs=-1, verbose=2) grid.fit(X_train, y_train) from sklearn.linear_model import ElasticNet elastic = ElasticNet(alpha=0.1, l1_ratio=0.8) elastic.fit(X_train, y_train) y_pred = elastic.predict(X_test) print(models['best_elastic'], '\n') print_metrics(y_test**2, y_pred**2) pickle.dump(elastic, open('models/best_elastic.sav', 'wb')) from sklearn.linear_model import LassoLars, Lasso lassol = LassoLars() parameters = {'alpha': np.logspace(-5, -0.1, 100)} grid = GridSearchCV(lassol, param_grid=parameters, cv=cv_ts, scoring="neg_mean_squared_error", n_jobs=-1, verbose=2) grid.fit(X_train, y_train) lassol = LassoLars(alpha=0.00076) lassol.fit(X_train, y_train) y_pred = lassol.predict(X_test) print(models['best_lassol'], '\n') print_metrics(y_test**2, y_pred**2) pickle.dump(lassol, open('models/best_lassol.sav', 'wb')) from sklearn.linear_model import LassoLars, Lasso lass = Lasso(alpha=0.01, random_state=62) lass.fit(X_train, y_train) pickle.dump(lass, open('models/best_lasso.sav', 'wb')) y_pred = lass.predict(X_test) print(models['best_lasso'], '\n') print_metrics(y_test**2, y_pred**2) ###Output Lasso(alpha=0.01, random_state=62) Root Mean Square Error = 25.703738783164106 Mean Absolute Error = 18.61978391669346 Median Absolute Error = 15.55926229391335 ###Markdown Linear models perform worth than tree based or NN, that said, they will brind diversity and uncorrelated predictions for ensembling. 
Ensembling the selected models There are several possible ensembling techniques: ###Code
import glob, re, pickle, os

models = {}
for model in glob.glob("models/*.sav"):
    loaded_model = pickle.load(open(model, 'rb'))
    name = os.path.basename(model)
    name = os.path.splitext(name)[0]
    models[name] = loaded_model

models
###Output _____no_output_____ ###Markdown 1. Checking the correlation between the predictions of the different models ###Code
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set(style='white', context='notebook', palette='deep')

pred_best_lgbr = pd.Series(models['best_lgbr'].predict(X_test), name="lgbr")
pred_best_elastic = pd.Series(models['best_elastic'].predict(X_test), name="elasticNet")
pred_best_lassol = pd.Series(models['best_lassol'].predict(X_test), name="lassoLars")
pred_best_rfr = pd.Series(models['best_rfr'].predict(X_test), name="rfr")
pred_best_mlpr = pd.Series(models['best_mlpr'].predict(X_test), name="mlpr")
pred_best_dtr = pd.Series(models['best_dtr'].predict(X_test), name="dtr")
pred_best_xgbr = pd.Series(models['best_xgbr'].predict(X_test), name="xgbr")

# Concatenate all regressor results
ensemble_res = pd.concat([pred_best_elastic, pred_best_lassol, pred_best_mlpr,
                          pred_best_dtr, pred_best_rfr, pred_best_lgbr,
                          pred_best_xgbr], axis=1)

plt.figure(figsize=(10,8))
plt.title('Comparison of the predictions')
g = sns.heatmap(ensemble_res.corr(), annot=True)
###Output _____no_output_____ ###Markdown The predictions of the different models are highly correlated, so a priori ensembling will not add much value here. The weaker correlation of the decision tree simply reflects its tendency to make more errors. 2. Ensembling techniques (averaging, voting and stacking) ###Code
# Simple averaging
ensemble_res['predictions'] = np.mean(ensemble_res[['elasticNet', 'lassoLars', 'mlpr', 'dtr',
                                                    'rfr', 'lgbr', 'xgbr']], axis=1)
print_metrics(y_test**2, ensemble_res['predictions']**2)
###Output Root Mean Square Error = 23.090010282760787 Mean Absolute Error = 16.84447024156129 Median Absolute Error = 13.314907886983512 ###Markdown No improvement, since there is little difference between the individual predictions; let's try voting. ###Code
from sklearn.ensemble import VotingRegressor

ensemble = VotingRegressor([('lgbr', models['best_lgbr']),
                            ('elasticNet', models['best_elastic']),
                            ('xgbr', models['best_xgbr']),
                            ('mlpr', models['best_mlpr']),
                            ('rfr', models['best_rfr'])], n_jobs=-1)
ensemble.fit(X_train, y_train)
y_pred = ensemble.predict(X_test)

print('VotingRegressor:', '\n')
print_metrics(y_test**2, y_pred**2)
###Output VotingRegressor: Root Mean Square Error = 22.61388504828029 Mean Absolute Error = 16.511757286035905 Median Absolute Error = 13.115560272803897 ###Markdown Voting does better than simple averaging but still does not manage to beat LightGBM or XGBoost. Let's try stacking. ###Code
from sklearn.ensemble import StackingRegressor

ensemble_s = StackingRegressor([('lgbr', models['best_lgbr']),
                                ('elasticNet', models['best_elastic']),
                                ('xgbr', models['best_xgbr']),
                                ('mlpr', models['best_mlpr']),
                                ('rfr', models['best_rfr'])],
                               final_estimator=Lasso(), n_jobs=-1)
ensemble_s.fit(X_train, y_train)
y_pred = ensemble_s.predict(X_test)
print_metrics(y_test**2, y_pred**2)
###Output Root Mean Square Error = 26.333890640845347 Mean Absolute Error = 18.684786666859047 Median Absolute Error = 13.05969559820823
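To make the final comparison easier to read, the metrics printed model-by-model above could be collected into a single table. This is an illustrative sketch, not part of the original notebook; it reuses the `models` dictionary, the fitted `ensemble`/`ensemble_s` objects and the test split defined earlier, and keeps the notebook's convention of evaluating on the squared target.

```python
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error, mean_absolute_error

candidates = dict(models)            # individually tuned models
candidates['voting'] = ensemble      # fitted VotingRegressor
candidates['stacking'] = ensemble_s  # fitted StackingRegressor

rows = []
for name, reg in candidates.items():
    y_pred = reg.predict(X_test)
    rows.append({'model': name,
                 'RMSE': np.sqrt(mean_squared_error(y_test**2, y_pred**2)),
                 'MAE': mean_absolute_error(y_test**2, y_pred**2)})

comparison = pd.DataFrame(rows).sort_values('RMSE').reset_index(drop=True)
print(comparison)
```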
Live_station_status.ipynb
###Markdown Scrap live station status data in https://bikeshare.metro.net/stations/json/ ###Code def get_live_station(url): response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'}) stations = response.json() output = [] for station in stations['features']: dict_keys = ['kioskId', 'bikesAvailable', 'docksAvailable', 'name', 'latitude', 'longitude'] data = {k : station['properties'][k] for k in dict_keys} data['time'] = pd.to_datetime('today') output.append(data) return pd.DataFrame(output) live_station_df = get_live_station("https://bikeshare.metro.net/stations/json/") live_station_df.head() ###Output _____no_output_____ ###Markdown Remove nano-seconds in time feature ###Code live_station_df['time'] = live_station_df['time'].astype('datetime64[s]') # change name of time feature to ds and set it to index # replace minutes and seconds to 0 live_station_df.rename(columns={'time':'ds'}, inplace=True) live_station_df.set_index('ds', inplace=True) live_station_df.index = live_station_df.index.map(lambda x: x.replace(second=0)) live_station_df.index = live_station_df.index.map(lambda x: x.replace(minute=0)) live_station_df.kioskId = live_station_df.kioskId.astype(str) live_station_df.head() ###Output _____no_output_____ ###Markdown Create new column that concatenated with ds and kioskid to merge it with station_pred_df1 ###Code live_station_df.reset_index(inplace=True) live_station_df['id'] = live_station_df.ds.astype(str) + '-' + live_station_df.kioskId live_station_df.head() ###Output _____no_output_____ ###Markdown Import forecasted station_pred_df1 that stored as a pickle ###Code with open('station_pred_df1.pickle', 'rb') as f: station_pred_df1 = pickle.load(f) station_pred_df1.head() station_pred_df1.tail() # Take first 4 character in station feature station_pred_df1.station = station_pred_df1.station.str[:4] ###Output _____no_output_____ ###Markdown Create unique id feature by concatenating ds and station_id ###Code station_pred_df1['id'] = station_pred_df1.ds.astype(str) + '-' + station_pred_df1.station station_pred_df1.tail() station_pred_df1.set_index('ds', inplace=True) merge_live_pred_df = station_pred_df1.merge(live_station_df, on='id') merge_live_pred_df.head() merge_live_pred_df.head() def normalize_demand_number(record): if record['y_hat'] >= 4.4: return round(record['y_hat']) * 3 elif record['y_hat'] >= 4: return round(record['y_hat']) * 2 elif record['y_hat'] >= 3: return round(record['y_hat']) * 2 elif record['y_hat'] >= 1: return round(record['y_hat']) * 3 elif record['y_hat'] >= 0.7: return round(record['y_hat']) * 2 elif record['y_hat'] >= 0.4: return 1 else: return 0 merge_live_pred_df['demand'] = merge_live_pred_df.apply(normalize_demand_number, axis=1) merge_live_pred_df with open('end_station_pred_df.pickle', 'rb') as f: end_station_pred_df = pickle.load(f) end_station_pred_df.rename(columns={'y_hat': 'ys'}, inplace=True) end_station_pred_df.head() ###Output _____no_output_____ ###Markdown Execute the same process for end_station_pred_df ###Code end_station_pred_df.station = end_station_pred_df.station.str[:4] end_station_pred_df['id'] = end_station_pred_df.ds.astype(str) + '-' + end_station_pred_df.station end_station_pred_df.set_index('ds', inplace=True) merge_live_pred_df = merge_live_pred_df.merge(end_station_pred_df[['id', 'ys']], on='id') def normalize_ys_number(record): if record['ys'] >= 3: return round(record['ys']) * 2 elif record['ys'] >= 1: return round(record['ys']) * 3 elif record['ys'] >= .8: return round(record['ys']) * 2 elif record['ys'] >= .5: 
return round(record['ys']) * 1 else: return 0 merge_live_pred_df['surplus'] = merge_live_pred_df.apply(normalize_ys_number, axis=1) merge_live_pred_df def nexthour(record): if record['bikesAvailable'] - record['demand'] + record['surplus'] >= 0: return record['bikesAvailable'] - record['demand'] + record['surplus'] else: return 0 merge_live_pred_df['nextHour'] = merge_live_pred_df.apply(nexthour, axis=1) merge_live_pred_df # df = merge_live_pred_df.copy() # df.to_csv('df_9_30_5pm.csv') !ls ###Output Data end_station_pred_df.pickle EDA_FoliumMap.ipynb functions.py LICENSE heatmaptime_animation.html Live_station_status.ipynb heatmaptimes.html README.md history TimeSeries_BikeStation.ipynb markers.html capstone_EDA.ipynb station_name.csv capstone_TimeSeries-Copy1.ipynb station_name.json capstone_TimeSeries.ipynb station_pred_df1.pickle capstone_TimeSeries_hourly.ipynb ###Markdown Create Folium Map that will display available bikes, open docks, and forecasted available bikes in next hour for each bike stations. Also add minimap using folium plugins ###Code def get_base_map(df): minimap = MiniMap(toggle_display=True, width=300, height=150, zoom_animation=True, zoom_level_offset=-6, position='topleft') return folium.Map(location=[df.latitude.mean(), df.longitude.mean()], zoom_start=20, tiles='OpenStreetMap').add_child(minimap) base = get_base_map(merge_live_pred_df) base def add_station_markers(initial_map, df): #station location visualization out_map = initial_map for lat, lon, Name, Available, Opendocks, Next_hr_remaining, in zip(df['latitude'], df['longitude'], df['name'], df['bikesAvailable'], df['docksAvailable'], df['nextHour']): folium.Marker([lat,lon], popup=(str(Name).capitalize() + '<br>' '<br><b>Available: </b>' + str(Available) + '<br>' '<br><b>Opendocks: </b>' + str(Opendocks) + '<br>' '<br><b>Remaining Next Hour:</b> ' + str(Next_hr_remaining)), icon=folium.Icon(color='green')).add_to(out_map) return out_map markers = add_station_markers(base, merge_live_pred_df) markers !ls ###Output Data capstone_TimeSeries.ipynb LICENSE capstone_TimeSeries_hourly.ipynb Live_station_status.ipynb functions.py README.md markers.html TimeSeries_BikeStation.ipynb station_pred_df.pickle capstone_EDA.ipynb station_pred_df1.pickle capstone_TimeSeries-Copy1.ipynb ###Markdown Create dictionary key as station name and values as lat & long. The purpose is for flask app dropdown in index page. 
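The name → (latitude, longitude) mapping described above can also be built directly from `live_station_df` with a dictionary comprehension. This is a compact sketch of the same idea, not a change to the notebook's code.

```python
# One (lat, lng) tuple per station name, taken from the live status frame.
station_coords = {
    row['name']: (row['latitude'], row['longitude'])
    for _, row in live_station_df.iterrows()
}

# Example lookup, e.g. for populating the Flask dropdown mentioned above.
first_station = next(iter(station_coords))
print(first_station, station_coords[first_station])
```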
###Code live_station_df.groupby('name')['latitude','longitude'].apply(lambda x: x.values.tolist()).to_dict() d = {} for i in df['name'].unique(): d[i] = [{live_station_df['name'][j]: (live_station_df['latitude'][j], live_station_df['longitude'][j])} for j in live_station_df[live_station_df['name']==i].index] d ###Output _____no_output_____ ###Markdown Take station, name, latitude, longitude features and save it as csv or json format ###Code station_name_df = merge_live_pred_df[['station', 'name', 'latitude', 'longitude']] station_name_df.shape station_name_df.head() station_name_df.to_csv("station_name.csv") import csv with open('station_name.csv') as csvfile: reader = csv.DictReader(csvfile) stations = {} for row in reader: kiosk_id= row['station'] lat = row['latitude'] lng = row['longitude'] stations[kiosk_id] = (lat, lng) import json with open('station_name.json', 'w') as outfile: json.dump(stations, outfile) !ls ###Output Data df.csv LICENSE df_9_26_6pm.csv Live_station_status.ipynb df_9_30_5pm.csv README.md functions.py TimeSeries_BikeStation.ipynb markers.html capstone_EDA.ipynb station_name.csv capstone_TimeSeries-Copy1.ipynb station_name.json capstone_TimeSeries.ipynb station_pred_df.pickle capstone_TimeSeries_hourly.ipynb station_pred_df1.pickle
notebooks/BERTopic - Andrew 2-1-2022.ipynb
###Markdown NotesHow should we think about this problem? Should topics be extracted one-by-one from each post? (This actually doesn't work using BERT as it wants to find multiple topics when learning) Or, the entire subreddit? (Much more manageable using BERT, but with some caveats). Do we want to include comments in topic modeling? This is also feasible, but comments in different threads can be labeled in the same topic, which feels strange to me. In any case, what's presented here is a view of the entire subreddit. ###Code topic, probs, topic_model, freq, topic_df = train_bert(corpus_df, 'text') topic_df pd.set_option('display.max_rows', None) freq topic_nr = freq.iloc[3]["Topic"] topic_model.get_topic(topic_nr) topic_model.visualize_topics() topic_df['pos_sentiment'].hist() topic_df[topic_df['post_id'] == 'n2n0ax'] final_df = topic_df[['topic_label', 'topic_name', 'neg_sentiment', 'neu_sentiment', 'pos_sentiment', 'compound_sentiment']] pd.set_option('display.max_rows', None) final_df.groupby(by=['topic_label', 'topic_name']).mean().reset_index() ###Output _____no_output_____
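`train_bert` above looks like a project-specific helper. For readers unfamiliar with the library, a minimal BERTopic fit on a list of documents looks roughly like the sketch below; treating `corpus_df['text']` as the list of subreddit posts is an assumption based on the call above.

```python
from bertopic import BERTopic

# Stand-in for the documents passed to train_bert above.
docs = corpus_df['text'].astype(str).tolist()

topic_model = BERTopic(verbose=True)
topics, probs = topic_model.fit_transform(docs)

# Per-topic frequency table, analogous to the `freq` object used above.
print(topic_model.get_topic_info().head())
```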
data_cleaner.ipynb
###Markdown Turn absolute path into relative path ###Code for manifest_file in os.listdir(manifest_path): if 'csv' not in manifest_file: continue df = pd.read_csv('{}/{}'.format(manifest_path, manifest_file), header=None) df[0] = df[0].apply(lambda x: './' + x[x.index('data/'):]) df[1] = df[1].apply(lambda x: './' + x[x.index('data/'):]) df.to_csv('{}/{}'.format(manifest_path, manifest_file), index=False, header=False) df.head() ###Output _____no_output_____ ###Markdown Reduce manifest size ###Code is_reduce = False if is_reduce: for manifest_file in os.listdir(manifest_path): if 'csv' not in manifest_file: continue df = pd.read_csv('{}/{}'.format(manifest_path, manifest_file), header=None).head(100) df.to_csv(manifest_file, index=False, header=False) for col in df.columns: split_path = df.loc[:,col].str.split('/') split_file = list(split_path.apply(lambda x: x[-1])) split_dir = split_path.apply(lambda x: '/'.join(x[:-1])) for file in os.listdir(split_dir[0]): if file not in split_file: os.remove('{}/{}'.format(split_dir[0], file)) ###Output _____no_output_____ ###Markdown Here the data cleaning takes place. Because of hardware restrictions only Tunics from samll dataset are used for this task. here the target is to create a smaller dataset which contains information only related to Tunics ###Code import pandas as pd import numpy as np import cv2 data = pd.read_csv("small-2oq-c1r.csv", error_bad_lines=False) data.head() pid = data['productId'] title = data['title'] description = data['description'] urls = data['imageUrlStr'] urls_tun = [] for i, name in enumerate(list(pid)): try: if name[:3]=='TUN': urls_tun.append(urls[i]) except: print("ignore") len(urls_tun) urls_200_tun = [] pid_tun = [] title_tun = [] for i in range(len(urls_tun)): a = urls_tun[i] b = a.split(";") c = str(b).split("/") # print(c[4]) if c[4] == 'tunic': urls_200_tun.append(b[2]) pid_tun.append(pid[i]) title_tun.append(title[i]) print(len(urls_200_tun)) print(len(pid_tun)) print(len(title_tun)) my_col = [] for i in zip(urls_200_tun, pid_tun, title_tun): my_col.append(i) E = pd.DataFrame(my_col,columns=["imageURL", "ProductId","title"]) # g = pd.concat([]) E.to_csv("tunic_urls.csv") ###Output _____no_output_____
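The notebook imports `cv2` but does not use it in the cells shown. If the next step is to check that the collected tunic URLs actually resolve to images, one possible follow-up — an assumption, not part of the original workflow — is to download and decode a single URL:

```python
import cv2
import numpy as np
import requests

# Try the first collected tunic image URL.
resp = requests.get(urls_200_tun[0], timeout=10)
img_array = np.frombuffer(resp.content, dtype=np.uint8)
img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)

if img is not None:
    print("decoded image with shape:", img.shape)
else:
    print("could not decode an image from", urls_200_tun[0])
```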
notebooks/nlp/neutralising-equalising-word-embeddings.ipynb
###Markdown Neutralising and Equalising Word Embeddings This notebook shows how non-gender specific words can have the gender part neutralised to avoid bias in word embeddings. In addition to that, it also depicts the process of equalisation, where words that are gender-specific can be equalised towards words that are non-gender specific. Import dependencies ###Code import numpy as np ###Output _____no_output_____ ###Markdown Load the GloVe dataset The GloVe dataset is not part of the repository due to the size of the file. However, feel free to download it from here: https://nlp.stanford.edu/projects/glove/ ###Code def read_glove_vecs(glove_file): with open(glove_file, 'r') as f: words = set() word_to_vec_map = {} for line in f: line = line.strip().split() curr_word = line[0] words.add(curr_word) word_to_vec_map[curr_word] = np.array(line[1:], dtype=np.float64) return words, word_to_vec_map words, word_to_vec_map = read_glove_vecs('glove/glove.6B.50d.txt') ###Output _____no_output_____ ###Markdown * words: set of words in the vocabulary.* word_to_vec_map: dictionary mapping words to their GloVe vector representation. ###Code def cosine_similarity(u, v): """ Cosine similarity reflects the degree of similariy between u and v Arguments: u -- a word vector of shape (n,) v -- a word vector of shape (n,) Returns: cosine_similarity -- the cosine similarity between u and v defined by the formula above. """ dot = np.dot(u, v) norm_u = np.sqrt(np.sum(u**2)) norm_v = np.sqrt(np.sum(v**2)) cosine_similarity = dot / (norm_u * norm_v) return cosine_similarity father = word_to_vec_map["father"] mother = word_to_vec_map["mother"] ball = word_to_vec_map["ball"] crocodile = word_to_vec_map["crocodile"] france = word_to_vec_map["france"] italy = word_to_vec_map["italy"] paris = word_to_vec_map["paris"] rome = word_to_vec_map["rome"] print("cosine_similarity(father, mother) = ", cosine_similarity(father, mother)) print("cosine_similarity(ball, crocodile) = ", cosine_similarity(ball, crocodile)) # This should not show similarity as in the first vector we subtract the city representation # and in the second we subtract the country representation. print("cosine_similarity(france - paris, rome - italy) = ", cosine_similarity(france - paris, rome - italy)) # This one, on the other hand, should show similarity as we are checking for the similarity between 2 cities. print("cosine_similarity(paris - france, rome - italy) = ", cosine_similarity(paris - france, rome - italy)) ###Output cosine_similarity(father, mother) = 0.8909038442893615 cosine_similarity(ball, crocodile) = 0.2743924626137942 cosine_similarity(france - paris, rome - italy) = -0.6751479308174201 cosine_similarity(paris - france, rome - italy) = 0.6751479308174201 ###Markdown Debiasing word vectors We start by identifying the gender by subtracting the `man` vector representation from the `woman` vector representation. ###Code gender = word_to_vec_map['woman'] - word_to_vec_map['man'] ###Output _____no_output_____ ###Markdown Similarity between gender and names Negative similarities mean that the name is more related to the `female` gender. 
###Code name_list = ['john', 'marie', 'sophie', 'ronaldo', 'priya', 'rahul', 'danielle', 'reza', 'katy', 'yasmin', 'sam', 'carolina', 'logan'] for w in name_list: print (w, cosine_similarity(word_to_vec_map[w], gender)) ###Output john -0.23163356145973724 marie 0.315597935396073 sophie 0.31868789859418784 ronaldo -0.31244796850329437 priya 0.17632041839009402 rahul -0.16915471039231716 danielle 0.24393299216283895 reza -0.07930429672199553 katy 0.2831068659572615 yasmin 0.23313857767928758 sam -0.33642281213435427 carolina 0.0938795106708001 logan -0.16937077820548485 ###Markdown Unfortunately, non-gender specific words contain bias and hence need some extra treatment. Below a list of common words that look pretty biased. ###Code word_list = ['lipstick', 'driver', 'science', 'arts', 'literature', 'warrior','doctor', 'librarian', 'receptionist', 'technology', 'fashion', 'teacher', 'engineer', 'pilot', 'computer', 'singer', 'model', 'mechanic', 'babysitter'] for w in word_list: print (w, cosine_similarity(word_to_vec_map[w], gender)) ###Output lipstick 0.2769191625638267 driver -0.010681433817247916 science -0.06082906540929701 arts 0.008189312385880337 literature 0.06472504433459932 warrior -0.20920164641125288 doctor 0.11895289410935041 librarian 0.23302221769690296 receptionist 0.33077941750593737 technology -0.13193732447554302 fashion 0.03563894625772699 teacher 0.17920923431825664 engineer -0.0803928049452407 pilot 0.0010764498991916937 computer -0.10330358873850498 singer 0.1850051813649629 model 0.0343357596036095 mechanic -0.0035264430229621927 babysitter 0.2797785047879521 ###Markdown Again, positive similarities relates to women whislt negative similaties don't. It's shocking to see that `computer`, `technology` and `engineer` do not relate to women. Neutralise bias for non-gender specific words The formula below is used to compute the debiased version of a given vector representation. \begin{align}v^{bias\_component} = \frac{v \cdot gender}{\|gender\|_2^2}gender \\\\v^{debiased} = v - v^{bias\_component} \\\end{align} You can find the implementation of the `neutralise` formula below: ###Code def neutralize(word, gender, word_to_vec_map): """ Removes the bias of "word" by projecting it on the space orthogonal to the bias axis. This function ensures that gender neutral words are zero in the gender subspace. Arguments: word -- string indicating the word to debias gender -- numpy-array of shape (50,), corresponding to the bias axis (such as gender) word_to_vec_map -- dictionary mapping words to their corresponding vectors. Returns: v_debiased -- neutralised word vector representation of the input "word" """ v = word_to_vec_map[word] v_biascomponent = (np.dot(v, gender) / np.sqrt(np.sum(gender**2))**2) * gender v_debiased = v - v_biascomponent return v_debiased w = "receptionist" print("cosine similarity between " + w + " and gender, before neutralizing: ", cosine_similarity(word_to_vec_map["receptionist"], gender)) v_debiased = neutralize("receptionist", gender, word_to_vec_map) print("cosine similarity between " + w + " and gender, after neutralizing: ", cosine_similarity(v_debiased, gender)) ###Output cosine similarity between receptionist and gender, before neutralizing: 0.33077941750593737 cosine similarity between receptionist and gender, after neutralizing: -2.099120994400013e-17
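The introduction mentions an equalisation step for gender-specific word pairs, but it is not shown in these cells. A standard formulation (following Bolukbasi et al., 2016) could be sketched as below; this is an illustrative implementation, not the notebook's own code.

```python
def equalize(pair, bias_axis, word_to_vec_map):
    """Make a gender-specific pair (e.g. ("man", "woman")) exactly symmetric
    about the bias axis while preserving their shared, non-gender meaning."""
    w1, w2 = pair
    e_w1, e_w2 = word_to_vec_map[w1], word_to_vec_map[w2]

    # Mean of the pair, split into its bias and orthogonal components.
    mu = (e_w1 + e_w2) / 2.0
    mu_B = (np.dot(mu, bias_axis) / np.sum(bias_axis**2)) * bias_axis
    mu_orth = mu - mu_B

    # Bias components of each word, rescaled so both end up equidistant.
    e_w1B = (np.dot(e_w1, bias_axis) / np.sum(bias_axis**2)) * bias_axis
    e_w2B = (np.dot(e_w2, bias_axis) / np.sum(bias_axis**2)) * bias_axis
    scale = np.sqrt(np.abs(1 - np.sum(mu_orth**2)))
    e_w1B_corr = scale * (e_w1B - mu_B) / np.linalg.norm(e_w1 - mu_orth - mu_B)
    e_w2B_corr = scale * (e_w2B - mu_B) / np.linalg.norm(e_w2 - mu_orth - mu_B)

    return e_w1B_corr + mu_orth, e_w2B_corr + mu_orth

e1, e2 = equalize(("man", "woman"), gender, word_to_vec_map)
print("cosine_similarity(e1, gender) = ", cosine_similarity(e1, gender))
print("cosine_similarity(e2, gender) = ", cosine_similarity(e2, gender))
```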
Model backlog/Inference/151-melanoma-inf-5fold-efficientnetb3-meta-fts-384.ipynb
###Markdown Dependencies ###Code !pip install --quiet efficientnet # !pip install --quiet image-classifiers import warnings, json, re, glob, math # from scripts_step_lr_schedulers import * from melanoma_utility_scripts import * from kaggle_datasets import KaggleDatasets from sklearn.model_selection import KFold import tensorflow.keras.layers as L import tensorflow.keras.backend as K from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler from tensorflow.keras import optimizers, layers, metrics, losses, Model # import tensorflow_addons as tfa import efficientnet.tfkeras as efn # from classification_models.tfkeras import Classifiers ###Output _____no_output_____ ###Markdown TPU configuration ###Code strategy, tpu = set_up_strategy() REPLICAS = strategy.num_replicas_in_sync print(f'REPLICAS: {REPLICAS}') AUTO = tf.data.experimental.AUTOTUNE ###Output Running on TPU grpc://10.0.0.2:8470 REPLICAS: 8 ###Markdown Model parameters ###Code input_base_path = '/kaggle/input/151-melanoma-5fold-efficientnetb3-meta-fts-384/' with open(input_base_path + 'config.json') as json_file: config = json.load(json_file) config ###Output _____no_output_____ ###Markdown Load data ###Code database_base_path = '/kaggle/input/siim-isic-melanoma-classification/' test = pd.read_csv(database_base_path + 'test.csv') print(f'Test samples: {len(test)}') display(test.head()) GCS_PATH = KaggleDatasets().get_gcs_path(config['DATASET_PATH']) TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test*.tfrec') model_path_list = glob.glob(input_base_path + '*.h5') model_path_list.sort() print('Models to predict:') print(*model_path_list, sep='\n') ###Output Models to predict: /kaggle/input/151-melanoma-5fold-efficientnetb3-meta-fts-384/model_0_last.h5 /kaggle/input/151-melanoma-5fold-efficientnetb3-meta-fts-384/model_1_last.h5 /kaggle/input/151-melanoma-5fold-efficientnetb3-meta-fts-384/model_2_last.h5 /kaggle/input/151-melanoma-5fold-efficientnetb3-meta-fts-384/model_3_last.h5 /kaggle/input/151-melanoma-5fold-efficientnetb3-meta-fts-384/model_4_last.h5 ###Markdown Augmentations ###Code def data_augment(image): p_rotation = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_rotate = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_cutout = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_shear = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_crop = tf.random.uniform([], 0, 1.0, dtype=tf.float32) if p_shear > .2: if p_shear > .6: image = transform_shear(image, config['HEIGHT'], shear=20.) else: image = transform_shear(image, config['HEIGHT'], shear=-20.) if p_rotation > .2: if p_rotation > .6: image = transform_rotation(image, config['HEIGHT'], rotation=45.) else: image = transform_rotation(image, config['HEIGHT'], rotation=-45.) if p_crop > .2: image = data_augment_crop(image) if p_rotate > .2: image = data_augment_rotate(image) image = data_augment_spatial(image) image = tf.image.random_saturation(image, 0.7, 1.3) image = tf.image.random_contrast(image, 0.8, 1.2) image = tf.image.random_brightness(image, 0.1) if p_cutout > .5: image = data_augment_cutout(image) return image def data_augment_tta(image): p_rotation = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_rotate = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_crop = tf.random.uniform([], 0, 1.0, dtype=tf.float32) if p_rotation > .2: if p_rotation > .6: image = transform_rotation(image, config['HEIGHT'], rotation=45.) else: image = transform_rotation(image, config['HEIGHT'], rotation=-45.) 
if p_crop > .2: image = data_augment_crop(image) if p_rotate > .2: image = data_augment_rotate(image) image = data_augment_spatial(image) image = tf.image.random_saturation(image, 0.7, 1.3) image = tf.image.random_contrast(image, 0.8, 1.2) image = tf.image.random_brightness(image, 0.1) return image def data_augment_spatial(image): p_spatial = tf.random.uniform([], 0, 1.0, dtype=tf.float32) image = tf.image.random_flip_left_right(image) image = tf.image.random_flip_up_down(image) if p_spatial > .75: image = tf.image.transpose(image) return image def data_augment_rotate(image): p_rotate = tf.random.uniform([], 0, 1.0, dtype=tf.float32) if p_rotate > .66: image = tf.image.rot90(image, k=3) # rotate 270º elif p_rotate > .33: image = tf.image.rot90(image, k=2) # rotate 180º else: image = tf.image.rot90(image, k=1) # rotate 90º return image def data_augment_crop(image): p_crop = tf.random.uniform([], 0, 1.0, dtype=tf.float32) crop_size = tf.random.uniform([], int(config['HEIGHT']*.7), config['HEIGHT'], dtype=tf.int32) if p_crop > .5: image = tf.image.random_crop(image, size=[crop_size, crop_size, config['CHANNELS']]) else: if p_crop > .4: image = tf.image.central_crop(image, central_fraction=.7) elif p_crop > .2: image = tf.image.central_crop(image, central_fraction=.8) else: image = tf.image.central_crop(image, central_fraction=.9) image = tf.image.resize(image, size=[config['HEIGHT'], config['WIDTH']]) return image def data_augment_cutout(image, min_mask_size=(int(config['HEIGHT'] * .1), int(config['HEIGHT'] * .1)), max_mask_size=(int(config['HEIGHT'] * .125), int(config['HEIGHT'] * .125))): p_cutout = tf.random.uniform([], 0, 1.0, dtype=tf.float32) if p_cutout > .85: # 10~15 cut outs n_cutout = tf.random.uniform([], 10, 15, dtype=tf.int32) image = random_cutout(image, config['HEIGHT'], config['WIDTH'], min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=n_cutout) elif p_cutout > .6: # 5~10 cut outs n_cutout = tf.random.uniform([], 5, 10, dtype=tf.int32) image = random_cutout(image, config['HEIGHT'], config['WIDTH'], min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=n_cutout) elif p_cutout > .25: # 2~5 cut outs n_cutout = tf.random.uniform([], 2, 5, dtype=tf.int32) image = random_cutout(image, config['HEIGHT'], config['WIDTH'], min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=n_cutout) else: # 1 cut out image = random_cutout(image, config['HEIGHT'], config['WIDTH'], min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=1) return image ###Output _____no_output_____ ###Markdown Auxiliary functions ###Code def read_labeled_tfrecord(example): tfrec_format = { 'image' : tf.io.FixedLenFeature([], tf.string), 'image_name' : tf.io.FixedLenFeature([], tf.string), 'sex' : tf.io.FixedLenFeature([], tf.int64), 'age_approx' : tf.io.FixedLenFeature([], tf.int64), 'target' : tf.io.FixedLenFeature([], tf.int64) } example = tf.io.parse_single_example(example, tfrec_format) # Meta data meta_data = {} meta_data['age_approx'] = tf.cast(example['age_approx'], tf.int32) meta_data['sex'] = tf.cast(example['sex'], tf.int32) return {'input_image': example['image'], 'input_meta': meta_data}, example['target'] def read_unlabeled_tfrecord(example, return_image_name): tfrec_format = { 'image' : tf.io.FixedLenFeature([], tf.string), 'image_name' : tf.io.FixedLenFeature([], tf.string), 'sex' : tf.io.FixedLenFeature([], tf.int64), 'age_approx' : tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, tfrec_format) # Meta data meta_data = {} 
meta_data['age_approx'] = tf.cast(example['age_approx'], tf.int32) meta_data['sex'] = tf.cast(example['sex'], tf.int32) return {'input_image': example['image'], 'input_meta': meta_data}, example['image_name'] if return_image_name else 0 def prepare_image(data, augment=None, dim=256): # Image img = data['input_image'] img = tf.image.decode_jpeg(img, channels=3) img = tf.cast(img, tf.float32) / 255.0 if augment: img = augment(img) img = tf.reshape(img, [dim, dim, 3]) # Meta meta = data['input_meta'] meta = [tf.cast(meta[tfeat], dtype = tf.float32) for tfeat in ['age_approx', 'sex']] data['input_image'] = img data['input_meta'] = meta return data def get_dataset(files, augment=None, shuffle=False, repeat=False, labeled=True, return_image_names=True, batch_size=16, dim=256): ds = tf.data.TFRecordDataset(files, num_parallel_reads=AUTO) ds = ds.cache() if repeat: ds = ds.repeat() if shuffle: ds = ds.shuffle(1024*8) opt = tf.data.Options() opt.experimental_deterministic = False ds = ds.with_options(opt) if labeled: ds = ds.map(read_labeled_tfrecord, num_parallel_calls=AUTO) else: ds = ds.map(lambda example: read_unlabeled_tfrecord(example, return_image_names), num_parallel_calls=AUTO) ds = ds.map(lambda data, imgname_or_label: (prepare_image(data, augment=augment, dim=dim), imgname_or_label), num_parallel_calls=AUTO) ds = ds.batch(batch_size * REPLICAS) ds = ds.prefetch(AUTO) return ds def get_dataset_sampling(files, augment=None, shuffle=False, repeat=False, labeled=True, return_image_names=True, batch_size=16, dim=256): ds = tf.data.TFRecordDataset(files, num_parallel_reads=AUTO) ds = ds.cache() if repeat: ds = ds.repeat() if shuffle: ds = ds.shuffle(1024*8) opt = tf.data.Options() opt.experimental_deterministic = False ds = ds.with_options(opt) if labeled: ds = ds.map(read_labeled_tfrecord, num_parallel_calls=AUTO) else: ds = ds.map(lambda example: read_unlabeled_tfrecord(example, return_image_names), num_parallel_calls=AUTO) ds = ds.map(lambda data, imgname_or_label: (prepare_image(data, augment=augment, dim=dim), imgname_or_label), num_parallel_calls=AUTO) return ds def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\.").search(filename).group(1)) for filename in filenames] return np.sum(n) ###Output _____no_output_____ ###Markdown Model ###Code def model_fn(image_input_shape=(256, 256, 3), meta_input_shape=(2)): input_image = L.Input(shape=image_input_shape, name='input_image') input_meta = L.Input(shape=meta_input_shape, name='input_meta') base_model = efn.EfficientNetB3(input_shape=image_input_shape, weights=config['BASE_MODEL_WEIGHTS'], include_top=False, pooling='avg', drop_connect_rate=0.4) # Image block x = base_model(input_image) x = L.Dropout(0.2)(x) # Meta data block # Merge blocks x_merged = L.concatenate([x, input_meta]) x_merged = L.Dense(512)(x_merged) x_merged = L.BatchNormalization()(x_merged) x_merged = L.Activation('relu')(x_merged) x_merged = L.Dropout(0.2)(x_merged) output = L.Dense(1, activation='sigmoid', kernel_initializer='zeros', name='output')(x_merged) model = Model(inputs=[input_image, input_meta], outputs=output) return model ###Output _____no_output_____ ###Markdown Inference ###Code preds = np.zeros((len(test), 1)) for model_path in model_path_list: print(model_path) if tpu: tf.tpu.experimental.initialize_tpu_system(tpu) TEST_FILENAMES = np.sort(np.array(tf.io.gfile.glob(GCS_PATH + '/test*.tfrec'))) ct_test = count_data_items(TEST_FILENAMES) TEST_STEPS = config['TTA_STEPS'] * ct_test/config['BATCH_SIZE']/4/REPLICAS # MODEL K.clear_session() with 
strategy.scope(): model = model_fn((config['HEIGHT'], config['WIDTH'], config['CHANNELS'])) model.load_weights(model_path) # PREDICT TEST USING TTA print('Predicting Test with TTA...') ds_test = get_dataset(TEST_FILENAMES, labeled=False, return_image_names=False, augment=data_augment_tta, repeat=True, shuffle=False, dim=config['HEIGHT'], batch_size=config['BATCH_SIZE']*4) pred = model.predict(ds_test, steps=TEST_STEPS, verbose=2)[:config['TTA_STEPS']*ct_test,] preds[:,0] += np.mean(pred.reshape((ct_test, config['TTA_STEPS']), order='F'), axis=1) / config['N_USED_FOLDS'] ###Output /kaggle/input/151-melanoma-5fold-efficientnetb3-meta-fts-384/model_0_last.h5 Downloading data from https://github.com/qubvel/efficientnet/releases/download/v0.0.1/efficientnet-b3_noisy-student_notop.h5 43933696/43933088 [==============================] - 1s 0us/step Predicting Test with TTA... 537/536 - 201s /kaggle/input/151-melanoma-5fold-efficientnetb3-meta-fts-384/model_1_last.h5 Predicting Test with TTA... 537/536 - 184s /kaggle/input/151-melanoma-5fold-efficientnetb3-meta-fts-384/model_2_last.h5 Predicting Test with TTA... 537/536 - 183s /kaggle/input/151-melanoma-5fold-efficientnetb3-meta-fts-384/model_3_last.h5 Predicting Test with TTA... 537/536 - 183s /kaggle/input/151-melanoma-5fold-efficientnetb3-meta-fts-384/model_4_last.h5 Predicting Test with TTA... 537/536 - 184s ###Markdown Visualize test predictions ###Code ds = get_dataset(TEST_FILENAMES, augment=False, repeat=False, dim=config['HEIGHT'], labeled=False, return_image_names=True) image_names = np.array([img_name.numpy().decode("utf-8") for img, img_name in iter(ds.unbatch())]) submission = pd.DataFrame(dict(image_name=image_names, target=preds[:,0])) submission = submission.sort_values('image_name') print(f"Test predictions {len(submission[submission['target'] > .5])}|{len(submission[submission['target'] <= .5])}") print('Top 10 samples') display(submission.head(10)) print('Top 10 positive samples') display(submission.query('target > .5').head(10)) fig = plt.subplots(figsize=(20, 4)) plt.hist(submission['target'], bins=100) plt.title('Target', size=18) plt.show() ###Output Test predictions 500|10482 Top 10 samples ###Markdown Test set predictions ###Code display(submission.head(10)) display(submission.describe().T) submission[['image_name', 'target']].to_csv('submission.csv', index=False) ###Output _____no_output_____
pytorch_mario_filho_live.ipynb
###Markdown São Paulo, 4 May 2020 Live 24 Basic PyTorch tutorial A deep learning library from Facebook [email protected] ###Code
# Installing the dependencies
!pip install --upgrade pip
#!pip install pytorch

# Performing the imports
import torch
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np

t = torch.Tensor([1,2,3])
t
array = np.array([1,2,3])
t2 = torch.from_numpy(array)
t2
t * 10
t.dot(t)
t3 = torch.Tensor([2,2,2])
t3.dot(t3)
!ls sample_data/california_housing_test.csv
# Reading the dataset
data_test = pd.read_csv("sample_data/california_housing_test.csv")
data_train = pd.read_csv("sample_data/california_housing_train.csv")
data_train.sample(3)
###Output _____no_output_____
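A natural next step for the tutorial is to turn the pandas DataFrame into PyTorch tensors. This is an illustrative sketch; the target column name `median_house_value` is an assumption based on the standard Colab California-housing sample files.

```python
# Features and target as float32 tensors.
target_col = 'median_house_value'   # assumed column name in the sample dataset
X = torch.tensor(data_train.drop(columns=[target_col]).values, dtype=torch.float32)
y = torch.tensor(data_train[target_col].values, dtype=torch.float32)

print(X.shape, y.shape)
print(X[:3])
```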
tutorials/TFAuto_|_Regression.ipynb
###Markdown ###Code # !pip install git+https://github.com/rafiqhasan/auto-tensorflow.git !pip install auto-tensorflow from auto_tensorflow.tfa import TFAuto ###Output _____no_output_____ ###Markdown **Download data** ###Code !rm -rf data.* !rm -rf /content/*.png !rm -rf *trainer.py !rm -r /content/train_data !rm -r /content/test_data !rm -rf untitled_project !mkdir /content/train_data !mkdir /content/test_data !sudo rm -r /content/tfauto # # House price %%bash cd /content/train_data wget https://raw.githubusercontent.com/rafiqhasan/AI_DL_ML_Repo/master/Datasets/house_price/data.csv cd ../test_data wget https://raw.githubusercontent.com/rafiqhasan/AI_DL_ML_Repo/master/Datasets/house_price/data.csv ##Taken same data for demonstration purposes only ##Initialize TFAuto with root and Data path tfa = TFAuto(train_data_path='/content/train_data/', test_data_path='/content/test_data/', path_root='/content/tfauto') ##Step 1 ##Run Data setup -> Infer Schema, find anomalies, create profile and show viz tfa.step_data_explore(viz=False) ##Step 2 ##Run Model Training -> tfa.step_model_build(label_column = 'price', model_type='REGRESSION') ##--> Default model_complexity # tfa.step_model_build(label_column = 'price', model_type='REGRESSION', model_complexity=0) ##--> Model_complexity = 0 ( Simple model - No HPT ) ##Step 3 ##Show model What-If Tool tfa.step_model_whatif() #Check signature !saved_model_cli show --dir "/content/tfauto/model/1" --all ###Output _____no_output_____ ###Markdown **Tensorflow Model Serving** ###Code !apt-get remove tensorflow-model-server !echo "deb http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal" | tee /etc/apt/sources.list.d/tensorflow-serving.list && \ curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | apt-key add - !apt update !apt-get install tensorflow-model-server ###Start Tensorflow server # %%bash --bg # export TF_CPP_MIN_VLOG_LEVEL=0 %%bash --bg nohup tensorflow_model_server \ --rest_api_port=8502 \ --model_name=model \ --model_base_path="/content/tfauto/model" >server.log 2>&1 !tail server.log import json import requests #Create payload data_py = {"inputs":{'bedrooms': [[3]], 'bathrooms': [[2.0]], 'sqft_living': [[1180]], 'sqft_lot': [[5650]], 'floors': [[2.0]], 'waterfront': [[1]], 'view': [[1]], 'condition': [[3]], 'grade': [[7]], 'sqft_above': [[1180]], 'sqft_basement': [[0]], 'yr_built': [[1997]], 'sqft_living15': [[1340]], 'sqft_lot15': [[5650]] }} data = json.dumps(data_py) print("payload: ", data) #Run request on TMS headers = {"content-type": "application/json"} json_response = requests.post('http://localhost:8502/v1/models/model:predict', data=data, headers=headers) json_response.text ###Output _____no_output_____
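If the request above succeeds, the JSON body returned by TensorFlow Serving can be decoded back into Python objects. A small hedged sketch of reading the prediction out; the exact shape of the response depends on the served signature, and the `outputs` key is simply the usual location for `inputs`-style requests.

```python
# Decode the JSON body returned by TensorFlow Serving.
result = json_response.json()

# For "inputs"-style requests the prediction normally sits under "outputs".
prediction = result.get('outputs', result)
print("predicted price:", prediction)
```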
snail-and-well-checkpoint.ipynb
###Markdown The Snail and the WellA snail falls at the bottom of a 125 cm well. Each day the snail rises 30 cm. But at night, while sleeping, slides 20 cm because the walls are wet. How many days does it take for the snail to escape the well?**Hint**: The snail gets out of the well when it surpasses the 125cm of height. Tools1. Loop: **while**2. Conditional statements: **if-else**3. Function: **print()** Tasks 1. Assign the challenge data to variables with representative names: `well_height`, `daily_distance`, `nightly_distance` and `snail_position`. ###Code well_height = 125 daily_distance = 30 nightly_distance = 20 snail_position = 0 ###Output _____no_output_____ ###Markdown 2. Create a variable `days` to keep count of the days that pass until the snail escapes the well. ###Code days = 0 ###Output _____no_output_____ ###Markdown 3. Find the solution to the challenge using the variables defined above. ###Code while snail_position < well_height: days += 1 snail_position += daily_distance - nightly_distance ###Output _____no_output_____ ###Markdown 4. Print the solution. ###Code print('Snail gets out of the well on day:', days) ###Output Snail gets out of the well on day: 13 ###Markdown BonusThe distance traveled by the snail each day is now defined by a list.```advance_cm = [30, 21, 33, 77, 44, 45, 23, 45, 12, 34, 55]```On the first day, the snail rises 30cm but during the night it slides 20cm. On the second day, the snail rises 21cm but during the night it slides 20cm, and so on. 1. How many days does it take for the snail to escape the well?Follow the same guidelines as in the previous challenge.**Hint**: Remember that the snail gets out of the well when it surpasses the 125cm of height. ###Code advance_cm = [30,21,33,77,44,45,23,45,12,34,55] well_height = 125 snail_position = 0 days = 0 nightly_distance = 20 for i in advance_cm: snail_position += i - nightly_distance while snail_position < well_height: days +=1 print('Snail gets out of wall on day:', days) ###Output _____no_output_____ ###Markdown 2. What is its maximum displacement in one day? And its minimum? Calculate the displacement using only the travel distance of the days used to get out of the well. **Hint**: Remember that displacement means the total distance risen taking into account that the snail slides at night. ###Code print(max(advance_cm)) print(min(advance_cm)) ###Output _____no_output_____ ###Markdown 3. What is its average progress? Take into account the snail slides at night. ###Code avr_progress = sum(advance_cm)/len(advance_cm) print(avr_progress) ###Output _____no_output_____ ###Markdown 4. What is the standard deviation of its displacement? Take into account the snail slides at night. ###Code import numpy as np snail_position = [] advance_cm = [30,21,33,77,44,45,23,45,12,34,55] nightly_distance = 20 for i in advance_cm: snail_position.append(i-nightly_distance) print(std.(snail_position)) ###Output _____no_output_____
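For the bonus question, the escape day for the variable daily advances can also be computed with a single loop that stops as soon as the snail passes the top. A compact alternative sketch, following the same net-displacement convention used in the first part of the notebook:

```python
advance_cm = [30, 21, 33, 77, 44, 45, 23, 45, 12, 34, 55]
well_height = 125
nightly_distance = 20

snail_position = 0
days = 0
for advance in advance_cm:
    days += 1
    snail_position += advance - nightly_distance  # net displacement for the day
    if snail_position >= well_height:
        break

print('Snail gets out of the well on day:', days)
```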
Community_Backbone_Q2.ipynb
###Markdown
###Code
# Load the required packages (plyr is needed below for ddply()).
lapply(c("XLConnect","GGally","tidyverse","visNetwork","magrittr","DiagrammeR","data.table",
         "plotly","intergraph","networkD3","optrees","disparityfilter","network","Matrix","igraph",
         "CINNA","ggplot2","poweRlaw","devtools","ForceAtlas2","remotes","netdiffuseR","lfe",
         "stargazer","foreign","MASS","plyr"), require, character.only = TRUE)

# Read the backbone edge list and build an undirected graph.
Backbone_Q2 <- read.csv("Backbone_Q2.csv")
Edgelist <- data.frame(Backbone_Q2)
g <- graph.data.frame(Edgelist, directed = F)

# Detect communities with the fast greedy algorithm and inspect their sizes.
c_g <- fastgreedy.community(g)
size_community <- data.frame(sizes(c_g))
dim(size_community)

# Relabel each vertex by its community membership, keep only the edges that
# run between different communities, and count how many link each pair.
V(g)$name <- membership(c_g)
x <- as_edgelist(g, names = T)
x = data.frame(x)
x <- x[which(x$X1 != x$X2),]
y <- ddply(x, .(X1, X2), nrow)
g3 <- graph_from_data_frame(y, directed = T)
E(g3)$weight <- y$V1

# Use the community sizes to scale the collapsed nodes.
size_community_1 <- size_community[c(1:102),]
V(g3)$size <- size_community_1$Freq

# Plot the community-level network, sizing nodes by community size and
# edges by the number of inter-community links.
plot(g3, vertex.label = NA, layout = layout.sphere, main = "sphere",
     vertex.size = V(g3)$size/60, edge.arrow.width = 0,
     edge.width = E(g3)$weight/5, vertex.color = "brown", edge.color = "blue")
ggnet2(g3, node.label = NA, node.size = V(g3)$size/55, node.color = "black",
       edge.color = "grey", layout = layout.sphere, main = "sphere",
       edge.width = E(g3)$weight/5)
###Output
_____no_output_____
doc/Report-wf2020-10-03.ipynb
###Markdown Demoing `annotated_plt()` and `start_frame`+`max_frame`. But found issue with timing files and video tracking. ###Code import os import sys import cv2 from matplotlib import pyplot as plt # load our code sys.path.insert(0, os.path.abspath('../')) from tracker import auto_tracker, set_tracker, TrackedFrame, Box # specific to jupyter notebook from jupyter_help import cvplt, cvplt_sub # load any changes as we make them %load_ext autoreload %autoreload 2 vid_fname = "../input/run1.mov" csv_fname = "../input/10997_20180818_mri_1_view.csv" init_box = (64,46,70,79) start_frame = 6*60 # 6 seconds is start of first trial end_frame = start_frame + 12*60 # 10 seconds after start should get all of trail ###Output _____no_output_____ ###Markdown Run eye tracking for 6 to 12 seconds ###Code track = auto_tracker(vid_fname, init_box, write_img=False, start_frame=start_frame, max_frames=end_frame) track.set_events(csv_fname) track.run_tracker() ###Output initializign tracking @ 360 frame @ step 500, center = (105.00, 84.50); 78.16 fps @ step 750, center = (99.00, 84.50); 78.64 fps @ step 1000, center = (105.00, 85.50); 78.54 fps Ending of the analysis ###Markdown taskHere is an attempt to match task onsets to eye tracking w/ colored vertical lines for each type. But they do not overlap! Wrong file?! ~Wrong FPS?~ Saccades appear to happen before the event onset. The participant is not prescient. ###Code plt.plot([float('nan') if x==0 else x for x in track.pupil_x]) d = track.onset_labels in_range = (d.onset_frame >= start_frame) & (d.onset_frame <= end_frame) d = d[in_range] event_colors = {'cue': 'k', 'vgs': 'g', 'dly': 'b', 'mgs': 'r'} plt.vlines(d.onset_frame - start_frame,80,110, color=[event_colors[x] for x in d.event]) ###Output _____no_output_____ ###Markdown into functionPulled this code into the class as function `annotated_plt`. ###Code track.annotated_plt() ###Output _____no_output_____ ###Markdown imageAlso added a new frame overlay drawer to add event to frames. But is also likely misaligned.Note the colored symbol on the top left and right ###Code vs = cv2.VideoCapture(vid_fname) count = 6*60 vs.set(1, count) tframe = TrackedFrame(vs.read()[1], count) track.draw_event(tframe.frame, count) cue = tframe.frame.copy() count = (6+2)*60 vs.set(1, count) tframe = TrackedFrame(vs.read()[1], count) box = track.find_box(tframe.frame) tframe.set_box(box) track.draw_event(tframe.frame, count) vgs = tframe.frame.copy() count = (6+5)*60 vs.set(1, count) tframe = TrackedFrame(vs.read()[1], count) track.draw_event(tframe.frame, count) dly = tframe.frame.copy() count = (6+10)*60 vs.set(1, count) tframe = TrackedFrame(vs.read()[1], count) track.draw_event(tframe.frame, count) mgs = tframe.frame.copy() cvplt_sub([cue, vgs, dly, mgs],2,2) track.onset_labels ###Output _____no_output_____
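###Markdown
One quick way to probe the suspected misalignment is to re-draw the trace with the event onsets shifted by a candidate lag and see whether the saccades line up. The sketch below uses a hypothetical offset of one second (60 frames at this video's 60 fps) purely for illustration; the real lag, if any, would have to come from the timing files.
###Code
# Hypothetical constant lag, in frames (not derived from the timing files).
offset = 60
plt.plot([float('nan') if x == 0 else x for x in track.pupil_x])
plt.vlines(d.onset_frame - start_frame + offset, 80, 110,
           color=[event_colors[x] for x in d.event])
###Output
_____no_output_____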
data/sars-cov-2-genbank/1-download-data.ipynb
###Markdown Download SARS-CoV-2 genomes from genbankThe download script is derived from one provided by [covidcg](https://github.com/vector-engineering/covidcg/tree/master/workflow_genbank_ingest/scripts). ###Code import time output_file = 'data/sars-cov-2-genbank.json.gz' begin_time = time.time() !echo "Downloaded on: `date`" !python ../../scripts/download-ncbi-sars-cov-2.py | gzip > {output_file} end_time = time.time() print(f'Took {(end_time - begin_time)/60:0.1f} minutes') print(f'Output in: {output_file}') !ls -lh {output_file} ###Output -rw-r--r-- 1 apetkau grp_apetkau 1.5G May 6 12:06 data/sars-cov-2-genbank.json.gz
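###Markdown
A quick sanity check on the download, sketched under the assumption that the script emits one JSON record per line (if it writes a single JSON document instead, the count below is just the number of text lines):
###Code
import gzip

# Count lines in the gzipped output without fully decompressing it to disk.
with gzip.open(output_file, 'rt') as f:
    n_lines = sum(1 for _ in f)
print(f'{n_lines} lines in {output_file}')
###Output
_____no_output_____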
dyad-year-eda.ipynb
###Markdown Read in data ###Code colnames = list(pd.read_csv('data/colnames.csv', header=None).loc[:,0]) df_raw = pd.read_csv('data/ICOWdyadyr.csv', header=0, names=colnames, low_memory=False) df_raw.sample(10) df_raw.groupby('claim').issue.count().sort_values(ascending=False).head() df_raw[df_raw.claim == 120].sort_values('year') #df_raw[df_raw.claim == 56].sort_values('year').attanyp df_raw.dyad.value_counts(dropna=False).sort_index() df_raw[df_raw.dyad.isna()] ###Output _____no_output_____ ###Markdown Remove irrelevant and extraneous columns ###Code # conflict-type-specific columns df = df_raw[df_raw.dyad.notnull()] df = df.dropna(axis=1) # ~metadata columns #dropcols = 'issue claimdy claim dyadnum chal tgt version'.split() dropcols = 'issue claimdy claim dyadnum chal tgt version'.split() # columns that are linearly dependent on target dropcols.append('attemptsp') dropcols.append('attemptst') dropcols.append('attnone') dropcols.append('nbilat') dropcols.append('attbilat') dropcols.append('attanyt') dropcols.append('icowsalc') dropcols.append('n3non') dropcols.append('att3non') dropcols.append('att3rd') dropcols.append('attmult') df = df.drop(dropcols, axis=1) # REMOVE: # issue # claimdy # claim # dyadnum # chal # tgt # dyad # version # ~year ###Output _____no_output_____ ###Markdown Replace categorical columns with dummy variable columns ###Code # region region_names = ['west_hemi', 'europe', 'mid_east'] region_dummies = pd.get_dummies(df.region) region_dummies.columns = region_names region_dummies df = df.drop('region', axis=1) df = pd.concat([df, region_dummies], axis=1) df ###Output _____no_output_____ ###Markdown Basic random forest MVP ###Code def evaluate_model(predictions, probs, train_predictions, train_probs): """Compare machine learning model to baseline performance. 
Computes statistics and shows ROC curve.""" baseline = {} baseline['recall'] = recall_score(y_test, [1 for _ in range(len(y_test))]) baseline['precision'] = precision_score(y_test, [1 for _ in range(len(y_test))]) baseline['roc'] = 0.5 results = {} results['recall'] = recall_score(y_test, predictions) results['precision'] = precision_score(y_test, predictions) results['roc'] = roc_auc_score(y_test, probs) train_results = {} train_results['recall'] = recall_score(y_train, train_predictions) train_results['precision'] = precision_score(y_train, train_predictions) train_results['roc'] = roc_auc_score(y_train, train_probs) for metric in ['recall', 'precision', 'roc']: print(f'{metric.capitalize()} Baseline: {round(baseline[metric], 2)} Test: {round(results[metric], 2)} Train: {round(train_results[metric], 2)}') # Calculate false positive rates and true positive rates base_fpr, base_tpr, _ = roc_curve(y_test, [1 for _ in range(len(y_test))]) model_fpr, model_tpr, _ = roc_curve(y_test, probs) plt.figure(figsize = (8, 6)) plt.rcParams['font.size'] = 16 # Plot both curves plt.plot(base_fpr, base_tpr, 'b', label = 'baseline') plt.plot(model_fpr, model_tpr, 'r', label = 'model') plt.legend(); plt.xlabel('False Positive Rate'); plt.ylabel('True Positive Rate'); plt.title('ROC Curves'); y = df.attanyp X = df.drop('attanyp', axis=1) randseed = np.random.randint(low=1, high=100) # 30% examples in test data X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.25, random_state=randseed) # Create the model with 100 trees rfm = RandomForestClassifier( n_estimators=500, #criterion='gini', #max_depth=None, #min_samples_split=2, #min_samples_leaf=1, #min_weight_fraction_leaf=0, max_features='sqrt', #max_leaf_nodes=None, #min_impurity_decrease=0, #min_impurity_split=1e-7, #bootstrap=True, #oob_score=False, #n_jobs=None, #random_state=None, #verbose=0, #warm_start=False, #class_weight=None ) # Fit on training data rfm.fit(X_train, y_train) # train set train_rfm_predictions = rfm.predict(X_train) train_rfm_probs = rfm.predict_proba(X_train)[:, 1] # test set rfm_predictions = rfm.predict(X_test) rfm_probs = rfm.predict_proba(X_test)[:, 1] rfm.feature_importances_ plt.plot(rfm.feature_importances_, 'o') #pd.DataFrame([rfm_predictions, y_test]) rfm.feature_importances_ mask = rfm.feature_importances_ >= 0.036 X.columns[mask] evaluate_model(rfm_predictions, rfm_probs, train_rfm_predictions, train_rfm_probs) # Calculate roc auc roc_value = roc_auc_score(y_test, rfm_probs) roc_value print(classification_report(y_test, rfm_predictions)) print(confusion_matrix(y_test, rfm_predictions)) ###Output [[2106 72] [ 253 74]] ###Markdown EDA pair plots ###Code df_y = df['attanyp'] df_ = df.iloc[:,20:30] df_['TARGET'] = df_y df_.columns df_ pairfig = sns.pairplot(data=df_) ###Output _____no_output_____
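###Markdown
The unlabeled index plot of `rfm.feature_importances_` earlier is hard to read; a sketch of a sorted, labeled version (using the fitted forest and the `X` columns already defined) makes the 0.036 importance cut-off easier to judge:
###Code
# Sort feature importances and label them with the column names from X.
importances = pd.Series(rfm.feature_importances_, index=X.columns).sort_values()
importances.plot(kind='barh', figsize=(8, 10))
plt.xlabel('Importance')
plt.show()
###Output
_____no_output_____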
jupyter/data/Similarity.ipynb
###Markdown Similar-word search using GloVe word vectors
###Code
'''
Enter the word whose similar words you want to look up, and how many of the top results to retrieve.
'''
word = "カバー"
top_k = 20

from gensim.models import KeyedVectors
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.font_manager as fm

mpl.font_manager._rebuild()
plt.rcParams['font.family'] = 'IPAexGothic'

glove_vector_file = "vector/gensim_glove_vectors.txt"
glove_vectors = KeyedVectors.load_word2vec_format(glove_vector_file, binary=False)

results = glove_vectors.most_similar(word, [], top_k)
rank = np.array([i + 1 for i in range(len(results))])
words = np.array([result[0] for result in results])
score = np.array([result[1] for result in results])

plt.rcParams["font.size"] = 15
plt.figure(figsize=(10,10))
plt.barh(rank, score, color="blue", alpha=0.6)
plt.yticks(rank, words)
plt.title("Top {} similar words for {}".format(top_k, word))
plt.xlabel("Score")
plt.ylabel("Words")
plt.show()
###Output
_____no_output_____
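###Markdown
Beyond ranked lists, gensim's `KeyedVectors` can also score a single pair of words; the sketch below reuses the query word and its top hit from `results`, both of which are in the vocabulary at this point:
###Code
# Cosine similarity between the query word and its closest neighbour.
best_word, best_score = results[0]
print(word, best_word, glove_vectors.similarity(word, best_word))
###Output
_____no_output_____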
big_data_essentials/homeworks/week_6/sample_estimates.ipynb
###Markdown week_6 ###Code import pandas as pd df = pd.read_csv('sample10000.csv') df.shape all_count = df.shape[0] df.head() # Using the data dictionary, check how many passengers in the sample paid for their ride with cash. cash_payers = df[df['payment_type'] == 2] cash_payers_amount = cash_payers.shape[0] print('%s people paid for their ride with cash.' % cash_payers_amount) # sample10000 = pd.read_csv('sample10000.csv') # sample10000.payment_type.value_counts() cash_payers_proportion = cash_payers_amount / all_count cash_payers_proportion # Build a 99% confidence interval for the proportion of cash payers. What is its' lower boundary? from statsmodels.stats.proportion import proportion_confint proportion_confint(cash_payers_amount, all_count, alpha=0.01) # is_cash = sample10000.payment_type == 2 # from statsmodels.stats.proportion import proportion_confint # proportion_confint(sum(is_cash), len(is_cash), alpha=0.01) import numpy as np ph = cash_payers_proportion s = np.sqrt(ph * (1-ph) / cash_payers_amount) s ph - 1.95996 * s # Use the same sample to estimate the average trip distance in miles. # Provide the answer with at least two digits after decimal. trip_distance_avg = df['trip_distance'].mean() print('trip_distance_avg = %.3f' % trip_distance_avg) # sample10000.trip_distance.mean() # What is the standard deviation of the estimator from the previous question? # Provide the answer with at least three digits after decimal. trip_distance = df['trip_distance'] sd = trip_distance.std(ddof=1) / np.sqrt(len(trip_distance)) print('trip_distance_avg_standart_deviation = %.3f' % sd) # sample10000.trip_distance.std(ddof=1) / np.sqrt(len(sample10000.trip_distance)) # Calculate 95% confidence interval for the mean trip distance. # What is the upper boundary? Provide the answer with at least two digits after decimal. from scipy.stats import sem trip_distance_95ci = 1.95996 * sem(trip_distance) td_min = trip_distance_avg - trip_distance_95ci td_max = trip_distance_avg + trip_distance_95ci print('confidence interval for trip_distance = [%.2f, %.2f]' %(td_min, td_max)) # from statsmodels.stats.weightstats import _tconfint_generic # _tconfint_generic(sample10000.trip_distance.mean(), # sample10000.trip_distance.std(ddof=1) / np.sqrt(len(sample10000.trip_distance)), # len(sample10000.trip_distance) - 1, # 0.05, 'two-sided') ###Output confidence interval for trip_distance = [2.81, 2.95]
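###Markdown
As a cross-check of the 99% interval returned by `proportion_confint`, the normal-approximation bounds can be computed by hand; note that the standard error uses the full sample size `all_count`, and a 99% interval needs the 0.5%/99.5% normal quantiles rather than the 1.96 factor used in the 95% calculations.
###Code
from scipy.stats import norm

# Normal-approximation 99% CI for the cash-payment proportion.
z99 = norm.ppf(0.995)
se = np.sqrt(ph * (1 - ph) / all_count)
(ph - z99 * se, ph + z99 * se)
###Output
_____no_output_____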
courses/machine_learning/deepdive2/production_ml/labs/census.ipynb
###Markdown Preprocessing Data with Advanced Example using TensorFlow Transform
Learning objectives
1. Create a tf.Transform `preprocessing_fn`.
2. Transform the data.
3. Create an input function for training.
4. Build the model.
5. Train and Evaluate the model.
6. Export the model.
Introduction
***The Feature Engineering Component of TensorFlow Extended (TFX)***
This notebook provides a somewhat more advanced example of how TensorFlow Transform (`tf.Transform`) can be used to preprocess data using exactly the same code for both training a model and serving inferences in production.
TensorFlow Transform is a library for preprocessing input data for TensorFlow, including creating features that require a full pass over the training dataset. For example, using TensorFlow Transform you could:
* Normalize an input value by using the mean and standard deviation
* Convert strings to integers by generating a vocabulary over all of the input values
* Convert floats to integers by assigning them to buckets, based on the observed data distribution
TensorFlow has built-in support for manipulations on a single example or a batch of examples. `tf.Transform` extends these capabilities to support full passes over the entire training dataset.
The output of `tf.Transform` is exported as a TensorFlow graph which you can use for both training and serving. Using the same graph for both training and serving can prevent skew, since the same transformations are applied in both stages.
What you're doing in this notebook
In this notebook you'll be processing a widely used dataset containing census data, and training a model to do classification. Along the way you'll be transforming the data using `tf.Transform`.
Install TensorFlow Transform
###Code
!pip install tensorflow-transform
###Output
Requirement already satisfied: tensorflow-transform in /opt/conda/lib/python3.7/site-packages (1.8.0)
Successfully installed dill-0.3.1.1 google-api-core-1.31.6 google-api-python-client-2.48.0 google-cloud-bigtable-1.7.1 google-cloud-core-2.2.2 google-cloud-datastore-1.15.4 google-cloud-language-1.3.1 google-cloud-spanner-1.19.2 google-cloud-videointelligence-1.16.2 google-cloud-vision-1.0.1 httplib2-0.19.1 keras-2.8.0 libclang-14.0.1 numpy-1.21.6 protobuf-3.19.4 pyarrow-5.0.0 pyparsing-2.4.7 tensorboard-2.8.0 tensorflow-2.8.2 tensorflow-estimator-2.8.0 tensorflow-io-gcs-filesystem-0.26.0 uritemplate-3.0.1
###Markdown
**Note:** Restart the kernel before proceeding further. Select **Kernel > Restart kernel > Restart** from the menu.
###Code
# This cell is only necessary because packages were installed while python was
# running. It avoids the need to restart the runtime when running in Colab.
import pkg_resources
import importlib
importlib.reload(pkg_resources)
###Output
_____no_output_____
###Markdown
Imports and globals
First import the stuff you need.
###Code import math import os import pprint import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf print('TF: {}'.format(tf.__version__)) import apache_beam as beam print('Beam: {}'.format(beam.__version__)) import tensorflow_transform as tft import tensorflow_transform.beam as tft_beam print('Transform: {}'.format(tft.__version__)) from tfx_bsl.public import tfxio from tfx_bsl.coders.example_coder import RecordBatchToExamples ###Output TF: 2.8.2 Beam: 2.39.0 Transform: 1.8.0 ###Markdown Next download the data files: ###Code !wget https://storage.googleapis.com/artifacts.tfx-oss-public.appspot.com/datasets/census/adult.data !wget https://storage.googleapis.com/artifacts.tfx-oss-public.appspot.com/datasets/census/adult.test train_path = './adult.data' test_path = './adult.test' ###Output --2022-05-30 14:05:34-- https://storage.googleapis.com/artifacts.tfx-oss-public.appspot.com/datasets/census/adult.data Resolving storage.googleapis.com (storage.googleapis.com)... 74.125.197.128, 74.125.142.128, 74.125.195.128, ... Connecting to storage.googleapis.com (storage.googleapis.com)|74.125.197.128|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 3974305 (3.8M) [application/octet-stream] Saving to: ‘adult.data’ adult.data 100%[===================>] 3.79M --.-KB/s in 0.02s 2022-05-30 14:05:35 (187 MB/s) - ‘adult.data’ saved [3974305/3974305] --2022-05-30 14:05:35-- https://storage.googleapis.com/artifacts.tfx-oss-public.appspot.com/datasets/census/adult.test Resolving storage.googleapis.com (storage.googleapis.com)... 74.125.20.128, 108.177.98.128, 74.125.197.128, ... Connecting to storage.googleapis.com (storage.googleapis.com)|74.125.20.128|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 2003153 (1.9M) [application/octet-stream] Saving to: ‘adult.test’ adult.test 100%[===================>] 1.91M --.-KB/s in 0.01s 2022-05-30 14:05:35 (165 MB/s) - ‘adult.test’ saved [2003153/2003153] ###Markdown Name our columnsYou'll create some handy lists for referencing the columns in our dataset. ###Code CATEGORICAL_FEATURE_KEYS = [ 'workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country', ] NUMERIC_FEATURE_KEYS = [ 'age', 'capital-gain', 'capital-loss', 'hours-per-week', 'education-num' ] ORDERED_CSV_COLUMNS = [ 'age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'label' ] LABEL_KEY = 'label' ###Output _____no_output_____ ###Markdown Here's a quick preview of the data: ###Code pandas_train = pd.read_csv(train_path, header=None, names=ORDERED_CSV_COLUMNS) pandas_train.head(5) one_row = dict(pandas_train.loc[0]) COLUMN_DEFAULTS = [ '' if isinstance(v, str) else 0.0 for v in dict(pandas_train.loc[1]).values()] ###Output _____no_output_____ ###Markdown The test data has 1 header line that needs to be skipped, and a trailing "." at the end of each line. ###Code pandas_test = pd.read_csv(test_path, header=1, names=ORDERED_CSV_COLUMNS) pandas_test.head(5) testing = os.getenv("WEB_TEST_BROWSER", False) if testing: pandas_train = pandas_train.loc[:1] pandas_test = pandas_test.loc[:1] ###Output _____no_output_____ ###Markdown Define our features and schemaLet's define a schema based on what types the columns are in our input. Among other things this will help with importing them correctly. 
###Code RAW_DATA_FEATURE_SPEC = dict( [(name, tf.io.FixedLenFeature([], tf.string)) for name in CATEGORICAL_FEATURE_KEYS] + [(name, tf.io.FixedLenFeature([], tf.float32)) for name in NUMERIC_FEATURE_KEYS] + [(LABEL_KEY, tf.io.FixedLenFeature([], tf.string))] ) SCHEMA = tft.tf_metadata.dataset_metadata.DatasetMetadata( tft.tf_metadata.schema_utils.schema_from_feature_spec(RAW_DATA_FEATURE_SPEC)).schema ###Output _____no_output_____ ###Markdown [Optional] Encode and decode tf.train.Example protos This tutorial needs to convert examples from the dataset to and from `tf.train.Example` protos in a few places. The hidden `encode_example` function below converts a dictionary of features forom the dataset to a `tf.train.Example`. ###Code #@title def encode_example(input_features): input_features = dict(input_features) output_features = {} for key in CATEGORICAL_FEATURE_KEYS: value = input_features[key] feature = tf.train.Feature( bytes_list=tf.train.BytesList(value=[value.strip().encode()])) output_features[key] = feature for key in NUMERIC_FEATURE_KEYS: value = input_features[key] feature = tf.train.Feature( float_list=tf.train.FloatList(value=[value])) output_features[key] = feature label_value = input_features.get(LABEL_KEY, None) if label_value is not None: output_features[LABEL_KEY] = tf.train.Feature( bytes_list = tf.train.BytesList(value=[label_value.strip().encode()])) example = tf.train.Example( features = tf.train.Features(feature=output_features) ) return example ###Output _____no_output_____ ###Markdown Now you can convert dataset examples into `Example` protos: ###Code tf_example = encode_example(pandas_train.loc[0]) tf_example.features.feature['age'] serialized_example_batch = tf.constant([ encode_example(pandas_train.loc[i]).SerializeToString() for i in range(3) ]) serialized_example_batch ###Output 2022-05-30 14:06:15.528156: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/cuda/lib64:/usr/local/nccl2/lib:/usr/local/cuda/extras/CUPTI/lib64 2022-05-30 14:06:15.528200: W tensorflow/stream_executor/cuda/cuda_driver.cc:269] failed call to cuInit: UNKNOWN ERROR (303) 2022-05-30 14:06:15.528226: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (tensorflow-2-6-20220530-191935): /proc/driver/nvidia/version does not exist 2022-05-30 14:06:15.528739: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags. ###Markdown You can also convert batches of serialized Example protos back into a dictionary of tensors: ###Code decoded_tensors = tf.io.parse_example( serialized_example_batch, features=RAW_DATA_FEATURE_SPEC ) ###Output _____no_output_____ ###Markdown In some cases the label will not be passed in, so the encode function is written so that the label is optional: ###Code features_dict = dict(pandas_train.loc[0]) features_dict.pop(LABEL_KEY) LABEL_KEY in features_dict ###Output _____no_output_____ ###Markdown When creating an `Example` proto it will simply not contain the label key. 
###Code no_label_example = encode_example(features_dict) LABEL_KEY in no_label_example.features.feature.keys() ###Output _____no_output_____ ###Markdown Setting hyperparameters and basic housekeepingConstants and hyperparameters used for training. ###Code NUM_OOV_BUCKETS = 1 EPOCH_SPLITS = 10 TRAIN_NUM_EPOCHS = 2*EPOCH_SPLITS NUM_TRAIN_INSTANCES = len(pandas_train) NUM_TEST_INSTANCES = len(pandas_test) BATCH_SIZE = 128 STEPS_PER_TRAIN_EPOCH = tf.math.ceil(NUM_TRAIN_INSTANCES/BATCH_SIZE/EPOCH_SPLITS) EVALUATION_STEPS = tf.math.ceil(NUM_TEST_INSTANCES/BATCH_SIZE) # Names of temp files TRANSFORMED_TRAIN_DATA_FILEBASE = 'train_transformed' TRANSFORMED_TEST_DATA_FILEBASE = 'test_transformed' EXPORTED_MODEL_DIR = 'exported_model_dir' if testing: TRAIN_NUM_EPOCHS = 1 ###Output _____no_output_____ ###Markdown Preprocessing with `tf.Transform` Create a `tf.Transform` preprocessing_fnThe _preprocessing function_ is the most important concept of tf.Transform. A preprocessing function is where the transformation of the dataset really happens. It accepts and returns a dictionary of tensors, where a tensor means a [`Tensor`](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/Tensor) or [`SparseTensor`](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/SparseTensor). There are two main groups of API calls that typically form the heart of a preprocessing function:1. **TensorFlow Ops:** Any function that accepts and returns tensors, which usually means TensorFlow ops. These add TensorFlow operations to the graph that transforms raw data into transformed data one feature vector at a time. These will run for every example, during both training and serving.2. **Tensorflow Transform Analyzers/Mappers:** Any of the analyzers/mappers provided by tf.Transform. These also accept and return tensors, and typically contain a combination of Tensorflow ops and Beam computation, but unlike TensorFlow ops they only run in the Beam pipeline during analysis requiring a full pass over the entire training dataset. The Beam computation runs only once, (prior to training, during analysis), and typically make a full pass over the entire training dataset. They create `tf.constant` tensors, which are added to your graph. For example, `tft.min` computes the minimum of a tensor over the training dataset. Here is a `preprocessing_fn` for this dataset. It does several things:1. Using `tft.scale_to_0_1`, it scales the numeric features to the `[0,1]` range.2. Using `tft.compute_and_apply_vocabulary`, it computes a vocabulary for each of the categorical features, and returns the integer IDs for each input as an `tf.int64`. This applies both to string and integer categorical-inputs.3. It applies some manual transformations to the data using standard TensorFlow operations. Here these operations are applied to the label but could transform the features as well. The TensorFlow operations do several things: * They build a lookup table for the label (the `tf.init_scope` ensures that the table is only created the first time the function is called). * They normalize the text of the label. * They convert the label to a one-hot. ###Code def preprocessing_fn(inputs): """Preprocess input columns into transformed columns.""" # Since you are modifying some features and leaving others unchanged, you # start by setting `outputs` to a copy of `inputs. outputs = inputs.copy() # Scale numeric columns to have range [0, 1]. 
for key in NUMERIC_FEATURE_KEYS: outputs[key] = tft.scale_to_0_1(inputs[key]) # For all categorical columns except the label column, you generate a # vocabulary but do not modify the feature. This vocabulary is instead # used in the trainer, by means of a feature column, to convert the feature # from a string to an integer id. for key in CATEGORICAL_FEATURE_KEYS: outputs[key] = tft.compute_and_apply_vocabulary( tf.strings.strip(inputs[key]), num_oov_buckets=NUM_OOV_BUCKETS, vocab_filename=key) # For the label column you provide the mapping from string to index. table_keys = ['>50K', '<=50K'] with tf.init_scope(): initializer = tf.lookup.KeyValueTensorInitializer( keys=table_keys, values=tf.cast(tf.range(len(table_keys)), tf.int64), key_dtype=tf.string, value_dtype=tf.int64) table = tf.lookup.StaticHashTable(initializer, default_value=-1) # Remove trailing periods for test data when the data is read with tf.data. # label_str = tf.sparse.to_dense(inputs[LABEL_KEY]) label_str = inputs[LABEL_KEY] label_str = tf.strings.regex_replace(label_str, r'\.$', '') label_str = tf.strings.strip(label_str) data_labels = table.lookup(label_str) transformed_label = tf.one_hot( indices=data_labels, depth=len(table_keys), on_value=1.0, off_value=0.0) outputs[LABEL_KEY] = tf.reshape(transformed_label, [-1, len(table_keys)]) return outputs ###Output _____no_output_____ ###Markdown SyntaxYou're almost ready to put everything together and use Apache Beam to run it.Apache Beam uses a special syntax to define and invoke transforms. For example, in this line:```result = pass_this | 'name this step' >> to_this_call```The method `to_this_call` is being invoked and passed the object called `pass_this`, and this operation will be referred to as `name this step` in a stack trace. The result of the call to `to_this_call` is returned in `result`. You will often see stages of a pipeline chained together like this:```result = apache_beam.Pipeline() | 'first step' >> do_this_first() | 'second step' >> do_this_last()```and since that started with a new pipeline, you can continue like this:```next_result = result | 'doing more stuff' >> another_function()``` Transform the dataNow you're ready to start transforming our data in an Apache Beam pipeline.1. Read in the data using the `tfxio.CsvTFXIO` CSV reader (to process lines of text in a pipeline use `tfxio.BeamRecordCsvTFXIO` instead).1. Analyse and transform the data using the `preprocessing_fn` defined above.1. Write out the result as a `TFRecord` of `Example` protos, which you will use for training a model later ###Code def transform_data(train_data_file, test_data_file, working_dir): """Transform the data and write out as a TFRecord of Example protos. Read in the data using the CSV reader, and transform it using a preprocessing pipeline that scales numeric data and converts categorical data from strings to int64 values indices, by creating a vocabulary for each category. Args: train_data_file: File containing training data test_data_file: File containing test data working_dir: Directory to write transformed data and metadata to """ # The "with" block will create a pipeline, and run that pipeline at the exit # of the block. with beam.Pipeline() as pipeline: with tft_beam.Context(temp_dir=tempfile.mkdtemp()): # Create a TFXIO to read the census data with the schema. To do this you # need to list all columns in order since the schema doesn't specify the # order of columns in the csv. 
# You first read CSV files and use BeamRecordCsvTFXIO whose .BeamSource() # accepts a PCollection[bytes] because you need to patch the records first # (see "FixCommasTrainData" below). Otherwise, tfxio.CsvTFXIO can be used # to both read the CSV files and parse them to TFT inputs: # csv_tfxio = tfxio.CsvTFXIO(...) # raw_data = (pipeline | 'ToRecordBatches' >> csv_tfxio.BeamSource()) train_csv_tfxio = tfxio.CsvTFXIO( file_pattern=train_data_file, telemetry_descriptors=[], column_names=ORDERED_CSV_COLUMNS, schema=SCHEMA) # Read in raw data and convert using CSV TFXIO. raw_data = ( pipeline | 'ReadTrainCsv' >> train_csv_tfxio.BeamSource()) # Combine data and schema into a dataset tuple. Note that you already used # the schema to read the CSV data, but you also need it to interpret # raw_data. cfg = train_csv_tfxio.TensorAdapterConfig() raw_dataset = (raw_data, cfg) # The TFXIO output format is chosen for improved performance. transformed_dataset, transform_fn = ( raw_dataset | tft_beam.AnalyzeAndTransformDataset( preprocessing_fn, output_record_batches=True)) # Transformed metadata is not necessary for encoding. transformed_data, _ = transformed_dataset # Extract transformed RecordBatches, encode and write them to the given # directory. # TODO(b/223384488): Switch to `RecordBatchToExamplesEncoder`. _ = ( transformed_data | 'EncodeTrainData' >> beam.FlatMapTuple(lambda batch, _: RecordBatchToExamples(batch)) | 'WriteTrainData' >> beam.io.WriteToTFRecord( os.path.join(working_dir, TRANSFORMED_TRAIN_DATA_FILEBASE))) # Now apply transform function to test data. In this case you remove the # trailing period at the end of each line, and also ignore the header line # that is present in the test data file. test_csv_tfxio = tfxio.CsvTFXIO( file_pattern=test_data_file, skip_header_lines=1, telemetry_descriptors=[], column_names=ORDERED_CSV_COLUMNS, schema=SCHEMA) raw_test_data = ( pipeline | 'ReadTestCsv' >> test_csv_tfxio.BeamSource()) raw_test_dataset = (raw_test_data, test_csv_tfxio.TensorAdapterConfig()) # The TFXIO output format is chosen for improved performance. transformed_test_dataset = ( (raw_test_dataset, transform_fn) | tft_beam.TransformDataset(output_record_batches=True)) # Transformed metadata is not necessary for encoding. transformed_test_data, _ = transformed_test_dataset # Extract transformed RecordBatches, encode and write them to the given # directory. _ = ( transformed_test_data | 'EncodeTestData' >> beam.FlatMapTuple(lambda batch, _: RecordBatchToExamples(batch)) | 'WriteTestData' >> beam.io.WriteToTFRecord( os.path.join(working_dir, TRANSFORMED_TEST_DATA_FILEBASE))) # Will write a SavedModel and metadata to working_dir, which can then # be read by the tft.TFTransformOutput class. _ = ( transform_fn | 'WriteTransformFn' >> tft_beam.WriteTransformFn(working_dir)) ###Output _____no_output_____ ###Markdown Run the pipeline: ###Code import tempfile import pathlib output_dir = os.path.join(tempfile.mkdtemp(), 'keras') # Transform the data # TODO 1: Your code goes here ###Output WARNING:apache_beam.runners.interactive.interactive_environment:Dependencies required for Interactive Beam PCollection visualization are not available, please use: `pip install apache-beam[interactive]` to install necessary dependencies to enable all data visualization features. 
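###Markdown
One way the `TODO 1` cell above might be completed is simply to call the `transform_data` helper defined earlier on the downloaded CSV files; a minimal sketch:
###Code
# Possible completion of TODO 1 (a sketch): run the Beam pipeline defined in
# `transform_data` on the census CSVs, writing the transformed TFRecords and
# the transform_fn SavedModel under `output_dir`.
transform_data(train_path, test_path, output_dir)
###Output
_____no_output_____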
###Markdown Wrap up the output directory as a `tft.TFTransformOutput`: ###Code tf_transform_output = tft.TFTransformOutput(output_dir) tf_transform_output.transformed_feature_spec() ###Output _____no_output_____ ###Markdown If you look in the directory you'll see it contains three things:1. The `train_transformed` and `test_transformed` data files2. The `transform_fn` directory (a `tf.saved_model`)3. The `transformed_metadata` The followning sections show how to use these artifacts to train a model. ###Code !ls -l {output_dir} ###Output total 15704 -rw-r--r-- 1 jupyter jupyter 5356449 May 30 14:07 test_transformed-00000-of-00001 -rw-r--r-- 1 jupyter jupyter 10712569 May 30 14:07 train_transformed-00000-of-00001 drwxr-xr-x 4 jupyter jupyter 4096 May 30 14:07 transform_fn drwxr-xr-x 2 jupyter jupyter 4096 May 30 14:07 transformed_metadata ###Markdown Using our preprocessed data to train a model using tf.kerasTo show how `tf.Transform` enables us to use the same code for both training and serving, and thus prevent skew, you're going to train a model. To train our model and prepare our trained model for production you need to create input functions. The main difference between our training input function and our serving input function is that training data contains the labels, and production data does not. The arguments and returns are also somewhat different. Create an input function for training Running the pipeline in the previous section created `TFRecord` files containing the the transformed data.The following code uses `tf.data.experimental.make_batched_features_dataset` and `tft.TFTransformOutput.transformed_feature_spec` to read these data files as a `tf.data.Dataset`: ###Code def _make_training_input_fn(tf_transform_output, train_file_pattern, batch_size): """An input function reading from transformed data, converting to model input. Args: tf_transform_output: Wrapper around output of tf.Transform. transformed_examples: Base filename of examples. batch_size: Batch size. Returns: The input data for training or eval, in the form of k. """ def input_fn(): return tf.data.experimental.make_batched_features_dataset( file_pattern=train_file_pattern, batch_size=batch_size, features=tf_transform_output.transformed_feature_spec(), reader=tf.data.TFRecordDataset, label_key=LABEL_KEY, shuffle=True) return input_fn train_file_pattern = pathlib.Path(output_dir)/f'{TRANSFORMED_TRAIN_DATA_FILEBASE}*' # Create the input function input_fn = # TODO 2: Your code goes here ###Output _____no_output_____ ###Markdown Below you can see a transformed sample of the data. 
Note how the numeric columns like `education-num` and `hourd-per-week` are converted to floats with a range of [0,1], and the string columns have been converted to IDs: ###Code for example, label in input_fn().take(1): break pd.DataFrame(example) label ###Output _____no_output_____ ###Markdown Train, Evaluate the model Build the model ###Code def build_keras_model(working_dir): inputs = build_keras_inputs(working_dir) encoded_inputs = encode_inputs(inputs) stacked_inputs = tf.concat(tf.nest.flatten(encoded_inputs), axis=1) output = tf.keras.layers.Dense(100, activation='relu')(stacked_inputs) output = tf.keras.layers.Dense(50, activation='relu')(output) output = tf.keras.layers.Dense(2)(output) model = tf.keras.Model(inputs=inputs, outputs=output) return model def build_keras_inputs(working_dir): tf_transform_output = tft.TFTransformOutput(working_dir) feature_spec = tf_transform_output.transformed_feature_spec().copy() feature_spec.pop(LABEL_KEY) # Build the `keras.Input` objects. inputs = {} for key, spec in feature_spec.items(): if isinstance(spec, tf.io.VarLenFeature): inputs[key] = tf.keras.layers.Input( shape=[None], name=key, dtype=spec.dtype, sparse=True) elif isinstance(spec, tf.io.FixedLenFeature): inputs[key] = tf.keras.layers.Input( shape=spec.shape, name=key, dtype=spec.dtype) else: raise ValueError('Spec type is not supported: ', key, spec) return inputs def encode_inputs(inputs): encoded_inputs = {} for key in inputs: feature = tf.expand_dims(inputs[key], -1) if key in CATEGORICAL_FEATURE_KEYS: num_buckets = tf_transform_output.num_buckets_for_transformed_feature(key) encoding_layer = ( tf.keras.layers.CategoryEncoding( num_tokens=num_buckets, output_mode='binary', sparse=False)) encoded_inputs[key] = encoding_layer(feature) else: encoded_inputs[key] = feature return encoded_inputs model = build_keras_model(output_dir) tf.keras.utils.plot_model(model,rankdir='LR', show_shapes=True) ###Output _____no_output_____ ###Markdown Build the datasets ###Code def get_dataset(working_dir, filebase): tf_transform_output = tft.TFTransformOutput(working_dir) data_path_pattern = os.path.join( working_dir, filebase + '*') input_fn = _make_training_input_fn( tf_transform_output, data_path_pattern, batch_size=BATCH_SIZE) dataset = input_fn() return dataset ###Output _____no_output_____ ###Markdown Train and evaluate the model: ###Code def train_and_evaluate( model, working_dir): """Train the model on training data and evaluate on test data. Args: working_dir: The location of the Transform output. 
num_train_instances: Number of instances in train set num_test_instances: Number of instances in test set Returns: The results from the estimator's 'evaluate' method """ train_dataset = get_dataset(working_dir, TRANSFORMED_TRAIN_DATA_FILEBASE) validation_dataset = get_dataset(working_dir, TRANSFORMED_TEST_DATA_FILEBASE) model = build_keras_model(working_dir) # Train the model # TODO 3: Your code goes here metric_values = model.evaluate(validation_dataset, steps=EVALUATION_STEPS, return_dict=True) return model, history, metric_values def train_model(model, train_dataset, validation_dataset): model.compile(optimizer='adam', loss=tf.losses.CategoricalCrossentropy(from_logits=True), metrics=['accuracy']) history = model.fit(train_dataset, validation_data=validation_dataset, epochs=TRAIN_NUM_EPOCHS, steps_per_epoch=STEPS_PER_TRAIN_EPOCH, validation_steps=EVALUATION_STEPS) return history model, history, metric_values = train_and_evaluate(model, output_dir) plt.plot(history.history['loss'], label='Train') plt.plot(history.history['val_loss'], label='Eval') plt.ylim(0,max(plt.ylim())) plt.legend() plt.title('Loss'); ###Output _____no_output_____ ###Markdown Transform new dataIn the previous section the training process used the hard-copies of the transformed data that were generated by `tft_beam.AnalyzeAndTransformDataset` in the `transform_dataset` function. For operating on new data you'll need to load final version of the `preprocessing_fn` that was saved by `tft_beam.WriteTransformFn`. The `TFTransformOutput.transform_features_layer` method loads the `preprocessing_fn` SavedModel from the output directory. Here's a function to load new, unprocessed batches from a source file: ###Code def read_csv(file_name, batch_size): return tf.data.experimental.make_csv_dataset( file_pattern=file_name, batch_size=batch_size, column_names=ORDERED_CSV_COLUMNS, column_defaults=COLUMN_DEFAULTS, prefetch_buffer_size=0, ignore_errors=True) for ex in read_csv(test_path, batch_size=5): break pd.DataFrame(ex) ###Output _____no_output_____ ###Markdown Load the `tft.TransformFeaturesLayer` to transform this data with the `preprocessing_fn`: ###Code ex2 = ex.copy() ex2.pop('fnlwgt') tft_layer = tf_transform_output.transform_features_layer() t_ex = tft_layer(ex2) label = t_ex.pop(LABEL_KEY) pd.DataFrame(t_ex) ###Output INFO:tensorflow:struct2tensor is not available. ###Markdown The `tft_layer` is smart enough to still execute the transformation if only a subset of features are passed in. For example, if you only pass in two features, you'll get just the transformed versions of those features back: ###Code ex2 = pd.DataFrame(ex)[['education', 'hours-per-week']] ex2 pd.DataFrame(tft_layer(dict(ex2))) ###Output _____no_output_____ ###Markdown Here's a more robust version that drops features that are not in the feature-spec, and returns a `(features, label)` pair if the label is in the provided features: ###Code class Transform(tf.Module): def __init__(self, working_dir): self.working_dir = working_dir self.tf_transform_output = tft.TFTransformOutput(working_dir) self.tft_layer = tf_transform_output.transform_features_layer() @tf.function def __call__(self, features): raw_features = {} for key, val in features.items(): # Skip unused keys if key not in RAW_DATA_FEATURE_SPEC: continue raw_features[key] = val # Apply the `preprocessing_fn`. transformed_features = tft_layer(raw_features) if LABEL_KEY in transformed_features: # Pop the label and return a (features, labels) pair. 
data_labels = transformed_features.pop(LABEL_KEY) return (transformed_features, data_labels) else: return transformed_features transform = Transform(output_dir) t_ex, t_label = transform(ex) pd.DataFrame(t_ex) ###Output _____no_output_____ ###Markdown Now you can use `Dataset.map` to apply that transformation, on the fly to new data: ###Code # Evaluate the model # TODO 4: Your code goes here ###Output 128/128 [==============================] - 1s 4ms/step - loss: 0.3027 - accuracy: 0.8781 ###Markdown Export the modelSo you have a trained model, and a method to apply the `preporcessing_fn` to new data. Assemble them into a new model that accepts serialized `tf.train.Example` protos as input. ###Code class ServingModel(tf.Module): def __init__(self, model, working_dir): self.model = model self.working_dir = working_dir self.transform = Transform(working_dir) @tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)]) def __call__(self, serialized_tf_examples): # parse the tf.train.Example feature_spec = RAW_DATA_FEATURE_SPEC.copy() feature_spec.pop(LABEL_KEY) parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec) # Apply the `preprocessing_fn` transformed_features = self.transform(parsed_features) # Run the model outputs = self.model(transformed_features) # Format the output classes_names = tf.constant([['0', '1']]) classes = tf.tile(classes_names, [tf.shape(outputs)[0], 1]) return {'classes': classes, 'scores': outputs} def export(self, output_dir): # Increment the directory number. This is required in order to make this # model servable with model_server. save_model_dir = pathlib.Path(output_dir)/'model' number_dirs = [int(p.name) for p in save_model_dir.glob('*') if p.name.isdigit()] id = max([0] + number_dirs)+1 save_model_dir = save_model_dir/str(id) # Set the signature to make it visible for serving. concrete_serving_fn = self.__call__.get_concrete_function() signatures = {'serving_default': concrete_serving_fn} # Export the model. tf.saved_model.save( self, str(save_model_dir), signatures=signatures) return save_model_dir ###Output _____no_output_____ ###Markdown Build the model and test-run it on the batch of serialized examples: ###Code serving_model = ServingModel(model, output_dir) serving_model(serialized_example_batch) ###Output _____no_output_____ ###Markdown Export the model as a SavedModel: ###Code # Export the model saved_model_dir = # TODO 5: Your code goes here saved_model_dir ###Output INFO:tensorflow:Assets written to: /tmp/tmps2c64whv/keras/model/1/assets ###Markdown Reload the the model and test it on the same batch of examples: ###Code reloaded = tf.saved_model.load(str(saved_model_dir)) run_model = reloaded.signatures['serving_default'] run_model(serialized_example_batch) ###Output _____no_output_____
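###Markdown The exported SavedModel can also be inspected from the command line with the `saved_model_cli` tool that ships with TensorFlow (assuming it is available on the PATH); this is just a quick sanity check on the serving signature: ###Code # Hedged sketch: list the inputs/outputs of the exported serving signature.
!saved_model_cli show --dir {saved_model_dir} --tag_set serve --signature_def serving_default
###Output _____no_output_____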
Seaborn/Example3/Joyplot.ipynb
###Markdown Joyplots ###Code import joypy import pandas as pd import numpy as np from matplotlib import pyplot as plt from matplotlib import cm ###Output _____no_output_____ ###Markdown Obligatory iris stuffThough not a great fit for this kind of visualization, we can generate some joyplots with the `iris` dataset. ###Code iris = pd.read_csv("data/iris.csv") ###Output _____no_output_____ ###Markdown By default, `joypy.joyplot()` will draw joyplot with a density subplot for each numeric column in the dataframe.The density is obtained with the `gaussian_kde` function of `scipy`. ###Code %matplotlib inline fig, axes = joypy.joyplot(iris) ###Output _____no_output_____ ###Markdown If you pass a grouped dataframe, or if you pass a column name to the `by` argument, you get a density plotfor each value in the grouped column. ###Code %matplotlib inline fig, axes = joypy.joyplot(iris, by="Name") ###Output _____no_output_____ ###Markdown In the previous plot, one subplot had a much larger `y` extensions than the others.Since, by default, the subplots share the `y`-limits, the outlier causes all the other subplots to be quitecompressed.We can change this behavior letting each subplot take up the whole `y` space with `ylim='own'`, as follows. ###Code %matplotlib inline fig, axes = joypy.joyplot(iris, by="Name", ylim='own') ###Output _____no_output_____ ###Markdown In this case, we achieved more overlap, but the subplots are no longer directly comparable.Yet another option is to keep the default ylim behavior (i.e., `ylim='max'`),and simply increase the overlap factor: ###Code %matplotlib inline fig, axes = joypy.joyplot(iris, by="Name", overlap=3) ###Output _____no_output_____ ###Markdown It's also possible to draw histograms with `hist=True`, though they don't look nice when overlapping, so it's better to set `overlap=0`.With `grid=True` or `grid='both'` you also get grid lines on both axis. ###Code %matplotlib inline fig, axes = joypy.joyplot(iris, by="Name", column="SepalWidth", hist=True, bins=20, overlap=0, grid=True, legend=False) ###Output _____no_output_____ ###Markdown Global daily temperaturesSomething that is probably a better fit for joyplots than `iris`: let's visualize the distribution ofglobal daily temperatures from 1880 to 2014.(The original file can be found [here](http://berkeleyearth.org/data/)) ###Code %matplotlib inline temp = pd.read_csv("data/daily_temp.csv",comment="%") temp.head() ###Output _____no_output_____ ###Markdown The column `Anomaly` contains the global daily temperature (in °C) computed as the difference between thedaily value and the 1950-1980 global average.We can draw the distribution of the temperatures in time, grouping by `Year`, to seehow the daily temperature distribution shifted across time.Since the `y` label would get pretty crammed if we were to show all the year labels, we first preparea list where we leave only the multiples of 10.To reduce the clutter, the option `range_style='own'` limits the `x` range of each individual density plot to the range where the density is non-zero(+ an "aestethic" tolerance to avoid cutting the tails too early/abruptly),rather than spanning the whole `x` axis.The option `colormap=cm.autumn_r` provides a colormap to use along the plot.(Grouping the dataframe and computing the density plots can take a few seconds here.) 
###Code %matplotlib inline labels=[y if y%10==0 else None for y in list(temp.Year.unique())] fig, axes = joypy.joyplot(temp, by="Year", column="Anomaly", labels=labels, range_style='own', grid="y", linewidth=1, legend=False, figsize=(6,5), title="Global daily temperature 1880-2014 \n(°C above 1950-80 average)", colormap=cm.autumn_r) ###Output _____no_output_____ ###Markdown If you want, you can also plot the raw counts, rather than the estimated density. This results in noisier plots, but it might be preferable in some cases.With `fade=True`, the subplots get a progressively larger alpha value. ###Code %matplotlib inline labels=[y if y%10==0 else None for y in list(temp.Year.unique())] fig, axes = joypy.joyplot(temp, by="Year", column="Anomaly", labels=labels, range_style='own', grid="y", linewidth=1, legend=False, fade=True, figsize=(6,5), title="Global daily temperature 1880-2014 \n(°C above 1950-80 average)", kind="counts", bins=30) ###Output _____no_output_____ ###Markdown Just for fun, let's plot the same data as it were on the cover of Unknown Pleasures,the Joy Division's album where the nickname to this kind of visualization comes from.No labels/grids, no filling, black background, white lines, and a couple of adjustments just to make it look a bit more like the album cover. ###Code %matplotlib inline fig, axes = joypy.joyplot(temp,by="Year", column="Anomaly", ylabels=False, xlabels=False, grid=False, fill=False, background='k', linecolor="w", linewidth=1, legend=False, overlap=0.5, figsize=(6,5),kind="counts", bins=80) plt.subplots_adjust(left=0, right=1, top=1, bottom=0) for a in axes[:-1]: a.set_xlim([-8,8]) ###Output _____no_output_____ ###Markdown NBA players - regular season statsThe files can be obtained from [Kaggle datasets](https://www.kaggle.com/drgilermo/nba-players-stats). ###Code players = pd.read_csv("data/Players.csv",index_col=0) players.head() seasons = pd.read_csv("data/Seasons_Stats.csv", index_col=0) seasons.head() ###Output _____no_output_____ ###Markdown Join the dataframes and filter:- years starting from the 3 point line introduction (1979-80)- player seasons with at least 10 field goal attempts. ###Code joined = seasons.merge(players, on="Player") threepoints = joined[(joined.Year > 1979) & (joined["FGA"] > 10)].sort_values("Year") threepoints["3Pfract"] = threepoints["3PA"]/threepoints.FGA ###Output _____no_output_____ ###Markdown The fraction of 3 pointers attempted by each player in a season has clearly shifted a lot. In today's NBA there's a good number of players who take 40% or more of their shots from behind the line. ###Code %matplotlib inline decades = [int(y) if y%10==0 or y == 2017 else None for y in threepoints.Year.unique()] fig, axes = joypy.joyplot(threepoints, by="Year", column="3Pfract", kind="kde", range_style='own', tails=0.2, overlap=3, linewidth=1, colormap=cm.autumn_r, labels=decades, grid='y', figsize=(7,7), title="Fraction of 3 pointers \n over all field goal attempts") ###Output _____no_output_____ ###Markdown In this last plot, the distributions of the 3P percentages across the playersare drawn as raw binned counts. With `kind=normalized_counts`, the values are normalizedover the occurrences in each year: this is probably needed here, because that the number of teams and playersin the NBA has grown during the years.The median NBA player has become a much better 3P shooter. 
###Code %matplotlib inline threepoint_shooters = threepoints[threepoints["3PA"] >= 20] decades = [int(y) if y%10==0 or y == 2017 else None for y in threepoint_shooters.Year.unique()] fig, axes = joypy.joyplot(threepoint_shooters, by="Year", column="3P%", kind="normalized_counts", bins=30, range_style='all', x_range=[-0.05,0.65], overlap=2, linewidth=1, colormap=cm.autumn_r, labels=decades, grid='both', figsize=(7,7), title="3 Points % \n(at least 20 3P attempts)") ###Output _____no_output_____ ###Markdown Plotting with pre-aggregated data In case you have already aggregated data and you just want to plot them as a joyplot, you can still use joypy.Suppose we have a bunch of aggregated visit counts for each hour of the day. ###Code np.random.seed(42) df = pd.DataFrame(np.random.poisson(10,(24,7))) df.columns = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"] df.head() ###Output _____no_output_____ ###Markdown To plot them, simply call `joyplot()` with the option `kind=values`.Note that you need to manually handle some stuff about the range of the x-axis and the x ticks. ###Code %matplotlib inline x_range = list(range(24)) fig, axes = joypy.joyplot(df, kind="values", x_range=x_range) axes[-1].set_xticks(x_range); ###Output _____no_output_____ ###Markdown Plotting with raw data (not in a dataframe) Joypy can also be used to plot data not contained in a pandas dataframe. The simplest structure we can use is actually a list of lists or arrays. ###Code x = np.arange(0,100,0.1) y =[n*x for n in range(1,4)] fig, ax = joypy.joyplot(y, labels=["a","b","c"]) ###Output _____no_output_____ ###Markdown Another possibility is passing a dictionary. **Note**: until 3.6, python dictionary were not guaranteed to preserve their order. From 3.7+, dictionaries preserve the insertion order. ###Code labels = ["a","b","c"] d = {l:v for l,v in zip(labels,y)} fig, ax = joypy.joyplot(d) ###Output _____no_output_____ ###Markdown On colorsColors can be specified with combinations of the arguments `fill`, `color`, `linecolor`, `colormap`.A few minimal examples below. By default, the arguments are:- `fill=True`- `color=None`- `linecolor=None`- `colormap=None`. ###Code %matplotlib inline fig, axes = joypy.joyplot(iris) ###Output _____no_output_____ ###Markdown With the default `fill=True`, `color` or `colormap` are used to set the fill color and `linecolor` is used to set the line color.If both `color` and `colormap` are passed, `color` has the precedence. ###Code %matplotlib inline fig, axes = joypy.joyplot(iris, color="red", linecolor="blue") %matplotlib inline fig, axes = joypy.joyplot(iris, linecolor="blue", colormap=cm.autumn_r) ###Output _____no_output_____ ###Markdown With `fill=False`, the color of the line can be set in 3 ways: `color`, `linecolor`, `colormap`. If more than one argument is passed, the precedence is `linecolor` > `color` > `colormap`. ###Code %matplotlib inline fig, axes = joypy.joyplot(iris, fill=False, color="r") %matplotlib inline fig, axes = joypy.joyplot(iris, fill=False, linecolor="r") %matplotlib inline fig, axes = joypy.joyplot(iris,fill=False, colormap=cm.autumn) ###Output _____no_output_____ ###Markdown If more than one argument is passed, the precedence goes `linecolor` > `color` > `colormap`. 
###Code %matplotlib inline fig, axes = joypy.joyplot(iris, fill=False, linecolor="r", color="g", colormap=cm.autumn) ###Output _____no_output_____ ###Markdown When grouping the dataframe with the argument `by`, you can pass a list of colorswith the same size of the colums you want to plot. ###Code %matplotlib inline fig, axes = joypy.joyplot(iris,fill=True, legend=True, by="Name", color=["k","b","r","g"]) ###Output _____no_output_____ ###Markdown You can do the same with colormaps, although the resulting plot might be rather hard to parse. Note also that the legend only uses the color in the first subplot. ###Code %matplotlib inline fig, axes = joypy.joyplot(iris,fill=True, legend=True, by="Name", column=["SepalLength","PetalLength"], colormap=[cm.autumn,cm.Blues_r]) ###Output _____no_output_____
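###Markdown Since `joypy.joyplot` returns ordinary matplotlib `fig, axes` objects, any further tweaking (axis labels, a figure title, saving to disk) can be done with plain matplotlib afterwards. A small sketch -- the output filename is arbitrary: ###Code %matplotlib inline
# Hedged sketch: post-process the returned figure with plain matplotlib.
fig, axes = joypy.joyplot(iris, by="Name", column="SepalWidth", overlap=1)
axes[-1].set_xlabel("Sepal width")
fig.suptitle("Sepal width by species", y=1.02)
fig.savefig("sepalwidth_joyplot.png", dpi=150, bbox_inches="tight")
###Output _____no_output_____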
explore_and_clean_data.ipynb
###Markdown Data ExplorationLet's begin with examining a few datapoints and attributes. ###Code df.head(5) set(df.Sentiment) ###Output _____no_output_____ ###Markdown There are some immediate observations about the tweets: Obviously there are hashstags, which are concatenations of words, which might not be able to be tokenized easily. Hashtags could, however be extracted as a separate feature.Some tweets contain URL, which might not be easily processed within standard NLP pipelines.Some tweets contain tab and newline characters ('\n' or '\r').The location attributes seems not to correspond to physical information in a lot of cases. If this attribute was to be used, there would be a need for complex data cleaning. ###Code set(df.Location) ###Output _____no_output_____ ###Markdown Furthermore, the location is missing in an approximate 20% of datapoints ###Code df.isna().sum() sns.countplot(data=df, x="Sentiment", order=['Extremely Negative', 'Negative', 'Neutral', 'Positive', 'Extremely Positive']) ###Output _____no_output_____ ###Markdown Data CleaningNext let's identify potentially problematic patterns using regex. ###Code clean_df = df.copy() ###Output _____no_output_____ ###Markdown Clean up whitespace. ###Code clean_df['OriginalTweet'] = clean_df['OriginalTweet'].str.replace('[\n\r]', ' ', regex=True) clean_df['OriginalTweet'] = clean_df['OriginalTweet'].str.replace(' +', ' ', regex=True) ###Output _____no_output_____ ###Markdown Remove repeated question marks. ###Code clean_df['OriginalTweet'] = clean_df['OriginalTweet'].str.replace('\?+', '?', regex=True) clean_df['OriginalTweet'] = clean_df['OriginalTweet'].str.replace('(?:\? ?)+', '?', regex=True) ###Output _____no_output_____ ###Markdown Remove URLs. ###Code clean_df['OriginalTweet'] = clean_df['OriginalTweet'].str.replace('http[^ ]*', ' ', regex=True) ###Output _____no_output_____ ###Markdown Change ampercent sign and the xml entity to "and" word. ###Code clean_df['OriginalTweet'] = clean_df['OriginalTweet'].str.replace('(&amp;)|&', 'and', regex=True) ###Output _____no_output_____ ###Markdown Change hashtags to normal words (remove the '').'yolo' -> 'yolo' ###Code clean_df['OriginalTweet'] = clean_df['OriginalTweet'].str.replace('#', ' ', regex=True) ###Output _____no_output_____ ###Markdown Remove reference to twitter Users.Example: '@bbc' ###Code clean_df['OriginalTweet'] = clean_df['OriginalTweet'].str.replace('@[^ ]+', ' ', regex=True) ###Output _____no_output_____ ###Markdown remove duplicate whitespaces ###Code clean_df['OriginalTweet'] = clean_df['OriginalTweet'].str.replace(' +', ' ', regex=True) ###Output _____no_output_____ ###Markdown ...and lastly drop tweets, which likely don't contain a word. ###Code clean_df = clean_df[clean_df['OriginalTweet'].str.contains('\w{4,}')] ###Output _____no_output_____ ###Markdown Change labels to integer format. ###Code set(clean_df['Sentiment']) label_dict = { 'Extremely Negative': 0, 'Negative': 0, 'Neutral': 1, 'Positive': 2, 'Extremely Positive': 2 } clean_df['SentimentCode'] = clean_df['Sentiment'].replace(label_dict) clean_df ###Output _____no_output_____ ###Markdown Put all these cleaning steps in a function called 'clean_dataframe' and apply the same logic to the test set ###Code from clean_data import clean_dataframe clean_df_test = clean_dataframe(df_test) clean_df.to_csv('./data/Corona_NLP_train_clean.csv') clean_df_test.to_csv('./data/Corona_NLP_test_clean.csv') ###Output _____no_output_____
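###Markdown A quick spot-check (a lightweight sketch, not an exhaustive test) that the regex cleaning above behaved as intended, i.e. that URLs, hashtag signs and user mentions no longer appear in the cleaned tweets: ###Code # Hedged sketch: count leftover artefacts in the cleaned training tweets.
for pattern in ['http', '#', '@']:
    leftover = clean_df['OriginalTweet'].str.contains(pattern, regex=False).sum()
    print(f"tweets still containing {pattern!r}: {leftover}")
clean_df['SentimentCode'].value_counts()
###Output _____no_output_____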
Languages/Python/00 - Python Object and Data Structure/.ipynb_checkpoints/03-Print Formatting with Strings-checkpoint.ipynb
###Markdown String FormattingString formatting lets you inject items into a string rather than trying to chain items together using commas or string concatenation. As a quick comparison, consider: player = 'Thomas' points = 33 'Last night, '+player+' scored '+str(points)+' points.' concatenation f'Last night, {player} scored {points} points.' string formattingThere are three ways to perform string formatting.* The oldest method involves placeholders using the modulo `%` character.* An improved technique uses the `.format()` string method.* The newest method, introduced with Python 3.6, uses formatted string literals, called *f-strings*.Since you will likely encounter all three versions in someone else's code, we describe each of them here. Formatting with placeholdersYou can use %s to inject strings into your print statements. The modulo `%` is referred to as a "string formatting operator". ###Code print("I'm going to inject %s here." %'something') ###Output I'm going to inject something here. ###Markdown You can pass multiple items by placing them inside a tuple after the `%` operator. ###Code print("I'm going to inject %s text here, and %s text here and %s ." %('some','more','there')) ###Output I'm going to inject some text here, and more text here and there . ###Markdown You can also pass variable names: ###Code x, y = 'some', 'more' print("I'm going to inject %s text here, and %s text here."%(x,y)) ###Output I'm going to inject some text here, and more text here. ###Markdown Format conversion methods.It should be noted that two methods %s and %r convert any python object to a string using two separate methods: `str()` and `repr()`. We will learn more about these functions later on in the course, but you should note that `%r` and `repr()` deliver the *string representation* of the object, including quotation marks and any escape characters. ###Code print('He said his name was %s.' %'Fred') print('He said his name was %r.' %'Fred') ###Output He said his name was Fred. He said his name was 'Fred'. ###Markdown As another example, `\t` inserts a tab into a string. ###Code print('I once caught a fish %s.' %'this \tbig') print('I once caught a fish %r.' %'this \tbig') ###Output I once caught a fish this big. I once caught a fish 'this \tbig'. ###Markdown The `%s` operator converts whatever it sees into a string, including integers and floats. The `%d` operator converts numbers to integers first, without rounding. Note the difference below: ###Code print('I wrote %s programs today.' %3.75) print('I wrote %d programs today.' %3.75) ###Output I wrote 3.75 programs today. I wrote 3 programs today. ###Markdown Padding and Precision of Floating Point NumbersFloating point numbers use the format %5.2f. Here, 5 would be the minimum number of characters the string should contain; these may be padded with whitespace if the entire number does not have this many digits. Next to this, .2f stands for how many numbers to show past the decimal point. 
Let's see some examples: ###Code print('Floating point numbers: %5.2f' %(13.144)) print('Floating point numbers: %1.0f' %(13.144)) print('Floating point numbers: %1.5f' %(13.144)) print('Floating point numbers: %10.2f' %(13.144)) print('Floating point numbers: %25.2f' %(13.144)) ###Output Floating point numbers: 13.14 ###Markdown For more information on string formatting with placeholders visit https://docs.python.org/3/library/stdtypes.htmlold-string-formatting Multiple FormattingNothing prohibits using more than one conversion tool in the same print statement: ###Code print('First: %s, Second: %5.2f, Third: %r' %('hi!',3.1415,'bye!')) ###Output First: hi!, Second: 3.14, Third: 'bye!' ###Markdown Formatting with the `.format()` methodA better way to format objects into your strings for print statements is with the string `.format()` method. The syntax is: 'String here {} then also {}'.format('something1','something2') For example: ###Code print('This is a string with an {}'.format('insert')) ###Output This is a string with an insert ###Markdown The .format() method has several advantages over the %s placeholder method: 1. Inserted objects can be called by index position: ###Code print('The {2} {1} {0}'.format('fox','brown','quick')) ###Output The quick brown fox ###Markdown 2. Inserted objects can be assigned keywords: ###Code print('First Object: {a}, Second Object: {b}, Third Object: {c}'.format(a=1,b='Two',c=12.3)) ###Output First Object: 1, Second Object: Two, Third Object: 12.3 ###Markdown 3. Inserted objects can be reused, avoiding duplication: ###Code print('A %s saved is a %s earned.' %('penny','penny')) # vs. print('A {p} saved is a {p} earned.'.format(p='penny')) ###Output A penny saved is a penny earned. A penny saved is a penny earned. ###Markdown Alignment, padding and precision with `.format()`Within the curly braces you can assign field lengths, left/right alignments, rounding parameters and more ###Code print('{0:8} | {1:9}'.format('Fruit', 'Quantity')) print('{0:8} | {1:9}'.format('Apples', 3.)) print('{0:8} | {1:9}'.format('Oranges', 10)) ###Output Fruit | Quantity Apples | 3.0 Oranges | 10 ###Markdown By default, `.format()` aligns text to the left, numbers to the right. You can pass an optional `` to set a left, center or right alignment: ###Code print('{0:<8} | {1:^8} | {2:>8}'.format('Left','Center','Right')) print('{0:<8} | {1:^8} | {2:>8}'.format(11,22,33)) ###Output Left | Center | Right 11 | 22 | 33 ###Markdown You can precede the aligment operator with a padding character ###Code print('{0:=<8} | {1:-^8} | {2:.>8}'.format('Left','Center','Right')) print('{0:=<8} | {1:-^8} | {2:.>8}'.format(11,22,33)) ###Output Left==== | -Center- | ...Right 11====== | ---22--- | ......33 ###Markdown Field widths and float precision are handled in a way similar to placeholders. 
The following two print statements are equivalent: ###Code print('This is my ten-character, two-decimal number:%10.2f' %13.579) print('This is my ten-character, two-decimal number:{0:10.2f}'.format(13.579)) ###Output This is my ten-character, two-decimal number: 13.58 This is my ten-character, two-decimal number: 13.58 ###Markdown Note that there are 5 spaces following the colon, and 5 characters taken up by 13.58, for a total of ten characters.For more information on the string `.format()` method visit https://docs.python.org/3/library/string.htmlformatstrings Formatted String Literals (f-strings) Introduced in Python 3.6, f-strings offer several benefits over the older `.format()` string method described above. For one, you can bring outside variables immediately into to the string rather than pass them as arguments through `.format(var)`. ###Code name = 'Fred' print(f"He said his name is {name}.") ###Output He said his name is Fred. ###Markdown Pass `!r` to get the string representation: ###Code print(f"He said his name is {name!r}") ###Output He said his name is 'Fred' ###Markdown Float formatting follows `"result: {value:{width}.{precision}}"` Where with the `.format()` method you might see `{value:10.4f}`, with f-strings this can become `{value:{10}.{6}}` ###Code num = 23.45678 print("My 10 character, four decimal number is:{0:10.4f}".format(num)) print(f"My 10 character, four decimal number is:{num:{10}.{6}}") ###Output My 10 character, four decimal number is: 23.4568 My 10 character, four decimal number is: 23.4568 ###Markdown Note that with f-strings, *precision* refers to the total number of digits, not just those following the decimal. This fits more closely with scientific notation and statistical analysis. Unfortunately, f-strings do not pad to the right of the decimal, even if precision allows it: ###Code num = 23.45 print("My 10 character, four decimal number is:{0:10.4f}".format(num)) print(f"My 10 character, four decimal number is:{num:{10}.{6}}") ###Output My 10 character, four decimal number is: 23.4500 My 10 character, four decimal number is: 23.45 ###Markdown If this becomes important, you can always use `.format()` method syntax inside an f-string: ###Code num = 23.45 print("My 10 character, four decimal number is:{0:10.4f}".format(num)) print(f"My 10 character, four decimal number is:{num:10.4f}") ###Output My 10 character, four decimal number is: 23.4500 My 10 character, four decimal number is: 23.4500
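###Markdown The alignment and padding options shown earlier for `.format()` also work inside f-strings, since both use the same format-spec mini-language: ###Code left, center, right = 'Left', 'Center', 'Right'
print(f'{left:=<8} | {center:-^8} | {right:.>8}')
print(f'{11:=<8} | {22:-^8} | {33:.>8}')
###Output Left==== | -Center- | ...Right
11====== | ---22--- | ......33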
adressa_models/Adressa Baselines v3.ipynb
###Markdown 1. Preprocessing ###Code behaviors["time"] = pd.to_datetime(behaviors["time"], unit="s") behaviors = behaviors.drop_duplicates(["userId", "id"]) print("before merge: ",len(behaviors)) behaviors = behaviors.drop(columns=["title"]) articles.rename(columns={"article_id": "id"}, inplace=True) behaviors = behaviors.merge(articles, on=["id"]) print("after merge:",len(behaviors)) print("Len before removal: ",len(behaviors)) behaviors = behaviors[behaviors.groupby('userId').userId.transform('count')>2].copy() print("Len after removal: ",len(behaviors)) user_enc = LabelEncoder() article_enc = LabelEncoder() behaviors["user_id"] = user_enc.fit_transform(behaviors["userId"].values) behaviors["article_id"] = article_enc.fit_transform(behaviors["id"].values) import nltk from nltk.corpus import stopwords # Helper functions def _removeNonAscii(s): return "".join(i for i in s if ord(i)<128) def make_lower_case(text): return text.lower() def remove_stop_words(text): text = text.split() stops = set(stopwords.words("norwegian")) text = [w for w in text if not w in stops] text = " ".join(text) return text def remove_html(text): html_pattern = re.compile('<.*?>') return html_pattern.sub(r'', text) def remove_punctuation(text): text = re.sub(r'[^\w\s]', '', text) return text def text_to_list(text): text = text.split(" ") return text def take_one_category(text): temp = text.split() if len(temp) > 1: return temp[1] return temp[0] def clean_title(df): df["title_cleaned"] = df.title.apply(func = make_lower_case) df["title_cleaned"] = df.title_cleaned.apply(func = remove_stop_words) df["title_cleaned"] = df.title_cleaned.apply(func = remove_punctuation) return df def hyphen_to_underline(category): """ Convert hyphen to underline for the subcategories. So that Tfidf works correctly """ return category.replace("-","_") #behaviors = clean_title(behaviors) behaviors["category_cleaned"] = behaviors["kw_category"].apply(func = take_one_category) category_enc = LabelEncoder() subcategory_enc = LabelEncoder() behaviors["category_int"] = subcategory_enc.fit_transform(behaviors["category_cleaned"].values) users = behaviors["user_id"].unique() userid_to_profile = collections.defaultdict(list) for user_id in tqdm(users): user_subcat = behaviors[behaviors["user_id"] == user_id]["category_int"].values.tolist() counter = Counter(user_subcat) s = sorted(user_subcat, key=lambda x: (counter[x], x), reverse=True) final_subcategories = [] for elem in s: if elem not in final_subcategories: final_subcategories.append(elem) while len(final_subcategories) < 6: final_subcategories.append(0) userid_to_profile[user_id] = final_subcategories[:6] profile_df = pd.DataFrame.from_dict(userid_to_profile, orient="index") profile_df["user_id"] = profile_df.index behaviors = behaviors.merge(profile_df, on="user_id") behaviors = behaviors.rename(columns={"0": "p0","1": "p1","2": "p2","3": "p3","4": "p4","5": "p5",}) article_id_to_category_int = behaviors[["article_id", "category_int"]].set_index("article_id").to_dict() article_id_to_category_int = article_id_to_category_int["category_int"] behaviors.head(1) ###Output _____no_output_____ ###Markdown 2. 
Train test split ###Code SEED = 42 SAMPLE_SIZE = 99 NUM_NEGATIVES = 4 ALL_ARTICLE_IDS = behaviors["article_id"].unique() behaviors["article_id"].unique() interactions = behaviors[["user_id", "article_id"]] rating = [1 for i in range(len(interactions))] interactions = interactions.assign(label=pd.Series(rating)) msk = np.random.rand(len(interactions)) <0.8 train = interactions[msk] test = interactions[~msk] interactions = interactions.set_index("user_id") train = train.set_index("user_id") test = test.set_index("user_id") def negative_sampling(train_df, user_id, article_id): """ Negative sample training instance; for each positive instance, add 4 negative articles Return user_ids, news_ids, category_1, category_2, authors_onehotencoded, titles """ users, articles, labels = [], [], [] user_item_set = set(zip(train_df.index.values, train_df[article_id].values)) for (u,i) in user_item_set: for _ in range(NUM_NEGATIVES): negative_item = np.random.choice(ALL_ARTICLE_IDS) while (u, negative_item) in user_item_set: negative_item = np.random.choice(ALL_ARTICLE_IDS) users.append(u) articles.append(negative_item) labels.append(0) users.append(u) articles.append(i) labels.append(1) users, articles, labels = shuffle(users, articles, labels, random_state=0) return users[:40000], articles[:40000], labels[:40000] train_users, train_articles, train_labels = negative_sampling(train, "user_id", "article_id") train_df = pd.DataFrame(list(zip(train_users, train_articles, train_labels)), columns=["user_id", "article_ids", "label"]) def get_items_interacted(user_id, interactions_df=behaviors): interacted_items = interactions_df.loc[user_id]["article_id"] return set(interacted_items if type(interacted_items) == pd.Series else [interacted_items]) def get_not_interacted(user_id, interactions_df=behaviors): interacted_items = get_items_interacted(user_id, interactions_df) all_items = set(behaviors["article_id"]) not_interacted_items = all_items - interacted_items random.seed(SEED) not_interacted_items = random.sample(not_interacted_items, SAMPLE_SIZE) return not_interacted_items num_users = len(behaviors["user_id"].unique()) num_items = len(behaviors["article_id"].unique()) dims = 20 def get_model_neumf(num_users, num_items, dims, dense_layers=[128, 64, 32, 8]): user_input = Input(shape=(1,), name="user") item_input = Input(shape=(1,), name="item") mf_user_emb = Embedding(output_dim=dims, input_dim=num_users, input_length=1, embeddings_initializer='he_normal', embeddings_regularizer=regularizers.l2(0.001), name="mf_user_emb")(user_input) mf_item_emb = Embedding(output_dim=dims, input_dim=num_items, input_length=1, embeddings_initializer='he_normal', embeddings_regularizer=regularizers.l2(0.001), name="mf_item_emb")(item_input) num_layers = len(dense_layers) mlp_user_emb = Embedding(output_dim=int(dense_layers[0] / 2), input_dim=num_users, input_length=1, embeddings_initializer='he_normal', embeddings_regularizer=regularizers.l2(0.001), name="mlp_user_emb")(user_input) mlp_item_emb = Embedding(output_dim=int(dense_layers[0] / 2), input_dim=num_items, input_length=1, embeddings_initializer='he_normal', embeddings_regularizer=regularizers.l2(0.001), name="mlp_user_item")(item_input) # Matrix factorization mf_user_vecs = Reshape([dims])(mf_user_emb) mf_item_vecs = Reshape([dims])(mf_item_emb) mf_vec = multiply([mf_user_vecs, mf_item_vecs]) #MLP mlp_vec = Concatenate()([mlp_user_emb, mlp_item_emb]) mlp_vector = Flatten()(mlp_vec) for num_nodes in dense_layers: l = Dense(num_nodes, activation="relu") mlp_vector = 
l(mlp_vector) y = Concatenate()([mf_vec, mlp_vector]) y = Dense(1, activation="sigmoid", name="pred")(y) model = Model(inputs=[user_input, item_input], outputs=y) model.compile( optimizer=Adam(0.01), loss="binary_crossentropy", metrics=["accuracy"], ) return model model_neumf = get_model_neumf(num_users, num_items, dims) users_input, articles_input, labels_input = np.array(train_users).reshape(-1,1), np.array(train_articles).reshape(-1,1), np.array(train_labels).reshape(-1,1) all_user_ids = train.index.unique().values #user_input = df_train.iloc[:, 0].values.reshape((-1,1)) #profile_input = df_train.iloc[:, 1:6].values #item_input = df_train.iloc[:, 7].values.reshape((-1,1)) #labels = df_train.iloc[:, 8].values.reshape((-1,1)) train_loss = [] val_loss = [] train_acc = [] val_acc = [] hits_list = [] ndcg_list = [] best_hits = 0 best_ndcgs = 0 best_hits_five = 0 best_ndcgs_five = 0 epochs=4 for epoch in range(epochs): hist = model_neumf.fit([users_input, articles_input], labels_input, epochs=1, shuffle=True, verbose=1, batch_size=32) train_loss.append(hist.history["loss"]) train_acc.append(hist.history["accuracy"]) #val_loss.append(hist.history["val_loss"]) #val_acc.append(hist.history["val_accuracy"]) #hits, ndcgs, hits_five, ndcgs_five = evalaute_model_neumf( model_neumf, df_test, userid_to_true_item) #hits_list.append(np.average(hits)) #ndcg_list.append(np.average(ndcgs)) #temp_hits = np.average(hits) #temp_ndcgs = np.average(ndcgs) #if (temp_hits > best_hits): # best_hits = temp_hits # best_ndcgs = temp_ndcgs # best_hits_five = np.average(hits_five) # best_ndcgs_five = np.average(ndcgs_five) test_users = test.index.values[:1000] test_items = test.article_id.values[:1000] test_set = zip(test_users, test_items) hits = [] for (u,i) in tqdm(test_set): not_interacted_items = get_not_interacted(u) users = np.array([u]*100).astype(int) items = np.array([i] + not_interacted_items) np.random.shuffle(items) #items = random.sample(items, len(items)) predictions = model_neumf.predict([users, items]) predicted_labels = np.squeeze(predictions) print(i) print(items) top_ten_items = [items[k] for k in np.argsort(predicted_labels)[::-1][0:10].tolist()] if i in top_ten_items: hits.append(1) else: hits.append(0) print(np.average(hits)) u = test.index.values[0] i = test.article_id.values[0] not_interacted_items = get_not_interacted(u) users = np.array([u]*100) items = np.array([i] + not_interacted_items) np.random.shuffle(items) items predictions = model_neumf.predict([users, items]) predicted_labels = np.squeeze(predictions) top_ten_items = [items[k] for k in np.argsort(predicted_labels)[::-1][0:10].tolist()] users_input.shape test_users = test.index.values test_items = test.article_id.values len(test_users) ###Output _____no_output_____
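###Markdown The loop above measures hit rate@10. A complementary ranking metric is NDCG@10, which additionally rewards placing the held-out article near the top of the ranked list. A small sketch, reusing `top_ten_items` and the positive item `i` from the cells above: ###Code import math

# Hedged sketch: NDCG@k for a single held-out item. With one relevant item the
# ideal DCG is 1, so the score is 1/log2(rank+2) if the item appears, else 0.
def ndcg_at_k(ranked_items, true_item, k=10):
    for rank, item in enumerate(ranked_items[:k]):
        if item == true_item:
            return 1.0 / math.log2(rank + 2)
    return 0.0

ndcg_at_k(top_ten_items, i)
###Output _____no_output_____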
docs/PySpark-Distributed-KModes-example.ipynb
###Markdown ![](images/thinkbig.png) Demonstration of distributed K-Modes clustering First load the library: ###Code from pyspark_kmodes import * ###Output _____no_output_____ ###Markdown Create some sample data: ###Code # Create the data set import numpy as np data = np.random.choice(["a", "b", "c"], (50000, 10)) data2 = np.random.choice(["e", "f", "g"], (50000, 10)) data = list(data) + list(data2) from random import shuffle shuffle(data) # Create a Spark RDD from our sample data and decrease partitions to max_partions max_partitions = 32 rdd = sc.parallelize(data) rdd = rdd.coalesce(max_partitions) ###Output _____no_output_____ ###Markdown Specify 2 cluster centers and a maximum of 10 iterations: ###Code n_clusters = 2 max_iter = 10 method = EnsembleKModes(n_clusters, max_iter) ###Output _____no_output_____ ###Markdown Fit the model using PySpark: ###Code model = method.fit(rdd) ###Output Iteration 0 Iteration 1 Iteration 2 Init: initializing centroids Init: initializing clusters Starting iterations... Run 1, iteration: 1/100, moves: 0, cost: 32.0 Avg cost/partition: 4.0 Final centroids: ['a' 'a' 'a' 'a' 'b' 'a' 'b' 'c' 'a' 'c'] ['f' 'f' 'f' 'f' 'g' 'e' 'g' 'f' 'g' 'g'] ###Markdown Inspect the results: ###Code print(model.clusters) print(method.mean_cost) predictions = method.predictions datapoints = method.indexed_rdd combined = datapoints.zip(predictions) print(combined.take(10)) model.predict(rdd).take(5) model.predict(sc.parallelize(['e', 'e', 'f', 'e', 'e', 'f', 'g', 'e', 'f', 'e'])).collect() ###Output _____no_output_____
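###Markdown A quick way to see how many points landed in each cluster is to count the predicted labels (a small sketch; `model` and `rdd` come from the cells above): ###Code # Hedged sketch: cluster sizes via countByValue on the predicted labels.
cluster_sizes = model.predict(rdd).countByValue()
print(cluster_sizes)
###Output _____no_output_____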
notebooks/5_regularised_linear_regression_bias_vs_variance.ipynb
###Markdown 1 Regularised Linear Regression 1.1 Data Extraction and Transformation ###Code def get_data(file_path, xLabel, yLabel): data = loadmat(file_path) X = np.insert(data[xLabel], 0, 1, axis=1) n_samples, n_variables = X.shape y = data[yLabel] return X.flatten(), y.flatten(), n_samples, n_variables def get_β(n_variables): β = np.zeros(n_variables) return β ###Output _____no_output_____ ###Markdown 1.2 Data Visualisation ###Code def visualiseData(file_path, xLabel, yLabel, title): data = loadmat(file_path) plt.plot(data[xLabel], data[yLabel], 'o') plt.xlabel("Change in water level (x)") plt.ylabel("Water flowing out of the dam (y)") plt.title(title) return plt.show() ###Output _____no_output_____ ###Markdown 1.2.1 Training Set ###Code visualiseData(file_path, 'X', 'y', 'Traning Data Set') ###Output _____no_output_____ ###Markdown 1.2.2 Validation Set ###Code visualiseData(file_path, 'Xval', 'yval', 'Cross Validation Data Set') ###Output _____no_output_____ ###Markdown 1.2.3 Test Set ###Code visualiseData(file_path, 'Xtest', 'ytest', 'Test Data Set') ###Output _____no_output_____ ###Markdown 1.3 Regularised Linear Regression**Hypothesis** $h_{\beta}(X) = X\cdot\beta$**Error** $e = (h_{\beta}(X) - y)$**Cost Function** $J = \frac{1}{2n}{\sum(h_{\beta} - y)^2}$**Regularisation Term** $R = \frac{\lambda}{2n}{\sum{\beta}^2}$**Regularised Cost** $J = \frac{1}{2n}{\sum(h_{\beta} - y)^2} + \frac{\lambda}{2n}{\sum{\beta}^2}$**Gradient** $\frac{\partial J}{\partial \beta _0} = \frac{1}{n}X^{T}\cdot e$$\frac{\partial J}{\partial \beta _{≠0}} = \frac{1}{n}X^{T}\cdot e + \frac{\lambda}{n}\beta$In the code $\frac{\partial J}{\partial \beta}$ is denoted symply as **g**. ###Code def get_hypothesis(β, X, n_samples, n_variables): β = β.reshape(n_variables, -1) X = X.reshape(n_samples, -1) # return hypothesis vector h(n, 1), where n is n_samples return np.dot(X, β) def cost_function(β, X, y, n_samples, n_variables, λ=0.): β = β.reshape(n_variables, -1) X = X.reshape(n_samples, -1) y = y.reshape(n_samples, -1) # hypothesis vector h(n, 1) h = get_hypothesis(β, X, n_samples, n_variables) # cost scalar J(1, 1); technically the result is an array (1,1) rather than a float J = np.dot((y-h).T, y-h)/(2*n_samples) # similarly cost J can be calculated using np.sum # J = np.sum((y-h)**2)/(2*n_samples) R = λ*np.dot(β.T, β)/(2*n_samples) return (J + R)[0][0] def get_gradient(β, X, y, n_samples, n_variables, λ=0.): β = β.reshape(n_variables, -1) X = X.reshape(n_samples, -1) y = y.reshape(n_samples, -1) # hypothesis vector h(n, 1) h = get_hypothesis(β, X, n_samples, n_variables) # error vector e(n, 1) = h(n, 1) - y(n, 1) e = h - y # gradient vector g(k, 1) = X(n, k).T*e(n, 1)* g = np.dot(X.T,e)/(n_samples) # regularisation term vector (r(400x1)) — derivative of the regularisation term of the cost funtion r = β[1:]*(λ/n_samples) g[1:] = g[1:] + r return g.flatten() def plot_regression(β, X, y, n_samples, n_variables): β = β.reshape(n_variables, -1) X = X.reshape(n_samples, -1) y = y.reshape(n_samples, -1) y_fit = np.dot(X, β) MSE = np.sum((y - y_fit)**2)/y.shape[0] plt.plot(X[:,1:], y, 'o', X[:,1:], y_fit, '-') plt.xlabel("X") plt.ylabel("Y") print ("β_0:", β[0][0], "\nβ_1:", β[1][0], "\nRegression: Y =", '{:10.2f}'.format(β[0][0]), '+', '{:10.2f}'.format(β[1][0]), "X" "\nMSE =",'{:10.2f}'.format(MSE)) return plt.show() ###Output _____no_output_____ ###Markdown Function TestFor the trainin set and the $\β$-vector set to ones the output of the functions should be as follows:cost_function — J = 303.951525554gradient — 
gradient = [ -15.30301567 598.16741084] ###Code X, y, n_samples, n_variables = get_data(file_path, 'X', 'y') β = get_β(n_variables) βOnes = np.ones(n_variables) # print("hypothesis =", get_hypothesis(β_flatOnes, X_flat, n_samples, n_variables)) J = cost_function(βOnes, X, y, n_samples, n_variables, λ=0.) print(f"J = {J}") gradient = get_gradient(βOnes, X, y, n_samples, n_variables, λ=0.) print(f"gradient = {gradient}") def optimise_β(β_flat, X_flat, Y_flat, n_samples, n_variables, λ=0.): β_optimisation = optimize.minimize(cost_function, β_flat, args=(X_flat, Y_flat, n_samples, n_variables, λ), method=None, jac=get_gradient, options={'maxiter':50}) β_opt = β_optimisation['x'] # β_optimisation = optimize.fmin_cg(cost_function, fprime=gradient, x0=β_flat, # args=(X_flat, Y_flat, n_samples, n_variables, λ), # maxiter=50, disp=False, full_output=True) # β_flat = β_optimisation[0] return β_opt β_opt = optimise_β(β, X, y, n_samples, n_variables) print (f"optimised β {β_opt}") plot_regression(β_opt, X, y, n_samples, n_variables) X, y, n_samples, n_variables = get_data(file_path, 'X', 'y') X_val, y_val, n_samples_val, n_variables_val = get_data(file_path, 'Xval', 'yval') β = get_β(n_variables) J_test = [] J_val = [] for i in range(n_samples): # np.random.seed(0) # indexSet = np.random.choice(n_samples, i+1, replace=False) # subsetX = reshapeT(X, n_samples)[indexSet] # subsetY = reshapeT(y, n_samples)[indexSet] subsetX = X.reshape(n_samples, -1)[:i+1,:] subsetY = y.reshape(n_samples, -1)[:i+1] flatSubsetX = subsetX.flatten() flatSubsetY = subsetY.flatten() β_fit = optimise_β(β, flatSubsetX, flatSubsetY, i+1, n_variables) y_fit_test = np.dot(subsetX, β_fit) J_test += [cost_function(β_fit, flatSubsetX, flatSubsetY, i+1, n_variables, λ=0.)] y_fit_val = np.dot(X_val.reshape(n_samples_val, -1), β_fit) J_val += [cost_function(β_fit, X_val, y_val, n_samples_val, n_variables_val, λ=0.)] plt.plot(range(1,n_samples + 1), J_test, '-', label='Training Set') plt.plot(range(1,n_samples + 1), J_val, '-', label='Cross-Validation Set') plt.xlabel("Training-Set Size") plt.ylabel("J") plt.title("Linear-Regression Learning Curve") plt.legend() plt.show() def polynomialsANDinteractions(file_path, xLabel, yLabel, polynomialDegree): data = loadmat(file_path) X = data[xLabel] y = data[yLabel] poly = PolynomialFeatures(polynomialDegree) poly_X = poly.fit_transform(X) n_samples, n_variables = poly_X.shape return poly_X.flatten(), y.flatten(), n_samples, n_variables def normalise(X, n_samples): normalisedX = X.reshape(n_samples, -1).copy() for i in (range(normalisedX.shape[1])): if np.std(normalisedX[:,i]) != 0: normalisedX[:,i] = (normalisedX[:,i] - np.mean(normalisedX[:,i]))/np.std(normalisedX[:,i]) return normalisedX.flatten() polynomialDegree = 8 poly_X, y, polyn_samples, polyn_variables = polynomialsANDinteractions(file_path, 'X', 'y', polynomialDegree) poly_β = get_β(polyn_variables) normPolyX = normalise(poly_X, polyn_samples) normY = normalise(y, polyn_samples) print(polyn_samples) print(polyn_variables) print(np.max(normPolyX)) β_opt_poly = optimise_β(poly_β, normPolyX, normY, polyn_samples, polyn_variables) print (β_opt_poly) # def plot_regression(β, X, y, n_samples, n_variables): poly_β = poly_β.reshape(polyn_variables, -1) poly_X = poly_X.reshape(n_samples, -1) y = normY.reshape(n_samples, -1) y_fit = np.dot(poly_X, poly_β) MSE = np.sum((y - y_fit)**2)/y.shape[0] plt.plot(poly_X[:,1:2], y_fit, 'o') # plt.plot(poly_X[:,1:2], y, 'o', X[:,1:2], y_fit, '-') # plt.xlabel("X") # plt.ylabel("Y") # print ("β_0:", β[0][0], 
# "\nβ_1:", β[1][0], # "\nRegression: Y =", '{:10.2f}'.format(β[0][0]), '+', '{:10.2f}'.format(β[1][0]), "X" # "\nMSE =",'{:10.2f}'.format(MSE)) plt.show() ###Output [ 4.80289899e-16 8.83572359e-01 1.65030044e+00 1.11363909e+00 -3.34178136e+00 -3.19209768e+00 2.99447999e+00 2.86241752e+00 -1.05521662e-01]
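###Markdown A natural follow-up for a bias-vs-variance study is to fit the degree-8 polynomial model for several values of λ and compare training and cross-validation cost. The cell below is a sketch, not part of the original exercise code, and for simplicity it normalises the validation features/targets with their own statistics rather than with the training-set statistics: ###Code # Hedged sketch: validation curve over the regularisation strength λ.
lambdas = [0.001, 0.01, 0.1, 1., 10., 100.]
poly_Xval, yval_p, n_val, k_val = polynomialsANDinteractions(file_path, 'Xval', 'yval', polynomialDegree)
normPolyXval = normalise(poly_Xval, n_val)
normYval = normalise(yval_p, n_val)
J_train_lam, J_val_lam = [], []
for lam in lambdas:
    β_lam = optimise_β(get_β(polyn_variables), normPolyX, normY, polyn_samples, polyn_variables, λ=lam)
    J_train_lam += [cost_function(β_lam, normPolyX, normY, polyn_samples, polyn_variables)]
    J_val_lam += [cost_function(β_lam, normPolyXval, normYval, n_val, k_val)]
plt.semilogx(lambdas, J_train_lam, '-o', label='Training Set')
plt.semilogx(lambdas, J_val_lam, '-o', label='Cross-Validation Set')
plt.xlabel("λ")
plt.ylabel("J")
plt.title("Validation Curve for the Polynomial Model")
plt.legend()
plt.show()
###Output _____no_output_____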
metadata/20190913_usda_excel/20190913_usda_excel_export.ipynb
###Markdown Generating `publications.json` partitions This is a template notebook for generating metadata on publications - most importantly, the linkage between the publication and dataset (datasets are enumerated in `datasets.json`)Process goes as follows:1. Import CSV with publication-dataset linkages. Your csv should have at the minimum, fields (spelled like the below): * `dataset` to hold the dataset_ids, and * `title` for the publication title. Update the csv with these field names to ensure this code will run. We read in, dedupe and format the title2. Match to `datasets.json` -- alert if given dataset doesn't exist yet3. Generate list of dicts with publication metadata4. Write to a publications.json file Import CSV containing publication-dataset linkages Set `linkages_path` to the location of the csv containg dataset-publication linkages and read in csv ###Code import pandas as pd import os file_name = 'usda_linkages_sbr.csv' rcm_subfolder = '20190913_usda_excel' linkages_path = os.path.join('/Users/sophierand/RichContextMetadata/metadata',rcm_subfolder,file_name) # linkages_path = os.path.join(os.getcwd(),'SNAP_DATA_DIMENSIONS_SEARCH_DEMO.csv') linkages_csv = pd.read_csv(linkages_path) ###Output _____no_output_____ ###Markdown Format/clean linkage data - apply `scrub_unicode` to `title` field. ###Code import unicodedata def scrub_unicode (text): """ try to handle the unicode edge cases encountered in source text, as best as possible """ x = " ".join(map(lambda s: s.strip(), text.split("\n"))).strip() x = x.replace('“', '"').replace('”', '"') x = x.replace("‘", "'").replace("’", "'").replace("`", "'") x = x.replace("`` ", '"').replace("''", '"') x = x.replace('…', '...').replace("\\u2026", "...") x = x.replace("\\u00ae", "").replace("\\u2122", "") x = x.replace("\\u00a0", " ").replace("\\u2022", "*").replace("\\u00b7", "*") x = x.replace("\\u2018", "'").replace("\\u2019", "'").replace("\\u201a", "'") x = x.replace("\\u201c", '"').replace("\\u201d", '"') x = x.replace("\\u20ac", "€") x = x.replace("\\u2212", " - ") # minus sign x = x.replace("\\u00e9", "é") x = x.replace("\\u017c", "ż").replace("\\u015b", "ś").replace("\\u0142", "ł") x = x.replace("\\u0105", "ą").replace("\\u0119", "ę").replace("\\u017a", "ź").replace("\\u00f3", "ó") x = x.replace("\\u2014", " - ").replace('–', '-').replace('—', ' - ') x = x.replace("\\u2013", " - ").replace("\\u00ad", " - ") x = str(unicodedata.normalize("NFKD", x).encode("ascii", "ignore").decode("utf-8")) # some content returns text in bytes rather than as a str ? try: assert type(x).__name__ == "str" except AssertionError: print("not a string?", type(x), x) return x ###Output _____no_output_____ ###Markdown Scrub titles of problematic characters, drop nulls and dedupe ###Code linkages_csv['title'] = linkages_csv['title'].apply(scrub_unicode) linkages_csv = linkages_csv.loc[pd.notnull(linkages_csv.dataset)].drop_duplicates() linkages_csv = linkages_csv.loc[pd.notnull(linkages_csv.title)].drop_duplicates() linkages_csv = linkages_csv.drop_duplicates(subset='title') pub_metadata_fields = ['title'] original_metadata_cols = list(set(linkages_csv.columns.values.tolist()) - set(pub_metadata_fields)-set(['dataset'])) ###Output _____no_output_____ ###Markdown Generate list of dicts of metadata Read in `datasets.json`. Update `datasets_path` to your local. 
###Code import json
datasets_path = '/Users/sophierand/RCDatasets/datasets.json'
with open(datasets_path) as json_file:
    datasets = json.load(json_file)
###Output _____no_output_____ ###Markdown Create list of dictionaries of publication metadata. `create_pub_dict` iterates through the `linkages_csv` dataframe, splits the `dataset` field (for when multiple datasets are listed), and prints a warning if a dataset doesn't exist yet and needs to be added to `datasets.json`. ###Code def create_pub_dict(linkages_dataframe,datasets):
    pub_dict_list = []
    for i, r in linkages_dataframe.iterrows():
        r['title'] = scrub_unicode(r['title'])
        ds_id_list = [f for f in [d.strip() for d in r['dataset'].split(",")] if f not in [""," "]]
        for ds in ds_id_list:
            check_ds = [b for b in datasets if b['id'] == ds]
            if len(check_ds) == 0:
                print('dataset {} isnt listed in datasets.json. Please add to file'.format(ds))
        required_metadata = r[pub_metadata_fields].to_dict()
        required_metadata.update({'datasets':ds_id_list})
        pub_dict = required_metadata
        if len(original_metadata_cols) > 0:
            original_metadata = r[original_metadata_cols].to_dict()
            pub_dict.update({'original':original_metadata})
        pub_dict_list.append(pub_dict)
    return pub_dict_list
###Output _____no_output_____ ###Markdown Generate publication metadata and export to json ###Code linkage_list = create_pub_dict(linkages_csv,datasets)
###Output _____no_output_____ ###Markdown Update `json_pub_path` so that the output file is written as `<rcm_subfolder>_publications.json` ###Code json_pub_path = os.path.join('/Users/sophierand/RCPublications/partitions/',rcm_subfolder+'_publications.json')
with open(json_pub_path, 'w') as outfile:
    json.dump(linkage_list, outfile, indent=2)
###Output _____no_output_____
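###Markdown A quick sanity check on the partition that was just written -- a small sketch that simply reloads the file and reports how many publication records and distinct dataset ids it contains: ###Code # Hedged sketch: reload the partition and summarise the linkages.
with open(json_pub_path) as f:
    written = json.load(f)
linked_datasets = set(ds for pub in written for ds in pub['datasets'])
print(len(written), "publications linked to", len(linked_datasets), "distinct datasets")
###Output _____no_output_____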
code_notebooks/1.1-quantecon/pandas/reshape.ipynb
###Markdown Reshape**Prerequisites**- [pandas intro](intro.ipynb) - [pandas basics](basics.ipynb) - [Importance of index](the_index.ipynb) **Outcomes**- Understand and be able to apply the `melt`/`stack`/`unstack`/`pivot` methods - Practice transformations of indices - Understand tidy data ###Code # Uncomment following line to install on colab #! pip install qeds import numpy as np import pandas as pd %matplotlib inline # activate plot theme import qeds qeds.themes.mpl_style(); ###Output _____no_output_____ ###Markdown Outline- [Reshape](Reshape) - [Tidy Data](Tidy-Data) - [Reshaping your Data](Reshaping-your-Data) - [Long vs Wide](Long-vs-Wide) - [`set_index`, `reset_index`, and Transpose](`set_index`,-`reset_index`,-and-Transpose) - [`stack` and `unstack`](`stack`-and-`unstack`) - [`melt`](`melt`) - [`pivot` and `pivot_table`](`pivot`-and-`pivot_table`) - [Visualizing Reshaping](Visualizing-Reshaping) - [Exercises](Exercises) Tidy DataWhile pushed more generally in the `R` language, the concept of “[tidy data](https://en.wikipedia.org/wiki/Tidy_data)” is helpful in understanding theobjectives for reshaping data, which in turn makes advanced features like[GroupBy](groupby.ipynb) more seamless.Hadley Wickham gives a terminology slightly better-adapted for the experimentalsciences, but nevertheless useful for the social sciences.> A dataset is a collection of values, usually either numbers (ifquantitative) or strings (if qualitative). Values are organized in twoways. Every value belongs to a variable and an observation. A variablecontains all values that measure the same underlying attribute (likeheight, temperature, duration) across units. An observation contains allvalues measured on the same unit (like a person, or a day, or a race)across attributes. – [Tidy Data (Journal of Statistical Software 2013)](https://www.jstatsoft.org/index.php/jss/article/view/v059i10/v59i10.pdf)With this framing,> A dataset is messy or tidy depending on how rows, columns and tables arematched with observations, variables, and types. In tidy data:1. Each variable forms a column.2. Each observation forms a row.3. Each type of observational unit forms a table.The “column” and “row” terms map directly to pandas columns and rows, while the“table” maps to a pandas DataFrame.With this thinking and interpretation, it becomes essential to think throughwhat uniquely identifies an “observation” in your data.Is it a country? A year? A combination of country and year?These will become the indices of your DataFrame.For those with more of a database background, the “tidy” format matches the[3rd normal form](https://en.wikipedia.org/wiki/Third_normal_form) indatabase theory, where the referential integrity of the database is maintainedby the uniqueness of the index.When considering how to map this to the social sciences, note thatreshaping data can change what we consider to be the variable andobservation in a way that doesn’t occur within the natural sciences.For example, if the “observation” uniquely identified by a country and year andthe “variable” is GDP, you may wish to reshape it so that the “observable” is acountry, and the variables are a GDP for each year.A word of caution: The tidy approach, where there is no redundancy and eachtype of observational unit forms a table, is a good approach for storing data,but you will frequently reshape/merge/etc. in order to make graphing oranalysis easier. This doesn’t break the tidy format since those examples areephemeral states used in analysis. 
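###Markdown As a toy illustration of these definitions (separate from the basketball data used below), the hypothetical table `messy` stores one variable spread across year columns; melting it gives one row per (store, year) observation, which is the tidy form.
```python
import pandas as pd

# "Messy" form: the year is encoded in the column names.
messy = pd.DataFrame({
    "store": ["A", "B"],
    "2019": [10, 20],
    "2020": [12, 24],
})

# Tidy form: each row is a single (store, year) observation of "sales".
tidy = messy.melt(id_vars="store", var_name="year", value_name="sales")
print(tidy)
```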
Reshaping your DataThe data you receive is not always in a “shape” that makes it easy to analyze.What do we mean by shape? The number of rows and columns in aDataFrame and how information is stored in the index and column names.This lecture will teach you the basic concepts of reshaping data.As with other topics, we recommend reviewing the [pandasdocumentation](https://pandas.pydata.org/pandas-docs/stable/reshaping.html)on this subject for additional information.We will keep our discussion here as brief and simple as possible becausethese tools will reappear in subsequent lectures. ###Code url = "https://datascience.quantecon.org/assets/data/bball.csv" bball = pd.read_csv(url) bball.info() bball ###Output <class 'pandas.core.frame.DataFrame'> RangeIndex: 9 entries, 0 to 8 Data columns (total 8 columns): Year 9 non-null int64 Player 9 non-null object Team 9 non-null object TeamName 9 non-null object Games 9 non-null int64 Pts 9 non-null float64 Assist 9 non-null float64 Rebound 9 non-null float64 dtypes: float64(3), int64(2), object(3) memory usage: 656.0+ bytes ###Markdown Long vs WideMany of these operations change between long and wide DataFrames.What does it mean for a DataFrame to be long or wide?Here is long possible long-form representation of our basketball data. ###Code # Don't worry about what this command does -- We'll see it soon bball_long = bball.melt(id_vars=["Year", "Player", "Team", "TeamName"]) bball_long ###Output _____no_output_____ ###Markdown And here is a wide-form version. ###Code # Again, don't worry about this command... We'll see it soon too bball_wide = bball_long.pivot_table( index="Year", columns=["Player", "variable", "Team"], values="value" ) bball_wide ###Output _____no_output_____ ###Markdown `set_index`, `reset_index`, and TransposeWe have already seen a few basic methods for reshaping aDataFrame.- `set_index`: Move one or more columns into the index. - `reset_index`: Move one or more index levels out of the index and make them either columns or drop from DataFrame. - `T`: Swap row and column labels. Sometimes, the simplest approach is the right approach.Let’s review them briefly. ###Code bball2 = bball.set_index(["Player", "Year"]) bball2.head() bball3 = bball2.T bball3.head() ###Output _____no_output_____ ###Markdown `stack` and `unstack`The `stack` and `unstack` methods operate directly on the indexand/or column labels. `stack``stack` is used to move certain levels of the column labels into theindex (i.e. moving from wide to long)Let’s take `ball_wide` as an example. ###Code bball_wide ###Output _____no_output_____ ###Markdown Suppose that we want to be able to use the `mean` method to compute theaverage value of each stat for each player, regardless of year or team.To do that, we need two column levels: one for the player and one for the variable.We can achieve this using the `stack` method. ###Code bball_wide.stack() ###Output _____no_output_____ ###Markdown Now, we can compute the statistic we are after. ###Code player_stats = bball_wide.stack().mean() player_stats ###Output _____no_output_____ ###Markdown Now suppose instead of that we wanted to compute the average for each team andstat, averaging over years and players.We’d need to move the `Player` level down into the index so we areleft with column levels for Team and variable.We can ask pandas do this using the `level` keyword argument. ###Code bball_wide.stack(level="Player") ###Output _____no_output_____ ###Markdown Now we can compute the mean. 
###Code bball_wide.stack(level="Player").mean() ###Output _____no_output_____ ###Markdown Notice a few features of the `stack` method:- Without any arguments, the `stack` arguments move the level of column labels closest to the data (also called inner-most or bottom level of labels) to become the index level closest to the data (also called the inner-most or right-most level of the index). In our example, this moved `Team` down from columns to the index. - When we do pass a level, that level of column labels is moved down to the right-most level of the index and all other column labels stay in their relative position. Note that we can also move multiple levels at a time in one call to `stack`. ###Code bball_wide.stack(level=["Player", "Team"]) ###Output _____no_output_____ ###Markdown In the example above, we started with one level on the index (just the year) andstacked two levels to end up with a three-level index.Notice that the two new index levels went closer to the data than the existinglevel and that their order matched the order we passed in our list argument to`level`. `unstack`Now suppose that we wanted to see a bar chart of each player’s stats.This chart should have one “section” for each player and a different coloredbar for each variable.As we’ll learn in more detail in a later lecture, we willneed to have the player’s name on the index and the variables as columns to do this.>**Note**>>In general, for a DataFrame, calling the `plot` method will put the indexon the horizontal (x) axis and make a new line/bar/etc. for each column.Notice that we are close to that with the `player_stats` variable. ###Code player_stats ###Output _____no_output_____ ###Markdown We now need to rotate the variable level of the index up to be column layers.We use the `unstack` method for this. ###Code player_stats.unstack() ###Output _____no_output_____ ###Markdown And we can make our plot! ###Code player_stats.unstack().plot.bar() ###Output _____no_output_____ ###Markdown This particular visualization would be helpful if we wanted to see which statsfor which each player is strongest.For example, we can see that Steph Curry scores far more points than he doesrebound, but Serge Ibaka is a bit more balanced.What if we wanted to be able to compare all players for each statistic?This would be easier to do if the bars were grouped by variable, with adifferent bar for each player.To plot this, we need to have the variables on the index and the playername as column names.We can get this DataFrame by setting `level="Player"` when calling `unstack`. ###Code player_stats.unstack(level="Player") player_stats.unstack(level="Player").plot.bar() ###Output _____no_output_____ ###Markdown Now we can use the chart to make a number of statements about players:- Ibaka does not get many assists, compared to Curry and Durant. - Steph and Kevin Durant are both high scorers. Based on the examples above, notice a few things about `unstack`:- It is the *inverse* of `stack`; `stack` will move labels down from columns to index, while `unstack` moves them up from index to columns. - By default, `unstack` will move the level of the index closest to the data and place it in the column labels closest to the data. 
>**Note**>>Just as we can pass multiple levels to `stack`, we can also pass multiplelevels to `unstack`.We needed to use this in our solution to the exercise below.> See exercise 1 in the [*exercise list*](exerciselist-0) SummaryIn some ways `set_index`, `reset_index`, `stack`, and `unstack`are the “most fundamental” reshaping operations…The other operations we discuss can be formulated with thesefour operations (and, in fact, some of them are exactly written as theseoperations in `pandas`’s code base).*Pro tip*: We remember stack vs unstack with a mnemonic: **U**nstack moves indexlevels **U**p `melt`The `melt` method is used to move from wide to long form.It can be used to move all of the “values” stored in your DataFrame to asingle column with all other columns being used to contain identifyinginformation.**Warning**: When you use `melt`, any index that you currently havewill be deleted.We saw used `melt` above when we constructed `bball_long`: ###Code bball # this is how we made ``bball_long`` bball.melt(id_vars=["Year", "Player", "Team", "TeamName"]) ###Output _____no_output_____ ###Markdown Notice that the columns we specified as `id_vars` remained columns, but allother columns were put into two new columns:1. `variable`: This has dtype string and contains the former column names. as values 1. `value`: This has the former values. Using this method is an effective way to get our data in *tidy* form as notedabove.> See exercise 2 in the [*exercise list*](exerciselist-0) `pivot` and `pivot_table`The next two reshaping methods that we will use are closely related.Some of you might even already be familiar with these ideas because youhave previously used *pivot tables* in Excel.- If so, good news. We think this is even more powerful than Excel and easier to use! - If not, good news. You are about to learn a very powerful and user-friendly tool. We will begin with `pivot`.The `pivot` method:- Takes the unique values of one column and places them along the index. - Takes the unique values of another column and places them along the columns. - Takes the values that correspond to a third column and fills in the DataFrame values that correspond to that index/column pair. We’ll illustrate with an example. ###Code # .head 8 excludes Ibaka -- will discuss why later bball.head(6).pivot(index="Year", columns="Player", values="Pts") ###Output _____no_output_____ ###Markdown We can replicate `pivot` using three of the fundamental operationsfrom above:1. Call `set_index` with the `index` and `columns` arguments 1. Extract the `values` column 1. `unstack` the columns level of the new index ###Code # 1--------------------------------------- 2--- 3---------------------- bball.head(6).set_index(["Year", "Player"])["Pts"].unstack(level="Player") ###Output _____no_output_____ ###Markdown One important thing to be aware of is that in order for `pivot` towork, the index/column pairs must be *unique*!Below, we demonstrate the error that occurs when they are not unique. ```python Ibaka shows up twice in 2016 because he was traded mid-season from the Orlando Magic to the Toronto Raptorsbball.pivot(index="Year", columns="Player", values="Pts")``` `pivot_table`The `pivot_table` method is a generalization of `pivot`.It overcomes two limitations of `pivot`:1. It allows you to choose multiple columns for the index/columns/values arguments. 1. It allows you to deal with duplicate entries by having you choose how to combine them. 
###Code bball ###Output _____no_output_____ ###Markdown Notice that we can replicate the functionality of `pivot` if we passthe same arguments. ###Code bball.head(6).pivot(index="Year", columns="Player", values="Pts") ###Output _____no_output_____ ###Markdown But we can also choose multiple columns to be used inindex/columns/values. ###Code bball.pivot_table(index=["Year", "Team"], columns="Player", values="Pts") bball.pivot_table(index="Year", columns=["Player", "Team"], values="Pts") ###Output _____no_output_____ ###Markdown AND we can deal with duplicated index/column pairs. ###Code # This produced an error # bball.pivot(index="Year", columns="Player", values="Pts") # This doesn't! bball_pivoted = bball.pivot_table(index="Year", columns="Player", values="Pts") bball_pivoted ###Output _____no_output_____ ###Markdown `pivot_table` handles duplicate index/column pairs using an aggregation.By default, the aggregation is the mean.For example, our duplicated index/column pair is `("x", 1)` and hadassociated values of 2 and 5.Notice that `bball_pivoted.loc[2016, "Ibaka"]` is `(15.1 + 14.2)/2 = 14.65`.We can choose how `pandas` aggregates all of the values.For example, here’s how we would keep the max. ###Code bball.pivot_table(index="Year", columns="Player", values="Pts", aggfunc=max) ###Output _____no_output_____ ###Markdown Maybe we wanted to count how many values there were. ###Code bball.pivot_table(index="Year", columns="Player", values="Pts", aggfunc=len) ###Output _____no_output_____ ###Markdown We can even pass multiple aggregation functions! ###Code bball.pivot_table(index="Year", columns="Player", values="Pts", aggfunc=[max, len]) ###Output _____no_output_____ ###Markdown > See exercise 3 in the [*exercise list*](exerciselist-0) Visualizing ReshapingNow that you have learned the basics and had a chance to experiment,we will use some generic data to provide a visualization of what the abovereshape operations do.The data we will use is: ###Code # made up # columns A and B are "identifiers" while C, D, and E are variables. df = pd.DataFrame({ "A": [0, 0, 1, 1], "B": "x y x z".split(), "C": [1, 2, 1, 4], "D": [10, 20, 30, 20,], "E": [2, 1, 5, 4,] }) df.info() df df2 = df.set_index(["A", "B"]) df2.head() df3 = df2.T df3.head() ###Output _____no_output_____ ###Markdown `stack` and `unstack`Below is an animation that shows how stacking works. ###Code df2 df2_stack = df2.stack() df2_stack ###Output _____no_output_____ ###Markdown And here is an animation that shows how unstacking works. ###Code df2 df2.unstack() ###Output _____no_output_____ ###Markdown `melt`As noted above, the `melt` method transforms data from wide to long in form.Here’s a visualization of that operation. ###Code df df_melted = df.melt(id_vars=["A", "B"]) df_melted ###Output _____no_output_____ ###Markdown Exercises**Exercise 1**(*Warning*: This one is challenging):Recall the `bball_wide` DataFrame from above (repeated below to jogyour memory).In this task, you will start from `ball` and re-recreate `bball_wide`by combining the operations we just learned about.There are many ways to do this, so be creative.Our solution used `set_index`, `T`, `stack`, and `unstack` inthat order.Here are a few hints:- Think about what columns you will need to call `set_index` on so that their data ends up as labels (either in index or columns). - Leave other columns (e.g. the actual game stats) as actual columns so their data can stay data during your reshaping. Don't spend too much time on this... 
if you get stuck, open up **this** markdown cell, and you will see our answer hidden. **Hint**: You might need to add `.sort_index(axis=1)` after you are finished to get the columns in the same order. **Hint**: You may not end up with a `variable` header on the second level of column labels. This is ok. `bball.drop("TeamName", axis=1).set_index(["Year", "Player", "Team"]).stack().unstack(level=[1, 3, 2]).sort_index(axis=1)` ###Code bball_wide ###Output _____no_output_____
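###Markdown If the one-line answer is hard to parse, the same chain is broken into commented steps below (this reproduces the hidden solution, so only run it after attempting the exercise yourself).
```python
# Step 1: drop the redundant TeamName column and move the identifying
# columns into the index so that only the game stats remain as data columns.
step1 = bball.drop("TeamName", axis=1).set_index(["Year", "Player", "Team"])

# Step 2: stack the remaining column labels (the stat names) into the index.
step2 = step1.stack()

# Step 3: unstack Player, the stat names, and Team back up into column
# levels, leaving Year on the index, then sort columns to match bball_wide.
result = step2.unstack(level=[1, 3, 2]).sort_index(axis=1)
result
```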
notebooks/reduced_basis.ipynb
###Markdown Reduced-order basis ###Code import sys sys.path.append('..') import matplotlib.pyplot as plt import numpy as np from importlib import reload import lfigw.waveform_generator as wfg ###Output _____no_output_____ ###Markdown Dataset ###Code wfd = wfg.WaveformDataset(spins_aligned=False, domain='FD') ###Output _____no_output_____ ###Markdown Generate and save ###Code wfd.f_max = 512.0 wfd.delta_f = 1.0 wfd.prior['time'] = [-0.1, 0.1] wfd.generate_dataset(10000) wfd.init_training() wfd.generate_noisy_test_data() wfd.save('.') wfd.save_train('.') wfd.save_noisy_test_data('.') plt.plot(wfd.h_dict['H1'][0].real) ###Output _____no_output_____ ###Markdown Load ###Code wfd.load('.') wfd.load_train('.') wfd.load_noisy_test_data('.') ###Output _____no_output_____ ###Markdown Factor out the time shift $t_c$The time shift makes a big contribution to the oscillations in the FD waveform, so before constructing a reduced-order basis, we should do our best to remove it. This should result in a smaller basis for the same accuracy. The time shift data can be saved and passed as an extra piece of information to the network. ###Code # Take data from one detector strain = wfd.h_dict['H1']/wfd._noise_std plt.plot(strain[0].real) strain_ifft = np.fft.irfft(strain) # Take the peak of the absolute value of the IFFT as the coalescence time. Not perfect, but probably good enough. # *** How will this work if there is noise? *** t = np.argmax(np.abs(strain_ifft), axis=-1) strain_shifted = np.exp( 2 * np.pi * 1j * np.outer(t, range(513)) / 1024) * strain plt.plot(strain_shifted[1].imag) strain_shifted_ifft = np.fft.irfft(strain_shifted) # All the waveforms now coalesce at t=0 plt.plot(strain_shifted_ifft[4]) ###Output _____no_output_____ ###Markdown Noisy case ###Code start_idx = int(wfd.f_min / wfd.delta_f) strain_noisy = strain + np.pad(np.random.randn(10000, 513 - start_idx) + 1j * np.random.randn(10000, 513 - start_idx), ((0, 0), (start_idx, 0))) strain_noisy_ifft = np.fft.irfft(strain_noisy) strain_ifft_smooth = np.fft.irfft(strain) plt.plot(strain_noisy_ifft[1].real) plt.plot(strain_ifft_smooth[1].real) plt.show() ###Output _____no_output_____ ###Markdown If SNR is too low, how can we pick out the peak? Cross-check on noiseI just want to make sure I'm adding the right amount of noise. Ignore this if uninterested. ###Code import pycbc fseries_1 = pycbc.types.frequencyseries.FrequencySeries(strain_noisy[1] * wfd._noise_std * np.sqrt(wfd.delta_t * 2.0), delta_f=wfd.delta_f) tseries_1 = fseries_1.to_timeseries() plt.plot(tseries_1.numpy()) plt.plot(test) plt.show() np.mean(tseries_1.numpy()**2) fseries = pycbc.types.frequencyseries.FrequencySeries(strain[1] * wfd._noise_std, delta_f=wfd.delta_f) tseries = fseries.to_timeseries() plt.plot(tseries.numpy()) test = tseries.numpy() * np.sqrt(wfd.delta_t * 2.0) test_noisy = test + np.random.randn(1024) plt.plot(test_noisy) plt.plot(test) plt.show() np.mean(test_noisy**2) ###Output _____no_output_____ ###Markdown SVD ###Code import scipy # strain = U S Vh # where S is diagonal with s = diag(S), and # U, Vh are unitary. U, s, Vh = scipy.linalg.svd(strain, full_matrices=False) U_shifted, s_shifted, Vh_shifted = scipy.linalg.svd(strain_shifted, full_matrices=False) plt.plot(s) plt.plot(s_shifted) plt.yscale('log') plt.show() ###Output _____no_output_____ ###Markdown From the figure, and depending on the accuracy threshold, we may be able to get away with fewer basis elements if we use the time shift. 
###Code U.shape Vh.shape ###Output _____no_output_____ ###Markdown Transform waveforms to SVD basis and evaluate mismatch ###Code # Magnitude of basis coefficients for one example waveform plt.plot(np.abs(U[0] * s)) # With just 50 components, waveform looks indistinguishable plt.plot(((U[0] * s)[:50] @ Vh[:50,:]).real) plt.plot(strain[0], '.') plt.show() end = 200 means = np.empty(end) stds = np.empty(end) mins = np.empty(end) maxs = np.empty(end) for truncation in range(end): reconstructed = (U @ np.diag(s))[:,:truncation] @ Vh[:truncation,:] norms = np.mean(np.abs(strain)**2, axis=-1) reconstructed_norms = np.mean(np.abs(reconstructed)**2, axis=-1) matches = np.mean(reconstructed.conj() * strain, axis=-1).real faithfulness = matches / np.sqrt(norms * reconstructed_norms) means[truncation] = np.mean(faithfulness) stds[truncation] = np.std(faithfulness) mins[truncation] = np.min(faithfulness) maxs[truncation] = np.max(faithfulness) plt.plot(1- means) plt.yscale('log') plt.plot(stds) plt.yscale('log') plt.plot(1-mins) plt.yscale('log') ###Output _____no_output_____ ###Markdown It appears that, even without worrying about the time shift, we should be able to get away with 100 basis elements. This is a factor of 5 smaller than the number of frequency bins. Moreover, this factor should improve as we move to longer waveforms. Incorporate time shift separately ###Code end = 200 means = np.empty(end) stds = np.empty(end) mins = np.empty(end) maxs = np.empty(end) for truncation in range(end): reconstructed = (U_shifted @ np.diag(s_shifted))[:,:truncation] @ Vh_shifted[:truncation,:] norms = np.mean(np.abs(strain_shifted)**2, axis=-1) reconstructed_norms = np.mean(np.abs(reconstructed)**2, axis=-1) matches = np.mean(reconstructed.conj() * strain_shifted, axis=-1).real faithfulness = matches / np.sqrt(norms * reconstructed_norms) means[truncation] = np.mean(faithfulness) stds[truncation] = np.std(faithfulness) mins[truncation] = np.min(faithfulness) maxs[truncation] = np.max(faithfulness) plt.plot(1- means) plt.yscale('log') plt.plot(stds) plt.yscale('log') plt.plot(1-mins) plt.yscale('log') ###Output _____no_output_____ ###Markdown With the time shift, we could get away with half as many basis elements. 
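###Markdown Since the truncation study is repeated for both the raw and the time-shifted strain, it is convenient to wrap the loop above in a helper. This is just a sketch, assuming `U`, `s`, `Vh` come from `scipy.linalg.svd(..., full_matrices=False)` as before.
```python
import numpy as np

def mean_mismatch_vs_truncation(U, s, Vh, strain, n_max):
    """Mean mismatch 1 - <faithfulness> for basis truncations 1..n_max."""
    coeffs = U @ np.diag(s)
    norms = np.mean(np.abs(strain)**2, axis=-1)
    out = np.empty(n_max)
    for n in range(1, n_max + 1):
        recon = coeffs[:, :n] @ Vh[:n, :]
        recon_norms = np.mean(np.abs(recon)**2, axis=-1)
        matches = np.mean(recon.conj() * strain, axis=-1).real
        out[n - 1] = 1.0 - np.mean(matches / np.sqrt(norms * recon_norms))
    return out

# e.g. compare the two bases:
# plt.plot(mean_mismatch_vs_truncation(U, s, Vh, strain, 200))
# plt.plot(mean_mismatch_vs_truncation(U_shifted, s_shifted, Vh_shifted, strain_shifted, 200))
```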
Reduced basis class ###Code import reduced_basis reload(reduced_basis) basis = reduced_basis.SVDBasis() basis.load() basis.generate_basis(strain, 100) basis.init_time_translation(-0.15, 0.15, 1001, np.linspace(0, 512, 513)) basis.init_whitening(wfd.psd_names['H1'], wfd._get_psd(wfd.delta_f, 'H1'), wfd.psd_names['V1'], wfd._get_psd(wfd.delta_f, 'V1')) basis.whitening_dict[wfd.psd_names['V1']][:5,:5].real coeffs = basis.fseries_to_basis_coefficients(strain[1]) coeffs_V1 = basis.whiten(coeffs, wfd.psd_names['V1']) coeffs_H1 = basis.whiten(coeffs, wfd.psd_names['H1']) plt.plot(coeffs.imag) plt.plot(coeffs_V1.imag) plt.show() strain_RB = np.empty((len(strain), 100), dtype=np.complex64) for i in range(len(strain)): strain_RB[i] = basis.fseries_to_basis_coefficients(strain[i]) for i in range(len(strain_RB)): _ = basis.whiten(strain_RB[i], wfd.psd_names['V1']) len(strain) basis.V basis.whitening_dict['AdVDesignSensitivityP1200087'] basis.whitening_dict['AdVDesignSensitivityP1200087'].astype(np.complex64) (basis.T_matrices[0,0,0] * 3.0).dtype basis.save() ###Output _____no_output_____ ###Markdown Performance evaluation ###Code from pycbc.waveform import get_fd_waveform wfd._sample_prior(1) pars = wfd.parameters[0] pars[3] plt.plot(wfd.h_dict['H1'][0].imag) wfd.param_idx hp_fd, hc_fd = get_fd_waveform(mass1=pars[0], mass2=pars[1], distance=pars[4], coa_phase=pars[2], f_lower=wfd.f_min, f_final=wfd.f_max, delta_f=wfd.delta_f, f_ref=wfd.f_ref, approximant=wfd.approximant) fs = hp_fd.sample_frequencies hp_fd = hp_fd / (wfd._get_psd(hp_fd.delta_f, 'H1') ** 0.5) hc_fd = hc_fd / (wfd._get_psd(hc_fd.delta_f, 'H1') ** 0.5) hp_fd_p, hc_fd_p = get_fd_waveform(mass1=pars[0], mass2=pars[1], distance=pars[4], coa_phase=pars[2] + np.pi, f_lower=wfd.f_min, f_final=wfd.f_max, delta_f=wfd.delta_f, f_ref=wfd.f_ref, approximant=wfd.approximant) hp_fd_p = hp_fd_p.cyclic_time_shift(-1.01) hc_fd_p = hc_fd_p.cyclic_time_shift(-1.01) hp_fd_p = hp_fd_p / (wfd._get_psd(hp_fd.delta_f, 'H1') ** 0.5) hc_fd_p = hc_fd_p / (wfd._get_psd(hc_fd.delta_f, 'H1') ** 0.5) plt.plot((hp_fd.numpy() * np.exp(-2j * np.pi * 0.03 * fs.numpy())).real) plt.plot(hp_fd_p.numpy().real) plt.show() plt.plot((hp_fd.numpy() * np.exp(-1j *0)).real) plt.plot(hp_fd_p.numpy().real) plt.show() plt.plot(hp_fd_p.numpy().imag) dt = -0.0989 hp_fd_translated = hp_fd.cyclic_time_shift(dt) hc_fd_translated = hc_fd.cyclic_time_shift(dt) basis.t_grid[150:200] plt.plot(hp_fd_translated.numpy().imag) hp = np.empty(513, dtype=np.complex64) hp[:] = hp_fd hp_translated = np.empty(513, dtype=np.complex64) hp_translated[:] = hp_fd_translated ###Output _____no_output_____ ###Markdown Transform to basis ###Code coeff = basis.fseries_to_basis_coefficients(hp) plt.plot(basis.basis_coefficients_to_fseries(coeff).imag) match = np.sum(hp.conj() * basis.basis_coefficients_to_fseries(coeff)) / np.sqrt(np.sum(np.abs(hp)**2) * np.sum(np.abs(basis.basis_coefficients_to_fseries(coeff))**2)) match coeff_translated = basis.time_translate(coeff, dt, interpolation='cubic') test = basis.fseries_to_basis_coefficients(hp_translated) np.sum(hp_translated.conj() * basis.basis_coefficients_to_fseries(test)) / np.sqrt(np.sum(np.abs(hp_translated)**2) * np.sum(np.abs(basis.basis_coefficients_to_fseries(test))**2)) plt.plot(basis.basis_coefficients_to_fseries(coeff_translated).real - hp_translated.real) plt.plot(basis.basis_coefficients_to_fseries(coeff_translated).imag - hp_translated.imag) plt.plot(basis.basis_coefficients_to_fseries(test).real - hp_translated.real) 1-np.sum(hp_translated.conj() * 
basis.basis_coefficients_to_fseries(coeff_translated)) / np.sqrt(np.sum(np.abs(hp_translated)**2) * np.sum(np.abs(basis.basis_coefficients_to_fseries(coeff_translated))**2)) sample_times = np.linspace(0,1,1024,endpoint=False) test = dict(mass_1=0, mass_2=1, phase=2, time=3, distance=4) test['mass_1'] np.random.random(4, dtype=np.float32) x = [1,2,3] x* from lal import REARTH_SI, C_SI 2 * REARTH_SI / C_SI ###Output _____no_output_____ ###Markdown Time shifting performanceStudy how basis truncation affects the ability to time-shift waveforms. ###Code import sys sys.path.append('../src/') import waveform_generator as wfg import numpy as np import matplotlib.pyplot as plt from tqdm import tqdm # Load a waveform dataset wfd = wfg.WaveformDataset() wfd.load_noisy_test_data('../waveforms/GW150914') wfd.load_event('../data/events/GW150914') wfd.initialize_reduced_basis_aux() idx = 2 ntimes = 300 ifo = 'L1' Nrb = 300 p = wfd.noisy_waveforms_parameters[idx,:].astype(np.float64) p[wfd.param_idx['time']] = 0.0 h_FD = wfd._generate_whitened_waveform(p) h_FD = h_FD[ifo] matches = [] times = np.linspace(wfd.basis.t_grid[0], wfd.basis.t_grid[-1], ntimes) # times = wfd.basis.t_grid for dt in tqdm(times): h_FD_dt = h_FD * np.exp(- 2 * np.pi * 1j * wfd.sample_frequencies * dt) h_RB = (h_FD_dt @ wfd.basis.V)[:Nrb] h_FD_dt_recon = h_RB @ wfd.basis.Vh[:Nrb] # compute mismatch norm1 = np.mean(np.abs(h_FD_dt)**2) norm2 = np.mean(np.abs(h_FD_dt_recon)**2) inner = np.mean(h_FD_dt.conj()*h_FD_dt_recon).real matches.append(inner / np.sqrt(norm1 * norm2)) mismatches = 1 - np.array(matches) # Nrb 100 L1 plt.plot(times, mismatches) plt.yscale('log') plt.show() # Nrb 100 H1 plt.plot(times, mismatches) plt.yscale('log') plt.show() # Nrb 100 L1 plt.plot(times, mismatches) plt.yscale('log') plt.show() # Nrb 200 plt.plot(times, mismatches) plt.yscale('log') plt.show() # Nrb 300 plt.plot(times, mismatches) plt.yscale('log') plt.show() # Nrb 600 plt.plot(times, mismatches) plt.yscale('log') plt.show() ###Output _____no_output_____ ###Markdown The biggest factor seems to be basis truncation ###Code plt.figure(figsize=(15,10)) plt.plot(wfd.sample_frequencies, h_FD.real) plt.plot(wfd.sample_frequencies, ((h_FD @ wfd.basis.V)[:Nrb] @ wfd.basis.Vh[:Nrb]).real) plt.xscale('log') plt.xlim((18,1024)) plt.show() ###Output _____no_output_____ ###Markdown Check on GW150914 mean params ###Code import h5py f = h5py.File('../data/events/GW150914/GW150914_data.h5', 'r') PhenomPv2_samples = f['allIsp_post'] ligo_samples = np.zeros((len(PhenomPv2_samples), 15)) ligo_samples[:, wfd.param_idx['mass_1']] = PhenomPv2_samples['mass1_det'] ligo_samples[:, wfd.param_idx['mass_2']] = PhenomPv2_samples['mass2_det'] ligo_samples[:, wfd.param_idx['a_1']] = PhenomPv2_samples['a1'] ligo_samples[:, wfd.param_idx['a_2']] = PhenomPv2_samples['a2'] ligo_samples[:, wfd.param_idx['tilt_1']] = PhenomPv2_samples['tilt1'] ligo_samples[:, wfd.param_idx['tilt_2']] = PhenomPv2_samples['tilt2'] ligo_samples[:, wfd.param_idx['distance']] = PhenomPv2_samples['distance'] ligo_samples[:, wfd.param_idx['theta_jn']] = PhenomPv2_samples['theta_jn'] ligo_samples[:, wfd.param_idx['time']] = PhenomPv2_samples['time'] - wfd.ref_time ligo_samples[:, wfd.param_idx['ra']] = PhenomPv2_samples['right_ascension'] ligo_samples[:, wfd.param_idx['dec']] = PhenomPv2_samples['declination'] f.close() ligo_samples.mean(axis=0) ntimes = 100 ifo = 'L1' Nrb = 100 p = ligo_samples.mean(axis=0) p[wfd.param_idx['time']] = 0.0 h_FD = wfd._generate_whitened_waveform(p) h_FD = h_FD[ifo] matches = [] 
times = np.linspace(wfd.basis.t_grid[0], wfd.basis.t_grid[-1], ntimes) # times = wfd.basis.t_grid for dt in tqdm(times): h_FD_dt = h_FD * np.exp(- 2 * np.pi * 1j * wfd.sample_frequencies * dt) h_RB = (h_FD_dt @ wfd.basis.V)[:Nrb] h_FD_dt_recon = h_RB @ wfd.basis.Vh[:Nrb] # compute mismatch norm1 = np.mean(np.abs(h_FD_dt)**2) norm2 = np.mean(np.abs(h_FD_dt_recon)**2) inner = np.mean(h_FD_dt.conj()*h_FD_dt_recon).real matches.append(inner / np.sqrt(norm1 * norm2)) mismatches = 1 - np.array(matches) # Nrb 100 L1 plt.plot(times, mismatches) plt.yscale('log') plt.show() # Nrb 100 H1 plt.plot(times, mismatches) plt.yscale('log') plt.show() # Nrb 200 plt.plot(times, mismatches) plt.yscale('log') plt.show() # Nrb 600 plt.plot(times, mismatches) plt.yscale('log') plt.show() ###Output _____no_output_____ ###Markdown Evaluate interpolation of T-matrices ###Code wfd.basis.truncate(100) idx = 1 ntimes = 300 ifo = 'H1' Nrb = 200 p = wfd.noisy_waveforms_parameters[idx,:].astype(np.float64) p[wfd.param_idx['time']] = 0.0 h_FD = wfd._generate_whitened_waveform(p) h_FD = h_FD[ifo] matches = [] times = np.linspace(wfd.basis.t_grid[200], wfd.basis.t_grid[205], ntimes) # times = wfd.basis.t_grid for dt in tqdm(times): h_FD_dt = h_FD * np.exp(- 2 * np.pi * 1j * wfd.sample_frequencies * dt) h_RB = (h_FD @ wfd.basis.V)[:Nrb] h_RB_dt = wfd.basis.time_translate(h_RB, dt, interpolation='linear') h_FD_dt_recon = h_RB_dt @ wfd.basis.Vh[:Nrb] # compute mismatch norm1 = np.mean(np.abs(h_FD_dt)**2) norm2 = np.mean(np.abs(h_FD_dt_recon)**2) inner = np.mean(h_FD_dt.conj()*h_FD_dt_recon).real matches.append(inner / np.sqrt(norm1 * norm2)) mismatches = 1 - np.array(matches) plt.plot(times, mismatches) plt.yscale('log') plt.show() # cubic plt.plot(times, mismatches) plt.yscale('log') plt.show() ###Output _____no_output_____ ###Markdown Seems very smooth ###Code # linear plt.plot(times, mismatches) plt.yscale('log') plt.show() wfd.basis.t_grid[200:206] ###Output _____no_output_____
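###Markdown For reference, the frequency-domain time translation used throughout these tests can be written as a tiny stand-alone helper; this is a sketch assuming a one-sided frequency series evaluated at `sample_frequencies`.
```python
import numpy as np

def time_translate_fd(h_fd, sample_frequencies, dt):
    """Cyclically time-shift a frequency-domain waveform by dt seconds.

    Multiplying by exp(-2*pi*i*f*dt) in the frequency domain corresponds to a
    (cyclic) shift of the inverse-transformed time-domain waveform.
    """
    return h_fd * np.exp(-2j * np.pi * sample_frequencies * dt)

# e.g. h_FD_dt = time_translate_fd(h_FD, wfd.sample_frequencies, dt)
```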
notebooks/Stability_Region_PC.ipynb
###Markdown Stability region for a simple Predictor-Corrector method[AMath 586, Spring Quarter 2019](http://staff.washington.edu/rjl/classes/am586s2019/) at the University of Washington. For other notebooks, see [Index.ipynb](Index.ipynb) or the [Index of all notebooks on Github](https://github.com/rjleveque/amath586s2019/blob/master/notebooks/Index.ipynb).Plot the region of absolute stability for the Predictor-Corrector method based on Forward and Backward Euler,\begin{align*}&\hat U^0 = U^n + kf(U^n)\\&\text{for} ~j = 0,~1,~\ldots,~N-1\\&\qquad \hat U^{j+1} = U^n + kf(\hat U^j)\\&\qquad \text{end}\\&U^{n+1} = \hat U^N.\end{align*}The general approach is to apply the method to $u' = \lambda u$ with time step $k$ to obtain $U^{n+1} = R(z) U^n$.This gives $$R(z) = 1 + z + z^2 + \cdots + z^{N+1} = \frac{1-z^{N+2}}{1-z}.$$Then evaluate $|R(z)|$ on a grid of points in the complex plane and do a filled contour plot that shows the regions where $|R(z)| \leq 1$. ###Code %pylab inline #seterr(divide='ignore', invalid='ignore') # suppress divide by zero warnings from ipywidgets import interact def plotS(R, axisbox = [-2, 2, -2, 2], npts=500): """ Compute |R(z)| over a fine grid on the region specified by axisbox and do a contour plot with contourf (filled contours) to show the region of absolute stability. """ xa, xb, ya, yb = axisbox x = linspace(xa,xb,npts) y = linspace(ya,yb,npts) X,Y = meshgrid(x,y) Z = X + 1j*Y Rval = R(Z) Rabs = abs(Rval) # plot interior, exterior, as green and white: levels = [-1e9,1,1e9] CS1 = contourf(X, Y, Rabs, levels, colors = ('g', 'w')) # plot boundary as a black curve: CS2 = contour(X, Y, Rabs, [1,], colors = ('k',), linewidths = (2,)) title('Region of absolute stability') grid(True) plot([xa,xb],[0,0],'k') # x-axis plot([0,0],[ya,yb],'k') # y-axis axis('scaled') # scale x and y same so that circles are circular axis(axisbox) # set limits def plotS_PC(N): def R(z): # return Rz = 1 + z + z^2 + ... + z^(N+1) = (1 - z^(N+2)) / (1-z) return (1 - z**(N+2)) / (1-z) plotS(R, npts=1000) title('PC method N = %i' % N) figure(figsize=(14,4)) subplot(1,4,1) plotS_PC(5) subplot(1,4,2) plotS_PC(10) subplot(1,4,3) plotS_PC(20) subplot(1,4,4) plotS_PC(50) ###Output _____no_output_____ ###Markdown The limiting regionAs $N \rightarrow \infty$, the stability region appears to approach the intersection of the unit circle and the stability region of the backward Euler method, which is the exterior of the circle of radius 1 centered at $z=1$. This makes sense since (a) the P-C iteration converges only if $|z| \leq 1$ and (b) if it does converge then it converges to the value $U^{n+1}$ that solves the implicit equation defining the backward Euler method. The blobs on the unit circleNote that the stability region seems to also include an increasing number of small blobs equally spaced around the unit circle. This is because, for any finite $N$, the function $R(z)$ is given by$$R(z) = \frac{1-z^{N+2}}{1-z}.$$If $|z|<1$ then at points outside the stability region of backward Euler the limiting value $1/(1-z)$ is greater than 1 in modulus, but for finite $N$ the quantity $1 - z^{N+2}$ in the numerator *vanishes* if $z^{N+2} = 1$, i.e., at the $N+2$ roots of unity $\exp({2\pi ij/(N+2)})$ for $j = 1, 2, \ldots,~ N+2$. These points are equally spaced around the unit circle and the plot below confirms that the blobs are centered around these points, which are expressed as $\omega^j$ where $\omega = \exp({2\pi i/(N+2)})$. 
###Code figure(figsize=(10,10)) N = 50 plotS_PC(N) omega = exp(2*pi*1j/(N+2)) roots_of_unity = omega**range(1,N+3) plot(real(roots_of_unity), imag(roots_of_unity), 'rx', markersize=5, label='roots of unity') legend(fontsize=15) ###Output _____no_output_____
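###Markdown A quick numerical check of the claim above: at every root of unity other than $z=1$ the numerator $1-z^{N+2}$ vanishes, so $R(z)$ is (numerically) zero there and those points sit well inside the stability region. A short sketch:
```python
import numpy as np

N = 50
omega = np.exp(2 * np.pi * 1j / (N + 2))
roots = omega ** np.arange(1, N + 2)   # j = 1, ..., N+1; exclude z = 1, where R(1) = N+2

R = (1 - roots**(N + 2)) / (1 - roots)
print(np.max(np.abs(R)))               # on the order of 1e-13, so |R| <= 1 at every root
```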
social-network-analysis/Social network Analysis/4th class/Assortativity.ipynb
###Markdown Assortativity During this seminar we will compute node similarity measures and reorder the adjacency matrix to reveal network structure. ###Code import urllib import numpy as np import pandas as pd import scipy.spatial as spt import matplotlib.pyplot as plt import networkx as nx %matplotlib inline url = "https://dl.dropboxusercontent.com/s/3ieuhkkhzrdpw3a/lesmis.gml?dl=1" with urllib.request.urlopen(url) as f: data = f.read().decode('utf-8') with open('lesmis.gml', 'w') as f: f.write(data) ###Output _____no_output_____ ###Markdown Similarities Calculation ###Code G = nx.karate_club_graph() A = nx.to_numpy_matrix(G, dtype=int) A = np.asarray(A) def plotDist(A): f, ax = plt.subplots(2, 2, figsize=(10,10)) ax[0, 0].imshow(A, cmap = 'Greys', interpolation = 'None') ax[0, 0].set_title('Adjacency Matrix') D = np.corrcoef(A) ax[1, 0].imshow(D, cmap = 'Greys', interpolation = 'None') ax[1, 0].set_title('Correlation coeff.') dVec = spt.distance.pdist(A, metric = 'euclidean') D = spt.distance.squareform(dVec) ax[0, 1].imshow(D, cmap = 'Greys', interpolation = 'None') ax[0, 1].set_title('Euclidean Dist.') dVec = spt.distance.pdist(A, metric = 'cosine') D = spt.distance.squareform(dVec) ax[1, 1].imshow(D, cmap = 'Greys', interpolation = 'None') ax[1, 1].set_title('Cosine Dist.') plotDist(A) G = nx.read_gml('lesmis.gml') A = nx.to_numpy_matrix(G, dtype=int) A = np.asarray(A) plotDist(A) ###Output _____no_output_____ ###Markdown Node Reordering Without special preprocessing, a graph adjacency matrix can look very noisy and hide the network's structure (just look at the matrices above). Of course, you usually don't know the structure itself (communities, groups of closely connected nodes, etc.) unless it is given; however, there are node reordering procedures that provide a better view of the network's adjacency matrix. *[Reverse Cuthill-McKee](http://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm)* finds a permutation of the nodes that minimizes the **bandwidth** of the matrix, which is calculated as:$$ \theta = \max_{a_{ij} > 0}|i-j|$$Informally, this algorithm concentrates the nonzero entries near the diagonal of the adjacency matrix. Run this reordering with *nx.utils.reverse_cuthill_mckee_ordering(G)* and compare with the results above. ###Code G = nx.karate_club_graph() # run procedure cm = nx.utils.reverse_cuthill_mckee_ordering(G) # get permutation l = [n for n in cm] l A = nx.to_numpy_matrix(G) A = np.asarray(A) # apply reordering A = A[np.ix_(l,l)] plotDist(A) ###Output _____no_output_____
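###Markdown For completeness, the degree assortativity coefficient itself (the Pearson correlation of the degrees at either end of an edge) can be computed directly with the standard networkx function; the snippet below measures it for both graphs used above.
```python
import networkx as nx

G_karate = nx.karate_club_graph()
G_lesmis = nx.read_gml('lesmis.gml')   # the file downloaded at the top of the notebook

print(nx.degree_assortativity_coefficient(G_karate))
print(nx.degree_assortativity_coefficient(G_lesmis))
```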
examples/5_feature_extraction.ipynb
###Markdown Feature ExtractionIn this notebook we follow the wavelet decomposition approach of [Lochner et al. (2016)](https://iopscience.iop.org/article/10.3847/0067-0049/225/2/31) to extract features. We also include the photometric redshift and its uncertainty as classification features. Index1. [Import Packages](imports)2. [Load Dataset](loadData)3. [Extract Features](features) 1. [Fit Gaussian Processes](gps) 2. [Wavelet Decomposition](waveletDecomp) 3. [Include Redshift Information](addZ) 4. [Save the Features](saveFeatures) 5. [Load Features](load) 1. Import Packages ###Code import collections import os import pickle import sys import time import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import lightgbm as lgb import matplotlib.pyplot as plt import numpy as np import pandas as pd import scipy.stats from snmachine import gps, snfeatures from utils.plasticc_pipeline import create_folder_structure, get_directories, load_dataset %config Completer.use_jedi = False # enable autocomplete ###Output _____no_output_____ ###Markdown 2. Load DatasetFirst, **write** the path to the folder that contains the dataset we want to augment, `folder_path`. ###Code folder_path = '../snmachine/example_data' ###Output _____no_output_____ ###Markdown Then, **write** in `data_file_name` the name of the file where your dataset is saved.In this notebook we use the dataset saved in [4_augment_data](4_augment_data.ipynb). ###Code data_file_name = 'example_dataset_aug.pckl' ###Output _____no_output_____ ###Markdown Load the dataset. ###Code data_path = os.path.join(folder_path, data_file_name) dataset = load_dataset(data_path) ###Output Opening from binary pickle Dataset loaded from pickle file as: <snmachine.sndata.PlasticcData object at 0x7faa88591b10> ###Markdown 3. Extract Features 3.1. Fit Gaussian ProcessesTo obtain the wavelet decomposition, we first used the GPs to interpolate all light curves onto the same time grid; we chose approximately one grid point per day and used a two-level wavelet decomposition, following [Lochner et al. (2016)](https://iopscience.iop.org/article/10.3847/0067-0049/225/2/31).If you have not fitted the GPs previously, **run** **A2)**; it follows the GP modeling of light curves described in [3_model_lightcurves]().Otherwise, follow **B2)** to **read in** the previously saved GPs. First **write** the path to the folder where the GP files will be/were saved (`path_saved_gps`). Similarly to previous notebooks, you can opt:**A1)** Obtain GP path from folder structure.If you created a folder structure, you can obtain the path from there. **Write** the name of the folder in `analysis_name`. ###Code analysis_name = data_file_name[:-5] ###Output _____no_output_____ ###Markdown Create the folder structure, if needed. ###Code create_folder_structure(folder_path, analysis_name) ###Output Folders already exist with this analysis name. Are you sure you would like to proceed, this will overwrite the example_dataset_aug folder [Y/n] Please respond with 'yes' or 'no' ###Markdown Obtain the required GP path. ###Code directories = get_directories(folder_path, analysis_name) path_saved_gps = directories['intermediate_files_directory'] ###Output _____no_output_____ ###Markdown **A2)** Directly **write** where you saved the GP files. 
```pythonpath_saved_gps = os.path.join(folder_path, data_file_name[:-5])``` **B1)** **Choose**:- `t_min`: minimim time to evaluate the Gaussian Process Regression at.- `t_max`: maximum time to evaluate the Gaussian Process Regression at.- `gp_dim`: dimension of the Gaussian Process Regression. If `gp_dim` is 1, the filters are fitted independently. If `gp_dim` is 2, the Matern kernel is used to fit light curves both in time and wavelength.- `number_gp`: number of points to evaluate the Gaussian Process Regression at.- `number_processes`: number of processors to use for parallelisation (**optional**). ###Code t_min = 0 t_max = 278 gp_dim = 2 number_gp = 276 number_processes = 1 gps.compute_gps(dataset, number_gp=number_gp, t_min=t_min, t_max=t_max, gp_dim=gp_dim, output_root=path_saved_gps, number_processes=number_processes) ###Output Performing Gaussian process regression. Models fitted with the Gaussian Processes values. Time taken for Gaussian process regression: 5.58s. ###Markdown **B2)** Read in the previously saved GPs. ```pythongps.read_gp_files_into_models(dataset, saved_gps_path)``` 3.2. Wavelet DecompositionNow, we do a wavelet decomposition of the events. **Write** in `path_saved_wavelets` the path to the folder where to save them. ###Code path_saved_wavelets = directories['intermediate_files_directory'] ###Output _____no_output_____ ###Markdown Following [Lochner et al. (2016)](https://iopscience.iop.org/article/10.3847/0067-0049/225/2/31), we then reduced the dimensionality of this wavelet space using Principal Component Analysis (PCA). Therefore, **choose** the number of PCA components to keep (`number_comps`) and **write** the path to the folder where to save the reduced wavelets (`path_saved_reduced_wavelets`). ###Code number_comps = 40 path_saved_reduced_wavelets = directories['features_directory'] ###Output _____no_output_____ ###Markdown **A)** Perform the wavelet decomposition and dimensionality reduction. ###Code wf = snfeatures.WaveletFeatures(output_root=path_saved_wavelets) reduced_wavelet_features = wf.compute_reduced_features( dataset, number_comps=number_comps, **{'wavelet_name': 'sym2', 'number_decomp_levels': 2, 'path_save_eigendecomp': path_saved_reduced_wavelets}) path_saved_reduced_wavelets ###Output _____no_output_____ ###Markdown If you previously calculated the wavelet decomposition of the events, and are only looking to project them into a lower dimensional space saved in `path_saved_reduced_wavelets`, run **B)**.**B)** Project previously calculated wavelet features onto a lower dimensional space. ```pythonwf = snfeatures.WaveletFeatures(output_root=saved_wavelets_path)feature_space = wf.load_feature_space(dataset)reduced_wavelet_features = wf.project_to_space( feature_space, path_saved_eigendecomp=saved_reduced_wavelets_path, number_comps=10)``` Save the reduced features. ###Code wf.save_reduced_features(reduced_wavelet_features, path_saved_reduced_wavelets) ###Output _____no_output_____ ###Markdown 3.3. Include Redshift InformationIn [paper]() we found that photometric redshift and its uncertainty are crucial for classification. Therefore, in the cell bellow, we include these properties as features. **Modify** it to include other properties as features. 
###Code features = reduced_wavelet_features.copy() # only the wavelet features metadata = dataset.metadata features['hostgal_photoz'] = metadata.hostgal_photoz.values.astype(float) features['hostgal_photoz_err'] = metadata.hostgal_photoz_err.values.astype(float) ###Output _____no_output_____ ###Markdown 3.4. Save the Features**Write** in `saved_features_path` the path to the folder where to save the final set of features. ###Code path_saved_features = directories['features_directory'] ###Output _____no_output_____ ###Markdown Save the features and the class of the events. ###Code features.to_pickle(os.path.join(path_saved_features, 'features.pckl')) data_labels = dataset.labels.astype(int) # class label of each event data_labels.to_pickle(os.path.join(path_saved_features, 'data_labels.pckl')) ###Output _____no_output_____ ###Markdown 3.5. Load Features (Optional)We can load the saved files to verify weather they were correctly saved. ###Code saved_features = pd.read_pickle(os.path.join(path_saved_features, 'features.pckl')) saved_data_labels = pd.read_pickle(os.path.join(path_saved_features, 'data_labels.pckl')) ###Output _____no_output_____ ###Markdown As we can see, the quantities are the same. ###Code print(np.allclose(saved_features, features)) print(np.allclose(saved_data_labels, data_labels)) ###Output True True
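###Markdown Before moving on to training a classifier, a quick sanity check that the saved features and labels describe the same events is cheap insurance. A minimal sketch, assuming the `features` DataFrame and `data_labels` Series created above:
```python
# One feature row per labelled event, and no missing values left over.
assert len(features) == len(data_labels)
assert not features.isnull().any().any()

print(features.shape)
print(data_labels.value_counts().to_dict())   # number of events per class
```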
notebooks/Example3.ipynb
###Markdown Refolded fold exampleThis example was originally presented Laurent et al., 2016. It uses the automatic classification of the fold geometry from the S-PLot (Grose et al., 2017) ReferencesGrose, L., Laurent, G., Aillères, L., Armit, R., Jessell, M. and Caumon, G.: Structural data constraints for implicit modeling of folds, J. Struct. Geol., 104, 80–92, doi:10.1016/j.jsg.2017.09.013, 2017.Laurent, G., Ailleres, L., Grose, L., Caumon, G., Jessell, M. and Armit, R.: Implicit modeling of folds and overprinting deformation, Earth Planet. Sci. Lett., 456, 26–38, doi:10.1016/j.epsl.2016.09.040, 2016. ###Code from LoopStructural import GeologicalModel from LoopStructural.visualisation import LavaVuModelViewer, RotationAnglePlotter from LoopStructural.datasets import load_laurent2016 import numpy as np import pandas as pd import glob import os import matplotlib.pyplot as plt import logging # logging.getLogger().setLevel(logging.INFO) # load in the data from the provided examples data, bb = load_laurent2016() # define view for the figure rotation= [-69.11981964111328, 15.704948425292969, 6.000146389007568] # bb[1,2] = 10000 newdata = pd.DataFrame([[5923.504395,4748.135254,3588.621094,'s2',1.0]],columns=['X','Y','Z','feature_name','val']) data = pd.concat([data,newdata],sort=False) data[np.logical_and(data['feature_name'] == 's2',np.isnan(data['nx']))] data.loc[np.logical_and(data['feature_name'] == 's0',~np.isnan(data['val'])),'feature_name'] = 's01' ###Output _____no_output_____ ###Markdown Modelling S2In this model both F2 and F1 share the same cylindrical fold axis. As a result we only need to model the axial foliations. LoopStructural will give a warning saying that the 1st coordinate of the fold frame could not be built but this is not a problem for this model. ###Code model = GeologicalModel(bb[0,:],bb[1,:]) model.set_model_data(data) s2 = model.create_and_add_fold_frame('s2', nelements=10000, buffer=0.5, solver='lu', damp=True) viewer = LavaVuModelViewer(model) viewer.add_scalar_field(s2[0], cmap='prism') viewer.add_isosurface(s2[0], slices=[0,1]) viewer.add_data(s2[0]) viewer.rotation = rotation viewer.interactive() # viewer.display() ###Output _____no_output_____ ###Markdown Modelling S1S1 is modelled using the geometry of S2 to constrain the axial foliation and a constant fold axis that is defined by the average intersection lineation between the gradient of S2 and the observations of S1. The geometry of the fold is constrained by fitting a periodic profile to the S-Plot which is a cross plot of the axial foliation scalar field value and the fold rotation angle. 
###Code s1 = model.create_and_add_folded_fold_frame('s1', limb_wl=4, av_fold_axis=True, nelements=50000, buffer=0.3, solver='lu' ) s1.fold.foldframe[0].set_model(model) # s1.fold.foldframe[1].set_model(model) s2_s1_splot = RotationAnglePlotter(s1) s2_s1_splot.add_fold_limb_data() s2_s1_splot.add_fold_limb_curve() s2_s1_splot.add_limb_svariogram()#fold_limb_ s2_s1_splot.default_titles() s2_s1_splot.fig.delaxes(s2_s1_splot.ax[0][0]) s2_s1_splot.fig.delaxes(s2_s1_splot.ax[0][1]) viewer = LavaVuModelViewer(model) viewer.add_scalar_field(s1[0], cmap='prism') viewer.rotate([-69.11979675292969, 15.704944610595703, 6.00014591217041]) viewer.display() # fig, ax = plt.subplots(1,2,figsize=(10,5)) # x = np.linspace(s2[0].min(),s2[0].max(),1000) # ax[0].plot(x,s1.fold.fold_limb_rotation(x)) # ax[0].plot(s1.f['foliation'],s1['limb_rotation'],'bo') # ax[1].plot(s1['limb_svariogram'].lags,s1['limb_svariogram'].variogram,'bo') ###Output _____no_output_____ ###Markdown Modelling S0S0 is modelled using the same workflow as S1 except the S1 field is used as the axial foliation. ###Code s0 = model.create_and_add_folded_foliation('s0', limb_wl=1., av_fold_axis=True, nelements=50000, buffer=0.2, damp=True, solver='lu' ) s0.fold.foldframe[0].set_model(model) s1_s0_splot = RotationAnglePlotter(s0) s1_s0_splot.add_fold_limb_data() s1_s0_splot.add_fold_limb_curve() s1_s0_splot.add_limb_svariogram() s1_s0_splot.default_titles() s1_s0_splot.fig.delaxes(s1_s0_splot.ax[0][0]) s1_s0_splot.fig.delaxes(s1_s0_splot.ax[0][1]) viewer = LavaVuModelViewer(model) viewer.add_scalar_field(s0, cmap='tab20') viewer.rotation = rotation#[69.11979675292969, 15.704944610595703, 6.00014591217041] viewer.display() viewer.image('images/refolded_folds.jpg') viewer = LavaVuModelViewer(model) viewer.add_isosurface(s0,nslices=10,paint_with=s0,cmap='tab20') # viewer.add_data(s0['feature']) # viewer.add_fold(s0['fold'],locations=s0['support'].barycentre()[::80]) viewer.rotation = rotation viewer.display() ###Output _____no_output_____
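###Markdown The commented-out lines in the S1 section above hint at how to inspect the S-Plot manually. A cleaned-up sketch of that idea is shown below; the plotting range for the axial-foliation scalar field value is a placeholder and should be adjusted to the extent of your model.
```python
import numpy as np
import matplotlib.pyplot as plt

# Hypothetical plotting range for the s2 scalar field value (adjust as needed).
x = np.linspace(-10, 10, 1000)

fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(x, s1.fold.fold_limb_rotation(x), 'k-')
ax.set_xlabel('s2 scalar field value')
ax.set_ylabel('s1 fold limb rotation angle')
plt.show()
```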
15_MC-2/ass15.ipynb
###Markdown task 15.3: estimating Markov chains ###Code import numpy as np import matplotlib.pyplot as plt import scipy.cluster.vq as vq from mpl_toolkits.mplot3d import Axes3D clusters_num = 10 matM, inds = vq.kmeans2(matX, k=clusters_num, iter=100, minit='++') fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(matX[:,1],matX[:,0],matX[:,2], marker='o',alpha=0.05) ax.scatter(matM[:,1],matM[:,0],matM[:,2], marker='^',alpha=1) ax.set_xlabel('X Label') ax.set_ylabel('Y Label') ax.set_zlabel('Z Label') plt.show() def get_dist(x1, x2): return np.sum((x1-x2)**2)**0.5 def get_state_seq(matX, matM): # assign every sample in matX to its nearest cluster centre in matM seq = [] seq_idx = [] for x in matX: idx = None state = None state_dist = np.inf for i, m in enumerate(matM): dist = get_dist(x,m) if dist < state_dist: idx = i state = m state_dist = dist seq.append(state) seq_idx.append(idx) return np.array(seq), np.array(seq_idx) def estimate_transitions(seq_idx): """ Estimate the transition matrix of a single state sequence. """ matP = np.zeros((clusters_num,clusters_num)) for s in range(clusters_num): x_s = np.where((seq_idx == s)) for x in x_s[0]: if (x+1) >= seq_idx.shape[0]: continue succ_state = seq_idx[x+1] matP[s,succ_state] += 1 # normalise each row to transition probabilities (guard against unvisited states) row_sums = matP.sum(axis=1, keepdims=True) row_sums[row_sums == 0] = 1 matP /= row_sums return matP seq, seq_idx = get_state_seq(matX, matM) matP = estimate_transitions(seq_idx) (np.round(matP, 2)) matX = np.loadtxt('../data/q3dm1-path1.csv', delimiter=',') matM, inds = vq.kmeans2(matX, k=clusters_num, iter=100, minit='++') fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(matX[:,1],matX[:,0],matX[:,2], marker='o',alpha=0.05) ax.scatter(matM[:,1],matM[:,0],matM[:,2], marker='^',alpha=1) ax.set_xlabel('X Label') ax.set_ylabel('Y Label') ax.set_zlabel('Z Label') plt.show() seq, seq_idx = get_state_seq(matX, matM) matP = estimate_transitions(seq_idx) (np.round(matP, 2)) ###Output _____no_output_____
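###Markdown Once the transition matrix has been estimated, a natural follow-up (not required by the task sheet) is its stationary distribution, i.e. the long-run fraction of time spent in each cluster. A sketch using the left eigenvector of `matP` for eigenvalue 1:
```python
import numpy as np

def stationary_distribution(P):
    """Left eigenvector of the row-stochastic matrix P for eigenvalue 1."""
    vals, vecs = np.linalg.eig(P.T)
    v = np.real(vecs[:, np.argmin(np.abs(vals - 1.0))])
    return v / v.sum()

pi = stationary_distribution(matP)
print(np.round(pi, 3))
```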
task_ties/SciBert_Faiss_Recommender.ipynb
###Markdown ###Code !pip install faiss-cpu --no-cache -q !pip install --upgrade git+https://github.com/zalandoresearch/flair.git -q !pip install transformers -q import pandas as pd import numpy as np from pathlib import Path, PurePath import faiss from flair.data import Sentence from flair.embeddings import BertEmbeddings,DocumentPoolEmbeddings from transformers import * ###Output _____no_output_____ ###Markdown Similarity Search ###Code # set data paths, this requires local drive to have a folder calld "COVID-19" with the metadata.csv file # returns a string to the local path setup def setup_local_data(): from google.colab import drive drive.mount('/content/drive') drive_path=PurePath('/content/drive/My Drive') input_dir = drive_path/'COVID-19' print(list(Path(input_dir).glob('*'))) return input_dir #read the metadata file into df def read_metadata_csv(input_dir): metadata_path = input_dir/ 'clean_metadata.csv' metadata = pd.read_csv(metadata_path, dtype={'title':str, 'abstract':str}) #set the abstract to the paper title if it is null metadata['abstract'] = metadata['abstract'].fillna(metadata['title']) #remove if abstract is empty or contains only one word metadata = metadata.dropna(subset=['abstract'], axis = 0) metadata['number_tokens'] = metadata['abstract'].apply(lambda x: len(x.split())) metadata = metadata[metadata['number_tokens']>1].reset_index(drop=True) metadata = metadata.drop('Unnamed: 0', axis=1) return metadata # read preprocessed SciBERT embeddings def read_summary_data(input_dir): summary_path = input_dir/'AbstractSummaries' summaries = pd.concat([pd.read_json(f) for f in Path(summary_path).glob('*')]).reset_index(drop=True) return summaries def read_embeddings(input_dir): vector_path = input_dir/'AbstractEmbeddings' embeddings = pd.concat([pd.read_json(f) for f in Path(vector_path).glob('*')]).reset_index(drop=True) return embeddings def get_embeddings(text, model): sentence = Sentence(text) document_embedding = DocumentPoolEmbeddings([model], pooling= 'mean') document_embedding.embed(sentence) # now check out the embedded sentence. 
return sentence.get_embedding().data.numpy() # read database to be indexed local_dir = setup_local_data() # metadata = read_metadata_csv(local_dir) # summaries = read_summary_data(local_dir) # embeddings = read_embeddings(local_dir) # print(metadata.info()) # print(summaries.info()) # print(embeddings.info()) merged = metadata.merge(summaries, on=['cord_uid','sha']).merge(embeddings, on=['cord_uid','sha']) merged.info() merged[pd.isnull(merged['scibert_emb'])]['summary'] merged[pd.isnull(merged['scibert_emb'])]['abstract'] for row in merged.loc[merged['scibert_emb'].isnull(), 'scibert_emb'].index: try: merged.at[row, 'scibert_emb'] = get_embeddings(merged.iloc[row]['abstract'], emb_model) except RuntimeError: #two articles have very long abstracts that exceeds bert's sequence length limit merged.at[row, 'scibert_emb'] = get_embeddings(merged.iloc[row]['abstract'][:512], emb_model) merged.info() check_lens = [len(lst) for lst in merged['scibert_emb']] print(set(check_lens)) # merged.to_json(local_dir/'metadata_scibert.json') del merged summary_embeddings = pd.read_json(local_dir/'metadata_scibert.json') summary_embeddings.info() ###Output <class 'pandas.core.frame.DataFrame'> Int64Index: 31647 entries, 0 to 31646 Data columns (total 20 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 cord_uid 31647 non-null object 1 sha 31646 non-null object 2 source_x 31647 non-null object 3 title 31647 non-null object 4 doi 31647 non-null object 5 pmcid 16634 non-null object 6 pubmed_id 24729 non-null float64 7 license 31647 non-null object 8 abstract 31647 non-null object 9 publish_time 31647 non-null datetime64[ns] 10 authors 31149 non-null object 11 journal 30525 non-null object 12 Microsoft Academic Paper ID 357 non-null float64 13 WHO #Covidence 490 non-null object 14 has_full_text 31647 non-null bool 15 full_text_file 31647 non-null object 16 url 31465 non-null object 17 number_tokens 31647 non-null int64 18 summary 31647 non-null object 19 scibert_emb 31647 non-null object dtypes: bool(1), datetime64[ns](1), float64(2), int64(1), object(15) memory usage: 4.9+ MB ###Markdown Faiss similarity search ###Code # create a matrix to store abstarct vectors xb = np.array([np.array(lst) for lst in summary_embeddings['scibert_emb']]).astype('float32') print(xb.shape) # assign dimension for the vector space d = xb.shape[1] # create a matrix to store queries # (basically the same matrix since we will use a selected article to find similar) xq = xb.copy() # build the index index = faiss.IndexFlatL2(d) #brute-force L2 index print(index.is_trained) # add vectors to the index index.add(xb) print(index.ntotal) summary_embeddings.iloc[20:30] # get selected article's index from df (not sure how selected article will be saved) # Title: The Incubation Period of Coronavirus Disease 2019 (COVID-19) From Publicly Reported Confirmed Cases: Estimation and Application # ce8609a60724d457d5b5916d57a31dea0ffb831b # Title: Duration of viral detection in throat and rectum of a patient with COVID-19 # 598d3eb737dfa7701ce8c29c86bc9f6589d8a581 # Title: Asymptomatic carrier state, acute respiratory disease, and pneumonia due to severe acute respiratory syndrome coronavirus 2 (SARSCoV-2): Facts and myths # 89a8918f7e3044b89642aaa74defc7381abef482; 1f5c1597a84ed1d4f84c488cd19098a091a3d513 # Title: Stability and inactivation of SARS coronavirus # 8a6f8fe47a3aa58e61e1eee0cba5af0037f38ee4 # Title: Feasibility of controlling COVID-19 outbreaks by isolation of cases and contacts # 
a14b5655cb13ed64cb8cff7c806a7b58c858b8b7; 43064e9a5b81ad1ac0743c818cda48383c246c95 # Title:The use of masks and respirators to prevent transmission of influenza: a systematic review of the scientific evidence # 963dfcc10563ef66cf366f73640ab9b3e84a9a55 # Title: Dispersal of Respiratory Droplets With Open vs Closed Oxygen Delivery Masks Implications for the Transmission of Severe Acute Respiratory Syndrome # 80ce66bae70e5eb76387b05c25dd486e29c0087c selected_index = summary_embeddings[summary_embeddings['sha'] == '80ce66bae70e5eb76387b05c25dd486e29c0087c'].index.astype(int) # set number of most similar articles to return k = 10 # retrieve selected article's embedding xq = xb[selected_index] # search top k similar articles and return a distance array (D) and an index array (I) # D is L2 distance, to get cosine similarity, normalize xb and xq with faiss.normalize_L2 D, I = index.search(xq, k+1) # actually search k+1 to get k articles additional to self print(I) #the first item (not always first) is the selected article itself print(D) #thus, distance is zero # convert index array to a list I_new = I[:][0].tolist() # locate selected article from the list query_ix = I_new.index(selected_index) # it's not always the first item so safer to save index separately print(query_ix) # remove selected article from the list I_new.remove(selected_index) print(I_new) # use the index to remove selected article's distance to itself D_new = D[:][0].tolist() del D_new[query_ix] print(D_new) I[0] # retrieve info for the top 10 simialr articles top10_similar = summary_embeddings.iloc[I[0]].reset_index(drop=True) #I_new # attach similarity scores top10_similar['distance'] = D[0] #D_new # view results top10_similar top10_similar['title'].tolist() top10_similar.to_csv(local_dir/'FaissOutputs'/'ppe.csv', index=False) ###Output _____no_output_____ ###Markdown K-means clustering ###Code # train kmeans to locate centroids in vector space ncentroids = 10 #assuming there're 10 clusters niter = 20 verbose = True d = xb.shape[1] kmeans = faiss.Kmeans(d, ncentroids, niter=niter, verbose=verbose) kmeans.train(xb) kmeans.centroids.shape #(ncentroids, d) # locate the nearest centroid to each vector (abstract) Dk, Ik = kmeans.index.search(xb, 1) # reverse operation to find 20 nearest articles to the centroids (n=10) index = faiss.IndexFlatL2(d) index.add (xb) D, I = index.search(kmeans.centroids, 20) I.shape for i in range(I.shape[0]): print('Cluster {}:'.format(i+1)) for j, title in enumerate(full_text.loc[I[i],'title']): print(j+1, title) print() ###Output _____no_output_____
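###Markdown A comment in the similarity-search section above notes that cosine similarity can be obtained by normalizing the vectors before indexing. The cell below is a minimal sketch of that variant (not part of the original workflow), reusing the same embedding matrix `xb`, dimension `d`, query `selected_index` and `k` defined earlier; the ranking should broadly match the L2 result. ###Code
# Hedged sketch: cosine-similarity search over the same SciBERT embeddings.
xb_cos = xb.copy()
faiss.normalize_L2(xb_cos)            # scale each vector to unit length, in place

index_cos = faiss.IndexFlatIP(d)      # inner product on unit vectors = cosine similarity
index_cos.add(xb_cos)

xq_cos = xb_cos[selected_index]       # reuse the previously selected article as the query
D_cos, I_cos = index_cos.search(xq_cos, k + 1)
print(I_cos)                          # neighbour indices, highest cosine similarity first
print(D_cos)                          # cosine similarities (the self-match should be ~1.0)
###Output _____no_output_____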
.ipynb_checkpoints/Introduction to Python and Jupyter-checkpoint.ipynb
###Markdown Introduction to PythonPython is organised in packages, modules and functions in that hierarchical order. The most important Python packages for scientific calculations are **numpy** and **scipy**. The basic package for generating scientific plots is **matplotlib**.Python packages typically contain several modules, where each module is a separate file with file ending ".py". For instance, scipy contains the module **stats** and matplotlib the module **pyplot**.Each module can contain several functions. A Python program starts with an import of the required modules such as ###Code import numpy as np # import the whole package numpy import matplotlib.pyplot as plt # import the subpackage pyplot from scipy.stats import norm # import the function norm ###Output _____no_output_____ ###Markdown In this workshop we use **jupyter notebooks** which, like IPython, offer *magic commands*. For instance, all our notebooks start with``` python %pylab inline```This populates the main namespace with all numpy and matplotlib.pyplot functions, resulting in an environment similar to, for instance, Matlab.* When a function is imported (such as _norm_ above) it can be used by its name.* When a module is imported, then its functions are used by appending a "." and the function's name to the module name``` python np.sqrt()```* When the sought function is located in a sub-module, the "." notation can be used again``` python np.linalg.lstsq()```Functions in Python can have necessary and optional arguments. Optional arguments are defined by keyword, that is, by their name in the function call.``` python norm.pdf(x, loc = 0, scale = 1)```Keyword arguments are used to define default values for arguments that can be changed by the user. Vectors and matricesThe Python package **numpy** contains several powerful tools for efficient matrix-vector algebra.A vector in Python is a numpy array:``` python import numpy as np vec = np.array( [ 1, 2, 3 ] )```Similarly, a matrix is a two-dimensional numpy array:``` python mat = np.array( [ [1, 2, 3], [1, 4, 9], [1, 8, 27] ] )```Calculations with matrices and vectors are implemented easily:* addition and subtraction```python vec + vec vec - vec```* element-wise multiplication and division```python vec * vec vec / vec```* matrix transpose```python transposed = mat.T```Special vector and matrix operations are available as built-in numpy functions* inner product```python np.dot( vec, vec ) np.dot( mat, vec )```* matrix inverse```python np.linalg.inv(mat)```* matrix rank```python np.linalg.matrix_rank(mat)```Indexing into a vector* indexing in Python starts at $0$* ranges can be obtained using $:$``` python v = np.array([1,2,3,4,5,6]) v[3] = 4 v[1:5] = np.array([2,3,4,5]) v[:3] = np.array([1,2,3]) v[-1] = 6```* matrices are indexed as``` python mat[2,3] mat[:3,2:4]```* fancy indexing also works``` python v = np.array([2,3,4,5]) w = np.array([11,12,13,14]) w[v>3] = np.array([13,14])```Some special arrays can be generated using built-in numpy functions* array containing all zeros or ones```python mat = np.ones( (100, 10) ) vec = np.zeros(100) complex_mat = np.zeros( (20, 20), dtype=complex )``` PlottingThe **matplotlib.pyplot** package contains several functions for basic plotting tasks.
Its syntax is very similar to that of Matlab.``` python import matplotlib.pyplot as plt plt.figure(1) plt.plot(time, signal, label="my signal") plt.legend(loc="upper left") plt.xlabel("x axis") plt.ylabel("y axis") plt.figure(2) plt.hist(signal, bins=100) plt.title("histogram of values in signal")``` EXAMPLE GUM-S1 4.1.1Modelling power $P$ (the measurand) dissipated by a resistor at the temperatur $t$ dependent on potential difference $V$, resistance $R_0$, linear temperature coefficient of resistance $\alpha$ and defined temperature $t_0$:$$ P = \frac{V^2}{R_0[1+\alpha(t-t_0)]} $$ TaskPlot the value of $P$ for a range of temperatures with some chosen value for the other input quantities. ###Code t0 = 20.0 alpha = 0.9 R_0 = 1.3 V = 10 t = np.arange(21, 30, 0.5) # different temperature values ranging from 21 to 30 in steps of 0.5 P = V**2 / (R_0 * (1+alpha*(t-t0))) plt.figure(1) plt.plot(t, P) plt.xlabel("temperature / °C") plt.ylabel("power / W") plt.show() ###Output _____no_output_____
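###Markdown The GUM-S1 example above evaluates $P$ at fixed input values. As a small extension in the spirit of GUM-S1, the cell below sketches a Monte Carlo propagation of uncertainty through the same model. The standard uncertainties assigned to $V$, $R_0$ and $\alpha$ are invented for illustration only and are not part of the original example. ###Code
# Hedged sketch: Monte Carlo propagation of uncertainty for P = V^2 / (R_0*(1 + alpha*(t - t0))).
# The standard uncertainties below are assumed purely for illustration.
n_draws = 100000
t_fixed = 25.0                                     # evaluate at a single temperature
V_mc     = np.random.normal(10.0, 0.05, n_draws)   # assumed u(V)     = 0.05 V
R0_mc    = np.random.normal(1.3, 0.01, n_draws)    # assumed u(R_0)   = 0.01 Ohm
alpha_mc = np.random.normal(0.9, 0.02, n_draws)    # assumed u(alpha) = 0.02 per °C

P_mc = V_mc**2 / (R0_mc * (1 + alpha_mc * (t_fixed - t0)))

print("mean(P) =", P_mc.mean(), "W")
print("std(P)  =", P_mc.std(ddof=1), "W")

plt.hist(P_mc, bins=100)
plt.xlabel("power / W")
plt.ylabel("counts")
plt.show()
###Output _____no_output_____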
tests/KMeansIndustriaCafeteraExp3D.ipynb
###Markdown Clustering with K-means - Coffee cupping![Imagen](https://mott.social/wp-content/uploads/2019/12/catar-caf%C3%A9-1.jpg)We have a dataset with the results of different cuppings of multiple coffee samples. We want to group these samples according to their statistical metrics. Among them are the certified cupper's average score and flavor levels: vanilla, floral, cereal, cocoa, alcohol, fermented, roasted, dark, bitter, among others.In this case we use the KMeans algorithm, which is explained further below. ###Code # Imports import numpy as np import matplotlib.pyplot as plt from sklearn.cluster import KMeans import pandas as pd %matplotlib inline ###Output _____no_output_____ ###Markdown Importing the dataset and visualizing its characteristics ###Code cafes = pd.read_csv('../datasets/catacafe.csv',engine='python') cafes.info() ###Output <class 'pandas.core.frame.DataFrame'> RangeIndex: 178 entries, 0 to 177 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Cafe 178 non-null int64 1 Dulce 178 non-null float64 2 Floral 178 non-null float64 3 Especias 178 non-null float64 4 Tostado 178 non-null float64 5 Frutal 178 non-null int64 6 Fermentado 178 non-null float64 7 Vegetal 178 non-null float64 8 Otro 178 non-null float64 9 Cocoa 178 non-null float64 10 Cereal 178 non-null float64 11 Vainilla 178 non-null float64 12 Picante 178 non-null float64 13 Calificacion promedio 178 non-null int64 dtypes: float64(11), int64(3) memory usage: 19.6 KB ###Markdown ![Imagen](https://irp-cdn.multiscreensite.com/12b80734/ejemplo.jpg) Show the first rows for a preview of how the data are laid out ###Code cafes.head() ###Output _____no_output_____ ###Markdown We can see that the "Cafe" column is the numeric index of each sample, but the rows have already been numbered by the read method, so we drop it. ###Code cafes = cafes.drop(['Cafe'],axis=1) cafes.head() ###Output _____no_output_____ ###Markdown We obtain the summary statistics of the data per column. ###Code cafes.describe() ###Output _____no_output_____ ###Markdown The data are normalized to a suitable range and their metrics are computed again ###Code cafes_normalizado = (cafes - cafes.min())/(cafes.max()-cafes.min()) cafes_normalizado.describe() ###Output _____no_output_____ ###Markdown Notice that the data now take values between zero and one (min and max). With this preprocessing we have clean, numeric, normalized data, ready for clustering.KMeans is very effective and fast for datasets that are not too large; however, its main weakness is the choice of its input parameter, namely the number of clusters to build. Since this value is not known, some method must be used to choose it well; in this case the Jambu elbow method will be used to find an optimal number of clusters: Obtaining the elbow plot We want the clusters to be as separated from each other as possible and their elements to be as close to each other as possible. For this we use the WCSS measure: the sum of the squared distances from every data point, in all groups, to its respective centroid, i.e., a measure of similarity.The idea is to minimize this sum.
To do so, we compute the inertia of each KMeans clustering for a range of group counts, from 1 up to a chosen maximum (n+1 here); the value obtained at each iteration is stored in wcss and then plotted for analysis. ###Code num_clusters = 10 wcss = [] for i in range(1,num_clusters+1): kmeans_model = KMeans(n_clusters=i,max_iter=300) kmeans_model.fit(cafes_normalizado) wcss.append(kmeans_model.inertia_) ## Now plot the results: plt.plot(range(1,num_clusters+1),wcss) plt.title("Codo de Jambú") plt.xlabel("Número de Clusters") plt.ylabel("WCSS") plt.show() ###Output _____no_output_____ ###Markdown We can see that the optimal number of clusters is 3 for this method and this dataset. We now apply the KMeans method with this parameter. As before, the clustering model is created and then fitted with .fit ###Code agrupamiento = KMeans(n_clusters=3, max_iter=300) agrupamiento.fit(cafes_normalizado) ###Output _____no_output_____ ###Markdown This method creates a labels_ attribute inside the fitted clustering model. We add this label to the original dataset dataframe. Finally, the processed data are shown: ###Code cafes['KMeans_clusters'] = agrupamiento.labels_ cafes.head() ###Output _____no_output_____ ###Markdown Visualizing the generated clustersThe data are characterized by many variables; here we want a plot that summarizes them as much as possible, and humans can only visualize up to three dimensions.For teaching purposes, they will be shown in two dimensions. Which ones? We select the variables that best characterize all the data; for this, Principal Component Analysis (PCA) is used to reduce the number of variables to analyze, in this case to visualize.We use the decomposition package from sklearn and build a dataframe from these components in order to plot it. ###Code from sklearn.decomposition import PCA pca = PCA(n_components=2) # Two principal components pca_cafes = pca.fit_transform(cafes_normalizado) pca_cafes_df = pd.DataFrame(data= pca_cafes, columns=['Componente_1', 'Componente_2']) pca_names_cafes = pd.concat([pca_cafes_df, cafes[['KMeans_clusters']]], axis=1) # let's look at the processed data: pca_names_cafes ###Output _____no_output_____ ###Markdown Plotting the processed dataframeNow the plot figure is configured to show these data ###Code # Configuring the plot figure fig = plt.figure(figsize= (7,7)) # plotting area grafico = fig.add_subplot(1,1,1) # restrict the subplot area to a single figure grafico.set_xlabel('Componente 1',fontsize = 12 ) # X axis label and font size grafico.set_ylabel('Componente 2',fontsize = 12 ) # Y axis label and font size grafico.set_title('Componentes Principales - Clustering Kmeans',fontsize = 20 ) # Set the figure title Colores = np.array(["blue", "orange", "green"]) # Array of color names to use. There must be as many colors as generated clusters (labels) grafico.scatter(x=pca_names_cafes.Componente_1, y=pca_names_cafes.Componente_2, c=Colores[pca_names_cafes.KMeans_clusters], s=40) # call to the scatter-plot method.
plt.show() # Plot ###Output _____no_output_____ ###Markdown Saving the generated dataWe save the dataframe in csv format: ###Code # Create a csv file in the Results folder cafes.to_csv('../Results/cafe-kmeans.csv') ###Output _____no_output_____ ###Markdown Proposed exercises / experiments 1. Choose a random value for the number of clusters to use, assuming you do not know the result of the elbow method. How does the result change? What do you notice? 2. What happens when you increase or decrease the number of clusters in the call to KMeans? Why? 3. What happens to the clusters when you significantly increase or decrease the maximum number of iterations (set to 300) when using the elbow method and in the call to KMeans? Why? 4. Explain the advantages and disadvantages of the KMeans algorithm. You may consult different sources. 5. Increase the number of PCA components to 3 and plot it in 3 dimensions. What changed and what is being added? Experiments to performIncrease the PCA components to 3, configuring the plot to display them. ###Code pca = PCA(n_components=3) pca_cafes = pca.fit_transform(cafes_normalizado) pca_cafes_df = pd.DataFrame(data= pca_cafes, columns=['Componente_1', 'Componente_2','Componente_3']) pca_names_cafes = pd.concat([pca_cafes_df, cafes[['KMeans_clusters']]], axis=1) pca_names_cafes # let's look at the processed data: # Plot this new dataframe in 3D #from mpl_toolkits.mplot3d import Axes3D # another way to plot it, allows interactivity #grafico = Axes3D(fig) fig = plt.figure(figsize= (12,12)) grafico = fig.add_subplot(111, projection='3d') x = pca_names_cafes.Componente_1 y = pca_names_cafes.Componente_2 z = pca_names_cafes.Componente_3 grafico.set_xlabel('Componente 1',fontsize = 12 ) grafico.set_ylabel('Componente 2',fontsize = 12 ) grafico.set_zlabel('Componente 3',fontsize = 12 ) grafico.set_title('3 Componentes principales - Clustering Kmeans',fontsize = 20 ) Colores = np.array(["blue", "orange", "green"]) # Add the points in the 3D space grafico.scatter(x, y, z, c=Colores[pca_names_cafes.KMeans_clusters], marker='o') plt.show() # As can be seen, we are adding the third most representative component of the dataset, obtained through PCA, achieving the maximum visualization possible. # It is possible to rotate the 3D plot in some viewers and better observe the relationship between the different clusters. ###Output _____no_output_____
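###Markdown The exercises above ask how the choice of the number of clusters affects the result. The cell below is a minimal sketch (not part of the original lab) that complements the elbow method with the silhouette score from sklearn, computed on the same normalized data; higher scores indicate better-separated, more compact clusters. ###Code
# Hedged sketch: silhouette score for several candidate numbers of clusters.
from sklearn.metrics import silhouette_score

for n in range(2, 7):
    labels_n = KMeans(n_clusters=n, max_iter=300, random_state=0).fit_predict(cafes_normalizado)
    print(n, "clusters -> silhouette =", round(silhouette_score(cafes_normalizado, labels_n), 3))
###Output _____no_output_____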
t81_558_reference.ipynb
###Markdown T81-558: Applications of Deep Neural Networks**Course Reference*** Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), School of Engineering and Applied Science, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). PandasThis section describes Pandas, which is used to load/access data. For more information, refer to [class 2](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class1_intro_python.ipynb). Primary Imports```import pandas as pdimport numpy as np``` Load a Dataframe from CSV```path = "./data/"filename_read = os.path.join(path,"auto-mpg.csv")df = pd.read_csv(filename_read)``` Write a Dataframe to CSV```df = pd.read_csv(filename_read,na_values=['NA','?'])df = df.reindex(np.random.permutation(df.index))df.to_csv(filename_write,index=False) ``` Shuffle a Dataframe```filename_read = os.path.join(path,"auto-mpg.csv")df = pd.read_csv(filename_read,na_values=['NA','?'])np.random.seed(42) df = df.reindex(np.random.permutation(df.index))df.reset_index(inplace=True, drop=True)``` Access a Row and Column```print("The first car is: {}".format(df['name'].iloc[0]))``` Add Calculated Field```df.insert(1,'weight_kg',(df['weight']*0.45359237).astype(int))``` ZScore```df['mpg'] = zscore(df['mpg'])``` Fill Missing Values```med = df['horsepower'].median()df['horsepower'] = df['horsepower'].fillna(med)``` Create New Dataset with Just Some Columns```col_horsepower = df['horsepower']col_name = df['name']result = pd.concat([col_name,col_horsepower],axis=1)``` Training/Validation Split```mask = np.random.rand(len(df)) < 0.8trainDF = pd.DataFrame(df[mask])validationDF = pd.DataFrame(df[~mask])print("Training DF: {}".format(len(trainDF)))print("Validation DF: {}".format(len(validationDF)))``` KerasThe following imports allow access to Keras. ###Code # Imports from keras.models import Sequential from keras.layers.core import Dense, Activation ###Output Using TensorFlow backend. ###Markdown Keras Feedforward RegressionThe following is a basic Keras feedforward neural network for regression: ###Code input_size = 5 # How many input neurons do you need? model = Sequential() model.add(Dense(25, input_dim=input_size, activation='relu')) # Hidden 1 model.add(Dense(10, activation='relu')) # Hidden 2 model.add(Dense(1)) # Output model.compile(loss='mean_squared_error', optimizer='adam') ###Output _____no_output_____ ###Markdown Ever neural network used in this class will be of type [**Sequential**](https://keras.io/models/sequential/), even the recurrent neural networks.For feedforward, all layers are [**Dense**](https://keras.io/layers/core/). The first **Dense** layer is must specify how many input neurons are needed. Anny additional hidden layers will also be **Dense**, but should not have an **input_dim** specification. Dense has the following parameters:* **units** - A number above that specifies how many neurons on that layer. Above it is 25, 10 and 1.* **input_dim** - ONLY valid on the first Dense layer, how many input neurons you need. * **activation** - For this class should always be **relu**, unless it is the output for a regression network. If this is the output layer of a regression, then units should be 1 and **activation** should be **linear** or omitted (it defaults to **linear**). There are quite a few additional [activations](https://keras.io/activations/) you can use, just not in this course. 
Relu is the most common.The **compile** function builds the neural network. It has two parameters:* **loss** - How errors are calculated for training optimization. The **mean_squared_error** option should always be used for regression in this course. However, there are other [losses](https://keras.io/losses/) that can be used. * **optimizer** - The optimizer specifies the algorithm that will be used to adjust neural network weights during training. Typically **adam** is a good choice. However, other [optimizers](https://keras.io/optimizers/) are available. Keras Feedforward ClassificationThe following is a basic Keras feedforward neural network for classification: ###Code input_size = 5 # How many input neurons do you need? output_classes = 3 # How many output classes are there? model = Sequential() model.add(Dense(50, input_dim=input_size, activation='relu')) # Hidden 1 model.add(Dense(25, activation='relu')) # Hidden 2 model.add(Dense(output_classes,activation='softmax')) # Output model.compile(loss='categorical_crossentropy', optimizer='adam') ###Output _____no_output_____
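###Markdown The reference above shows how the models are defined and compiled, but not how they are trained. The cell below is a small sketch (not part of the original reference) of how the classification model defined above could be fit and used; the random arrays are placeholders for illustration only. ###Code
# Hedged sketch: training and using the compiled classification model above on placeholder data.
import numpy as np
from keras.utils import to_categorical

x_train = np.random.rand(100, input_size)                              # placeholder features
y_train = to_categorical(np.random.randint(output_classes, size=100))  # placeholder one-hot labels

model.fit(x_train, y_train, verbose=2, epochs=10)   # train for a few epochs
pred = model.predict(x_train)                       # class probabilities, shape (n_samples, output_classes)
print(np.argmax(pred[:5], axis=1))                  # predicted class indices for the first five rows
###Output _____no_output_____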
fibroblast_joxm/fibroblast_20210908.ipynb
###Markdown Input dataHere we import the AD and DP matrices of informative mtDNA variants detected by MQuad. ###Code mquad_AD = mmread('data/mquad/passed_ad.mtx').tocsc() mquad_DP = mmread('data/mquad/passed_dp.mtx').tocsc() mquad_AD.shape, mquad_DP.shape with open('data/mquad/passed_variant_names.txt') as f: var = f.readlines() var = [x.strip() for x in var] AD_df = pd.DataFrame(mquad_AD.todense(), index=var) DP_df = pd.DataFrame(mquad_DP.todense(), index=var) plt.imshow(AD_df/DP_df, cmap='Greens') AF_df = AD_df/DP_df AF_df = AF_df.fillna(0) ###Output _____no_output_____ ###Markdown Assign clones with vireo (Fig. 3a,b, S4a)Here we assign each cell to a clone using mtDNA variants found by MQuad. After that we compare the assignment concordance with nuclear clone labels. ###Code np.random.seed(42) _model = BinomMixtureVB(n_var=len(mquad_AD.getnnz(axis=1)), n_cell=len(mquad_AD.getnnz(axis=0)), n_donor=3) _model.fit(mquad_AD, mquad_DP, min_iter=30, n_init=300) mquad_modelCA = _model def plot_figure_S4a(ID_prob, model): im = heat_matrix(ID_prob, cmap="Oranges", alpha=0.8, display_value=False, row_sort=True, interpolation = 'none') plt.colorbar(im, fraction=0.046, pad=0.04) plt.title("Assignment probability") plt.xlabel("Clone") plt.ylabel("%d cells" %(model.n_cell)) plt.xticks(range(model.n_donor)) plt.tight_layout() def segment_cmp(cmap): top = cmp.get_cmap(cmap, 200) newcolors = np.vstack((top(np.linspace(0, 0.7, 10)), top(np.linspace(0.7, 1, 90)))) newcmp = ListedColormap(newcolors, name='segBlues') return newcmp def plot_figure_3a(AF, model, cmp): im = heat_matrix(AF, cmap=cmp, alpha=0.8, display_value=False, row_sort=True, interpolation='none', aspect='auto') plt.colorbar(im, fraction=0.046, pad=0.04) plt.title("Mean allelic ratio") plt.xlabel("Clone") plt.ylabel("%d SNPs" %(model.n_var)) plt.xticks(range(model.n_donor)) plt.tight_layout() def plot_figure_3b(ID_prob, ax): clone_id = np.argmax(ID_prob, axis=1) real_df = pd.read_csv('data/cardelino_clones_files.csv') real_lab = [i for i in real_df.V2 if i != 'unassigned'] confusion = confusionMatrix(clone_id, real_lab) #confusion = confusion[["Clone 1", "Clone 2", "Clone 3"]] res = plot_confusionMatrix(confusion, ax) plt.title('MQuad-VireoSNP prediction') plt.ylabel('Predicted clones') plt.xlabel('Nuclear clones') plt.xticks(range(3), confusion.columns) plt.yticks(range(len(confusion)), set(clone_id)) plt.tight_layout() fig = plt.figure(figsize=(8,4)) plt.subplot(1,3,1) ID_prob_sorted = np.array([[i[2], i[0], i[1]]for i in mquad_modelCA.ID_prob]) plot_figure_S4a(ID_prob_sorted, mquad_modelCA) ax = plt.subplot(1,3,2) AF_SNPs = mquad_modelCA.beta_mu #rearrange clones to match fig 3a AF_SNPs_sorted = np.array([[i[2], i[0], i[1]]for i in AF_SNPs]) #plot_figure_3a(AF_SNPs_sorted, mquad_modelCA, segment_cmp('Blues')) plot_figure_3a(AF_SNPs_sorted, mquad_modelCA, segment_cmp('Blues')) #mquad confusion matrix ax = plt.subplot(1,3,3) plot_figure_3b(ID_prob_sorted, ax) ###Output Precision = 0.8484848484848485 Recall = 0.8888888888888888 ###Markdown Gene expression analysis (Fig. 3d, e)The GE analysis was performed in R with DESeq2 (DE_fibroblast.R). Here we simply import dataframe outputs from R to do the plotting. 
###Code ## Import gene expression analysis from R from matplotlib.pyplot import Line2D from adjustText import adjust_text import re def plot_figure_3d(ax): de_tab = pd.read_csv('data/de_tab.csv', index_col=0) de_tab_sig = de_tab[de_tab.sig == True] de_tab_not_sig = de_tab[de_tab.sig == False] ax.scatter(de_tab_not_sig.logFC, -np.log10(de_tab_not_sig.PValue), color = 'grey', label = 'N.S.', s=0.5) ax.scatter(de_tab_sig.logFC, -np.log10(de_tab_sig.PValue), color='r', label = 'FDR < 10%', s=0.5) ax.set_xlim(left=-10, right=10) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) legend_elements = [Line2D([0], [0], marker = 'o', color = 'grey', label = 'N.S.', linestyle = 'None'), Line2D([0], [0], marker = 'o', color = 'r', label = 'FDR < 10%', linestyle = 'None')] ax.legend(handles = legend_elements) ax.text(-0.5, 0, 'Higher in clone1.MT0', ha='right', va='center', bbox = dict(boxstyle="larrow", fc='white', lw=1), fontsize = 6) ax.text(0.5, 0, 'Higher in clone1.MT1', ha='left', va='center', bbox = dict(boxstyle="rarrow", fc='white', lw=1), fontsize = 6) plt.xlabel('logFC') plt.ylabel('-log10(PValue)') texts = [] props = dict(boxstyle='round', alpha = 0.7, facecolor='white') for x,y,s in zip(de_tab_sig[de_tab_sig.lab.notnull()].logFC, -np.log10(de_tab_sig[de_tab_sig.lab.notnull()].PValue), de_tab_sig[de_tab_sig.lab.notnull()].lab): texts.append(plt.text(x,y,s, fontsize = 6, bbox=props)) adjust_text(texts, force_text = 1.4, force_points = 1.3, expand_points=(1.3, 2), expand_text=(1.3, 1.3), arrowprops = dict(arrowstyle="->", color='black', lw=0.5)) plt.tight_layout() def plot_figure_3e(ax): cam_H_pw = pd.read_csv('data/cam_H_pw.csv', index_col=0) cam_H_pw_not_sig = cam_H_pw[cam_H_pw.sig == False] cam_H_pw_sig = cam_H_pw[cam_H_pw.sig == True] #cam_H_pw.head() sns.swarmplot(x = cam_H_pw.Direction, y = -np.log10(cam_H_pw.PValue), hue = cam_H_pw.sig, palette = ['grey', 'r'], ax=ax) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) texts=[] props = dict(boxstyle='round', alpha = 0.7, facecolor='white') for y,s in zip(-np.log10(cam_H_pw_sig[cam_H_pw_sig.lab.notnull()].PValue), cam_H_pw_sig[cam_H_pw_sig.lab.notnull()].lab): if s != "HALLMARK_MYC_TARGETS_V1": s = re.sub("HALLMARK_", "", s) texts.append(plt.text(0,y,s, fontsize = 6, bbox=props)) adjust_text(texts, force_text = 1.3, force_points = 1.3, expand_points=(1.3, 2), expand_text=(1.3, 1.3), autoalign='y', ha='left',arrowprops = dict(arrowstyle="->", color='black', lw=0.5)) ##separately annotate myc targets to avoid overlapping with legend ax.annotate("MYC_TARGETS_V1", xy=(0,-np.log10(0.000027)), xytext=(0,3.8), bbox=props, fontsize = 6, ha='center', arrowprops = dict(arrowstyle="->", color='black', lw=0.5)) legend_elements = [Line2D([0], [0], marker = 'o', color = 'grey', label = 'N.S.', linestyle = 'None'), Line2D([0], [0], marker = 'o', color = 'r', label = 'FDR < 5%', linestyle = 'None')] ax.legend(handles = legend_elements) plt.xticks(range(2), ['Clone1.MT0', 'Clone1.MT1']) plt.xlabel('Gene set enrichment direction (Higher in)') plt.ylabel('-log10(PValue)') plt.tight_layout() fig = plt.figure(figsize=(8,4)) ax = plt.subplot(1,2,1) plot_figure_3d(ax) ax = plt.subplot(1,2,2) plot_figure_3e(ax) ###Output _____no_output_____ ###Markdown Now we just need to put everything together and leave some space for Fig. 3c, which is added in manually after. 
###Code plt.style.use('default') font = {'family' : 'Arial', 'size' : 7} plt.rc('font', **font) cm = 1/2.54 fig = plt.figure(figsize=(18*cm, 13.5*cm), dpi=100) ax1 = plt.subplot(2,3,1) ax1.text(-0.5*cm, 3*cm, 'a', transform = ax1.transAxes, fontsize = 8, fontweight = 'bold', va='bottom', ha='left') plot_figure_3a(AF_SNPs, mquad_modelCA, segment_cmp('Blues')) #mquad confusion matrix ax2 = plt.subplot(2,3,2) ax2.text(-0.5*cm, 3*cm, 'b', transform = ax2.transAxes, fontsize = 8, fontweight = 'bold', va='bottom', ha='left') plot_figure_3b(ID_prob_sorted, ax2) ax3 = plt.subplot(2,3,4) ax3.text(-0.5*cm, 3*cm, 'd', transform = ax3.transAxes, fontsize = 8, fontweight = 'bold', va='bottom', ha='left') plot_figure_3d(ax3) ax4 = plt.subplot(2,3,5) ax4.text(-0.5*cm, 3*cm, 'e', transform = ax4.transAxes, fontsize = 8, fontweight = 'bold', va='bottom', ha='left') plot_figure_3e(ax4) ax5 = plt.subplot(2,3,3) ax5.text(-0.5*cm, 3*cm, 'c', transform = ax5.transAxes, fontsize = 8, fontweight = 'bold', va='bottom', ha='left') ax5.axis('off') plt.tight_layout() plt.savefig('figures/tmp.svg') ###Output Precision = 0.8484848484848485 Recall = 0.8888888888888888 ###Markdown Supplementary Fig. S4 ###Code clone_id = np.argmax(ID_prob_sorted, axis=1) col_idx = np.argsort(clone_id) sorted_AF = AF_df.loc[:, col_idx] clone_pal = ['royalblue', 'darkgreen', 'firebrick'] clone_lut = dict(zip(set(clone_id), clone_pal)) clone_colors = pd.Series(clone_id).map(clone_lut) g = sns.clustermap(sorted_AF, col_cluster = False, row_cluster = True, xticklabels = False, cmap = 'Greens', col_colors = clone_colors, cbar_kws = dict(use_gridspec=False,orientation="horizontal")) g.cax.set_position([.7, 0.85, .2, 0.05]) ## save the clone assignment to pipe into R for DE analysis real_df = pd.read_csv('data/cardelino_clones_files.csv') real_lab = [i for i in real_df.V2 if i != 'unassigned'] d = {'mquad': clone_id, 'cardelino': real_lab} clone_df = pd.DataFrame(d) clone_df['combined'] = clone_df.cardelino + '.MT' + clone_df.mquad.astype(str) clone_df.to_csv('data/clone_id.csv') ## supp cm = 1/2.54 fig = plt.figure(figsize=(5,5)) from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable ax = plt.subplot2grid((2,2), (0,0), colspan=1) ax.text(-0.6*cm, 3*cm, 'a', transform = ax.transAxes, fontsize = 8, fontweight = 'bold', va='top', ha='right') im = heat_matrix(ID_prob_sorted, cmap="Oranges", alpha=0.8, display_value=False, row_sort=True, interpolation = 'none') plt.colorbar(im, fraction=0.046, pad=0.04) plt.title("Assignment probability") plt.xlabel("Clone") plt.ylabel("%d cells" %(mquad_modelCA.n_cell)) plt.xticks(range(mquad_modelCA.n_donor)) ax = plt.subplot2grid((2,2), (0,1), colspan=1) ax.text(-0.6*cm, 3*cm, 'b', transform = ax.transAxes, fontsize = 8, fontweight = 'bold', va='top', ha='right') mquad = pd.read_csv('data/mquad/BIC_params.csv') x,y,knee_x, knee_y = findKnee(mquad.deltaBIC) plt.plot(x,y) plt.axvline(x=knee_x, linestyle='--', color='black', label='cutoff') plt.legend() ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) plt.xlabel('Cumulative probability') plt.ylabel('\u0394BIC') ax = plt.subplot2grid((2,2), (1,0), colspan=2) ax.text(-0.3*cm, 3*cm, 'c', transform = ax.transAxes, fontsize = 8, fontweight = 'bold', va='top', ha='left') res = ax.imshow(sorted_AF.iloc[g.dendrogram_row.reordered_ind,:], cmap=segment_cmp('Greens'), aspect='auto', interpolation='none') ax.set_yticks(range(len(sorted_AF))) ax.set_yticklabels(sorted_AF.iloc[g.dendrogram_row.reordered_ind,:].index, fontsize = 7) 
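    # hide the x-axis ticks and labels: the columns are individual cells, whose clone membership is shown by the colored bar added above the heatmap further below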
plt.tick_params( axis='x', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off labelbottom=False) plt.colorbar(res) divider = make_axes_locatable(ax) top_ax = divider.append_axes("top", size=0.13, pad=0.02, sharex=ax) label_num = [len(clone_id[clone_id == 0]),len(clone_id[clone_id == 1]),len(clone_id[clone_id == 2])] LEFT = 0 iicolor = 0 c_names = ['MT0', 'MT1', 'MT2'] clone_pal = ['royalblue', 'firebrick', 'green'] for num in label_num: top_ax.barh(0,num,left=LEFT,color=clone_pal[iicolor]) top_ax.text(x=LEFT + num/2, y=1, s=c_names[iicolor], va='center', ha='center') top_ax.set_xlim(0,76) top_ax.axis('off') LEFT += num iicolor += 1 plt.tight_layout() plt.savefig('figures/supp.pdf') ###Output _____no_output_____
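###Markdown To complement the confusion matrix in Fig. 3b with a single summary number, the cell below is a minimal sketch (not part of the original analysis) that reloads the saved clone assignments and computes the adjusted Rand index between the MQuad-based and nuclear (cardelino) clone labels. ###Code
# Hedged sketch: overall agreement between MQuad clones and nuclear clones.
from sklearn.metrics import adjusted_rand_score

clone_df = pd.read_csv('data/clone_id.csv', index_col=0)
ari = adjusted_rand_score(clone_df['cardelino'], clone_df['mquad'])
print('Adjusted Rand index (MQuad vs cardelino clones): {:.3f}'.format(ari))
###Output _____no_output_____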
course2 - ML Data Lifecycle in Production/week1/TFDV_Lab/C2_W1_Lab_1_TFDV_Exercise.ipynb
###Markdown Ungraded Lab: TFDV ExerciseIn this notebook, you will get to practice using [TensorFlow Data Validation (TFDV)](https://cloud.google.com/solutions/machine-learning/analyzing-and-validating-data-at-scale-for-ml-using-tfx), an open-source Python package from the [TensorFlow Extended (TFX)](https://www.tensorflow.org/tfx) ecosystem. TFDV helps to understand, validate, and monitor production machine learning data at scale. It provides insight into some key questions in the data analysis process such as:* What are the underlying statistics of my data?* What does my training dataset look like?* How does my evaluation and serving datasets compare to the training dataset?* How can I find and fix data anomalies?The figure below summarizes the usual TFDV workflow:As shown, you can use TFDV to compute descriptive statistics of the training data and generate a schema. You can then validate new datasets (e.g. the serving dataset from your customers) against this schema to detect and fix anomalies. This helps prevent the different types of skew. That way, you can be confident that your model is training on or predicting data that is consistent with the expected feature types and distribution.This ungraded exercise demonstrates useful functions of TFDV at an introductory level as preparation for this week's graded programming exercise. Specifically, you will:- **Generate and visualize statistics from a dataset**- **Detect and fix anomalies in an evaluation dataset**Let's begin! Package Installation and Imports ###Code import tensorflow as tf import tensorflow_data_validation as tfdv import pandas as pd from sklearn.model_selection import train_test_split from util import add_extra_rows from tensorflow_metadata.proto.v0 import schema_pb2 print('TFDV Version: {}'.format(tfdv.__version__)) print('Tensorflow Version: {}'.format(tf.__version__)) ###Output TFDV Version: 0.24.1 Tensorflow Version: 2.3.1 ###Markdown Download the datasetYou will be working with the [Census Income Dataset](http://archive.ics.uci.edu/ml/datasets/Census+Income), a dataset that can be used to predict if an individual earns more than or less than 50k US Dollars annually. 
The summary of attribute names with descriptions/expected values is shown below and you can read more about it [in this data description file.](https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.names)* **age**: continuous.* **workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.* **fnlwgt**: continuous.* **education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.* **education-num**: continuous.* **marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.* **occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.* **relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.* **race**: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.* **sex**: Female, Male.* **capital-gain**: continuous.* **capital-loss**: continuous.* **hours-per-week**: continuous.* **native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.Let's load the dataset and split it into training and evaluation sets. We will not shuffle them for consistent results in this demo notebook but you should otherwise in real projects. ###Code # Read in the training and evaluation datasets df = pd.read_csv('data/adult.data', skipinitialspace=True) # Split the dataset. Do not shuffle for this demo notebook. train_df, eval_df = train_test_split(df, test_size=0.2, shuffle=False) ###Output _____no_output_____ ###Markdown Let's see the first few columns of the train and eval sets. ###Code # Preview the train set train_df.head() # Preview the eval set eval_df.head() ###Output _____no_output_____ ###Markdown From these few columns, you can get a first impression of the data. You will notice that most are strings and integers. There are also columns that are mostly zeroes. In the next sections, you will see how to use TFDV to aggregate and process this information so you can inspect it more easily. Adding extra rowsTo demonstrate how TFDV can detect anomalies later, you will add a few extra rows to the evaluation dataset. These are either malformed or have values that will trigger certain alarms later in this notebook. The code to add these can be seen in the `add_extra_rows()` function of `util.py` found in your Jupyter workspace. You can look at it later and even modify it after you've completed the entire exercise. For now, let's just execute the function and add the rows that we've defined by default. ###Code # add extra rows eval_df = add_extra_rows(eval_df) # preview the added rows eval_df.tail(4) ###Output _____no_output_____ ###Markdown Generate and visualize training dataset statistics You can now compute and visualize the statistics of your training dataset. TFDV accepts three input formats: TensorFlow’s TFRecord, Pandas Dataframe, and CSV file. 
In this exercise, you will feed in the Pandas Dataframes you generated from the train-test split. You can compute your dataset statistics by using the [`generate_statistics_from_dataframe()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/generate_statistics_from_dataframe) method. Under the hood, it distributes the analysis via [Apache Beam](https://beam.apache.org/) which allows it to scale over large datasets.The results returned by this step for numerical and categorical data are summarized in this table:| Numerical Data | Categorical Data ||:-:|:-:||Count of data records|Count of data records|% of missing data records|% of missing data records||Mean, std, min, max|unique records||% of zero values|Avg string length| ###Code # Generate training dataset statistics train_stats = tfdv.generate_statistics_from_dataframe(train_df) ###Output _____no_output_____ ###Markdown Once you've generated the statistics, you can easily visualize your results with the [`visualize_statistics()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/visualize_statistics) method. This shows a [Facets interface](https://pair-code.github.io/facets/) and is very useful to spot if you have a high amount of missing data or high standard deviation. Run the cell below and explore the different settings in the output interface (e.g. Sort by, Reverse order, Feature search). ###Code # Visualize training dataset statistics tfdv.visualize_statistics(train_stats) ###Output _____no_output_____ ###Markdown Infer data schema Next step is to create a data schema to describe your train set. Simply put, a schema describes standard characteristics of your data such as column data types and expected data value range. The schema is created on a dataset that you consider as reference, and can be reused to validate other incoming datasets.With the computed statistics, TFDV allows you to automatically generate an initial version of the schema using the [`infer_schema()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/infer_schema) method. This returns a Schema [protocol buffer](https://developers.google.com/protocol-buffers) containing the result. As mentioned in the [TFX paper](http://stevenwhang.com/tfx_paper.pdf) (Section 3.3), the results of the schema inference can be summarized as follows:* The expected type of each feature.* The expected presence of each feature, in terms of a minimum count and fraction of examples that must containthe feature.* The expected valency of the feature in each example, i.e.,minimum and maximum number of values.* The expected domain of a feature, i.e., the small universe ofvalues for a string feature, or range for an integer feature.Run the cell below to infer the training dataset schema. ###Code # Infer schema from the computed statistics. schema = tfdv.infer_schema(statistics=train_stats) # Display the inferred schema tfdv.display_schema(schema) ###Output _____no_output_____ ###Markdown Generate and visualize evaluation dataset statistics The next step after generating the schema is to now look at the evaluation dataset. You will begin by computing its statistics then compare it with the training statistics. It is important that the numerical and categorical features of the evaluation data belongs roughly to the same range as the training data. Otherwise, you might have distribution skew that will negatively affect the accuracy of your model.TFDV allows you to generate both the training and evaluation dataset statistics side-by-side. 
You can use the [`visualize_statistics()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/visualize_statistics) function and pass additional parameters to overlay the statistics from both datasets (referenced as left-hand side and right-hand side statistics). Let's see what these parameters are:- `lhs_statistics`: Required parameter. Expects an instance of `DatasetFeatureStatisticsList `.- `rhs_statistics`: Expects an instance of `DatasetFeatureStatisticsList ` to compare with `lhs_statistics`.- `lhs_name`: Name of the `lhs_statistics` dataset.- `rhs_name`: Name of the `rhs_statistics` dataset. ###Code # Generate evaluation dataset statistics eval_stats = tfdv.generate_statistics_from_dataframe(eval_df) # Compare training with evaluation tfdv.visualize_statistics( lhs_statistics=eval_stats, rhs_statistics=train_stats, lhs_name='EVAL_DATASET', rhs_name='TRAIN_DATASET' ) ###Output _____no_output_____ ###Markdown We encourage you to observe the results generated and toggle the menus to practice manipulating the visualization (e.g. sort by missing/zeroes). You'll notice that TFDV detects the malformed rows we introduced earlier. First, the `min` and `max` values of the `age` row shows `0` and `1000`, respectively. We know that those values do not make sense if we're talking about working adults. Secondly, the `workclass` row in the Categorical Features says that `0.02%` of the data is missing that particular attribute. Let's drop these rows to make the data more clean. ###Code # filter the age range eval_df = eval_df[eval_df['age'] > 16] eval_df = eval_df[eval_df['age'] < 91] # drop missing values eval_df.dropna(inplace=True) ###Output _____no_output_____ ###Markdown You can then compute the statistics again and see the difference in the results. ###Code # Generate evaluation dataset statistics eval_stats = tfdv.generate_statistics_from_dataframe(eval_df) # Compare training with evaluation tfdv.visualize_statistics( lhs_statistics=eval_stats, rhs_statistics=train_stats, lhs_name='EVAL_DATASET', rhs_name='TRAIN_DATASET' ) ###Output _____no_output_____ ###Markdown Calculate and display evaluation anomalies You can use your reference schema to check for anomalies such as new values for a specific feature in the evaluation data. Detected anomalies can either be considered a real error that needs to be cleaned, or depending on your domain knowledge and the specific case, they can be accepted. Let's detect and display evaluation anomalies and see if there are any problems that need to be addressed. ###Code # Check evaluation data for errors by validating the evaluation dataset statistics using the reference schema anomalies = tfdv.validate_statistics(statistics=eval_stats, schema=schema) # Visualize anomalies tfdv.display_anomalies(anomalies) ###Output _____no_output_____ ###Markdown Revising the SchemaAs shown in the results above, TFDV is able to detect the remaining irregularities we introduced earlier. The short and long descriptions tell us what were detected. As expected, there are string values for `race`, `native-country` and `occupation` that are not found in the domain of the training set schema (you might see a different result if the shuffling of the datasets was applied). What you decide to do about the anomalies depend on your domain knowledge of the data. If an anomaly indicates a data error, then the underlying data should be fixed. 
Otherwise, you can update the schema to include the values in the evaluation dataset.TFDV provides a set of utility methods and parameters that you can use for revising the inferred schema. This [reference](https://www.tensorflow.org/tfx/data_validation/anomalies) lists down the type of anomalies and the parameters that you can edit but we'll focus only on a couple here.- You can relax the minimum fraction of values that must come from the domain of a particular feature (as described by `ENUM_TYPE_UNEXPECTED_STRING_VALUES` in the [reference](https://www.tensorflow.org/tfx/data_validation/anomalies)):```pythontfdv.get_feature(schema, 'feature_column_name').distribution_constraints.min_domain_mass = ```- You can add a new value to the domain of a particular feature:```pythontfdv.get_domain(schema, 'feature_column_name').value.append('string')```Let's use these in the next section. Fix anomalies in the schemaLet's say that we want to accept the string anomalies reported as valid. If you want to tolerate a fraction of missing values from the evaluation dataset, you can do it like this: ###Code # Relax the minimum fraction of values that must come from the domain for the feature `native-country` country_feature = tfdv.get_feature(schema, 'native-country') country_feature.distribution_constraints.min_domain_mass = 0.9 # Relax the minimum fraction of values that must come from the domain for the feature `occupation` occupation_feature = tfdv.get_feature(schema, 'occupation') occupation_feature.distribution_constraints.min_domain_mass = 0.9 ###Output _____no_output_____ ###Markdown If you want to be rigid and instead add only valid values to the domain, you can do it like this: ###Code # Add new value to the domain of the feature `race` race_domain = tfdv.get_domain(schema, 'race') race_domain.value.append('Asian') ###Output _____no_output_____ ###Markdown In addition, you can also restrict the range of a numerical feature. This will let you know of invalid values without having to inspect it visually (e.g. the invalid `age` values earlier). ###Code # Restrict the range of the `age` feature tfdv.set_domain(schema, 'age', schema_pb2.IntDomain(name='age', min=17, max=90)) # Display the modified schema. Notice the `Domain` column of `age`. tfdv.display_schema(schema) ###Output _____no_output_____ ###Markdown With these revisions, running the validation should now show no anomalies. ###Code # Validate eval stats after updating the schema updated_anomalies = tfdv.validate_statistics(eval_stats, schema) tfdv.display_anomalies(updated_anomalies) ###Output _____no_output_____ ###Markdown Examining dataset slicesTFDV also allows you to analyze specific slices of your dataset. This is particularly useful if you want to inspect if a feature type is well-represented in your dataset. Let's walk through an example where we want to compare the statistics for male and female participants. First, you will use the [`get_feature_value_slicer`](https://github.com/tensorflow/data-validation/blob/master/tensorflow_data_validation/utils/slicing_util.pyL48) method from the `slicing_util` to get the features you want to examine. You can specify that by passing a dictionary to the `features` argument. If you want to get the entire domain of a feature, then you can map the feature name with `None` as shown below. This means that you will get slices for both `Male` and `Female` entries. This returns a function that can be used to extract the said feature slice. 
###Code from tensorflow_data_validation.utils import slicing_util slice_fn = slicing_util.get_feature_value_slicer(features={'sex': None}) ###Output _____no_output_____ ###Markdown With the slice function ready, you can now generate the statistics. You need to tell TFDV that you need statistics for the features you set and you can do that through the `slice_functions` argument of [`tfdv.StatsOptions`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/StatsOptions). Let's prepare that in the cell below. Notice that you also need to pass in the schema. ###Code # Declare stats options slice_stats_options = tfdv.StatsOptions(schema=schema, slice_functions=[slice_fn], infer_type_from_schema=True) ###Output _____no_output_____ ###Markdown You will then pass these options to the `generate_statistics_from_csv()` method. As of writing, generating sliced statistics only works for CSVs so you will need to convert the Pandas dataframe to a CSV. Passing the `slice_stats_options` to `generate_statistics_from_dataframe()` will not produce the expected results. ###Code # Convert dataframe to CSV since `slice_functions` works only with `tfdv.generate_statistics_from_csv` CSV_PATH = 'slice_sample.csv' train_df.to_csv(CSV_PATH) # Calculate statistics for the sliced dataset sliced_stats = tfdv.generate_statistics_from_csv(CSV_PATH, stats_options=slice_stats_options) ###Output _____no_output_____ ###Markdown With that, you now have the statistics for the set slice. These are packed into a `DatasetFeatureStatisticsList` protocol buffer. You can see the dataset names below. The first element in the list (i.e. index=0) is named `All_Examples` which just contains the statistics for the entire dataset. The next two elements (i.e. named `sex_Male` and `sex_Female`) are the datasets that contain the stats for the slices. It is important to note that these datasets are of the type: `DatasetFeatureStatistics`. You will see why this is important after the cell below. ###Code print(f'Datasets generated: {[sliced.name for sliced in sliced_stats.datasets]}') print(f'Type of sliced_stats elements: {type(sliced_stats.datasets[0])}') ###Output Datasets generated: ['All Examples', 'sex_Male', 'sex_Female'] Type of sliced_stats elements: <class 'tensorflow_metadata.proto.v0.statistics_pb2.DatasetFeatureStatistics'> ###Markdown You can then visualize the statistics as before to examine the slices. An important caveat is `visualize_statistics()` accepts a `DatasetFeatureStatisticsList` type instead of `DatasetFeatureStatistics`. Thus, at least for this version of TFDV, you will need to convert it to the correct type. ###Code from tensorflow_metadata.proto.v0.statistics_pb2 import DatasetFeatureStatisticsList # Convert `Male` statistics (index=1) to the correct type and get the dataset name male_stats_list = DatasetFeatureStatisticsList() male_stats_list.datasets.extend([sliced_stats.datasets[1]]) male_stats_name = sliced_stats.datasets[1].name # Convert `Female` statistics (index=2) to the correct type and get the dataset name female_stats_list = DatasetFeatureStatisticsList() female_stats_list.datasets.extend([sliced_stats.datasets[2]]) female_stats_name = sliced_stats.datasets[2].name # Visualize the two slices side by side tfdv.visualize_statistics( lhs_statistics=male_stats_list, rhs_statistics=female_stats_list, lhs_name=male_stats_name, rhs_name=female_stats_name ) ###Output _____no_output_____
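###Markdown Finally, since the curated schema is meant to be reused for validating future data, you may want to persist it. The cell below is a short sketch of one way to do that with TFDV's schema text utilities; the output filename is just an example. ###Code
# Hedged sketch: save the curated schema and reload it later for validation.
SCHEMA_PATH = 'schema.pbtxt'   # example path

tfdv.write_schema_text(schema, SCHEMA_PATH)         # serialize the Schema protobuf as text
reloaded_schema = tfdv.load_schema_text(SCHEMA_PATH)

# The reloaded schema can be used exactly like the in-memory one.
new_anomalies = tfdv.validate_statistics(statistics=eval_stats, schema=reloaded_schema)
tfdv.display_anomalies(new_anomalies)
###Output _____no_output_____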
chapter_multilayer-perceptrons/underfit-overfit.ipynb
###Markdown Model Selection, Underfitting and Overfitting:label:`sec_model_selection`As machine learning scientists, our goal is to discover *patterns*.But how can we be sure that we have truly discovered a *general* patternand not simply memorized our data. For example, imagine that we wanted to hunt for patterns among genetic markers linking patients to their dementia status,(let the labels be drawn from the set{*dementia*, *mild cognitive impairment*, *healthy*}).Because each person's genes identify them uniquely(ignoring identical siblings),it is possible to memorize the entire dataset.We do not want our model to say *"That's Bob! I remember him! He has dementia!*The reason why is simple. When we deploy the model in the future,we will encounter patientsthat the model has never seen before.Our predictions will only be usefulif our model has truly discovered a *general* pattern.To recapitulate more formally, our goal is to discover patternsthat capture regularities in the underlying populationfrom which our training set was drawn.If we are successful in this endeavor,then we could successfully assess riskeven for individuals that we have never encountered before.This problem---how to discover patterns that *generalize*---isthe fundamental problem of machine learning.The danger is that when we train models,we access just a small sample of data.The largest public image datasets contain roughly one million images.More often, we must learn from only thousands or tens of thousands of data points.In a large hospital system, we might accesshundreds of thousands of medical records.When working with finite samples, we run the riskthat we might discover *apparent* associationsthat turn out not to hold up when we collect more data.The phenomena of fitting our training datamore closely than we fit the underlying distribution is called overfitting, and the techniques used to combat overfitting are called regularization.In the previous sections, you might have observedthis effect while experimenting with the Fashion-MNIST dataset.If you altered the model structure or the hyper-parameters during the experiment, you might have noticed that with enough nodes, layers, and training epochs, the model can eventually reach perfect accuracy on the training set, even as the accuracy on test data deteriorates. 
Training Error and Generalization Error

In order to discuss this phenomenon more formally, we need to differentiate between *training error* and *generalization error*. The training error is the error of our model as calculated on the training dataset, while generalization error is the expectation of our model's error were we to apply it to an infinite stream of additional data points drawn from the same underlying data distribution as our original sample. Problematically, *we can never calculate the generalization error exactly*. That is because the stream of infinite data is an imaginary object. In practice, we must *estimate* the generalization error by applying our model to an independent test set constituted of a random selection of data points that were withheld from our training set.

The following three thought experiments will help illustrate this situation better. Consider a college student trying to prepare for her final exam. A diligent student will strive to practice well and test her abilities using exams from previous years. Nonetheless, doing well on past exams is no guarantee that she will excel when it matters. For instance, the student might try to prepare by rote learning the answers to the exam questions. This requires the student to memorize many things. She might even remember the answers for past exams perfectly. Another student might prepare by trying to understand the reasons for giving certain answers. In most cases, the latter student will do much better.

Likewise, consider a model that simply uses a lookup table to answer questions. If the set of allowable inputs is discrete and reasonably small, then perhaps after viewing *many* training examples, this approach would perform well. Still, this model has no ability to do better than random guessing when faced with examples that it has never seen before. In reality the input spaces are far too large to memorize the answers corresponding to every conceivable input. For example, consider the black and white $28\times28$ images. If each pixel can take one among $256$ grayscale values, then there are $256^{784}$ possible images. That means that there are far more low-res grayscale thumbnail-sized images than there are atoms in the universe. Even if we could encounter this data, we could never afford to store the lookup table.

Last, consider the problem of trying to classify the outcomes of coin tosses (class 0: heads, class 1: tails) based on some contextual features that might be available. No matter what algorithm we come up with, the generalization error will always be $\frac{1}{2}$. However, for most algorithms, we should expect our training error to be considerably lower, depending on the luck of the draw, even if we did not have any features! Consider the dataset {0, 1, 1, 1, 0, 1}. Our feature-less algorithm would have to fall back on always predicting the *majority class*, which appears from our limited sample to be *1*. In this case, the model that always predicts class 1 will incur an error of $\frac{1}{3}$, considerably better than our generalization error. As we increase the amount of data, the probability that the fraction of heads will deviate significantly from $\frac{1}{2}$ diminishes, and our training error would come to match the generalization error. 
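To make the coin-toss argument above concrete, here is a tiny self-contained simulation. It is written in plain NumPy purely for illustration (the runnable code in this chapter uses Java and DJL): we draw $n$ fair coin tosses, let a feature-less model predict whichever class forms the majority of that sample, and measure its error on the very same sample. As $n$ grows, this training error creeps toward the generalization error of $\frac{1}{2}$. ###Code
# Illustration only: a NumPy sketch, not part of the DJL code in this chapter
import numpy as np

np.random.seed(0)
for n in [6, 100, 10000]:
    flips = np.random.randint(0, 2, size=n)      # n fair coin tosses (0 = heads, 1 = tails)
    majority = 1 if flips.mean() >= 0.5 else 0   # feature-less majority-class predictor
    train_error = np.mean(flips != majority)     # error measured on the training sample itself
    print(n, train_error)
###Output _____no_output_____ ###Markdown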
Statistical Learning TheorySince generalization is the fundamental problem in machine learning,you might not be surprised to learnthat many mathematicians and theorists have dedicated their livesto developing formal theories to describe this phenomenon.In their [eponymous theorem](https://en.wikipedia.org/wiki/Glivenko%E2%80%93Cantelli_theorem), Glivenko and Cantelliderived the rate at which the training errorconverges to the generalization error.In a series of seminal papers, [Vapnik and Chervonenkis](https://en.wikipedia.org/wiki/Vapnik%E2%80%93Chervonenkis_theory)extended this theory to more general classes of functions.This work laid the foundations of [Statistical Learning Theory](https://en.wikipedia.org/wiki/Statistical_learning_theory).In the *standard supervised learning setting*, which we have addressed up until now and will stick with throughout most of this book,we assume that both the training data and the test dataare drawn *independently* from *identical* distributions(commonly called the i.i.d. assumption).This means that the process that samples our data has no *memory*.The $2^{\mathrm{nd}}$ example drawn and the $3^{\mathrm{rd}}$ drawnare no more correlated than the $2^{\mathrm{nd}}$ and the $2$-millionth sample drawn.Being a good machine learning scientist requires thinking critically,and already you should be poking holes in this assumption,coming up with common cases where the assumption fails.What if we train a mortality risk predictoron data collected from patients at UCSF,and apply it on patients at Massachusetts General Hospital?These distributions are simply not identical.Moreover, draws might be correlated in time.What if we are classifying the topics of Tweets.The news cycle would create temporal dependenciesin the topics being discussed, violating any assumptions of independence.Sometimes we can get away with minor violations of the i.i.d. assumptionand our models will continue to work remarkably well.After all, nearly every real-world applicationinvolves at least some minor violation of the i.i.d. assumption,and yet we have useful tools for face recognition,speech recognition, language translation, etc.Other violations are sure to cause trouble.Imagine, for example, if we try to traina face recognition system by training itexclusively on university studentsand then want to deploy it as a toolfor monitoring geriatrics in a nursing home population.This is unlikely to work well since college studentstend to look considerably different from the elderly.In subsequent chapters and volumes, we will discuss problemsarising from violations of the i.i.d. assumption.For now, even taking the i.i.d. assumption for granted,understanding generalization is a formidable problem.Moreover, elucidating the precise theoretical foundationsthat might explain why deep neural networks generalize as well as they docontinues to vex the greatest minds in learning theory.When we train our models, we attempt to search for a functionthat fits the training data as well as possible.If the function is so flexible that it can catch on to spurious patternsjust as easily as to true associations,then it might perform *too well* without producing a modelthat generalizes well to unseen data.This is precisely what we want to avoid (or at least control).Many of the techniques in deep learning are heuristics and tricksaimed at guarding against overfitting. 
Model ComplexityWhen we have simple models and abundant data,we expect the generalization error to resemble the training error.When we work with more complex models and fewer examples,we expect the training error to go down but the generalization gap to grow.What precisely constitutes model complexity is a complex matter.Many factors govern whether a model will generalize well.For example a model with more parameters might be considered more complex.A model whose parameters can take a wider range of valuesmight be more complex.Often with neural networks, we think of a modelthat takes more training steps as more complex,and one subject to *early stopping* as less complex.It can be difficult to compare the complexity among membersof substantially different model classes(say a decision tree versus a neural network).For now, a simple rule of thumb is quite useful:A model that can readily explain arbitrary factsis what statisticians view as complex,whereas one that has only a limited expressive powerbut still manages to explain the data wellis probably closer to the truth.In philosophy, this is closely related to Popper’scriterion of [falsifiability](https://en.wikipedia.org/wiki/Falsifiability)of a scientific theory: a theory is good if it fits dataand if there are specific tests that can be used to disprove it.This is important since all statistical estimation is[post hoc](https://en.wikipedia.org/wiki/Post_hoc),i.e., we estimate after we observe the facts,hence vulnerable to the associated fallacy.For now, we will put the philosophy aside and stick to more tangible issues.In this section, to give you some intuition,we’ll focus on a few factors that tendto influence the generalizability of a model class:1. The number of tunable parameters. When the number of tunable parameters, sometimes called the *degrees of freedom*, is large, models tend to be more susceptible to overfitting.1. The values taken by the parameters. When weights can take a wider range of values, models can be more susceptible to overfitting.1. The number of training examples. It’s trivially easy to overfit a dataset containing only one or two examples even if your model is simple. But overfitting a dataset with millions of examples requires an extremely flexible model. Model SelectionIn machine learning, we usually select our final modelafter evaluating several candidate models.This process is called model selection.Sometimes the models subject to comparisonare fundamentally different in nature(say, decision trees vs linear models).At other times, we are comparingmembers of the same class of modelsthat have been trained with different hyperparameter settings.With multilayer perceptrons, for example,we may wish to compare models withdifferent numbers of hidden layers,different numbers of hidden units,and various choices of the activation functionsapplied to each hidden layer.In order to determine the best among our candidate models,we will typically employ a validation set. 
Validation Dataset

In principle we should not touch our test set until after we have chosen all our hyper-parameters. Were we to use the test data in the model selection process, there is a risk that we might overfit the test data. Then we would be in serious trouble. If we overfit our training data, there is always the evaluation on test data to keep us honest. But if we overfit the test data, how would we ever know? Thus, we should never rely on the test data for model selection. And yet we cannot rely solely on the training data for model selection either because we cannot estimate the generalization error on the very data that we use to train the model. The common practice to address this problem is to split our data three ways, incorporating a *validation set* in addition to the training and test sets.

In practical applications, the picture gets muddier. While ideally we would only touch the test data once, to assess the very best model or to compare a small number of models to each other, real-world test data is seldom discarded after just one use. We can seldom afford a new test set for each round of experiments. The result is a murky practice where the boundaries between validation and test data are worryingly ambiguous. Unless explicitly stated otherwise, in the experiments in this book we are really working with what should rightly be called training data and validation data, with no true test sets. Therefore, the accuracy reported in each experiment is really the validation accuracy and not a true test set accuracy. The good news is that we do not need too much data in the validation set. The uncertainty in our estimates can be shown to be of the order of $\mathcal{O}(n^{-\frac{1}{2}})$.

$K$-Fold Cross-Validation

When training data is scarce, we might not even be able to afford to hold out enough data to constitute a proper validation set. One popular solution to this problem is to employ $K$*-fold cross-validation*. Here, the original training data is split into $K$ non-overlapping subsets. Then model training and validation are executed $K$ times, each time training on $K-1$ subsets and validating on a different subset (the one not used for training in that round). Finally, the training and validation error rates are estimated by averaging over the results from the $K$ experiments. 
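Because the folds are formed before any model is fit, the bookkeeping is easy to sketch on its own. The snippet below is a framework-agnostic NumPy illustration (not the DJL/Java code used later in this chapter) of how the $K$ non-overlapping folds are built and how each round holds one fold out for validation; actually fitting a model and measuring its error are left as placeholders. ###Code
# Illustration only: a NumPy sketch of the K-fold bookkeeping
import numpy as np

def k_fold_indices(n_examples, k, seed=0):
    # Shuffle the example indices, then split them into k non-overlapping folds
    np.random.seed(seed)
    return np.array_split(np.random.permutation(n_examples), k)

folds = k_fold_indices(n_examples=100, k=5)
for i, valid_idx in enumerate(folds):
    train_idx = np.concatenate([fold for j, fold in enumerate(folds) if j != i])
    # Train on train_idx, evaluate on valid_idx, then average the K validation errors
    print("round", i, "train size", len(train_idx), "validation size", len(valid_idx))
###Output _____no_output_____ ###Markdown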
Underfitting or Overfitting?When we compare the training and validation errors,we want to be mindful of two common situations:First, we want to watch out for caseswhen our training error and validation error are both substantialbut there is a little gap between them.If the model is unable to reduce the training error,that could mean that our model is too simple(i.e., insufficiently expressive)to capture the pattern that we are trying to model.Moreover, since the *generalization gap*between our training and validation errors is small,we have reason to believe that we could get away with a more complex model.This phenomenon is known as underfitting.On the other hand, as we discussed above,we want to watch out for the caseswhen our training error is significantly lowerthan our validation error, indicating severe overfitting.Note that overfitting is not always a bad thing.With deep learning especially, it is well knownthat the best predictive models often performfar better on training data than on holdout data.Ultimately, we usually care more about the validation errorthan about the gap between the training and validation errors.Whether we overfit or underfit can dependboth on the complexity of our modeland the size of the available training datasets,two topics that we discuss below. Model ComplexityTo illustrate some classical intuitionabout overfitting and model complexity,we give an example using polynomials.Given training data consisting of a single feature $x$and a corresponding real-valued label $y$,we try to find the polynomial of degree $d$$$\hat{y}= \sum_{i=0}^d x^i w_i$$to estimate the labels $y$.This is just a linear regression problemwhere our features are given by the powers of $x$,the model's weights are given by $w_i$,and the bias is given by $w_0$ since $x^0 = 1$ for all $x$.Since this is just a linear regression problem,we can use the squared error as our loss function.A higher-order polynomial function is more complexthan a lower order polynomial function,since the higher-order polynomial has more parametersand the model function’s selection range is wider.Fixing the training dataset,higher-order polynomial functions should alwaysachieve lower (at worst, equal) training errorrelative to lower degree polynomials.In fact, whenever the data points each have a distinct value of $x$,a polynomial function with degree equal to the number of data pointscan fit the training set perfectly.We visualize the relationship between polynomial degreeand under- vs over-fitting in :numref:`fig_capacity_vs_error`.![Influence of Model Complexity on Underfitting and Overfitting](https://raw.githubusercontent.com/d2l-ai/d2l-en/master/img/capacity_vs_error.svg):label:`fig_capacity_vs_error` Dataset SizeThe other big consideration to bear in mind is the dataset size.Fixing our model, the fewer samples we have in the training dataset,the more likely (and more severely) we are to encounter overfitting.As we increase the amount of training data,the generalization error typically decreases.Moreover, in general, more data never hurts.For a fixed task and data *distribution*,there is typically a relationship between model complexity and dataset size.Given more data, we might profitably attempt to fit a more complex model.Absent sufficient data, simpler models may be difficult to beat.For many tasks, deep learning only outperforms linear modelswhen many thousands of training examples are available.In part, the current success of deep learningowes to the current abundance of massive datasetsdue to Internet 
companies, cheap storage, connected devices,and the broad digitization of the economy. Polynomial RegressionWe can now explore these concepts interactivelyby fitting polynomials to data.To get started we will import our usual packages. ###Code %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/ %maven ai.djl:api:0.7.0-SNAPSHOT %maven org.slf4j:slf4j-api:1.7.26 %maven org.slf4j:slf4j-simple:1.7.26 %maven ai.djl.mxnet:mxnet-engine:0.7.0-SNAPSHOT %maven ai.djl.mxnet:mxnet-native-auto:1.7.0-b %%loadFromPOM <dependency> <groupId>tech.tablesaw</groupId> <artifactId>tablesaw-jsplot</artifactId> <version>0.30.4</version> </dependency> %load ../utils/plot-utils.ipynb %load ../utils/DataPoints.java import ai.djl.Device; import ai.djl.Model; import ai.djl.ndarray.NDArray; import ai.djl.ndarray.NDManager; import ai.djl.ndarray.index.NDIndex; import ai.djl.ndarray.types.DataType; import ai.djl.ndarray.types.Shape; import ai.djl.nn.Block; import ai.djl.nn.ParameterList; import ai.djl.nn.SequentialBlock; import ai.djl.nn.core.Linear; import ai.djl.training.DefaultTrainingConfig; import ai.djl.training.Trainer; import ai.djl.training.dataset.ArrayDataset; import ai.djl.training.dataset.Batch; import ai.djl.training.listener.TrainingListener; import ai.djl.training.loss.Loss; import ai.djl.training.optimizer.Optimizer; import ai.djl.training.tracker.Tracker; import ai.djl.util.RandomUtils; import ai.djl.training.EasyTrain; import ai.djl.translate.TranslateException; import org.apache.commons.lang3.ArrayUtils; import java.lang.*; import ai.djl.metric.Metrics; import ai.djl.training.evaluator.Evaluator; import ai.djl.training.listener.EvaluatorTrainingListener; ###Output _____no_output_____ ###Markdown Generating the DatasetFirst we need data. Given $x$, we will use the following cubic polynomial to generate the labels on training and test data:$$y = 5 + 1.2x - 3.4\frac{x^2}{2!} + 5.6 \frac{x^3}{3!} + \epsilon \text{ where }\epsilon \sim \mathcal{N}(0, 0.1).$$The noise term $\epsilon$ obeys a normal distributionwith a mean of 0 and a standard deviation of 0.1.We will synthesize 100 samples each for the training set and test set. 
###Code // utility functions used public float facti(float x) { if (x < 0) return 0f; float fact = 1.0f; while(x > 1) { fact = fact * x; x = x - 1; } return fact; } public void swap(NDArray arr, int i, int j) { float tmp = arr.getFloat(i); arr.set(new NDIndex(i), arr.getFloat(j)); arr.set(new NDIndex(j), tmp); } public NDArray shuffle(NDArray arr) { int size = (int) arr.size(); Random rnd = RandomUtils.RANDOM; for (int i = Math.toIntExact(size) - 1; i > 0; --i) { swap(arr, i, rnd.nextInt(i)); } return arr; } public NDArray factorial(NDArray input) { for (int i = 0; i < input.size(); i++) { input.set(new NDIndex(i), facti(input.get(i).getFloat())); } return input; } int maxDegree = 20; // Maximum degree of the polynomial // Training and test dataset sizes int nTrain = 100; int nTest = 100; NDManager manager = NDManager.newBaseManager(); NDArray trueW = manager.zeros(new Shape(maxDegree)); // Allocate lots of empty space NDArray tempArr = manager.create(new float[]{5f, 1.2f, -3.4f, 5.6f}); for (int i = 0; i < tempArr.size(); i++) { trueW.set(new NDIndex(i), tempArr.getFloat(i)); } NDArray features = manager.randomNormal(new Shape(nTrain + nTest, 1)); features = shuffle(features); NDArray polyFeatures = features.pow(manager.arange(maxDegree).reshape(1, -1)); NDArray factorialArr = factorial(manager.arange(maxDegree).add(1.0f).toType(DataType.FLOAT32, false)).reshape(1, -1); polyFeatures = polyFeatures.div(factorialArr); // Shape of `labels`: (`n_train` + `n_test`,) NDArray labels = polyFeatures.dot(trueW); labels = labels.add(manager.randomNormal(0, 0.1f, labels.getShape(), DataType.FLOAT32, Device.defaultDevice())); ###Output _____no_output_____ ###Markdown For optimization, we typically want to avoidvery large values of gradients, losses, etc.This is why the monomials stored in `polyDeatures`are rescaled from $x^i$ to $\frac{1}{i!} x^i$.It allows us to avoid very large values for large exponents $i$.Take a look at the first 2 samples from the generated dataset.The value 1 is technically a feature,namely the constant feature corresponding to the bias. ###Code System.out.println("features: " + features.get(":2")); System.out.println("polyFeatures: " + polyFeatures.get(":2")); System.out.println("labels: " + labels.get(":2")); ###Output _____no_output_____ ###Markdown Training and Testing ModelLet us first implement a function to evaluate the loss on a given data. ###Code int numEpochs = 1000; public ArrayDataset loadArray(NDArray features, NDArray labels, int batchSize, boolean shuffle) { return new ArrayDataset.Builder() .setData(features) // set the features .optLabels(labels) // set the labels .setSampling(batchSize, shuffle) // set the batch size and random sampling .build(); } double[] trainLoss; double[] testLoss; double[] epochCount; trainLoss = new double[numEpochs/50]; testLoss = new double[numEpochs/50]; epochCount = new double[numEpochs/50]; ###Output _____no_output_____ ###Markdown Now define the training function. 
###Code NDArray weight = null; public void train(NDArray trainFeatures, NDArray testFeatures, NDArray trainLabels, NDArray testLabels, int nDegree) throws IOException, TranslateException { Loss l2Loss = Loss.l2Loss(); NDManager manager = NDManager.newBaseManager(); Tracker lrt = Tracker.fixed(0.01f); Optimizer sgd = Optimizer.sgd().setLearningRateTracker(lrt).build(); DefaultTrainingConfig config = new DefaultTrainingConfig(l2Loss) .optOptimizer(sgd) // Optimizer (loss function) .addTrainingListeners(TrainingListener.Defaults.logging()); // Logging Model model = Model.newInstance("mlp"); SequentialBlock net = new SequentialBlock(); // Switch off the bias since we already catered for it in the polynomial // features Linear linearBlock = Linear.builder().optBias(false).setUnits(1).build(); net.add(linearBlock); model.setBlock(net); Trainer trainer = model.newTrainer(config); int batchSize = Math.min(10, (int) trainLabels.getShape().get(0)); ArrayDataset trainIter = loadArray(trainFeatures, trainLabels, batchSize, true); ArrayDataset testIter = loadArray(testFeatures, testLabels, batchSize, true); trainer.initialize(new Shape(1, nDegree)); for (int epoch = 1; epoch <= numEpochs; epoch++) { // Iterate over dataset for (Batch batch : trainer.iterateDataset(trainIter)) { // Update loss and evaulator EasyTrain.trainBatch(trainer, batch); // Update parameters trainer.step(); batch.close(); } // reset training and validation evaluators at end of epoch for (Batch batch : trainer.iterateDataset(testIter)) { // Update loss and evaulator EasyTrain.validateBatch(trainer, batch); batch.close(); } trainer.notifyListeners(listener -> listener.onEpoch(trainer)); if (epoch % 50 == 0) { epochCount[epoch/50 - 1] = epoch; trainLoss[epoch/50 - 1] = (float) Math.log10(trainer.getTrainingResult().getEvaluations().get("train_loss")); testLoss[epoch/50 - 1] = (float) Math.log10(trainer.getTrainingResult().getEvaluations().get("validate_loss")); } } Block layer = model.getBlock(); ParameterList params = layer.getParameters(); weight = params.get(0).getValue().getArray(); } ###Output _____no_output_____ ###Markdown Third-Order Polynomial Function Fitting (Normal)We will begin by first using a third-order polynomial functionwith the same order as the data generation function.The results show that this model’s training error ratewhen using the testing dataset is low. The trained model parameters are also closeto the true values $w = [5, 1.2, -3.4, 5.6]$. ###Code // Pick the first four dimensions, i.e., 1, x, x^2/2!, x^3/3! 
from the // polynomial features int nDegree = 4; train(polyFeatures.get("0:" + nTrain + ", 0:"+nDegree), polyFeatures.get(nTrain + ": , 0:"+nDegree), labels.get(":" + nTrain), labels.get(nTrain + ":"), nDegree); String[] lossLabel = new String[trainLoss.length + testLoss.length]; Arrays.fill(lossLabel, 0, testLoss.length, "test loss"); Arrays.fill(lossLabel, testLoss.length, trainLoss.length + testLoss.length, "train loss"); Table data = Table.create("Data").addColumns( DoubleColumn.create("epochCount", ArrayUtils.addAll(epochCount, epochCount)), DoubleColumn.create("loss", ArrayUtils.addAll(testLoss, trainLoss)), StringColumn.create("lossLabel", lossLabel) ); render(LinePlot.create("", data, "epochCount", "loss", "lossLabel"),"text/html"); ###Output _____no_output_____ ###Markdown Linear Function Fitting (Underfitting)Let’s take another look at linear function fitting.After the decline in the early epoch,it becomes difficult to further decreasethis model’s training error rate.After the last epoch iteration has been completed,the training error rate is still high.When used to fit non-linear patterns(like the third-order polynomial function here)linear models are liable to underfit. ###Code // Pick the first two dimensions, i.e., 1, x, from the polynomial features int nDegree = 2; train(polyFeatures.get("0:" + nTrain + ", 0:"+nDegree), polyFeatures.get(nTrain + ": , 0:"+nDegree), labels.get(":" + nTrain), labels.get(nTrain + ":"), nDegree); String[] lossLabel = new String[trainLoss.length + testLoss.length]; Arrays.fill(lossLabel, 0, testLoss.length, "test loss"); Arrays.fill(lossLabel, testLoss.length, trainLoss.length + testLoss.length, "train loss"); Table data = Table.create("Data").addColumns( DoubleColumn.create("epochCount", ArrayUtils.addAll(epochCount, epochCount)), DoubleColumn.create("loss", ArrayUtils.addAll(testLoss, trainLoss)), StringColumn.create("lossLabel", lossLabel) ); render(LinePlot.create("", data, "epochCount", "loss", "lossLabel"),"text/html"); ###Output _____no_output_____ ###Markdown Insufficient Training (Overfitting)Now let us try to train the modelusing a polynomial of too high degree.Here, there is insufficient data to learn thatthe higher-degree coefficients should have values close to zero.As a result, our overly-complex modelis far too susceptible to being influencedby noise in the training data.Of course, our training error will now be low(even lower than if we had the right model!)but our test error will be high.Try out different model complexities (`nDegree`)and training set sizes (`nSubset`)to gain some intuition of what is happening. ###Code // Pick all the dimensions from the polynomial features int nSubset = 100; int nDegree = 20; train(polyFeatures.get("1:" + nSubset + ", 0:"+ nDegree), polyFeatures.get(nTrain + ": , 0:"+ nDegree), labels.get("1:" + nSubset), labels.get(nTrain + ":"), nDegree); String[] lossLabel = new String[trainLoss.length + testLoss.length]; Arrays.fill(lossLabel, 0, testLoss.length, "test loss"); Arrays.fill(lossLabel, testLoss.length, trainLoss.length + testLoss.length, "train loss"); Table data = Table.create("Data").addColumns( DoubleColumn.create("epochCount", ArrayUtils.addAll(epochCount, epochCount)), DoubleColumn.create("loss", ArrayUtils.addAll(testLoss, trainLoss)), StringColumn.create("lossLabel", lossLabel) ); render(LinePlot.create("", data, "epochCount", "loss", "lossLabel"),"text/html"); ###Output _____no_output_____
courses/python/material/ipynbs/Getting Started with Pandas.ipynb
###Markdown `Pandas` in a nutshell `Pandas` is the `Python` library designed specifically for data analysis. The author of _Python for Data Analysis_, Wes McKinney, began developing `Pandas` in 2008> while at [AQR Capital Management](http://en.wikipedia.org/wiki/AQR_Capital) out of need for a performant, flexible tool to perform quantitative analysis on financial data. Before leaving AQR he was able to convince management to allow him to open source the library.> Another AQR employee, Chang She, joined the effort in 2012 as the second major contributor to the library. Right around that time, the library became popular in the `Python` community, and many more contributors joined the project making it one of the most vital and active data analysis libraries for `Python`. ([Wikipedia](http://en.wikipedia.org/wiki/Pandas_(software))`Pandas` can be thought of the `Python` equivalent of Microsoft Excel. It abstracts the notion of the spreadsheet, allowing the user to use powerful and robust analytical tools generally to automate repeated processes.The twin centerpieces of the `Pandas` library are the `Series` and the `DataFrame`. The `Series` class is, at its core, a one-dimensional `NumPy` array, surrounded by additional information, such as its index. The `DataFrame` is conceptually an _array_ of `Series` classes, each sharing the same index. ###Code from pandas import Series, DataFrame import pandas as pd ###Output _____no_output_____ ###Markdown We will be using Wes McKinney's [GitHub notebook](https://github.com/pydata/pydata-book/blob/master/ch05.ipynb) as a skeleton. He imports the following libraries for later use: ###Code from __future__ import division from numpy.random import randn import numpy as np import os import matplotlib.pyplot as plt np.random.seed(12345) plt.rc('figure', figsize=(10, 6)) from pandas import Series, DataFrame import pandas as pd np.set_printoptions(precision=4) ###Output _____no_output_____ ###Markdown Introduction to pandas data structures Series Consider the following input: ###Code obj = Series([4, 7, -5, 3]) obj ###Output _____no_output_____ ###Markdown We have set the variable `obj` to reference a new `Pandas Series`, which we initialized by giving a `Python` list as input. Notice that `Pandas` automatically interprets the input data as type `int64`, which indicates that it is fairly smart! Also, notice that upon printing `obj` we see _two_ columns. The first column is the _index_ of the `Series` class, which is presently the natural index, `range(4)`. The second column is the input data that we gave initially, which `Pandas` refers to as the _values_ of `obj`. You can access these columns individualy by calling `obj.index` and `obj.values`, respectively. For example: ###Code print obj.index, "\n\n" print obj.values ###Output _____no_output_____ ###Markdown IndexingAs previously mentioned, the natural index simply starts at 0 and increments integers to the size of the list of the input values. Alternatively, we can specify the index explicitly when we initialize the `Series`, as in the following: ###Code obj2 = Series([4, 7, -5, 3], index=['d', 'b', 'a', 'c']) obj2 ###Output _____no_output_____ ###Markdown What happens when you examine the index now? ###Code obj2.index ###Output _____no_output_____ ###Markdown Don't be alarmed by the `u` prefix to each of the index values. In `Python` (as well as in other languages, this simply indicates a [Unicode string](http://stackoverflow.com/questions/2464959/whats-the-u-prefix-in-a-python-string). 
Ostensibly, there is no difference between normal strings and Unicode strings.You can access a particular member of the `Series` data by specifying its index. For example, ###Code obj2['a'] ###Output _____no_output_____ ###Markdown This allows you to change the value of specific entries in your `Series` data. Additionally, you can call _sub_`Series` by specifying a sublist of the index. ###Code obj2['d'] = 6 obj2[['c', 'a', 'd']] ###Output _____no_output_____ ###Markdown A powerful tool in `Pandas` is the ability to concisely access data meeting `Boolean` qualifications. In the case below, `obj2 > 0` is given as the "index," and the output is the sub`Series` of `obj2` for which all entries are positive. ###Code obj2[obj2 > 0] ###Output _____no_output_____ ###Markdown AsideWhat is `obj2 > 0` actually? ###Code obj2 > 0 ###Output _____no_output_____ ###Markdown This is actually a neat property of `Pandas` which is similar to `NumPy`. In `NumPy`, suppose you are given an array: ###Code arr = np.random.rand(5) * 2.0 - 1.0 ###Output _____no_output_____ ###Markdown The actual array itself is given by ###Code arr ###Output _____no_output_____ ###Markdown The `Boolean` array specifying which elements of `arr` are positive is given by ###Code boolArr = arr > 0.0 boolArr ###Output _____no_output_____ ###Markdown Similarly, you can generate a new `Series` of `Boolean` values by subjecting the original `Series` to a `Boolean` statement, as we did above. BroadcastingLike `NumPy`, we can _broadcast_ arithmetic operations onto `Series` data. For example, ###Code obj2 * 2 ###Output _____no_output_____ ###Markdown returns a `Series` whose values are doubled, and ###Code np.exp(obj2) ###Output _____no_output_____ ###Markdown returns a `Series` whose values have all been subject to the transformation $x\mapsto e^x$. Notice additionally that the `dtype` of `obj2` has automatically been changed from `int64` to `float64`. Again, `Pandas` is being smart! Querying a `Series` In `Python`, there is a binary operator called `in`, which takes two "arguments." The left-hand argument is can be any type of data (or object, we won't get into this), while the right-hand argument is some type of iterable object. Then `in` returns `True` if the left-hand argument is an _element_ of the right-hand argument. Mathematically, this is equivalent to set membership. For example, ###Code odds = [i for i in range(20) if i%2 == 1] print 3 in odds, "|", 2 in odds ###Output _____no_output_____ ###Markdown is equivalent to noting that if $$\text{Odds} = \{ n : 0 \leq n < 20 \text{ and } n \text{ is odd}\}$$ we have that $$3 \in \text{Odds}$$ while $$2 \notin \text{Odds}.$$(In fact, we have already seen `in` in action with `Python`'s `for` loop, which has the form ```Pythonfor element in iterative_object:```indicating that the code should loop through every element `element` that is a member of `iterative_object`.)You can use `in` with `Pandas` `Series` to test that an element is a member of the index of the `Series`. For example, ###Code 'b' in obj2 'e' in obj2 ###Output _____no_output_____ ###Markdown AsideWe have talked about the native `Python` list data type. There is another important native data type in `Python`, called a `dict`, which you can learn about more [here](http://www.codecademy.com/courses/python-beginner-en-pwmb1/2/1?curriculum_id=4f89dab3d788890003000096). 
`Python` `dict` types are similar to association lists in `Scheme`, in that they require a lookup key in order to access elements.Crucially, `Pandas` can create a `Series` from a `dict` by interpreting the key for each item as its corresponding index value, which is actually quite natural. In this sense, I find that it is useful to think of the relationship between `NumPy` and `Pandas` as akin to the relationship between a list and a `dict`. ###Code sdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000} obj3 = Series(sdata) obj3 ###Output _____no_output_____ ###Markdown What happens when you use an existing dataset with a new index, in which there is a new, unfilled index? ###Code states = ['California', 'Ohio', 'Oregon', 'Texas'] obj4 = Series(sdata, index=states) obj4 ###Output _____no_output_____ ###Markdown In this case, `California` is a previously-unused index, which has no corresponding value. Thus, `Pandas` initializes the new `Series` with the value corresponding to `California` set to `NaN` (`Python`-speak for null).The `isnull` method returns a `Series` of `Boolean` values whenever the original `Series` has a null (`NaN`) value. ###Code pd.isnull(obj4) ###Output _____no_output_____ ###Markdown The `notnull` method does the exact opposite! ###Code pd.notnull(obj4) ###Output _____no_output_____ ###Markdown The methods `isnull` and `notnull` are "static" in the sense that they can be called straight from the `pd` module or for a specific `Series` object. ###Code obj4.isnull() ###Output _____no_output_____ ###Markdown Recall the two `Series`, `obj3` and `obj4`: ###Code print "\tobj3:\n",obj3, "\n\n\tobj4:\n", obj4 ###Output _____no_output_____ ###Markdown Arithmetic operations between distinct `Series` objects work conservatively. For data types, `int64 + float64 = float64` to preserve the decimal information. The summed index is the union of the two indices. Consider the following example: ###Code obj3 + obj4 ###Output _____no_output_____ ###Markdown No entry for `California` exists in `obj3`, while no entry for `Utah` exists in `obj4`. `Pandas` interprets `NaN + x = NaN` for all `x`, so the resultant `Series` sets `NaN` for both `California` and `Utah`. We can set some metadata for a `Series`, such as the name of the values column and the name of the index column. ###Code obj4.name = 'population' obj4.index.name = 'state' obj4 ###Output _____no_output_____ ###Markdown You can also completely change the index at any time. This is something we will get into more detail later. ###Code obj.index = ['Bob', 'Steve', 'Jeff', 'Ryan'] obj ###Output _____no_output_____ ###Markdown DataFrame Like we said before, you can think of a `DataFrame` as an _array_ of `Series` objects. Specifically, a `DataFrame` is a two-dimensional array of `Series` objects, all indexed by the same index series. You can also think of a `DataFrame` as a single Microsoft Excel spreadsheet.One way to initialize a `DataFrame` is by giving a `dict` where each key indicates a `Python` list. ###Code data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'], 'year': [2000, 2001, 2002, 2001, 2002], 'pop': [1.5, 1.7, 3.6, 2.4, 2.9]} frame = DataFrame(data) frame ###Output _____no_output_____ ###Markdown You can reorder the columns in a new `DataFrame` using the following argument: ###Code DataFrame(data, columns=['year', 'state', 'pop']) ###Output _____no_output_____ ###Markdown Similarly, the `index` optional argument in `DataFrame` allows you to specify the index list. 
Additionally, adding a `debt` column with no corresponding data in `data` will initialize a column filled with `NaN` entries. ###Code frame2 = DataFrame(data, columns=['year', 'state', 'pop', 'debt'], index=['one', 'two', 'three', 'four', 'five']) frame2 ###Output _____no_output_____ ###Markdown You can access the columns of a `DataFrame` as follows: ###Code frame2.columns ###Output _____no_output_____ ###Markdown You can slice a particular column by specifying its column name. Notice how this returns a `Series`. ###Code frame2['state'] ###Output _____no_output_____ ###Markdown Alternatively, you can slice a column using the following syntax: ###Code frame2.year ###Output _____no_output_____ ###Markdown To slice a row, you can specify an index, which will return a `Series` representing the row at the index. ###Code frame2.ix['three'] ###Output _____no_output_____ ###Markdown Broadcasting works in the natural way that you might expect: ###Code frame2['debt'] = 16.5 frame2 ###Output _____no_output_____ ###Markdown You can also give a particular column a list or `ndarray`, which will then be distributed across the column. ###Code frame2['debt'] = np.arange(5.) frame2 ###Output _____no_output_____ ###Markdown Finally, you can give a column of a `DataFrame` a `Series`. If you specify a `Series` with an index differing from the main `DataFrame`, then the entries of the `DataFrame` will be set to `NaN`. ###Code val = Series([-1.2, -1.5, -1.7], index=['two', 'four', 'five']) frame2['debt'] = val frame2 ###Output _____no_output_____ ###Markdown The point of `Pandas` is there are _numerous_ ways to achieve the same effect, depending on whatever is easiest for the task at hand. Here is another way to add a column: ###Code frame2['eastern'] = frame2.state == 'Ohio' frame2 ###Output _____no_output_____ ###Markdown We can also use `Python`'s `del` function to remove a column: ###Code del frame2['eastern'] frame2.columns ###Output _____no_output_____ ###Markdown One final way to initialize `DataFrame` objects is with nested `dict` objects. ###Code pop = {'Nevada': {2001: 2.4, 2002: 2.9}, 'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}} frame3 = DataFrame(pop) frame3 ###Output _____no_output_____ ###Markdown You can transpose a `DataFrame` if it makes more sense to work with the rows and columns flipped. ###Code frame3.T ###Output _____no_output_____ ###Markdown You can do this transpose operation from the outset by manually specifying the index. ###Code DataFrame(pop, index=[2001, 2002, 2003]) ###Output _____no_output_____ ###Markdown `DataFrame` objects can also be initialized from `dict`s of `Series` objects. ###Code pdata = {'Ohio': frame3['Ohio'][:-1], 'Nevada': frame3['Nevada'][:2]} DataFrame(pdata) frame3.index.name = 'year'; frame3.columns.name = 'state' frame3 ###Output _____no_output_____ ###Markdown If you need to access the underlying `ndarray` from any `DataFrame`, use the `DataFrame.values` field. ###Code frame3.values frame2.values ###Output _____no_output_____ ###Markdown Index objects The `Index` is the "metadata" object for `Series` and `DataFrame` objects. We've seen ways of initializing `Index` objects before, so we will go over some features of these objects. ###Code obj = Series(range(3), index=['a', 'b', 'c']) index = obj.index index ###Output _____no_output_____ ###Markdown `Index` objects can be sliced like arrays. 
###Code index[1:] ###Output _____no_output_____ ###Markdown Importantly, `Index` objects are not mutable, so you can't change their values in the natural way: ###Code index[1] = 'd' ###Output _____no_output_____ ###Markdown You can initialize `Index` objects with `NumPy` `ndarray` objects. ###Code index = pd.Index(np.arange(3)) obj2 = Series([1.5, -2.5, 0], index=index) obj2.index is index frame3 print 'Ohio' in frame3.columns, "|", 2003 in frame3.index ###Output _____no_output_____ ###Markdown Essential functionality Now that we are familiar with the basic objects in `Pandas`, we will start working with the mechanics of these objects. Reindexing In the previous section we mentioned that `Index` objects are immutable. Here we will address this issue. ###Code obj = Series([4.5, 7.2, -5.3, 3.6], index=['d', 'b', 'a', 'c']) obj ###Output _____no_output_____ ###Markdown The simplest way to change an `Index` object in an existing `Series` or `DataFrame` is with the `reindex` method. ###Code obj2 = obj.reindex(['a', 'b', 'c', 'd', 'e']) obj2 ###Output _____no_output_____ ###Markdown In the above example, since `"e"` was not in the original `Index`, the corresponding `Series` value is set to `NaN`. If you want to change the default fill value, `reindex` can take an additional parameter, `fill_value`. ###Code obj.reindex(['a', 'b', 'c', 'd', 'e'], fill_value=0) ###Output _____no_output_____ ###Markdown A different approach uses a `method` parameter that attempts to extrapolate existing data into the new `Index`. One such method is `ffill`, which "step-fills" the existing data forward. Alternatively, `bfill` "step-fills" the data backwards. ###Code obj3 = Series(['blue', 'purple', 'yellow'], index=[0, 2, 4]) obj3.reindex(range(6), method='ffill') ###Output _____no_output_____ ###Markdown The `reindex` method works for `DataFrame` objects as well. For `DataFrame` objects, `reindex` can also specify column reindexing. ###Code frame = DataFrame(np.arange(9).reshape((3, 3)), index=['a', 'c', 'd'], columns=['Ohio', 'Texas', 'California']) frame frame.reindex(index=['a', 'b', 'c', 'd'], method='ffill', columns=states) ###Output _____no_output_____ ###Markdown Alternatively, you can use `ix` to achieve the same effect more concisely. ###Code frame.ix[['a', 'b', 'c', 'd'], states] ###Output _____no_output_____ ###Markdown Dropping entries from an axis Suppose you have a `Series` object with data you wish to remove. Using the `drop` method, you can specify an index element to remove. ###Code obj = Series(np.arange(5.), index=['a', 'b', 'c', 'd', 'e']) new_obj = obj.drop('c') new_obj ###Output _____no_output_____ ###Markdown You can also drop a list of index elements at once. ###Code obj.drop(['d', 'c']) ###Output _____no_output_____ ###Markdown The same works for `DataFrame` objects and the `drop` method. ###Code data = DataFrame(np.arange(16).reshape((4, 4)), index=['Ohio', 'Colorado', 'Utah', 'New York'], columns=['one', 'two', 'three', 'four']) data.drop(['Colorado', 'Ohio']) ###Output _____no_output_____ ###Markdown Additionally, `DataFrame.drop()` can remove columns by specifying an `axis` parameter. ###Code data.drop('two', axis=1) data.drop(['two', 'four'], axis=1) ###Output _____no_output_____ ###Markdown Indexing, selection, and filtering In this section we will explore the various techniques available for slicing `Series` and `DataFrame` objects. One the one hand, we can deal with these objects as `dict` structures, accessing elements by requesting their index keys. 
On the other hand, we can treat these objects as list structures, accessing elements by the order of the index list. ###Code obj = Series(np.arange(4.), index=['a', 'b', 'c', 'd']) obj['b'] obj[1] ###Output _____no_output_____ ###Markdown This flexibility allows you to incorporate all of the previous array slicing that worked for `NumPy ndarray` objects. ###Code obj[2:4] ###Output _____no_output_____ ###Markdown Conversely, you can use a list of `dict` keys to achieve the same end. ###Code obj[['b', 'a', 'd']] ###Output _____no_output_____ ###Markdown Here are some alternative slicing techniques for `Series` objects. ###Code obj[[1, 3]] obj[obj < 2] obj['b':'c'] ###Output _____no_output_____ ###Markdown You can assign values to sub-objects which then reflect on the original object. ###Code obj['b':'c'] = 5 obj ###Output _____no_output_____ ###Markdown The same capabilities are extended to the `DataFrame` objects. The added flexibility is that the same indexing techniques also apply to column slicing as well as index slicing. ###Code data = DataFrame(np.arange(16).reshape((4, 4)), index=['Ohio', 'Colorado', 'Utah', 'New York'], columns=['one', 'two', 'three', 'four']) data data['two'] data[['three', 'one']] ###Output _____no_output_____ ###Markdown The natural slicing will always refer to the index list, not the column list, which is useful to keep in mind.data[:2] ###Code data[data['three'] > 5] ###Output _____no_output_____ ###Markdown Recall that you can generate a corresponding `Boolean` array by subjecting a `DataFrame` to a boolean statement, such as the following: ###Code data < 5 ###Output _____no_output_____ ###Markdown You can use `Boolean` arrays to do simple thresholding to your data. You can isolate entries in your data subject to identical `Boolean` conditions, and manipulate these specific subsets of the data. ###Code data[data < 5] = 0 data ###Output _____no_output_____ ###Markdown The `DataFrame.ix` field gives you even more powerful ways to slice your data. In general, slicing works by providing two arguments, an index and a column specification, and it will then return that particular subset. ###Code data.ix['Colorado', ['two', 'three']] ###Output _____no_output_____ ###Markdown You can overload requests by using a list of index or column elements. Additionally, you may reorder the indices or columns in your subset by permuting the order of the specified elements, so long as they exist in the original `DataFrame`. ###Code data.ix[['Colorado', 'Utah'], [3, 0, 1]] ###Output _____no_output_____ ###Markdown The `ix` approach is very powerful. See if you can work through the mechanics of the next few examples to see just how versatile slicing with `ix` actually is. ###Code data.ix[2] data.ix[:'Utah', 'two'] data.ix[data.three > 5, :3] ###Output _____no_output_____ ###Markdown Arithmetic and data alignment As we mentioned before, we can do arithmetic on `Series` and `DataFrame` objects. ###Code s1 = Series([7.3, -2.5, 3.4, 1.5], index=['a', 'c', 'd', 'e']) s2 = Series([-2.1, 3.6, -1.5, 4, 3.1], index=['a', 'c', 'e', 'f', 'g']) print s1, "\n\n" print s2 ###Output _____no_output_____ ###Markdown Importantly, arithmetic is only performed on elements sharing an index. If either object has an index value that the other does not, the arithmetic operation is undefined, so the resultant object contains an `NaN` element. 
###Code s1 + s2 ###Output _____no_output_____ ###Markdown The same holds for `DataFrame` arithmetic, except now it requires that both the index and column of each `DataFrame` object is well-defined. ###Code df1 = DataFrame(np.arange(9.).reshape((3, 3)), columns=list('bcd'), index=['Ohio', 'Texas', 'Colorado']) df2 = DataFrame(np.arange(12.).reshape((4, 3)), columns=list('bde'), index=['Utah', 'Ohio', 'Texas', 'Oregon']) print df1, "\n\n" print df2 df1 + df2 ###Output _____no_output_____ ###Markdown Arithmetic methods with fill values Often `NaN` values are undesirable, as they can cause errors when doing arithmetic operations on the data. ###Code df1 = DataFrame(np.arange(12.).reshape((3, 4)), columns=list('abcd')) df2 = DataFrame(np.arange(20.).reshape((4, 5)), columns=list('abcde')) print df1, "\n\n" print df2 df1 + df2 ###Output _____no_output_____ ###Markdown This can be avoided by using the built-in `DataFrame.add()` method, which takes as parameters a `DataFrame` object _and_ an optional `fill_value` which deals with otherwise `NaN` entries. ###Code df1.add(df2, fill_value=0) ###Output _____no_output_____ ###Markdown In fact, most `DataFrame` organization methods take `fill_value` as a parameter to deal with undefined cases, such as `reindex`. ###Code df1.reindex(columns=df2.columns, fill_value=0) ###Output _____no_output_____ ###Markdown Operations between DataFrame and Series Broadcasting `NumPy` arrays is a very useful technique for performing arithmetic operations concisely. And efficiently, actually. This is because while normal `Python` arithmetic is _interpreted_, `NumPy` arithmetic is based on _compiled_ `C` code, which is much more efficient in general. ###Code arr = np.arange(12.).reshape((3, 4)) arr ###Output _____no_output_____ ###Markdown Normally we think of broadcasting a scalar element onto a one-dimensional array vector. In fact, broadcasting is much more powerful, because you can broadcast an _array_ over a bigger array. ###Code arr[0] arr - arr[0] ###Output _____no_output_____ ###Markdown `DataFrame` and `Series` objects work along similar lines. ###Code frame = DataFrame(np.arange(12.).reshape((4, 3)), columns=list('bde'), index=['Utah', 'Ohio', 'Texas', 'Oregon']) series = frame.ix[0] print frame, "\n\n" print series ###Output _____no_output_____ ###Markdown You can broadcast the values in a `Series` over its parent `DataFrame` as you would with `NumPy ndarrays`. ###Code frame - series ###Output _____no_output_____ ###Markdown Of course, if either a `Series` and `DataFrame` object has index or column values the other does not, the undefined arithmetic simply is sent to `NaN`. (We discussed ways to avoid this issue in the previous sections). ###Code series2 = Series(range(3), index=['b', 'e', 'f']) frame + series2 series3 = frame['d'] print frame, "\n\n" print series3 ###Output _____no_output_____ ###Markdown Using the built-in `DataFrame` arithmetic operations such as `add` or `sub` gives the option to specify the axis (0: index, 1: columns) over which the arithmetic will take place (again, you can use `fill_value` to avoid potential `NaN` values). ###Code frame.sub(series3, axis=0) ###Output _____no_output_____ ###Markdown Function application and mapping One of the most important capabilities of `Series` and `DataFrame` is the ability to apply function transformations to the data. Every `ufunc` defined by `NumPy` can be applied to a `DataFrame` (or `Series`) object. 
###Code frame = DataFrame(np.random.randn(4, 3), columns=list('bde'), index=['Utah', 'Ohio', 'Texas', 'Oregon']) frame ###Output _____no_output_____ ###Markdown For example, you can apply a nonnegativity transform by including a built-in `NumPy` absolute value. ###Code np.abs(frame) ###Output _____no_output_____ ###Markdown You can define and apply custom functions in two fashions. One is by using lambdas to construct anonymous functions: ###Code frame.apply(lambda x: x.max() - x.min()) frame.apply(lambda x: x.max() - x.min(), axis=1) ###Output _____no_output_____ ###Markdown Alternatively, you can define your own unary function and simply apply it using the same overall approach. ###Code def f(x): return Series([x.min(), x.max()], index=['min', 'max']) frame.apply(f) ###Output _____no_output_____ ###Markdown For presentations and general readability, it is useful to format decimal or date values into condensed forms, and `Pandas` lets you achieve this by using the `applymap` method for `DataFrame` and `Series` objects. The difference between `apply` and `applymap` is rather subtle and often functionally neglibible, but the idea is that `apply` works on a particular subsets of rows or columns, whereas `applymap` is element-wise. ###Code format = lambda x: '%.2f' % x frame.applymap(format) ###Output _____no_output_____ ###Markdown Alternatively, you can use the built-in `Python` `map` function. ###Code frame['e'].map(format) ###Output _____no_output_____ ###Markdown Exercise:Determine which of `apply` or `map` is computationally more efficient. Sorting and ranking One fundamental problem in data analysis, let alone computer science in general, is sorting data. `Pandas` provides a number of techniques for sorting information in the index, columns, and the actual data itself.The first technique is `sort_index`, which is a method for both `Series` and `DataFrame` objects. For `Series` objects, `sort_index` works as follows: ###Code obj = Series(range(4), index=['d', 'a', 'b', 'c']) obj.sort_index() ###Output _____no_output_____ ###Markdown Since there is only one meaningful index to sort, the labels, `sort_index` is a very intuitive method. I want to point out that `sort_index` does not have side-effects; that is, calling `sort_index` on an object does not actually change the internals of the object itself. Instead, a sorted copy of the original object is produced.The method `sort_index` works similarly with `DataFrame` objects, but now there are two potential axes along which to sort. The default is the `index`, as we see below: ###Code frame = DataFrame(np.arange(8).reshape((2, 4)), index=['three', 'one'], columns=['d', 'a', 'b', 'c']) frame.sort_index() ###Output _____no_output_____ ###Markdown By specifying the axis as a parameter, one can choose the columns instead. (Recall that in `Python` everything begins at 0, so the second axis corresponds to axis number 1). ###Code frame.sort_index(axis=1) ###Output _____no_output_____ ###Markdown The `sort_index` method also allows you to flip the ordering by specifying the `ascending` parameter. ###Code frame.sort_index(axis=1, ascending=False) ###Output _____no_output_____ ###Markdown If you want to sort the elements themselves, as opposed to the index, `Pandas` provides the `order` method for `Series` objects. ###Code obj = Series([4, 7, -3, 2]) obj.order() ###Output _____no_output_____ ###Markdown By default, `NaN` values are placed at the end upon sorting the `Series`. 
###Code obj = Series([4, np.nan, 7, np.nan, -3, 2]) obj.order() ###Output _____no_output_____ ###Markdown For `DataFrame` objects you can specify the index or column you wish to sort. Additionally, if your data set is properly constructed, you can sort by two columns or indices, as the below example exhibits: ###Code frame = DataFrame({'b': [4, 7, -3, 2], 'a': [0, 1, 0, 1]}) frame.sort_index(by='b') frame.sort_index(by=['a', 'b']) ###Output _____no_output_____ ###Markdown Axis indexes with duplicate values It is possible for a `Series` or `DataFrame` object not to have a unique index. For example: ###Code obj = Series(range(5), index=['a', 'a', 'b', 'b', 'c']) obj ###Output _____no_output_____ ###Markdown `Pandas` has a field for the index of any object to indicate whether or ot the index is unique (no duplicate indices). ###Code obj.index.is_unique ###Output _____no_output_____ ###Markdown If an index is not unique, then slicing the object for a repeated index returns a sub-object. For example: ###Code obj['a'] ###Output _____no_output_____ ###Markdown For unique index items, the default return-type is a scalar: ###Code obj['c'] ###Output _____no_output_____ ###Markdown The same goes for `DataFrame` objects, although they are more complicated. ###Code df = DataFrame(np.random.randn(4, 3), index=['a', 'a', 'b', 'b']) df ###Output _____no_output_____ ###Markdown Remember that you have to slice the index using `ix`, and you will observe the same behavior. ###Code df.ix['b'] ###Output _____no_output_____ ###Markdown Summarizing and computing descriptive statistics Oftentimes, you need a quick way to come up with basic summary statistics of data sets. The solution that `Pandas` provides is incredibly robust, especially with regard to `NaN` entries. ###Code df = DataFrame([[1.4, np.nan], [7.1, -4.5], [np.nan, np.nan], [0.75, -1.3]], index=['a', 'b', 'c', 'd'], columns=['one', 'two']) df ###Output _____no_output_____ ###Markdown By default, the `sum` method will skip `NaN` entries for each column in a `DataFrame`. ###Code df.sum() ###Output _____no_output_____ ###Markdown For the `DataFrame` object, you can also apply along either the index axis or the column axis. Again, `sum` will skip over `NaN` elements when arriving at a value. ###Code df.sum(axis=1) ###Output _____no_output_____ ###Markdown If you don't want this behavior, you can always tell the statistics function you are applying not to skip the `NaN` entries. Here is an example using `mean`: ###Code df.mean(axis=1, skipna=False) ###Output _____no_output_____ ###Markdown Another useful statistic is `idxmax`, which returns the index of the maximum value of a column in a `DataFrame`. ###Code df.idxmax() ###Output _____no_output_____ ###Markdown One incredibly useful method is `cumsum`, which has a number of important applications in the analysis of probability distributions and random walks. ###Code df.cumsum() ###Output _____no_output_____ ###Markdown You can also get a quick overview of all of the summary statistics of a `DataFrame` simply by calling the `describe` method. 
###Code df.describe() ###Output _____no_output_____ ###Markdown Descriptive and Summary Statistics|Method | Description ||-------|-------------|| `count` | Number of non-`NaN` values || `describe` | Compute set of summary statistics for Series or each `DataFrame` column || `min`, `max` | Compute minimum and maximum values || `argmin`, `argmax` | Compute index locations for minimum and maximum values || `idxmin`, `idxmax` | Compute index values for minimum and maximum values || `quantile` | Compute sample quantile ranging from 0 to 1 || `sum` | Sum of values || `mean` | Mean of values || `median` | Arithmetic median of values || `mad` | Mean absolute deviation from mean value || `var` | Sample variance of values || `std` | Sample standard deviation of values || `skew` | Sample skewness (3rd moment) of values || `kurt` | Sample kurtosis (4th moment) of values || `cumsum` | Cumulative sum of values || `cummin`, `cummax` | Cumulative min and max of values || `cumprod` | Cumulative product of values || `diff` | Compute 1st arithmetic difference (useful for time series) || `pct_change` | Compute percent changes | `Series` objects also have a `describe` method. The `describe` method outputs statistics based on the `dtype` of the underlying object. In the above example, `df` had a `dtype` of `float64`, so `describe` produced information pertinent to floating-point numerics. In the below example, the `Series` object has a `dtype` of `object`, which results in different summary statistics. ###Code obj = Series(['a', 'a', 'b', 'c'] * 4) obj.describe() ###Output _____no_output_____ ###Markdown Correlation and covariance One common problem in data analysis, especially in the analysis of time series data like historical prices for financial securities, is correlation and covariance analysis. To this end `Pandas` has a number of features to make the analysis simple. Here is one example, using a built-in data aggregator for [Yahoo! Finance](http://finance.yahoo.com/) in the `Pandas` API. Returns on a stock are defined as the percent change in the stock's closing value from day-to-day. ###Code import pandas.io.data as web all_data = {} for ticker in ['AAPL', 'IBM', 'MSFT', 'CSCO']: all_data[ticker] = web.get_data_yahoo(ticker) price = DataFrame({tic: data['Adj Close'] for tic, data in all_data.iteritems()}) volume = DataFrame({tic: data['Volume'] for tic, data in all_data.iteritems()}) returns = price.pct_change() returns.tail() ###Output _____no_output_____ ###Markdown When given a `Series` object, the `corr` method computes the scalar correlation between the `Series` and another `Series`. ###Code returns.MSFT.corr(returns.IBM) ###Output _____no_output_____ ###Markdown By contrast, `corr` and `cov` on the full `DataFrame` return a correlation matrix and a covariance matrix, respectively. ###Code returns.MSFT.cov(returns.IBM) returns.corr() returns.cov() ###Output _____no_output_____ ###Markdown The `corrwith` method computes pairwise correlations and stores the result in a `Series`. Note that the correlation between IBM and IBM is 1. ###Code returns.corrwith(returns.IBM) ###Output _____no_output_____ ###Markdown Passing a `DataFrame` instead computes the correlations of like-named columns. ###Code returns.corrwith(volume) ###Output _____no_output_____ ###Markdown Unique values, value counts, and membership Given data with repeats, you can eliminate the excess by using the `unique` method.
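###Markdown One caveat about the data-loading cell above: the `pandas.io.data` module was later split out of `pandas` into the separate `pandas-datareader` package, and the availability of the Yahoo! Finance source has varied over time. A rough sketch of the same download with that package (treat the source name and date range as assumptions, not a guaranteed working feed):
###Code
import pandas_datareader.data as web

aapl = web.DataReader('AAPL', 'yahoo', '2015-01-01', '2015-12-31')
returns = aapl['Adj Close'].pct_change()
###Output _____no_output_____
###Markdown The next cell turns from numeric summaries to categorical ones, starting with `unique`.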
###Code obj = Series(['c', 'a', 'd', 'a', 'a', 'b', 'b', 'c', 'c']) uniques = obj.unique() uniques ###Output _____no_output_____ ###Markdown The `value_counts` returns a `Series` with an index made up of the unique entries in the original `Series`, and the new entries give the total appearances of each value. ###Code obj.value_counts() ###Output _____no_output_____ ###Markdown You can perform set-membership operations to, for example, construct masks which you can then apply to your original data. ###Code mask = obj.isin(['b', 'c']) # This forms a Series object of Boolean values obj[mask] ###Output _____no_output_____ ###Markdown For `DataFrame` objects, you can apply the `value_counts` method to each subseries, producing a new `DataFrame` of frequency statistics. ###Code data = DataFrame({'Qu1': [1, 3, 4, 3, 4], 'Qu2': [2, 3, 1, 2, 3], 'Qu3': [1, 5, 2, 4, 4]}) data.apply(pd.value_counts).fillna(0) ###Output _____no_output_____ ###Markdown Handling missing data One of the primary problems with data analysis is the prevalence of missing data. In many cases, arithmetic operations, summary statistics, and other functions require that your data be intact in order to provide meaningful results. `Pandas` gives a number of functions to address the problem of missing data, allowing you to filter it out easily. Consider this `Series` of `string` values. ###Code string_data = Series(['aardvark', 'artichoke', np.nan, 'avocado']) string_data ###Output _____no_output_____ ###Markdown The `isnull` method identifies every `NaN` entry. Alternatively,`notnull` will identify every non-`NaN` entry. ###Code string_data.isnull() string_data[0] = None string_data.notnull() ###Output _____no_output_____ ###Markdown Filtering out missing data A simple way to remove missing entries from a `Series` object is to use `dropna`. ###Code from numpy import nan as NA data = Series([1, NA, 3.5, NA, 7]) data.dropna() ###Output _____no_output_____ ###Markdown Alternatively, you can use `Boolean` `Series` and `notnull` to mask the original data. ###Code data[data.notnull()] ###Output _____no_output_____ ###Markdown `DataFrame` objects are trickier. For example, how should `Pandas` handle a mostly-complete row? The correct answer is ambiguous. By default, `dropna` will eliminate _any_ row with a `NaN` (we redefined `NaN` to `NA` here) value. ###Code data = DataFrame([[1., 6.5, 3.], [1., NA, NA], [NA, NA, NA], [NA, 6.5, 3.]]) cleaned = data.dropna() data cleaned ###Output _____no_output_____ ###Markdown Alternatively, you can require that a row be eliminated only if it is _completely_ empty. ###Code data.dropna(how='all') ###Output _____no_output_____ ###Markdown You can also specify columns for deletion. Again, you can change the deletion requirements as needed. ###Code data[4] = NA # fill a column entirely with NA data.dropna(axis=1, how='all') ###Output _____no_output_____ ###Markdown The `dropna` method is very robust. You can also specify a minimum threshold of data in a particular row as a criterion for deletion. In the next example, we threshold at 2 entries per row, allowing rows with one `NaN` value to stay while deleting any more patchy rows. ###Code df = DataFrame(np.random.randn(7, 3)) df.ix[:4, 1] = NA; df.ix[:2, 2] = NA df df.dropna(thresh=2) ###Output _____no_output_____ ###Markdown Filling in missing data Instead of eliminating missing data outright, `Pandas` lets you fill in the missing values. The simple approach, using `fillna`, is to pass a value that will then replace every `NaN` entry. 
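###Markdown Before moving on to filling: the `df.ix` indexer used above to plant the `NaN` values was deprecated and eventually removed from `pandas`. A rough modern equivalent of that setup, written as a sketch for `pandas` 1.0 or later (the exact rows touched by `.ix` depended on its label/position fallback, so the slice bounds here are an approximation):
###Code
import numpy as np
from pandas import DataFrame

df = DataFrame(np.random.randn(7, 3))
df.iloc[:5, 1] = np.nan   # roughly the rows the original .ix[:4, 1] assignment covered
df.iloc[:3, 2] = np.nan   # roughly the rows the original .ix[:2, 2] assignment covered
df.dropna(thresh=2)
###Output _____no_output_____
###Markdown Either way the frame is built, the filling strategies below behave identically.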
###Code df.fillna(0) ###Output _____no_output_____ ###Markdown Alternatively, you can specify different fill values in different columns by giving a `dict` with keys of column names. ###Code df.fillna({1: 0.5, 2: -1}) ###Output _____no_output_____ ###Markdown Using the `inplace` argument, you can overwrite the original `DataFrame` object. ###Code # always returns a reference to the filled object _ = df.fillna(0, inplace=True) df ###Output _____no_output_____ ###Markdown The other main filling technique is to fill by procedure. `ffill` will copy the previous value in a column into the `NaN` entry. ###Code df = DataFrame(np.random.randn(6, 3)) df.ix[2::2, 1] = NA; df.ix[4:, 2] = NA print df, "\n\n" print df.fillna(method='ffill') ###Output _____no_output_____ ###Markdown In cases where you don't want this to extend indefinitely, you can limit the fill method to a certain number of `NaN` entries after the last available one. ###Code df.fillna(method='ffill', limit=1) ###Output _____no_output_____ ###Markdown Hierarchical indexing From "Python for Data Analysis":> _Hierarchical indexing_ is an important feature of pandas enabling you to have multiple (two or more) index _levels_ on an axis. Somewhat abstractly, it provides a way for you to work with higher dimensional data in a lower dimensional form. Let's start with a simple example; create a `Series` with a list of lists or arrays as the index: ###Code data = Series(np.random.randn(10), index=[['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'd', 'd'], [1, 2, 3, 1, 2, 3, 1, 2, 2, 3]]) data ###Output _____no_output_____ ###Markdown Notice how the outer index `a` corresponds to the sub-indices `1`, `2`, and `3`, and their corresponding data. The `index` object is thus not a simple list but a series of lists corresponding to the inner sub-indices. ###Code data.index ###Output _____no_output_____ ###Markdown Accessing an outer label will give you the sub-`Series` that it corresponds to. ###Code print data['b'], "\n\n" print data['b':'c'] ###Output _____no_output_____ ###Markdown You can also select on an inner sub-index, which returns the `Series` of all outer labels that carry that sub-index and their corresponding values. ###Code data[:, 2] ###Output _____no_output_____ ###Markdown You can use `unstack` to take the multi-index and place it into a `DataFrame` object. ###Code data.unstack() ###Output _____no_output_____ ###Markdown The inverse of `unstack` is `stack`. Observe: ###Code data.unstack().stack() ###Output _____no_output_____ ###Markdown Multi-indexing has a similar logic with `DataFrame` objects, but it becomes more complicated as both the index and the columns can be given a hierarchy: ###Code frame = DataFrame(np.arange(12).reshape((4, 3)), index=[['a', 'a', 'b', 'b'], [1, 2, 1, 2]], columns=[['Ohio', 'Ohio', 'Colorado'], ['Green', 'Red', 'Green']]) frame ###Output _____no_output_____ ###Markdown For clarity, let's rename the labels so we know what level we are looking at. ###Code frame.index.names = ['key1', 'key2'] frame.columns.names = ['state', 'color'] frame ###Output _____no_output_____ ###Markdown Now by specifying any column, whether on the top level or any sublevel, you can get the `DataFrame` of values corresponding to the name. ###Code frame['Ohio'] ###Output _____no_output_____ ###Markdown `MultiIndex` objects are independent in `Pandas`, meaning that you can create them without a corresponding `DataFrame` and reuse them as needed.
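###Markdown If you only need a regular grid of label combinations, newer `pandas` versions also offer `MultiIndex.from_product`, which enumerates the full Cartesian product of the levels. A sketch, to contrast with the explicit `from_arrays` call in the next cell:
###Code
from pandas import MultiIndex

# unlike from_arrays, this builds every (state, color) combination
MultiIndex.from_product([['Ohio', 'Colorado'], ['Green', 'Red']],
                        names=['state', 'color'])
###Output _____no_output_____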
###Code from pandas import MultiIndex MultiIndex.from_arrays([['Ohio', 'Ohio', 'Colorado'], ['Green', 'Red', 'Green']], names=['state', 'color']) ###Output _____no_output_____ ###Markdown Reordering and sorting levels You can always swap two levels of the index. For example, if you want `key2` and `key1` to switch, you can write ###Code frame.swaplevel('key1', 'key2') ###Output _____no_output_____ ###Markdown Additionally, you can sort the data by a particular level. Specify the level by its position (the first is 0, the second is 1), and you will see the sort take place: ###Code frame.sortlevel(1) ###Output _____no_output_____ ###Markdown As with the object-oriented paradigm, you can combine these actions into one statement. For example: ###Code frame.swaplevel(0, 1).sortlevel(0) ###Output _____no_output_____ ###Markdown Summary statistics by level With hierarchical indexing, you can specify the level and axis with which to compute summary statistics. If you want to compute the sum of all values for each `key2` label, you get the relevant sub-`DataFrame`. ###Code frame.sum(level='key2') ###Output _____no_output_____ ###Markdown This of course gets extended to the columns as well, which you have grown accustomed to with `DataFrame` methods. ###Code frame.sum(level='color', axis=1) ###Output _____no_output_____ ###Markdown Using a DataFrame's columns In the examples above we showed how to `stack` and `unstack` `Series` objects into `DataFrames`. But in general `DataFrame` objects give you a lot of discretion regarding which columns you want to convert into indices. ###Code frame = DataFrame({'a': range(7), 'b': range(7, 0, -1), 'c': ['one', 'one', 'one', 'two', 'two', 'two', 'two'], 'd': [0, 1, 2, 0, 1, 2, 3]}) frame ###Output _____no_output_____ ###Markdown You can overload `set_index` with more than one column to produce a hierarchical index using the values of each respective column. ###Code frame2 = frame.set_index(['c', 'd']) frame2 ###Output _____no_output_____ ###Markdown Crucially, the default `Pandas` behavior is to remove the indexed columns. You can force `Pandas` to keep the old columns by specifying the `drop` parameter: ###Code frame.set_index(['c', 'd'], drop=False) frame2.reset_index() ###Output _____no_output_____
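###Markdown One forward-looking note on the level-wise reductions used above: newer `pandas` releases route `sum(level=...)` through `groupby`, so the same results can be written as a grouped aggregation. A self-contained sketch, assuming a version where `groupby(level=...)` is available:
###Code
import numpy as np
from pandas import DataFrame, MultiIndex

frame = DataFrame(np.arange(12).reshape((4, 3)),
                  index=MultiIndex.from_arrays([['a', 'a', 'b', 'b'], [1, 2, 1, 2]],
                                               names=['key1', 'key2']),
                  columns=MultiIndex.from_arrays([['Ohio', 'Ohio', 'Colorado'],
                                                  ['Green', 'Red', 'Green']],
                                                 names=['state', 'color']))
frame.groupby(level='key2').sum()           # same result as frame.sum(level='key2')
frame.groupby(level='color', axis=1).sum()  # same as frame.sum(level='color', axis=1)
###Output _____no_output_____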
AAAI/Learnability/CIN/older/ds1/synthetic_type0_MLP2_m_1000.ipynb
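###Markdown The cells below use `numpy`, `matplotlib`, and `torch` without an import cell in view, and they rely on a patch count `m` that is never defined on screen. A plausible preamble is sketched here; the imports are inferred from the code and the value of `m` is an assumption read off the `m_1000` suffix of the notebook name.
###Code
# Assumed preamble; m = 1000 is inferred from the filename synthetic_type0_MLP2_m_1000.ipynb
import numpy as np
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader

m = 1000  # number of 1-D patches per mosaic (assumption)
###Output _____no_output_____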
###Markdown Generate dataset ###Code np.random.seed(12) y = np.random.randint(0,3,500) idx= [] for i in range(3): print(i,sum(y==i)) idx.append(y==i) x = np.zeros((500,)) np.random.seed(12) x[idx[0]] = np.random.uniform(low =-1,high =0,size= sum(idx[0])) x[idx[1]] = np.random.uniform(low =0,high =1,size= sum(idx[1])) x[idx[2]] = np.random.uniform(low =2,high =3,size= sum(idx[2])) x[idx[0]][0], x[idx[2]][5] print(x.shape,y.shape) idx= [] for i in range(3): idx.append(y==i) for i in range(3): y= np.zeros(x[idx[i]].shape[0]) plt.scatter(x[idx[i]],y,label="class_"+str(i)) plt.legend() bg_idx = [ np.where(idx[2] == True)[0]] bg_idx = np.concatenate(bg_idx, axis = 0) bg_idx.shape np.unique(bg_idx).shape x = x - np.mean(x[bg_idx], axis = 0, keepdims = True) np.mean(x[bg_idx], axis = 0, keepdims = True), np.mean(x, axis = 0, keepdims = True) x = x/np.std(x[bg_idx], axis = 0, keepdims = True) np.std(x[bg_idx], axis = 0, keepdims = True), np.std(x, axis = 0, keepdims = True) for i in range(3): y= np.zeros(x[idx[i]].shape[0]) plt.scatter(x[idx[i]],y,label="class_"+str(i)) plt.legend() foreground_classes = {'class_0','class_1' } background_classes = {'class_2'} fg_class = np.random.randint(0,2) fg_idx = np.random.randint(0,m) a = [] for i in range(m): if i == fg_idx: b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1) a.append(x[b]) print("foreground "+str(fg_class)+" present at " + str(fg_idx)) else: bg_class = np.random.randint(2,3) b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1) a.append(x[b]) print("background "+str(bg_class)+" present at " + str(i)) a = np.concatenate(a,axis=0) print(a.shape) print(fg_class , fg_idx) a.shape np.reshape(a,(m,1)) desired_num = 2000 mosaic_list_of_images =[] mosaic_label = [] fore_idx=[] for j in range(desired_num): np.random.seed(j) fg_class = np.random.randint(0,2) fg_idx = np.random.randint(0,m) a = [] for i in range(m): if i == fg_idx: b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1) a.append(x[b]) # print("foreground "+str(fg_class)+" present at " + str(fg_idx)) else: bg_class = np.random.randint(2,3) b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1) a.append(x[b]) # print("background "+str(bg_class)+" present at " + str(i)) a = np.concatenate(a,axis=0) mosaic_list_of_images.append(np.reshape(a,(m,1))) mosaic_label.append(fg_class) fore_idx.append(fg_idx) mosaic_list_of_images = np.concatenate(mosaic_list_of_images,axis=1).T mosaic_list_of_images.shape mosaic_list_of_images.shape, mosaic_list_of_images[0] for j in range(m): print(mosaic_list_of_images[0][j]) mosaic_list_of_images[0:2], mosaic_list_of_images[1000:1002] 0.2*-2.32955771 + 0.2*0.86577398 + 0.2*0.79067386 + 0.2*0.65150581 + 0.2*0.79065145 np.zeros(5) def create_avg_image_from_mosaic_dataset(mosaic_dataset,labels,foreground_index,dataset_number, m): """ mosaic_dataset : mosaic_dataset contains 9 images 32 x 32 each as 1 data point labels : mosaic_dataset labels foreground_index : contains list of indexes where foreground image is present so that using this we can take weighted average dataset_number : will help us to tell what ratio of foreground image to be taken. 
for eg: if it is "j" then fg_image_ratio = j/9 , bg_image_ratio = (9-j)/8*9 """ avg_image_dataset = [] cnt = 0 counter = np.zeros(m) for i in range(len(mosaic_dataset)): img = torch.zeros([1], dtype=torch.float64) np.random.seed(int(dataset_number*10000 + i)) give_pref = foreground_index[i] #np.random.randint(0,9) # print("outside", give_pref,foreground_index[i]) for j in range(m): if j == give_pref: img = img + mosaic_dataset[i][j]*dataset_number/m #2 is data dim else : img = img + mosaic_dataset[i][j]*(m-dataset_number)/((m-1)*m) if give_pref == foreground_index[i] : # print("equal are", give_pref,foreground_index[i]) cnt += 1 counter[give_pref] += 1 else : counter[give_pref] += 1 avg_image_dataset.append(img) print("number of correct averaging happened for dataset "+str(dataset_number)+" is "+str(cnt)) print("the averaging are done as ", counter) return avg_image_dataset , labels , foreground_index avg_image_dataset_1 , labels_1, fg_index_1 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[0:1000], mosaic_label[0:1000], fore_idx[0:1000] , 1, m) test_dataset , labels , fg_index = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[1000:2000], mosaic_label[1000:2000], fore_idx[1000:2000] , m, m) avg_image_dataset_1 = torch.stack(avg_image_dataset_1, axis = 0) # mean = torch.mean(avg_image_dataset_1, keepdims= True, axis = 0) # std = torch.std(avg_image_dataset_1, keepdims= True, axis = 0) # avg_image_dataset_1 = (avg_image_dataset_1 - mean) / std # print(torch.mean(avg_image_dataset_1, keepdims= True, axis = 0)) # print(torch.std(avg_image_dataset_1, keepdims= True, axis = 0)) # print("=="*40) test_dataset = torch.stack(test_dataset, axis = 0) # mean = torch.mean(test_dataset, keepdims= True, axis = 0) # std = torch.std(test_dataset, keepdims= True, axis = 0) # test_dataset = (test_dataset - mean) / std # print(torch.mean(test_dataset, keepdims= True, axis = 0)) # print(torch.std(test_dataset, keepdims= True, axis = 0)) # print("=="*40) x1 = (avg_image_dataset_1).numpy() y1 = np.array(labels_1) # idx1 = [] # for i in range(3): # idx1.append(y1 == i) # for i in range(3): # z = np.zeros(x1[idx1[i]].shape[0]) # plt.scatter(x1[idx1[i]],z,label="class_"+str(i)) # plt.legend() plt.scatter(x1[y1==0], y1[y1==0]*0, label='class 0') plt.scatter(x1[y1==1], y1[y1==1]*0, label='class 1') # plt.scatter(x1[y1==2], y1[y1==2]*0, label='class 2') plt.legend() plt.title("dataset1 CIN with alpha = 1/"+str(m)) x1 = (avg_image_dataset_1).numpy() y1 = np.array(labels_1) idx_1 = y1==0 idx_2 = np.where(idx_1==True)[0] idx_3 = np.where(idx_1==False)[0] color = ['#1F77B4','orange', 'brown'] true_point = len(idx_2) plt.scatter(x1[idx_2[:25]], y1[idx_2[:25]]*0, label='class 0', c= color[0], marker='o') plt.scatter(x1[idx_3[:25]], y1[idx_3[:25]]*0, label='class 1', c= color[1], marker='o') plt.scatter(x1[idx_3[50:75]], y1[idx_3[50:75]]*0, c= color[1], marker='o') plt.scatter(x1[idx_2[50:75]], y1[idx_2[50:75]]*0, c= color[0], marker='o') plt.legend() plt.xticks( fontsize=14, fontweight = 'bold') plt.yticks( fontsize=14, fontweight = 'bold') plt.xlabel("X", fontsize=14, fontweight = 'bold') # plt.savefig(fp_cin+"ds1_alpha_04.png", bbox_inches="tight") # plt.savefig(fp_cin+"ds1_alpha_04.pdf", bbox_inches="tight") avg_image_dataset_1[0:10] x1 = (test_dataset).numpy()/m y1 = np.array(labels) # idx1 = [] # for i in range(3): # idx1.append(y1 == i) # for i in range(3): # z = np.zeros(x1[idx1[i]].shape[0]) # plt.scatter(x1[idx1[i]],z,label="class_"+str(i)) # plt.legend() plt.scatter(x1[y1==0], y1[y1==0]*0, 
label='class 0') plt.scatter(x1[y1==1], y1[y1==1]*0, label='class 1') # plt.scatter(x1[y1==2], y1[y1==2]*0, label='class 2') plt.legend() plt.title("test dataset1 ") test_dataset.numpy()[0:10]/m test_dataset = test_dataset/m test_dataset.numpy()[0:10] class MosaicDataset(Dataset): """MosaicDataset dataset.""" def __init__(self, mosaic_list_of_images, mosaic_label): """ Args: csv_file (string): Path to the csv file with annotations. root_dir (string): Directory with all the images. transform (callable, optional): Optional transform to be applied on a sample. """ self.mosaic = mosaic_list_of_images self.label = mosaic_label #self.fore_idx = fore_idx def __len__(self): return len(self.label) def __getitem__(self, idx): return self.mosaic[idx] , self.label[idx] #, self.fore_idx[idx] avg_image_dataset_1[0].shape, avg_image_dataset_1[0] batch = 200 traindata_1 = MosaicDataset(avg_image_dataset_1, labels_1 ) trainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True) testdata_1 = MosaicDataset(test_dataset, labels ) testloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False) class Whatnet(nn.Module): def __init__(self): super(Whatnet,self).__init__() self.linear1 = nn.Linear(1,50) self.linear2 = nn.Linear(50,10) self.linear3 = nn.Linear(10,2) torch.nn.init.xavier_normal_(self.linear1.weight) torch.nn.init.zeros_(self.linear1.bias) torch.nn.init.xavier_normal_(self.linear2.weight) torch.nn.init.zeros_(self.linear2.bias) torch.nn.init.xavier_normal_(self.linear3.weight) torch.nn.init.zeros_(self.linear3.bias) def forward(self,x): x = F.relu(self.linear1(x)) x = F.relu(self.linear2(x)) x = (self.linear3(x)) return x def calculate_loss(dataloader,model,criter): model.eval() r_loss = 0 with torch.no_grad(): for i, data in enumerate(dataloader, 0): inputs, labels = data inputs, labels = inputs.to("cuda"),labels.to("cuda") outputs = model(inputs) loss = criter(outputs, labels) r_loss += loss.item() return r_loss/i def test_all(number, testloader,net): correct = 0 total = 0 out = [] pred = [] with torch.no_grad(): for data in testloader: images, labels = data images, labels = images.to("cuda"),labels.to("cuda") out.append(labels.cpu().numpy()) outputs= net(images) _, predicted = torch.max(outputs.data, 1) pred.append(predicted.cpu().numpy()) total += labels.size(0) correct += (predicted == labels).sum().item() pred = np.concatenate(pred, axis = 0) out = np.concatenate(out, axis = 0) print("unique out: ", np.unique(out), "unique pred: ", np.unique(pred) ) print("correct: ", correct, "total ", total) print('Accuracy of the network on the 1000 test dataset %d: %.2f %%' % (number , 100 * correct / total)) def train_all(trainloader, ds_number, testloader_list): print("--"*40) print("training on data set ", ds_number) torch.manual_seed(12) net = Whatnet().double() net = net.to("cuda") criterion_net = nn.CrossEntropyLoss() optimizer_net = optim.Adam(net.parameters(), lr=0.001 ) #, momentum=0.9) acti = [] loss_curi = [] epochs = 1500 running_loss = calculate_loss(trainloader,net,criterion_net) loss_curi.append(running_loss) print('epoch: [%d ] loss: %.3f' %(0,running_loss)) for epoch in range(epochs): # loop over the dataset multiple times ep_lossi = [] running_loss = 0.0 net.train() for i, data in enumerate(trainloader, 0): # get the inputs inputs, labels = data inputs, labels = inputs.to("cuda"),labels.to("cuda") # zero the parameter gradients optimizer_net.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion_net(outputs, labels) # print statistics 
running_loss += loss.item() loss.backward() optimizer_net.step() running_loss = calculate_loss(trainloader,net,criterion_net) if(epoch%200 == 0): print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss)) loss_curi.append(running_loss) #loss per epoch if running_loss<=0.05: print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss)) break print('Finished Training') correct = 0 total = 0 with torch.no_grad(): for data in trainloader: images, labels = data images, labels = images.to("cuda"), labels.to("cuda") outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 1000 train images: %.2f %%' % ( 100 * correct / total)) for i, j in enumerate(testloader_list): test_all(i+1, j,net) print("--"*40) return loss_curi, net train_loss_all=[] testloader_list= [ testloader_1 ] loss, net = train_all(trainloader_1, 1, testloader_list) train_loss_all.append(loss) net.linear1.weight, net.linear1.bias %matplotlib inline for i,j in enumerate(train_loss_all): plt.plot(j,label ="dataset "+str(i+1)) plt.xlabel("Epochs") plt.ylabel("Training_loss") plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) ###Output _____no_output_____
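###Markdown Because the averaged input is a single scalar, the rule the trained network has learned can be read off directly by sweeping a grid of x values through it. A short sketch, assuming `net`, `avg_image_dataset_1`, and the imports above are still in scope:
###Code
# sweep the observed input range and plot the predicted probability of class 1
grid = torch.linspace(float(avg_image_dataset_1.min()),
                      float(avg_image_dataset_1.max()),
                      steps=200).reshape(-1, 1).double().to("cuda")
with torch.no_grad():
    probs = F.softmax(net(grid), dim=1)[:, 1].cpu().numpy()

plt.plot(grid.cpu().numpy().ravel(), probs)
plt.xlabel("averaged input x")
plt.ylabel("P(class 1)")
plt.title("Decision function of the trained MLP on dataset 1")
###Output _____no_output_____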