Convert Color to GrayScale/CtoBGresults.ipynb
###Markdown Convert Colored Images to Grayscale Images and Analyze the Time Taken ###Code import cv2 import shutil import os import time from matplotlib import pyplot as plt ###Output _____no_output_____ ###Markdown Generate five folders holding 100, 200, 300, 400 and 500 copies of a sample image ###Code for i in range(5): os.mkdir(f"{i+1}KImages") for j in range(100*(i+1)): shutil.copy("./FolderWithImages/check.jpg",dst=f"{i+1}KImages/copy{j}.jpg") ###Output _____no_output_____ ###Markdown Convert the images to grayscale and note the time taken per folder ###Code data = {} for i in range(5): start = time.time() images = os.listdir(f'{i+1}KImages') for image in images: check = cv2.imread(f'./{i+1}KImages/{image}') gray = cv2.cvtColor(check,cv2.COLOR_BGR2GRAY) os.remove(f'./{i+1}KImages/{image}') cv2.imwrite(f'./{i+1}KImages/{image}',gray) end = time.time() data[f'{i+1}KImages'] = end - start print(data) ###Output {'1KImages': 1.9955856800079346, '2KImages': 3.9975745677948, '3KImages': 5.86980938911438, '4KImages': 8.362041473388672, '5KImages': 9.881592512130737} ###Markdown Plot the results ###Code plt.plot(list(data.keys()),list(data.values())) plt.title("Time Taken to Convert Images to Grayscale") plt.xlabel("Folder (number of images)") plt.ylabel("Time Taken (s)") plt.show() ###Output _____no_output_____
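###Markdown A side note on the conversion loop above: the os.remove call is not needed, since cv2.imwrite simply overwrites an existing file, and cv2.imread can decode straight to grayscale. A minimal sketch of the same per-folder timing with those two simplifications (convert_folder is a hypothetical helper, not part of the original notebook):
###Code
def convert_folder(folder):
    # time the grayscale conversion of every image in one folder
    start = time.time()
    for image in os.listdir(folder):
        path = f'./{folder}/{image}'
        gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)  # decode directly to grayscale
        cv2.imwrite(path, gray)  # overwrites the color file in place
    return time.time() - start

data = {f'{i+1}KImages': convert_folder(f'{i+1}KImages') for i in range(5)}
###Output _____no_output_____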
notebooks/preprocessing/amino_acid_embedding.ipynb
###Markdown Drop non numeric columns ###Code data = data.drop(["3-letter", "AminoAcid"], axis =1 ) data = data.drop(["Pc", "Residue mass"], axis =1 ) #data = data.drop(["3-letter", "AminoAcid", "Chou-Fasman code for helix propensity","Chou-Fasman code for sheet propensity"], axis=1) data.abs().max(axis=0) std = data.std(ddof=0, axis=0) std data_std = data.div(std, axis=1) data_std data_std["Hyd"] = data_std["Hyd"] * (-1) transposed = data_std.transpose() transposed["0"] = [0, 0, 0, 0, 0, 0, 0, 7/1.724522] data_std = transposed.transpose() #data = pd.merge(left=data, right=generated, how="inner", left_index=True, right_index=True) data_std.sort_index() data = data_std delta_0_center = (data.max(axis=0)+data.min(axis=0))/2 - np.abs(np.random.normal(scale=0.0001, size=data.shape[1])) data = (data.subtract(delta_0_center, axis=1))#.div(data.std(ddof=0, axis=0), axis=1)) # data = (data.subtract(data.mean(axis=0), axis=1).div(data.std(ddof=0, axis=0), axis=1)) data = data.div(data.abs().max(axis=0)+np.abs(np.random.normal(scale=0.04, size=data.shape[1])), axis=1) data = data + np.random.normal(scale=0.0001, size=data.shape) data.max(), data.min() data # transposed = data.transpose() # transposed[0] = -1 + np.abs(np.random.normal(scale=0.001, size=data.shape[1])) # final = transposed.transpose() final=data.sort_index() final final = final.fillna(value=0.00000000000000001) def calculate_variation(data, col): one_column = data[[col]] one_column = one_column.append(pd.DataFrame([-0.999, 0.999], index = ["N_0", "N_1"], columns=[col])) one_column[col+"_min"] =one_column.sort_values(col).rolling(window=2).apply(lambda x: (x[0] - x[1])/2, raw=True) one_column[col +"_max"] =one_column.sort_values(col, ascending=False)[col].rolling(window=2).apply(lambda x: (x[0] - x[1])/2, raw=True) one_column = one_column.drop(["N_0", "N_1"], axis=0) return list(zip(one_column[col+"_min"].values.tolist(), one_column[col+"_max"].values.tolist())) #return one_column col = "Lip" one_column = final[[col]] one_column = one_column.append(pd.DataFrame([-0.999, 0.999], index = ["N_0", "N_1"], columns=[col])) one_column[col+"_min"] =one_column.sort_values(col).rolling(window=2).apply(lambda x: (x[0] - x[1])/2, raw=True) one_column[col +"_max"] =one_column.sort_values(col, ascending=False)[col].rolling(window=2).apply(lambda x: (x[0] - x[1])/2, raw=True) one_column = one_column.drop(["N_0", "N_1"], axis=0) one_column final.shape variation = [] for c in final.columns: variation.append(calculate_variation(final, c)) np.array(variation).shape np.array(variation).shape final.sum(axis=1) final_values = final.values final_values np.save("../data/protein/hand_crafted_embeddings_8.npy", final_values) np.save("../data/protein/embeddings_variation_8.npy", variation) variation[0][0] variation[0][0][0] + variation[0][0][1] abs(variation[0][0][0] - variation[0][0][1])/1.95 variation[0][0][0] + variation[0][0][1] hist, bin_edges = np.histogram(final.values) hist, bin_edges np.max(final.values) np.min(final.values) import tensorflow as tf tf.enable_eager_execution() embeddings = tf.constant([[ 0. , 0. , 0. ], [-0.78874256, 0.43069857, -0.47393251], [-0.68765258, 0.19845299, -0.64110991], [-0.19540625, -0.46831037, -0.43458175], [ 0.39009043, -0.46831037, -0.14043892], [-0.4217928 , 0.31515591, 0.5829058 ], [-1. 
, 0.43069857, -0.93603945], [ 0.088746 , -0.32018371, 0.23223556], [ 0.49063027, 0.43069857, 0.32813161], [ 0.97943816, -0.1715736 , 0.46384033], [ 0.74644982, 0.43069857, 0.26766763], [ 0.45239487, 0.1340585 , 0.05631338], [-0.14891861, -0.26652163, -0.23952655], [-0.58491215, 0.43069857, 0.03943025], [ 0.3517175 , -0.26652163, 0.0522144 ], [ 0.68015679, -1. , 0.360837 ], [-0.70594505, -0.15329949, -0.60368124], [ 0.93336313, 0.00826686, -0.29583345], [-0.2963587 , 0.43069857, 0.16728669], [ 0.12175498, 0.01397148, 1. ], [-0.40501324, -0.13937636, 0.21428112]]) embeddings acid_embeddings = tf.get_variable("acid_embeddings", shape=[21, 3], initializer=tf.constant_initializer(final.values[:, :3]), trainable=False) # final is a DataFrame, so take .values for positional slicing real_x = tf.nn.embedding_lookup(acid_embeddings, [0,1,2,3,4,5,6]) real_x emb_distances = tf.matmul( tf.nn.l2_normalize(acid_embeddings, axis=1), tf.nn.l2_normalize(real_x, axis=1), transpose_b=True) emb_distances tf.argmax(emb_distances, axis=0) tf.nn.l2_normalize(acid_embeddings, axis=1) #[-0.78874254, 0.43069857, -0.4739325 ] a = [[1,2,3],[50,60,70],[800, 900,1000], [-1, -2, -3]] a = tf.expand_dims(a, axis=0) a = tf.expand_dims(a, axis=3) tf.squeeze(a) x = tf.image.resize_nearest_neighbor(a, [4, 3 * 2]) tf.squeeze(x) ###Output _____no_output_____
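###Markdown The cell above uses TF 1.x idioms (tf.enable_eager_execution, tf.get_variable). As a hedged sketch, the same lookup and cosine-similarity check in TF 2.x could look like the following, reusing final_values from earlier in this notebook (everything else is standard TF 2.x API):
###Code
import tensorflow as tf  # TF 2.x: eager by default

acid_embeddings = tf.Variable(final_values[:, :3], trainable=False, name='acid_embeddings')
real_x = tf.nn.embedding_lookup(acid_embeddings, [0, 1, 2, 3, 4, 5, 6])
# cosine similarities between all embeddings and the looked-up rows
emb_distances = tf.matmul(tf.nn.l2_normalize(acid_embeddings, axis=1),
                          tf.nn.l2_normalize(real_x, axis=1), transpose_b=True)
tf.argmax(emb_distances, axis=0)  # should recover the indices [0..6]
###Output _____no_output_____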
ARIMA/daily-exchange-rates.ipynb
###Markdown Autoregressive Integrated Moving Average (ARIMA)The ARIMA model is a generalisation of an ARMA model that can be applied to non-stationary time series. ###Code %matplotlib inline import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd from time import time import statsmodels.api as sm from statsmodels.tsa.seasonal import seasonal_decompose from statsmodels.tsa.stattools import adfuller matplotlib.rcParams['figure.figsize'] = (16, 9) pd.options.display.max_columns = 999 ###Output _____no_output_____ ###Markdown Load Dataset ###Code df = pd.read_csv('../_datasets/daily-exchange-rates.csv', parse_dates=[0], index_col='DateTime') print(df.shape) df.head() ###Output (7588, 8) ###Markdown Define ParametersMake predictions for a four-week period using a training period of one year. ###Code dataset_name = 'Daily Exchange Rates' dataset_abbr = 'DER' model_name = 'ARIMA' context_length = 52*5 # 52 weeks of 5 trading days prediction_length = 4*5 # four weeks ###Output _____no_output_____ ###Markdown Define Error MetricThe mean absolute scaled error (MASE) will be used to evaluate the forecasts. ###Code def calc_MASE(training_series, testing_series, prediction_series): a = training_series.iloc[1:].values b = training_series.iloc[:-1].values d = np.sum(np.abs(a-b)) / len(a) errors = np.abs(testing_series - prediction_series) return np.mean(errors) / d ###Output _____no_output_____ ###Markdown Example ARIMA ModelExploration of how ARIMA models work using a single example time series. ###Code ts_ex = 'ts8' df_ex = df.loc[:, ts_ex] # Plot data from two months df_ex.iloc[:4*5*2].plot(); ###Output _____no_output_____ ###Markdown Time Series DecompositionDecompose the example time series into trend, seasonal, and residual components. ###Code fig = seasonal_decompose(df_ex, model='additive').plot() ###Output _____no_output_____ ###Markdown Plot ACF and PACFThe Autocorrelation Function (ACF) is the correlation of a signal with a delayed copy of itself, as a function of the delay. The Partial Autocorrelation Function (PACF) is the partial correlation of a signal with a delayed copy of itself, controlling for the values of the time series at all shorter delays, as a function of the delay. ###Code fig, ax = plt.subplots(2) ax[0] = sm.graphics.tsa.plot_acf(df_ex, lags=50, ax=ax[0]) ax[1] = sm.graphics.tsa.plot_pacf(df_ex, lags=50, ax=ax[1]) ###Output _____no_output_____ ###Markdown There is no seasonality. 
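###Markdown For reference (standard textbook forms, not taken from this notebook): the MASE computed by calc_MASE above can be written as $$\text{MASE} = \frac{\frac{1}{h}\sum_{t=1}^{h}\left|y_t - \hat{y}_t\right|}{\frac{1}{n-1}\sum_{t=2}^{n}\left|y_t - y_{t-1}\right|},$$ i.e. the mean absolute forecast error scaled by the in-sample mean absolute error of a naive one-step forecast. Likewise, the ARIMA(p, d, q) model fitted in the next section can be written with lag operator $L$ as $$\left(1 - \sum_{i=1}^{p}\phi_i L^i\right)(1 - L)^d\, y_t = \left(1 + \sum_{j=1}^{q}\theta_j L^j\right)\varepsilon_t.$$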
Build ModelGrid search will be implemented to identify optimal parameters for the ARIMA(p, d, q) model, using the following possible values: ###Code from itertools import product ps = range(0, 5) # Up to 4 AR terms ds = range(0, 2) # Either no or first differencing qs = range(0, 5) # Up to 4 MA terms params = product(ps, ds, qs) params_list = list(params) print("Number of parameter combinations for grid search: {}".format(len(params_list))) def optimiseARIMA(time_series, params_list=params_list, test_length=prediction_length, train_length=context_length): ts = time_series.iloc[-(test_length+train_length):] ts_train = ts.iloc[:-test_length] ts_test = ts.iloc[-test_length:] # Select the best model using a holdout validation period val_length = test_length ts_train_val = ts.iloc[:-(test_length+val_length)] ts_val = ts.iloc[-(test_length+val_length):-test_length] results = [] for params in params_list: p = params[0] d = params[1] q = params[2] # try/except loop in case model fails to converge for given parameters try: arima = sm.tsa.ARIMA(ts_train_val, order=(p, d, q)).fit() except: continue # Make predictions for validation holdout set and update best model if necessary val_pred = arima.predict(start=ts_val.index[0], end=ts_val.index[-1], dynamic=True) MASE = calc_MASE(ts_train, ts_val, val_pred) results.append([params, MASE]) df_results = pd.DataFrame(results) df_results.columns = ['parameters', 'MASE'] df_results = df_results.sort_values(by='MASE', ascending=True).reset_index(drop=True) # Retrain model with best parameters using all training data and generate test forecast # Use loop to fall back to next best model in case training fails using full dataset trained = False model_rank = 1 while not trained: train_params = df_results.iloc[model_rank-1, 0] try: arima = sm.tsa.ARIMA(ts_train, order=train_params).fit() trained = True except: model_rank += 1 summary = arima.summary() # Start index must be greater than q. Fill missing initial entries with first actual prediction fcst = arima.predict(start=ts_train.index[train_params[2]+1], end=ts_test.index[-1]) first_pred = fcst[0] fcst = np.concatenate([np.array([first_pred for i in range(train_params[2]+1)]), fcst]) fcst = pd.DataFrame(data=fcst, index=ts.index, columns=['pred%s' % ts.name[2:]]) return fcst, train_params, summary import warnings warnings.filterwarnings('ignore') %%time fcst, train_params, summary = optimiseARIMA(df_ex) df_ex = pd.concat([df_ex, fcst], axis=1) print("Best model: ARIMA{}".format(train_params)) print(summary) # Example forecast fcst0 = df_ex.copy() fcst0['pred%s' % ts_ex[2:]][fcst0['pred%s' % ts_ex[2:]] < 0] = 0 fcst0.iloc[-4*prediction_length:, 0].plot(label='Actual', c='k', alpha=0.5) fcst0.iloc[-4*prediction_length:, 1].plot(label='ARIMA%s' % str(train_params), c='b', alpha=0.5) plt.axvline(x=fcst0.index[-prediction_length], linestyle=':', linewidth=2, color='r', label='Start of test data') plt.legend() plt.title(ts_ex); ###Output _____no_output_____ ###Markdown Evaluating ARIMATo evaluate ARIMA, forecasts will be generated for each time series using the grid search methodology shown above (with subsequent zeroing of the negative values). MASE will be calculated for each individual time series, and the mean of all these scores will be used as the overall accuracy metric for ARIMA on this dataset. ###Code parameters = [] results = df.iloc[-(prediction_length+context_length):].copy() tic = time() for i, col in enumerate(df.columns): if i % 10 == 0: toc = time() print("Running predictions for {}. 
Cumulative time: {:.1f} minutes.".format(col, (toc-tic)/60)) # Prepare DataFrame for selected column dft = df.loc[:, col] # Find best model fcst, train_params, summary = optimiseARIMA(dft) # Add predictions to results DataFrame results['pred%s' % col[2:]] = fcst.values # Store model parameters for reference parameters.append(train_params) toc = time() print("Finished! Total run time: {:.1f} minutes.".format((toc-tic)/60)) results0 = results.copy() results0[results0 < 0] = 0 results0.head() MASEs = [] for i, col in enumerate(df.columns): MASEs.append(calc_MASE(results0[col].iloc[-(context_length + prediction_length):-prediction_length], results0[col].iloc[-prediction_length:], results0['pred%s' % str(i+1)].iloc[-prediction_length:])) fig, ax = plt.subplots() ax.hist(MASEs, bins=20) ax.set_title('Distributions of MASEs for {} dataset'.format(dataset_name)) ax.set_xlabel('MASE') ax.set_ylabel('Count'); MASE = np.mean(MASEs) print("Overall MASE: {:.4f}".format(MASE)) ###Output Overall MASE: 6.1503 ###Markdown Show some example forecasts. ###Code fig, ax = plt.subplots(4, 2, sharex=True) ax = ax.ravel() for col in range(1, 9): ax[col-1].plot(results0.index[-prediction_length:], results0['ts%s' % col].iloc[-prediction_length:], label='Actual', c='k', linestyle='--', linewidth=1) ax[col-1].plot(results0.index[-prediction_length:], results0['pred%s' % col].iloc[-prediction_length:], label='ARIMA%s' % str(parameters[col-1]), c='b') ax[col-1].legend() fig.suptitle('{} Predictions'.format(dataset_name)); ###Output _____no_output_____ ###Markdown Clearly, some of the time series adopt an ARIMA(0,1,0) model which just flatlines at zero for the test period. The higher-order models appear to do a better job of generating forecasts, although when such a large number of data points are zero, always forecasting zero is presumably a strategy that performs quite well. Store the predictions and accuracy score for the ARIMA models. ###Code import pickle with open('{}-MASE.pkl'.format(dataset_abbr), 'wb') as f: pickle.dump(MASE, f) with open('../_results/{}/{}-results.pkl'.format(model_name, dataset_abbr), 'wb') as f: pickle.dump(results.iloc[-prediction_length:], f) ###Output _____no_output_____
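###Markdown One caveat worth noting: sm.tsa.ARIMA as used above is the legacy estimator and was removed in statsmodels 0.13. A minimal sketch of the equivalent fit/predict step with the replacement class (untested against this exact dataset; ts_train, ts_test, p, d, q as in optimiseARIMA above):
###Code
from statsmodels.tsa.arima.model import ARIMA  # statsmodels >= 0.11

res = ARIMA(ts_train, order=(p, d, q)).fit()  # fit on the training window
fcst = res.predict(start=ts_test.index[0], end=ts_test.index[-1])  # out-of-sample forecast
###Output _____no_output_____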
Natural Language Processing in TensorFlow/week3 Sequence models/NLP_in_TensorFlow_Week3_Note.ipynb
###Markdown Sequence ModelThe previous lesson finished by looking at the effect of tokenizing words, where the classifier failed to get any meaningful results. The main reason is that the context of words **was hard to follow for sub-words instead of the full sequence**. For example, the Fibonacci sequence, $$x_{n} = x_{n-1} + x_{n-2}$$The idea of the Fibonacci sequence is similar to a recurrent neural network. You can take x as input and y as output. There's also an element that is fed into the function from the previous function. **The output of the previous function is an input to the next stage's function**, **so $x_{0}$ is fed all the way through the network** ###Code Image('rnn.PNG', width = 600) ###Output _____no_output_____ ###Markdown LSTMMotivating example: "I lived in Ireland, so at school they made me learn how to speak (Gaelic)". A context keyword gives the details about the language: Irish describes the people, Gaelic describes the language. The word Irish appears earlier in the sentence, so if we're only looking at the recent sequence, we may lose that context. Instead of only the context passed along in an RNN, LSTMs have an additional context pipeline called the **cell state**, which can pass through the network to impact it. The cell state can also be **bidirectional**, so later context can impact earlier ones. ###Code Image('LSTM.PNG', width = 600) import tensorflow as tf model = tf.keras.Sequential([ tf.keras.layers.Embedding(tokenizer.vocab_size, 64), # Bidirectional RNN: output will be 128 even if we specify 64, because a bidirectional RNN doubles it tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)), # 64 is the number of outputs that I desire from that layer tf.keras.layers.Dense(64, activation = 'relu'), tf.keras.layers.Dense(1, activation = 'sigmoid') ]) model.summary() model2 = tf.keras.Sequential([ tf.keras.layers.Embedding(tokenizer.vocab_size, 64), # stack two LSTMs # we do have to put return_sequences = True on the first one # return_sequences = True ensures the output of the first LSTM matches the desired input of the next LSTM tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, return_sequences = True)), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)), tf.keras.layers.Dense(64, activation = 'relu'), tf.keras.layers.Dense(1, activation = 'sigmoid') ]) ###Output _____no_output_____ ###Markdown Return SequencesWhether to return the full sequence in the output, or only the last output. By default, tf.keras.layers.LSTM uses return_sequences = False, which means it **only outputs the last hidden state** $a^{\langle T \rangle}$. The last hidden state output captures an abstract representation of the input sequence. * If return_sequences = True, the output dimension will be (Samples, Time steps, LSTM units)* If return_sequences = False, the output dimension will be (Samples, LSTM units), i.e. only the last hidden state Return StatesIn a GRU, the hidden state and cell state are equal, $c^{\langle t \rangle} = a^{\langle t \rangle}$, while for an LSTM, $c^{\langle t \rangle} \neq a^{\langle t \rangle}$. With return_state = True, the RNN outputs the last cell state in addition to the hidden state. * **LSTM(units, return_state=True)**: the output of the LSTM has three components, $(a^{\langle T \rangle}, a^{\langle T \rangle}, c^{\langle T \rangle})$, where T stands for the last step. **Note**: the first and second components are the same. * **LSTM(units, return_sequences=True, return_state=True)**: the output will be $(a^{\langle 1 \rangle \ldots \langle T \rangle}, a^{\langle T \rangle}, c^{\langle T \rangle})$. **Note**: the last entry of the first component (the full sequence) is the same as the second component $a^{\langle T \rangle}$. 
* **GRU(units, return_sequences=True, return_state=True)**: the output will be $(a^{\langle 1 \rangle \ldots \langle T \rangle}, c^{\langle T \rangle})$, since in a GRU $c^{\langle t \rangle} = a^{\langle t \rangle}$[Link for Quotation](https://www.dlology.com/blog/how-to-use-return_state-or-return_sequences-in-keras/targetText=By%20default%2C%20the%20return_sequences%20is,representation%20of%20the%20input%20sequence.) ###Code Image('rnn2.PNG', width = 600) ###Output _____no_output_____ ###Markdown Using GRU**Training time is quicker than for an LSTM** ###Code model = tf.keras.Sequential([ tf.keras.layers.Embedding(tokenizer.vocab_size, 64), tf.keras.layers.Bidirectional(tf.keras.layers.GRU(32)), # 32 is the number of outputs that I desire from that layer tf.keras.layers.Dense(64, activation = 'relu'), tf.keras.layers.Dense(1, activation = 'sigmoid') ]) ###Output _____no_output_____ ###Markdown Inspect Results Using LSTMUsing the IMDB dataset, compare the accuracy between Flatten plus GlobalAveragePooling and LSTM:* **Flatten + GlobalAveragePooling**: trains quickly; quickly gets to 85% and flattens there.* **LSTM**: trains slowly; reaches 85% really quickly and continues climbing toward about 97.5% within 50 epochs. The validation accuracy drops slowly but stays close to the non-LSTM version's value. A little overfitting: the accuracy of the predictions increases while the confidence decreases. ###Code model = tf.keras.Sequential([ tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length = max_length), #tf.keras.layers.Flatten(), #tf.keras.layers.GlobalAveragePooling1D(), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)), tf.keras.layers.Dense(24, activation = 'relu'), tf.keras.layers.Dense(1, activation = 'sigmoid') ]) Image('LSTM_result.PNG', width = 800) # Use a convolutional neural network model = tf.keras.Sequential([ tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length = max_length), tf.keras.layers.Conv1D(128, 5, activation = 'relu'), # output is (max_length - 4, 128): valid padding with a size-5 kernel tf.keras.layers.GlobalMaxPooling1D(), tf.keras.layers.Dense(24, activation = 'relu'), tf.keras.layers.Dense(1, activation = 'sigmoid') ]) ###Output _____no_output_____ ###Markdown Inspect Results Using ConvolutionThe accuracy is better than before, with close to 100% on training and 80% on validation. As before, the loss increases on validation, indicating **overfitting** ###Code Image('conv_result.PNG', width = 800) ###Output _____no_output_____
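###Markdown A quick shape check makes the return_sequences/return_state bullets above concrete (a standalone TF 2.x sketch, not from the course notebook):
###Code
import tensorflow as tf

inputs = tf.random.normal([32, 10, 8])  # (batch, timesteps, features)
lstm = tf.keras.layers.LSTM(4, return_sequences=True, return_state=True)
whole_seq, final_h, final_c = lstm(inputs)
print(whole_seq.shape)  # (32, 10, 4): the full sequence a^<1>..a^<T>
print(final_h.shape)    # (32, 4): a^<T>, equals whole_seq[:, -1, :]
print(final_c.shape)    # (32, 4): c^<T>
###Output _____no_output_____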
examples/Classification example.ipynb
###Markdown Victor Moraes - 2016027600 Sixth practical assignment in pattern recognition LS-SVM - Least squares support vector machines IntroductionIn this assignment, the LS-SVM classifier will be applied to solve a real classification problem. The student should follow these steps: 1. Load the database; 2. Split the data into training and test sets; 3. Train the SVM (pay attention to the definition of the parameters of the chosen kernel and of the SVM regularization parameter C); The student must train an SVM to solve the problem of classifying glass types from the Glass database based on their chemical characteristics. It has 214 instances of 10 attributes, with numeric inputs and a categorical output variable. This database is native to R, for those using R, but it can also be found in the UCI Machine Learning Repository. The report should show the mean accuracy and standard deviation for 10 experiments varying the training and test sets. Also present which parameters you defined and how they were defined, for example the parameter C. 1- Test set.First we import the necessary libraries and then create the sample set. ###Code import numpy as np import pandas as pd import sys #sys.path.insert(0, './LSSVMlib/') from LSSVMlib.LSSVMClassification import LSSVMClassification lssvc = LSSVMClassification(gamma=1, kernel='rbf', sigma=3.5) # Class instantiation from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt from matplotlib import cm from matplotlib.ticker import LinearLocator, ScalarFormatter from sklearn.model_selection import train_test_split from matplotlib.colors import Normalize from sklearn.model_selection import GridSearchCV from sklearn.model_selection import cross_val_score glass = pd.read_csv('glass.csv').dropna() glass.head() labels = pd.read_csv('classifications.csv').dropna() labels.head() ###Output _____no_output_____ ###Markdown 2. Splitting into training and test setsAt this point the initial training and validation samples are split off. 
###Code glass.describe() features = ['RI','Na','Mg','Al','Si','K','Ca','Ba','Fe'] label = ['glass_type'] X = glass.iloc[:,1:-1].to_numpy() print(X.shape) y = glass.iloc[:,-1:].to_numpy().ravel() print(y.shape) test_size=0.2 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size) enable_scalar = True sc_train = StandardScaler().fit(X_train) sc_test = StandardScaler().fit(X_test) sc_all = StandardScaler().fit(X) if(enable_scalar == True): X_train_std = sc_train.transform(X_train) X_test_std = sc_test.transform(X_test) X_std = sc_all.transform(X) else: X_train_std = X_train X_test_std = sc_test.transform(X_test) X_std = sc_all.transform(X) # Get information about input and outputs print(f"X_train.shape: {X_train.shape}") print(f"X_test.shape: {X_test.shape}") print(f"y_train.shape: {y_train.shape}") print(f"y_test.shape: {y_test.shape}") print(f"np.unique(y_train): {np.unique(y_train)}") print(f"np.unique(y_test): {np.unique(y_test)}") ###Output X_train.shape: (171, 9) X_test.shape: (43, 9) y_train.shape: (171,) y_test.shape: (43,) np.unique(y_train): [1 2 3 5 6 7] np.unique(y_test): [1 2 3 5 6 7] ###Markdown 3 Initial training of the model, with parameters to be determined.The SVC (support vector classification) implementation will be used. In this implementation the points are allowed to not be strictly linearly separable, with the tolerable deviation dictated by the parameter C. To determine this, a grid search was run to validate which kernel and which parameters give the best performance. The grid search uses k-fold cross-validation with 5 folds for each classifier tested. We found that the rbf (radial basis function) kernel had the best accuracy.Another parameter of the RBF kernel is gamma, represented in the following equation by sigma. It is a kernel parameter that determines the geometry and curvature of the classification hyperplane. The radial kernel used here is: $K(x_i,x_j)= \exp\left(-\frac{\|x_i-x_j\|^2}{2\sigma^2}\right)$For the choice of C and gamma, a sweep is carried out below in a way that avoids overfitting and maximizes the mean accuracy. The LS-SVM optimization problem, as Suykens shows in equation 3.5, amounts to solving the following linear system:![drawing](images/01.png)And the prediction is made by:![drawing](images/02.png)where $\Omega = y^t y K(X,X)$ In the items below we investigate which topology gives the best accuracy. RBF had the best score, so a grid search is run on it, varying gamma and sigma, to find the best model. 
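###Markdown For readers who cannot render the two images above: in the standard LS-SVM classification formulation (after Suykens; reconstructed here, not extracted from the images), training amounts to solving the linear system $$\begin{bmatrix} 0 & y^{T} \\ y & \Omega + \gamma^{-1} I \end{bmatrix} \begin{bmatrix} b \\ \alpha \end{bmatrix} = \begin{bmatrix} 0 \\ 1_{N} \end{bmatrix}, \qquad \Omega_{ij} = y_i y_j K(x_i, x_j),$$ and the prediction is $$\hat{y}(x) = \operatorname{sign}\left(\sum_{i=1}^{N} \alpha_i y_i K(x, x_i) + b\right).$$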
###Code # Use the classifier with different kernels print('Gaussian kernel:') lssvc = LSSVMClassification(gamma=1, kernel='rbf', sigma=3.5) # Class instantiation lssvc.fit(X_train_std, y_train) # Fitting the model y_pred = lssvc.predict(X_test_std) # Making predictions with the trained model acc = accuracy_score(y_test, y_pred) # Calculate Accuracy print('acc_test = ', acc, '\n') print('Polynomial kernel:') lssvc = LSSVMClassification(gamma=1, kernel='poly', d=2) lssvc.fit(X_train_std, y_train) y_pred = lssvc.predict(X_test_std) acc = accuracy_score(y_test, y_pred) print('acc_test = ', acc, '\n') print('Linear kernel:') lssvc = LSSVMClassification(gamma=1, kernel='linear') lssvc.fit(X_train_std, y_train) y_pred = lssvc.predict(X_test_std) acc = accuracy_score(y_test, y_pred) print('acc_test = ', acc, '\n') kernels = ['poly', 'rbf'] #kernels = ['linear', 'poly', 'rbf', 'sigmoid'] #C_range = np.logspace(-3,6,2) C_gamma = np.logspace(-6,3,50) #coef_0_range = np.linspace(-100,100,10) #coef_0_range = np.linspace(-10,10,5) param_grid = { 'kernel':kernels, 'gamma':C_gamma, } grid = GridSearchCV(LSSVMClassification(), param_grid=param_grid, n_jobs=3, verbose=True) grid.fit(X_std, y) grid.best_params_ best_estimator = grid.best_estimator_ best_estimator.fit(X_train_std,y_train) final_score = best_estimator.score(X_test_std, y_test) print("The best parameters are {} with a score of {:2.2f} %" .format(grid.best_params_, 100*final_score)) sigma_range = np.logspace(-1, 3, 30) gamma_range = np.logspace(-1, 1, 30) param_grid = dict(sigma=sigma_range, gamma=gamma_range) grid = GridSearchCV(LSSVMClassification(), param_grid=param_grid, n_jobs=3, verbose=True) grid.fit(X_std, y) grid.best_params_ best_estimator = grid.best_estimator_ best_estimator.fit(X_train_std,y_train) final_score = best_estimator.score(X_test_std, y_test) print("The best parameters are {} with a score of {:2.2f} %" .format(grid.best_params_, 100*final_score)) ###Output Fitting 5 folds for each of 900 candidates, totalling 4500 fits The best parameters are {'gamma': 2.807216203941177, 'sigma': 4.520353656360243} with a score of 60.47 % ###Markdown Here the accuracy plot of the grid search is shown, identifying local optima with higher performance. ###Code scores_plot = np.reshape(grid.cv_results_['mean_test_score'],(gamma_range.shape[0],sigma_range.shape[0])) gamma_plot = grid.cv_results_['param_gamma'] sigma_plot = grid.cv_results_['param_sigma'] #plt.axhline(grid.best_params_['sigma'], color='gray') #plt.axvline(grid.best_params_['gamma'], color='gray') X_mesh,Y_mesh=np.meshgrid(gamma_range,sigma_range) Z = scores_plot im = plt.pcolor(X_mesh,Y_mesh,Z, cmap=plt.cm.hot) plt.xscale('log') plt.yscale('log') plt.xlabel('gamma') plt.ylabel('sigma') plt.colorbar(im, orientation='vertical') plt.title('Validation accuracy') plt.show() ###Output _____no_output_____ ###Markdown Next we validate with the entire test set, obtaining an accuracy of 60.47%. 3.d Model choiceAs can be seen below, the library selects the first best estimator on the score metric, which had the following hyperparameters with the rbf kernel: {'gamma': 2.807216203941177, 'sigma': 4.520353656360243} Cross validationThe estimator will be validated using 10-fold cross-validation. 
###Code scores = cross_val_score(best_estimator, X_std, y, cv=10) print() print(scores) mean_score = np.mean(scores) * 100 mean_std = np.std(scores) * 100 print('Score medio:{:.2f}%, desvio medio:{:.2f}%'.format(mean_score, mean_std)) print(10*'\n') ###Output /home/vektor/.virtualenvs/science/lib/python3.8/site-packages/sklearn/model_selection/_split.py:666: UserWarning: The least populated class in y has only 9 members, which is less than n_splits=10. warnings.warn(("The least populated class in y has only %d" [0.63636364 0.54545455 0.77272727 0.68181818 0.47619048 0.61904762 0.76190476 0.71428571 0.66666667 0.76190476] Score medio:66.36%, desvio medio:9.27% ###Markdown Therefore the final mean accuracy was 66.36%, with a mean standard deviation of 9.27%.The low accuracy is most likely due to the low number of samples per class. Prediction demonstrationHere a prediction is demonstrated; the type-2 glass sample is predicted correctly. ###Code sample = glass.iloc[70].to_numpy()[1:-1].reshape(1, -1) glass.iloc[70] best_estimator.predict(sc_all.transform(sample)) ###Output _____no_output_____
List/python-List.ipynb
###Markdown Python - Lists An Introduction to Lists * Let us consider an example: write a program to accept the marks of a student in 5 subjects and find their average. ###Code def main(): n1 = eval(input("Enter the marks of 1 subject : ")) n2 = eval(input("Enter the marks of 1 subject : ")) n3 = eval(input("Enter the marks of 1 subject : ")) n4 = eval(input("Enter the marks of 1 subject : ")) n5 = eval(input("Enter the marks of 1 subject : ")) avg = (n1+n2+n3+n4+n5) / 5 print("Average is : ",avg) main() ###Output Enter the marks of 1 subject : 90 Enter the marks of 1 subject : 89 Enter the marks of 1 subject : 76 Enter the marks of 1 subject : 68 Enter the marks of 1 subject : 94 Average is : 83.4 ###Markdown Suppose the number of values to average must increase from five to 25. If we use the above program as a guide, twenty additional variables must be introduced, and the overall length of the program necessarily will grow. Averaging 1,000 numbers using this approach is impractical. But there is an alternative approach for averaging numbers. ###Code def main(): sum = 0.0 for i in range(5): num = eval(input("Enter the marks of " + str(i + 1)+" subject : ")) sum += num print("Average is : ",sum / 5) main() ###Output Enter the marks of 1 subject : 90 Enter the marks of 2 subject : 89 Enter the marks of 3 subject : 76 Enter the marks of 4 subject : 68 Enter the marks of 5 subject : 94 Average is : 83.4 ###Markdown * In fact, the coding change to average 1,000 numbers is no more difficult.* However, unlike the original average program, this new version does not display the numbers entered. This is a significant difference; it may be necessary to retain all the values entered for various reasons:* All the values can be redisplayed after entry so the user can visually verify their correctness.* The values may need to be displayed in some creative way; for example, they may be placed in a graphical user interface component, like a visual grid (spreadsheet).* The values entered may need to be processed in a different way after they are all entered; for example, we may wish to display just the values entered above a certain value (like greater than zero), but the limit is not determined until after all the numbers are entered.In all the above situations we need to retain the values of all the variables for future use. To solve this, Python provides a non-primitive datatype: the list. Using Lists* A list is a collection of objects.* A single list can hold integers, floating-point numbers, strings and even functions. ###Code list1 = [2,5,3,6,10] print(list1) list2 = ['I',"am",'a','python','developer'] print(list2) ###Output [2, 5, 3, 6, 10] ['I', 'am', 'a', 'python', 'developer'] ###Markdown * We can access each element in the list by its index. The index starts from 0 on the left. ###Code print(list1[0]) print(list1[1]) print(list1[4]) print() print(list1[-1]) # prints the last element of the list print(list1[-2]) # prints the second-last element of the list ###Output 2 5 10 10 6 ###Markdown A Python list is mutable. That is, its values can be changed. ###Code print(list1) print("The value at position 0 is :" , list1[0]) print() list1[0] = 44 # change the value at position 0 to 44 print(list1) print("The value at position 0 is :" , list1[0]) ###Output [2, 5, 3, 6, 10] The value at position 0 is : 2 [44, 5, 3, 6, 10] The value at position 0 is : 44 ###Markdown Let's write a program to reverse the elements in the list. 
def main() : lst = [3,1,4,6,23,45,73,33] for i in range(len(lst) - 1,-1,-1): print(lst[i]) main() * The len() function returns the number of elements in the list.* The range function takes len(lst) - 1 as the start parameter (the index of the last element, here 7), -1 as the stop parameter and -1 as the step parameter. So the value of i ranges over 7,6,5,4,3,2,1,0. Program to add a particular value to each element. ###Code lst = [3,43,12,56,34,23,1,3] val = eval(input("Enter the value to be added ")) print("Before adding the value") print(lst) print() for i in range(0,len(lst)): lst[i] = lst[i] + val print("after adding the value ",val) print(lst) ###Output Enter the value to be added 5 Before adding the value [3, 43, 12, 56, 34, 23, 1, 3] after adding the value 5 [8, 48, 17, 61, 39, 28, 6, 8] ###Markdown Program to build a list by repeating a pattern of elements with the * operator. ###Code a = [0] * 3 print(a) a = ['abc'] * 5 print(a) a = [10,20,30] * 3 print(a) ###Output [0, 0, 0] ['abc', 'abc', 'abc', 'abc', 'abc'] [10, 20, 30, 10, 20, 30, 10, 20, 30] ###Markdown Now we can write the program to calculate the average. ###Code def main(): marks = [] # initialize an empty list sum = 0.0 n = int(input("Enter the total number of subject ")) for i in range(0,n): value = int(input("Enter the mark of " + str(i+1)+ " subject ")) ## adding the element to the marks list ## NB: we need to wrap the element in a list in order to add it to ## the existing list. Otherwise an error occurs. marks = marks + [value] sum = sum + value print("\nMark of the student is ",marks) print("Average : ",sum / n) main() ###Output Enter the total number of subject 5 Enter the mark of 1 subject 89 Enter the mark of 2 subject 90 Enter the mark of 3 subject 67 Enter the mark of 4 subject 78 Enter the mark of 5 subject 65 Mark of the student is [89, 90, 67, 78, 65] Average : 77.8
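###Markdown As a side note, the reversal loop written earlier can also be expressed with Python's slice syntax, which is the idiomatic form (a short sketch, equivalent output):
###Code
lst = [3,1,4,6,23,45,73,33]
for item in lst[::-1]:  # a slice with step -1 walks the list backwards
    print(item)
print(list(reversed(lst)))  # reversed() yields the same backwards order as an iterator
###Output _____no_output_____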
mini_projects/Tic Tac Toe Game my work.ipynb
###Markdown This game has a small flaw: don't choose filled places on the board, otherwise it will stop. You can work on fixing this!!!! ###Code # Step One (1) def player_identification(): # I am making this while loop in case the player hits a wrong input while True: player_id = input('you want to be player 1 or 2') if player_id in ['1','2'] : print( f'you are player {int(player_id)}') print('you choose X id') return int(player_id) else: print('please choose carefully!') continue # Step Two (2) def player_marker(): marker = input('you want to be "X" or "O" : ') return marker # Step Three (3) def player_position(): pos = input('enter your play: ') return pos # Step Four (4) def player_move(): move = input('enter your move: ') # Step Five (5) def player_turn(): print('player 2 ') player2move = input('enter your move: ') if player2move in range(1,10): pos # Step Six (6) def marker_choose(): mark = input('You want to go for "X" or "O" :') return mark # Step Seven (7) from IPython.display import clear_output def board_display(board): clear_output() print(board[7],'|',board[8],'|',board[9]) print('----------') print(board[4],'|',board[5],'|',board[6]) print('----------') print(board[1],'|',board[2],'|',board[3]) # Step Eight (8) def choose_position(): while True: pos = int(input('Enter your move position (from 1-9): ')) return pos # Step Nine (9) def position_check(board,i): if board[i] == 'x' or board[i] =='o' : print() print('You chose a filled position, try again') return False else: return True # Step Ten (10) def winning_check(board): for i in range(1,len(board)-3) : # check for the horizontal line winning condition if board[i] == 'x': if board[i] == board[i+1] and board[i] == board[i+2]: return True else: pass # check for the vertical line winning condition if i in range(1,4): if board[i] == board[i+3] and board[i] == board[i+6]: return True else: pass elif board[i] == 'o': # check for the horizontal line winning condition if board[i] == board[i+1] and board[i] == board[i+2]: return True else: pass # check for the vertical line winning condition if i in range(1,4): if board[i] == board[i+3] and board[i] == board[i+6]: return True else: pass else: pass # Step Eleven (11) def continue_playing(): ask = input('Want to continue playing "Y" or "N"') return ask from IPython.display import clear_output def play_game(): # writing the complete game here!! clear_output() print('Welcome to Tic Tac Toe Game! My first Game Ever!!!!!!!!!!') # in case the user didn't choose x or o characters while True: markers = ['x','o'] player1 = player_marker() if player1 not in markers: print('You must choose "X" or "O" ') continue else: break markers.remove(player1) player2 = markers[0] board = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '] board_display(board) play = True while play: p1 = True # first player's turn while p1: print('Player 1 ',player1,' choose position') pos = choose_position() check = position_check(board,pos) if check == False: continue else: pass board[pos] = player1 board_display(board) # winning check for player one win = winning_check(board) if win == True: print('congratulations!! you won') decision = continue_playing() if decision == 'y': %rerun elif decision == 'n': play = False break else: p1 = False continue # second player's turn while not p1: print('Player 2 ',player2,' choose position') pos = choose_position() check = position_check(board,pos) if check == False: continue else: pass board[pos] = player2 board_display(board) # winning check for player 2 win = winning_check(board) if win == True: print('congratulations!! you won') decision = continue_playing() if decision == 'y': %rerun break elif decision == 'n': play = False break else: p1 = True continue play_game() ###Output _____no_output_____
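###Markdown One way to address the flaw mentioned at the top is to check wins against the eight explicit winning lines instead of scanning indices, and rely on position_check returning True/False to gate the re-prompt. A hedged sketch (a drop-in replacement for winning_check, same board layout as above):
###Code
WIN_LINES = [(7,8,9), (4,5,6), (1,2,3),  # rows
             (7,4,1), (8,5,2), (9,6,3),  # columns
             (7,5,3), (9,5,1)]           # diagonals

def winning_check(board):
    # a line wins only if all three cells match and are not blank
    for a, b, c in WIN_LINES:
        if board[a] != ' ' and board[a] == board[b] == board[c]:
            return True
    return False
###Output _____no_output_____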
notebooks/n04_inertia.ipynb
###Markdown Introduction The next step is to provide some information about the mass and inertia of the bodies involved. Each of the three rigid bodies has both a mass, which resists linear accelerations, and an inertia, which resists rotational accelerations. In this notebook we will specify the mass of the three bodies, the inertia tensor/dyadic, and also create three `RigidBody` objects that hold all of the necessary information for each rigid body. Setup First, we will import the results from the previous notebook. Even if you didn't get everything correctly working, the following import statement will bring in the correct solution so you can move forward. We will do this in all of the subsequent notebooks. ###Code from __future__ import print_function, division from solution.kinematics import * ###Output _____no_output_____ ###Markdown We will also need the function for easily generating inertial quantities and the `RigidBody` class so we can create some rigid bodies. ###Code from sympy.physics.mechanics import inertia, RigidBody ###Output _____no_output_____ ###Markdown We will need to specify some constants for the mass and inertia values. ###Code from sympy import symbols ###Output _____no_output_____ ###Markdown Once again, initialize SymPy printing so that we get nicely rendered symbols. ###Code from sympy.physics.vector import init_vprinting init_vprinting(use_latex='mathjax', pretty_print=False) ###Output _____no_output_____ ###Markdown Mass The masses of each rigid body can be represented by constant values, so we create a symbol for each body. ###Code lower_leg_mass, upper_leg_mass, torso_mass = symbols('m_L, m_U, m_T') lower_leg_mass upper_leg_mass torso_mass ###Output _____no_output_____ ###Markdown Inertia Since we are studying a 2D planar problem, we are only concerned with the rotational inertia about the $\hat{i}_z$ axis. We will assume that the rigid bodies are symmetric about the $XZ$ and $YZ$ planes, so we only need a single variable for each rigid body to specify the rotational inertia. ###Code lower_leg_inertia, upper_leg_inertia, torso_inertia = symbols('I_Lz, I_Uz, I_Tz') ###Output _____no_output_____ ###Markdown The `inertia()` function is a convenience function for creating inertia dyadics (i.e. basis-dependent tensors). You specify a reference frame to define the inertia with respect to and, at a minimum for symmetric bodies, provide the diagonal entries of the inertia tensor. In our case the rotational inertias about the $x$ and $y$ axes are not needed, so they are set to zero, and the $z$ inertia entry is set to the defined variable. ###Code lower_leg_inertia_dyadic = inertia(lower_leg_frame, 0, 0, lower_leg_inertia) lower_leg_inertia_dyadic ###Output _____no_output_____ ###Markdown In general, we store the inertia as dyadics, i.e. basis-dependent tensors. If you want to see what the inertia looks like expressed in a particular frame, use the `to_matrix()` method. ###Code lower_leg_inertia_dyadic.to_matrix(lower_leg_frame) ###Output _____no_output_____ ###Markdown We will also eventually need to know what point the inertia is defined with respect to. In our case, we will simply define all inertias about the mass center. We can store the total information needed by PyDy in a tuple of an inertia `Dyadic` and a `Point`. ###Code lower_leg_central_inertia = (lower_leg_inertia_dyadic, lower_leg_mass_center) ###Output _____no_output_____ ###Markdown The upper leg and torso inertias are found in the same fashion. 
###Code upper_leg_inertia_dyadic = inertia(upper_leg_frame, 0, 0, upper_leg_inertia) upper_leg_inertia_dyadic.to_matrix(upper_leg_frame) upper_leg_central_inertia = (upper_leg_inertia_dyadic, upper_leg_mass_center) ###Output _____no_output_____ ###Markdown Exercise Create a tuple of an inertia `Dyadic` and `Point` for the torso. ###Code torso_inertia_dyadic = torso_central_inertia = %load exercise_solutions/n04_inertia_inertia-dyadic.py ###Output _____no_output_____ ###Markdown Rigid Bodies To completely define a rigid body, the mass center point, the reference frame, the mass, and the inertia defined about a point must be specified. ###Code lower_leg = RigidBody('Lower Leg', lower_leg_mass_center, lower_leg_frame, lower_leg_mass, lower_leg_central_inertia) ###Output _____no_output_____ ###Markdown Exercise Create RigidBody objects for the upper leg and torso ###Code upper_leg = torso = %load exercise_solutions/n04_inertia_define-rigid-body.py ###Output _____no_output_____
benchmarking/Final_Camel_Batch_Mango.ipynb
###Markdown Example of optimizing the six-hump camel function Goal is to test the objective values found by Mango- Search space: Uniform- Number of iterations to try: 100- Domain size: 1000- Initial random: 5- Batch size: 5 Benchmarking test with different iteration counts for batch (size 5) executions ###Code from mango.tuner import Tuner from scipy.stats import uniform import math def get_param_dict(): param_dict = { 'a': uniform(-2, 4), 'b': uniform(-2, 4) } return param_dict def get_objective(x,y): x2 = math.pow(x,2) x4 = math.pow(x,4) y2 = math.pow(y,2) return ((4.0 - 2.1 * x2 + (x4 / 3.0)) * x2 + x*y + (-4.0 + 4.0 * y2) * y2) def objfunc(args_list): results = [] for hyper_par in args_list: a = hyper_par['a'] b = hyper_par['b'] result = -1.0*get_objective(a,b) results.append(result) return results def get_conf(): conf = dict() conf['batch_size'] = 5 conf['initial_random'] = 5 conf['num_iteration'] = 100 conf['domain_size'] = 1000 return conf def get_optimal_x(): param_dict = get_param_dict() conf = get_conf() tuner = Tuner(param_dict, objfunc,conf) results = tuner.maximize() return results Store_Optimal_X = [] Store_Results = [] num_of_tries = 20 for i in range(num_of_tries): results = get_optimal_x() Store_Results.append(results) print(i,":",results['best_objective']) #results['best_objective'] #len(Store_Results[0]['objective_values']) #Store_Results[0]['objective_values'][:15] #len(Store_Results[0]['params_tried']) ###Output _____no_output_____ ###Markdown Extract from the returned results the best objective value found up to each iteration count ###Code import numpy as np total_experiments = 20 initial_random = 5 plotting_itr =[10, 20,30,40,50,60,70,80,90,100] plotting_list = [] for exp in range(total_experiments): #for all exp local_list = [] for itr in plotting_itr: # for all points to plot # best objective found within the first itr*5 + initial_random evaluations (batch size 5) max_value = np.array(Store_Results[exp]['objective_values'][:itr*5+initial_random]).max() local_list.append(max_value) plotting_list.append(local_list) plotting_array = np.array(plotting_list) plotting_array.shape #plotting_array Y = [] # mean of the best objective across experiments at each iteration count for i in range(len(plotting_itr)): y_value = plotting_array[:,i].mean() Y.append(y_value) Y import numpy as np import matplotlib.pyplot as plt fig = plt.figure(figsize=(10,10)) plt.plot(plotting_itr,Y,label = 'Mango(Batch=5)',linewidth=4.0) #x, y plt.xlabel('Number of Iterations',fontsize=25) plt.ylabel('Mean optimal achieved',fontsize=25) #plt.title('Variation of Optimal Value of X with iterations',fontsize=20) plt.xticks(fontsize=20) plt.yticks(fontsize=20) #plt.yticks(np.arange(10, 110, step=10)) #plt.xticks(np.arange(10, 110, step=10)) plt.grid(True) plt.legend(fontsize=20) plt.show() ###Output _____no_output_____
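###Markdown For reference, the objective implemented in get_objective above is the six-hump camel benchmark function (negated in objfunc because the tuner maximizes): $$f(x, y) = \left(4 - 2.1x^2 + \frac{x^4}{3}\right)x^2 + xy + \left(-4 + 4y^2\right)y^2,$$ whose global minima are $f \approx -1.0316$ at $(x, y) \approx (\pm 0.0898, \mp 0.7126)$, so best_objective should approach 1.0316.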
farmgate_prices/Untitled.ipynb
###Markdown This is where you set the color scheme to be color-blindness friendly ###Code import matplotlib.pyplot as plt import pandas as pd # Read the data into a pandas DataFrame. gender_degree_data = pd.read_csv("http://www.randalolson.com/wp-content/uploads/percent-bachelors-degrees-women-usa.csv") # These are the "Tableau 20" colors as RGB. tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120), (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150), (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148), (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199), (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)] # Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts. for i in range(len(tableau20)): r, g, b = tableau20[i] tableau20[i] = (r / 255., g / 255., b / 255.) # You typically want your plot to be ~1.33x wider than tall. This plot is a rare # exception because of the number of lines being plotted on it. # Common sizes: (10, 7.5) and (12, 9) plt.figure(figsize=(12, 14)) # Remove the plot frame lines. They are unnecessary chartjunk. ax = plt.subplot(111) ax.spines["top"].set_visible(False) ax.spines["bottom"].set_visible(False) ax.spines["right"].set_visible(False) ax.spines["left"].set_visible(False) # Ensure that the axis ticks only show up on the bottom and left of the plot. # Ticks on the right and top of the plot are generally unnecessary chartjunk. ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() # Limit the range of the plot to only where the data is. # Avoid unnecessary whitespace. plt.ylim(0, 90) plt.xlim(1968, 2014) # Make sure your axis ticks are large enough to be easily read. # You don't want your viewers squinting to read your plot. plt.yticks(range(0, 91, 10), [str(x) + "%" for x in range(0, 91, 10)], fontsize=14) plt.xticks(fontsize=14) # Provide tick lines across the plot to help your viewers trace along # the axis ticks. Make sure that the lines are light and small so they # don't obscure the primary data lines. for y in range(10, 91, 10): plt.plot(range(1968, 2012), [y] * len(range(1968, 2012)), "--", lw=0.5, color="black", alpha=0.3) # # Remove the tick marks; they are unnecessary with the tick lines we just plotted. # plt.tick_params(axis="both", which="both", bottom="off", top="off", # labelbottom="on", left="off", right="off", labelleft="on") # Now that the plot is prepared, it's time to actually plot the data! # Note that I plotted the majors in order of the highest % in the final year. majors = ['Health Professions', 'Public Administration', 'Education', 'Psychology', 'Foreign Languages', 'English', 'Communications\nand Journalism', 'Art and Performance', 'Biology', 'Agriculture', 'Social Sciences and History', 'Business', 'Math and Statistics', 'Architecture', 'Physical Sciences', 'Computer Science', 'Engineering'] for rank, column in enumerate(majors): # Plot each line separately with its own color, using the Tableau 20 # color set in order. plt.plot(gender_degree_data.Year.values, gender_degree_data[column.replace("\n", " ")].values, lw=2.5, color=tableau20[rank]) # Add a text label to the right end of every line. Most of the code below # is adding specific offsets to the y position because some labels overlapped. 
y_pos = gender_degree_data[column.replace("\n", " ")].values[-1] - 0.5 if column == "Foreign Languages": y_pos += 0.5 elif column == "English": y_pos -= 0.5 elif column == "Communications\nand Journalism": y_pos += 0.75 elif column == "Art and Performance": y_pos -= 0.25 elif column == "Agriculture": y_pos += 1.25 elif column == "Social Sciences and History": y_pos += 0.25 elif column == "Business": y_pos -= 0.75 elif column == "Math and Statistics": y_pos += 0.75 elif column == "Architecture": y_pos -= 0.75 elif column == "Computer Science": y_pos += 0.75 elif column == "Engineering": y_pos -= 0.25 # # Again, make sure that all labels are large enough to be easily read # # by the viewer. plt.text(2011.5, y_pos, column, fontsize=14, color=tableau20[rank]) # # matplotlib's title() call centers the title on the plot, but not the graph, # # so I used the text() call to customize where the title goes. # # Make the title big enough so it spans the entire plot, but don't make it # # so big that it requires two lines to show. # # Note that if the title is descriptive enough, it is unnecessary to include # # axis labels; they are self-evident, in this plot's case. # plt.text(1995, 93, "Percentage of Bachelor's degrees conferred to women in the U.S.A." # ", by major (1970-2012)", fontsize=17, ha="center") # # Always include your data source(s) and copyright notice! And for your # # data sources, tell your viewers exactly where the data came from, # # preferably with a direct link to the data. Just telling your viewers # # that you used data from the "U.S. Census Bureau" is completely useless: # # the U.S. Census Bureau provides all kinds of data, so how are your # # viewers supposed to know which data set you used? # plt.text(1966, -8, "Data source: nces.ed.gov/programs/digest/2013menu_tables.asp" # "\nAuthor: Randy Olson (randalolson.com / @randal_olson)" # "\nNote: Some majors are missing because the historical data " # "is not available for them", fontsize=10) # # Finally, save the figure as a PNG. # # You can also save it as a PDF, JPEG, etc. # # Just change the file extension in this call. bbox_inches="tight" removes all the extra whitespace on the edges of your plot. plt.savefig("percent-bachelors-degrees-women-usa.png", bbox_inches="tight") gender_degree_data.Year # You typically want your plot to be ~1.33x wider than tall. This plot is a rare # exception because of the number of lines being plotted on it. # Common sizes: (10, 7.5) and (12, 9) plt.figure(figsize=(12, 14)) # Remove the plot frame lines. They are unnecessary chartjunk. ax = plt.subplot(111) ax.spines["top"].set_visible(False) ax.spines["bottom"].set_visible(False) ax.spines["right"].set_visible(False) ax.spines["left"].set_visible(False) # Ensure that the axis ticks only show up on the bottom and left of the plot. # Ticks on the right and top of the plot are generally unnecessary chartjunk. ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() # Limit the range of the plot to only where the data is. # Avoid unnecessary whitespace. plt.ylim(0, 90) plt.xlim(1968, 2014) # Make sure your axis ticks are large enough to be easily read. # You don't want your viewers squinting to read your plot. plt.yticks(range(0, 91, 10), [str(x) + "%" for x in range(0, 91, 10)], fontsize=14) plt.xticks(fontsize=14) # Provide tick lines across the plot to help your viewers trace along # the axis ticks. Make sure that the lines are light and small so they # don't obscure the primary data lines. 
for y in range(10, 91, 10): plt.plot(range(1968, 2012), [y] * len(range(1968, 2012)), "--", lw=0.5, color="black", alpha=0.3) # Remove the tick marks; they are unnecessary with the tick lines we just plotted. plt.tick_params(axis="both", which="both", bottom="off", top="off", labelbottom="on", left="off", right="off", labelleft="on") plt.figure(figsize=(12, 14)) # Now that the plot is prepared, it's time to actually plot the data! # Note that I plotted the majors in order of the highest % in the final year. majors = ['Health Professions', 'Public Administration', 'Education', 'Psychology', 'Foreign Languages', 'English', 'Communications\nand Journalism', 'Art and Performance', 'Biology', 'Agriculture', 'Social Sciences and History', 'Business', 'Math and Statistics', 'Architecture', 'Physical Sciences', 'Computer Science', 'Engineering'] for rank, column in enumerate(majors): # Plot each line separately with its own color, using the Tableau 20 # color set in order. plt.plot(gender_degree_data.Year.values, gender_degree_data[column.replace("\n", " ")].values, lw=2.5, color=tableau20[rank]) # Add a text label to the right end of every line. Most of the code below # is adding specific offsets to the y position because some labels overlapped. y_pos = gender_degree_data[column.replace("\n", " ")].values[-1] - 0.5 if column == "Foreign Languages": y_pos += 0.5 elif column == "English": y_pos -= 0.5 elif column == "Communications\nand Journalism": y_pos += 0.75 elif column == "Art and Performance": y_pos -= 0.25 elif column == "Agriculture": y_pos += 1.25 elif column == "Social Sciences and History": y_pos += 0.25 elif column == "Business": y_pos -= 0.75 elif column == "Math and Statistics": y_pos += 0.75 elif column == "Architecture": y_pos -= 0.75 elif column == "Computer Science": y_pos += 0.75 elif column == "Engineering": y_pos -= 0.25 # Again, make sure that all labels are large enough to be easily read # by the viewer. plt.text(2011.5, y_pos, column, fontsize=14, color=tableau20[rank]) # matplotlib's title() call centers the title on the plot, but not the graph, # so I used the text() call to customize where the title goes. # Make the title big enough so it spans the entire plot, but don't make it # so big that it requires two lines to show. # Note that if the title is descriptive enough, it is unnecessary to include # axis labels; they are self-evident, in this plot's case. plt.text(1995, 93, "Percentage of Bachelor's degrees conferred to women in the U.S.A." ", by major (1970-2012)", fontsize=17, ha="center") # Always include your data source(s) and copyright notice! And for your # data sources, tell your viewers exactly where the data came from, # preferably with a direct link to the data. Just telling your viewers # that you used data from the "U.S. Census Bureau" is completely useless: # the U.S. Census Bureau provides all kinds of data, so how are your # viewers supposed to know which data set you used? plt.text(1966, -8, "Data source: nces.ed.gov/programs/digest/2013menu_tables.asp" "\nAuthor: Randy Olson (randalolson.com / @randal_olson)" "\nNote: Some majors are missing because the historical data " "is not available for them", fontsize=10) # Finally, save the figure as a PNG. # You can also save it as a PDF, JPEG, etc. # Just change the file extension in this call. # bbox_inches="tight" removes all the extra whitespace on the edges of your plot. plt.savefig("percent-bachelors-degrees-women-usa.png", bbox_inches="tight") gender_degree_data.Year.values ###Output _____no_output_____
SparkETLDemoPython.ipynb
###Markdown Spark ETL Demo PythonThis demo, written in Python for Watson Data Studio, illustrates the use of a Spark cluster to perform ETL. It imports data from flat files into Spark DataFrames, manipulates the data, aggregates it and then writes the result out to a relational database. The advantages of using Spark for this are scalability (with a larger cluster one can achieve close to linear scalability) and simplified error recovery (a failed attempt at running this ETL job can be repeated at any stage and the final result will be the same). Step 1 Read in the source dataWe read two CSV files. One has statistics about Social Security payments for the state of Texas by zipcode and the other maps US zipcodes to US counties so we can aggregate the Social Security data by county rather than zipcode. Grab the input data files from GitHub and put them in GPFS using wget ###Code # Install wget if you don't already have it. !pip install wget import wget link_to_ssdata = 'https://raw.githubusercontent.com/djccarew/sparketldemo/master/data/oasdi-tx-clean.csv' link_to_zipdata = 'https://raw.githubusercontent.com/djccarew/sparketldemo/master/data/zip_codes_states.csv' social_security_data_file = wget.download(link_to_ssdata) print(social_security_data_file) zipcode_data_file = wget.download(link_to_zipdata) print(zipcode_data_file) ###Output _____no_output_____ ###Markdown Read the Social Security data file into a DataFrame using a schema. Note that the schema can be inferred, but the inferred schema typically converts various numeric types to string, so it's better to specify the schema so you know what you end up with. ###Code from pyspark.sql.types import StructType, StructField, IntegerType, StringType, DoubleType from pyspark.sql import SparkSession spark = SparkSession.builder.getOrCreate() ssdata_schema = StructType([ StructField("Zip", StringType(), False), StructField("NumTotal", IntegerType(), False), StructField("NumRetired", IntegerType(), False), StructField("NumDisabled", IntegerType(), False), StructField("NumWidowerOrParent", IntegerType(), False), StructField("NumSpouses", IntegerType(), False), StructField("NumChildren", IntegerType(), False), StructField("BenTotal", IntegerType(), False), StructField("BenRetired", IntegerType(), False), StructField("BenWidowerOrParent", IntegerType(), False), StructField("NumSeniors", IntegerType(), False)]) df_ssdata_raw = spark.read\ .format('org.apache.spark.sql.execution.datasources.csv.CSVFileFormat')\ .option('header', 'true')\ .load(social_security_data_file, schema=ssdata_schema) df_ssdata_raw.printSchema() ###Output _____no_output_____ ###Markdown Repeat for the zipcode data file ###Code zipdata_schema = StructType([ StructField("Zip", StringType(), False), StructField("Latitude", DoubleType(), False), StructField("Longitude", DoubleType(), False), StructField("City", StringType(), False), StructField("State", StringType(), False), StructField("County", StringType(), False)]) df_zipdata_raw = spark.read\ .format('org.apache.spark.sql.execution.datasources.csv.CSVFileFormat')\ .option('header', 'true')\ .load(zipcode_data_file, schema=zipdata_schema) df_zipdata_raw.printSchema() ###Output _____no_output_____ ###Markdown Step 2 Transform raw source data We only need the County name and zip code columns for this demo, so we don't use the other columns in the zipcode data ###Code df_counties = df_zipdata_raw.select('Zip','County') df_counties.printSchema() ###Output _____no_output_____ ###Markdown Join Social Security data with zipcode data to add a 
County column to Social Security data ###Code df_ssdata_counties = df_ssdata_raw.join(df_counties, "Zip") df_ssdata_counties.printSchema() ###Output _____no_output_____ ###Markdown We don't need the zipcode column anymore since we'll be aggregating by County instead ###Code df_ssdata_counties = df_ssdata_counties.drop("Zip") df_ssdata_counties.printSchema() ###Output _____no_output_____ ###Markdown Create a temp view so we can do the "by county" aggregation via SQL rather than using the Spark SQL DataFrame API. (Doing it via SQL is usually easier) ###Code df_ssdata_counties.createOrReplaceTempView("aggregated_by_county") ###Output _____no_output_____ ###Markdown Spark SQL query to aggregate Social Security data by county and sort by county name ###Code df_ssdata_data_by_county = spark.sql("select County, sum(NumTotal) as NumTotal, sum(NumRetired) as NumRetired, sum(NumDisabled) as NumDisabled, sum(NumWidowerOrParent) as NumWidowerOrParent, sum(NumSpouses) as NumSpouses, sum(NumChildren) as NumChildren, sum(BenTotal) as BenTotal, sum(BenRetired) as BenRetired, sum(BenWidowerOrParent) as BenWidowerOrParent, sum(NumSeniors) as NumSeniors from aggregated_by_county group by County order by County") df_ssdata_data_by_county.take(5) ###Output _____no_output_____ ###Markdown Step 3 Write modified data to target database We use the jdbc method of the DataFrameWriter to write the modified data to the target db. Appropriate credentials for the target db need to be set up first. Modify the code below with the appropriate values for your database ###Code jdbc_url = 'your-jdbc-url' dest_table = 'your-table-name' jdbc_properties = { 'driver': 'com.ibm.db2.jcc.DB2Driver', 'user': 'your-db-user', 'password': 'your-db-password' } df_ssdata_data_by_county.write.jdbc(jdbc_url, table=dest_table, mode='overwrite', properties=jdbc_properties) ###Output _____no_output_____
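###Markdown As a quick sanity check (a small addition, not part of the original demo), we can read the table back over the same JDBC connection and compare row counts; this assumes the placeholder values above (jdbc_url, dest_table, jdbc_properties) have been replaced with real credentials. ###Code # Hypothetical verification step: read the target table back and compare counts df_check = spark.read.jdbc(jdbc_url, table=dest_table, properties=jdbc_properties) print(df_check.count(), df_ssdata_data_by_county.count()) ###Output _____no_output_____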
2021Q1_DSF/6.- Spark ML/notebooks/04_regression.ipynb
###Markdown Spark ML Regression ProblemIn this notebook we will tackle the supervised machine learning problem of regression. We will work with the Boston Housing dataset, which contains information about different characteristics of houses in the city of Boston. We will use the price of the houses as the target variable. We will access the data through the Python ML library scikit-learn. We will fit several models and compare the results obtained with them. Create the SparkSessionNote: in Datio it is not necessary to create the Spark session, since it is created automatically when starting a notebook with the PySpark Python3 - Spark 2.1.0 kernel. ###Code # Answer here ###Output _____no_output_____ ###Markdown Load the data into a Spark DataFrameWe load the data from scikit-learn and consolidate it into a Spark DataFrame. ###Code # Answer here ###Output _____no_output_____ ###Markdown We look at the schema. ###Code # Answer here ###Output _____no_output_____ ###Markdown We can look at the description of the variables of the object loaded from scikit-learn. ###Code # Answer here ###Output _____no_output_____ ###Markdown Preliminary steps Vector AssemblerTo fit a model in Spark we need to indicate which variables are going to be used as independent variables. Through the _featuresCol_ parameter of the different algorithms, we pass the column that contains the output of the VectorAssembler with the independent variables. We build the VectorAssembler with all the variables except the target. ###Code # Answer here ###Output _____no_output_____ ###Markdown Train/test splitWe perform the train/test (or train/validation) split to measure the performance of the model after fitting. ###Code # Answer here ###Output _____no_output_____ ###Markdown Linear RegressionA mathematical model used to approximate the dependency relationship between a dependent variable $Y$, the independent variables $X_i$ and a random term $\varepsilon$. It is expressed through the following equation:$$Y = \beta_0 + \beta_1 X_1 + \beta_2 X_2 + \dots + \beta_p X_p + \varepsilon$$where - $Y$ is the dependent, explained or target variable,- $X_i$ are the explanatory, independent or regressor variables,- $\beta_i$ are the parameters that measure the influence the explanatory variables have on the target,for all $0 \le i \le p$.We fit a regression model and extract the actual values and the predictions. ###Code # Answer here ###Output _____no_output_____ ###Markdown Decision TreeA predictive model built by performing a recursive binary partition of the data, identifying the variables and their cut points that best determine the value of the target variable. During training the important parameters are: the impurity measure and the stopping criterion (usually depth).We fit a decision tree on our dataset and extract the actual and predicted values. ###Code # Answer here ###Output _____no_output_____ ###Markdown Random ForestA predictive model based on decision trees. It builds several trees by taking bootstrap samples of the dataset (bagging) and samples of the set of variables. It makes the prediction for each new record by passing it through each of the trees and averaging the results obtained.We fit a random forest and obtain the actual and predicted values. 
###Code # Answer here ###Output _____no_output_____ ###Markdown Model Evaluation To compare the performance of the fitted models and select the best one, we have several metrics available. Some of them are:- Mean squared error (MSE)- Root mean squared error (RMSE)- R squared (R²)- Mean absolute error (MAE)As an example we obtain the RMSE and the MAE of the fitted models. ###Code # Answer here ###Output _____no_output_____ ###Markdown Print the metrics for the different models ###Code # Answer here ###Output _____no_output_____ ###Markdown Looking at the values obtained in the model evaluation metrics, we would decide to keep the random forest. We can create a function that takes as input parameters the transformed dataframe, the prediction column and the target, and returns a dictionary with all the available metrics and their respective values. ###Code def calculate_metrics(dataset, predictionCol='prediction', labelCol='MEDV'): metrics = RegressionEvaluator(predictionCol=predictionCol, labelCol=labelCol) rmse = metrics.evaluate(dataset, {metrics.metricName: "rmse"}) mae = metrics.evaluate(dataset, {metrics.metricName: "mae"}) mse = metrics.evaluate(dataset, {metrics.metricName: "mse"}) r2 = metrics.evaluate(dataset, {metrics.metricName: "r2"}) return {'rmse': rmse, 'mae':mae, 'mse': mse, 'r2':r2} calculate_metrics(boston_test_linear_regression) calculate_metrics(boston_test_decision_tree_regression) calculate_metrics(boston_test_random_forest) ###Output _____no_output_____
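###Markdown The exercise cells above are intentionally left as "# Answer here". For reference, here is one possible (hedged, untested) sketch of the fit/evaluate flow described above; it assumes the Boston data has already been loaded into a Spark DataFrame named boston_df whose label column is MEDV, which is not shown in this notebook. ###Code
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.regression import LinearRegression
from pyspark.ml.evaluation import RegressionEvaluator

# Assemble every column except the target into a single features vector
feature_cols = [c for c in boston_df.columns if c != 'MEDV']
assembler = VectorAssembler(inputCols=feature_cols, outputCol='features')
assembled_df = assembler.transform(boston_df)

# Train/test split
train_df, test_df = assembled_df.randomSplit([0.8, 0.2], seed=42)

# Fit a linear regression and score the held-out set
lr = LinearRegression(featuresCol='features', labelCol='MEDV')
lr_model = lr.fit(train_df)
boston_test_linear_regression = lr_model.transform(test_df)

# RMSE on the test set
evaluator = RegressionEvaluator(predictionCol='prediction', labelCol='MEDV', metricName='rmse')
print(evaluator.evaluate(boston_test_linear_regression))
###Output _____no_output_____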
Vhanilla_RNN/.ipynb_checkpoints/RNN-checkpoint.ipynb
###Markdown VANILLA RNN ON 8*8 MNIST DATASET TO PREDICT TEN CLASSES It's a dynamic sequence and batch vanilla RNN. This is created with TensorFlow scan and map higher-order ops! This is a base RNN which can be used to create GRU, LSTM, Neural Stack Machine, Neural Turing Machine, RNN-EM and so on! Importing Libraries ###Code import numpy as np import tensorflow as tf from sklearn.datasets import load_digits from sklearn.cross_validation import train_test_split import pylab as pl from IPython import display import sys %matplotlib inline ###Output /usr/local/lib/python3.5/dist-packages/sklearn/cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20. "This module will be removed in 0.20.", DeprecationWarning) ###Markdown Vanilla RNN class and functions ###Code class RNN_cell(object): """ RNN cell object which takes 3 arguments for initialization. input_size = Input Vector size hidden_layer_size = Hidden layer size target_size = Output vector size """ def __init__(self, input_size, hidden_layer_size, target_size): # Initialization of given values self.input_size = input_size self.hidden_layer_size = hidden_layer_size self.target_size = target_size # Weights and Bias for input and hidden tensor self.Wx = tf.Variable(tf.zeros( [self.input_size, self.hidden_layer_size])) self.Wh = tf.Variable(tf.zeros( [self.hidden_layer_size, self.hidden_layer_size])) self.bi = tf.Variable(tf.zeros([self.hidden_layer_size])) # Weights for output layers self.Wo = tf.Variable(tf.truncated_normal( [self.hidden_layer_size, self.target_size],mean=0,stddev=.01)) self.bo = tf.Variable(tf.truncated_normal([self.target_size],mean=0,stddev=.01)) # Placeholder for input vector with shape[batch, seq, embeddings] self._inputs = tf.placeholder(tf.float32, shape=[None, None, self.input_size], name='inputs') # Processing inputs to work with scan function self.processed_input = process_batch_input_for_RNN(self._inputs) ''' Initial hidden state's shape is [1,self.hidden_layer_size] In the first time stamp, we are doing a dot product with weights to get the shape of [batch_size, self.hidden_layer_size]. For this dot product tensorflow uses broadcasting. But during back propagation a low level error occurs. So to solve the problem it was needed to initialize the initial hidden state of size [batch_size, self.hidden_layer_size]. So here is a little hack: getting the same shaped initial hidden state of zeros. ''' self.initial_hidden = self._inputs[:, 0, :] self.initial_hidden = tf.matmul( self.initial_hidden, tf.zeros([input_size, hidden_layer_size])) # Function for the vanilla RNN step. def vanilla_rnn(self, previous_hidden_state, x): """ This function takes the previous hidden state and input and outputs the current hidden state. """ current_hidden_state = tf.tanh( tf.matmul(previous_hidden_state, self.Wh) + tf.matmul(x, self.Wx) + self.bi) return current_hidden_state # Function for getting all hidden states. 
def get_states(self): """ Iterates through time/ sequence to get all hidden states """ # Getting all hidden states through time all_hidden_states = tf.scan(self.vanilla_rnn, self.processed_input, initializer=self.initial_hidden, name='states') return all_hidden_states # Function to get output from a hidden layer def get_output(self, hidden_state): """ This function takes a hidden state and returns the output """ output = tf.nn.relu(tf.matmul(hidden_state, self.Wo) + self.bo) return output # Function for getting all output layers def get_outputs(self): """ Iterating through hidden states to get outputs for all timestamps """ all_hidden_states = self.get_states() all_outputs = tf.map_fn(self.get_output, all_hidden_states) return all_outputs # Function to convert batch input data to use scan ops of tensorflow. def process_batch_input_for_RNN(batch_input): """ Process tensor of size [5,3,2] to [3,5,2] """ batch_input_ = tf.transpose(batch_input, perm=[2, 0, 1]) X = tf.transpose(batch_input_) return X ###Output _____no_output_____ ###Markdown Placeholder and initializers ###Code hidden_layer_size = 110 input_size = 8 target_size = 10 y = tf.placeholder(tf.float32, shape=[None, target_size],name='inputs') ###Output _____no_output_____ ###Markdown Models ###Code #Initializing rnn object rnn=RNN_cell( input_size, hidden_layer_size, target_size) #Getting all outputs from rnn outputs = rnn.get_outputs() #Getting final output through indexing after reversing last_output = outputs[-1] #As the rnn model outputs the final layer through a ReLU activation, softmax is used for the final output. output=tf.nn.softmax(last_output) #Computing the Cross Entropy loss cross_entropy = -tf.reduce_sum(y * tf.log(output)) # Training with Adam optimizer (the code uses AdamOptimizer, not Adadelta) train_step = tf.train.AdamOptimizer().minimize(cross_entropy) #Calculation of correct prediction and accuracy correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(output,1)) accuracy = (tf.reduce_mean(tf.cast(correct_prediction, tf.float32)))*100 ###Output _____no_output_____ ###Markdown Dataset Preparation ###Code sess=tf.InteractiveSession() sess.run(tf.global_variables_initializer()) #Using Sklearn MNIST dataset. digits = load_digits() X=digits.images Y_=digits.target # One hot encoding Y = sess.run(tf.one_hot(indices=Y_, depth=target_size)) #Getting Train and test Dataset X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.22, random_state=42) #Cutting for simple iteration X_train=X_train[:1400] y_train=y_train[:1400] #Iterations to do training for epoch in range(120): start=0 end=100 for i in range(14): X=X_train[start:end] Y=y_train[start:end] start=end end=start+100 sess.run(train_step,feed_dict={rnn._inputs:X, y:Y}) Loss=str(sess.run(cross_entropy,feed_dict={rnn._inputs:X, y:Y})) Train_accuracy=str(sess.run(accuracy,feed_dict={rnn._inputs:X_train, y:y_train})) Test_accuracy=str(sess.run(accuracy,feed_dict={rnn._inputs:X_test, y:y_test})) pl.plot([epoch],Loss,'b.',) pl.plot([epoch],Train_accuracy,'r*',) pl.plot([epoch],Test_accuracy,'g+') display.clear_output(wait=True) display.display(pl.gcf()) sys.stdout.flush() print("\rIteration: %s Loss: %s Train Accuracy: %s Test Accuracy: %s"%(epoch,Loss,Train_accuracy,Test_accuracy)), sys.stdout.flush() ###Output _____no_output_____
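###Markdown A quick aside (a small added illustration, not in the original notebook) on how `tf.scan` drives `get_states` above: scan applies the step function across the leading axis, threading the previous result into the next call, which is exactly the hidden-state recurrence. A minimal TF1-style sketch, reusing the session created above: ###Code # tf.scan accumulates: out[t] = f(out[t-1], x[t]); here f is addition, # analogous to vanilla_rnn(previous_hidden_state, x) in the RNN_cell class. demo = tf.scan(lambda prev, x: prev + x, tf.constant([1, 2, 3, 4]), initializer=tf.constant(0)) print(sess.run(demo)) # expected: [ 1 3 6 10] ###Output _____no_output_____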
Experiments/Data_Pipeline_with_TensorFlow.ipynb
###Markdown ###Code !nvidia-smi ###Output NVIDIA-SMI has failed because it couldn't communicate with the NVIDIA driver. Make sure that the latest NVIDIA driver is installed and running. ###Markdown Learning how to build data pipelines with `tf.data`The `tf.data` API helps us to build complex input pipelines from simple, reusable pieces. For example, the pipeline, - for an image model might aggregate data from files in a distributed file system, apply random perturbations to each image, and merge randomly selected images into a batch for training. - for a text model might involve extracting symbols from raw text data, converting them to embedding identifiers with a lookup table and batching together sequences of different lengths. The `tf.data` API makes it possible to handle large amounts of data, read from different data formats, and perform complex transformations.The `tf.data` API introduces a `tf.data.Dataset` abstraction that represents a sequence of elements, in which each element consists of one or more components. For example, in an image pipeline, an element might be a single training example, with a **pair of tensor components representing the image and its label.****The two distinct ways to create a dataset**: - A data **source** constructs a `Dataset` from data stored in memory or in one or more files. - A data **transformation** constructs a dataset from one or more `tf.data.Dataset`. Basic Mechanics - To create an input pipeline, we must start with a data source. - (In-memory data) For example, to construct a `Dataset` from data in memory (e.g. lists or NumPy arrays) we can use `tf.data.Dataset.from_tensors()` or `tf.data.Dataset.from_tensor_slices()`. - (TFRecord file) If the input data is stored in a TFRecord format, we can then use `tf.data.TFRecordDataset()`> The `Dataset` object is a Python iterable (we can loop through). ###Code # Importing the things we need import tensorflow as tf import pathlib import os import matplotlib.pyplot as plt import pandas as pd import numpy as np # Creating dummy data and using tf.data.Dataset.from_tensor_slices() dum_list = [8 , 3, 0 , 8 , 2 , 1] dataset = tf.data.Dataset.from_tensor_slices(dum_list) dataset # Iterating and looking at what's inside the dataset we created for elem in dataset: print(elem.numpy()) # Loading the MNIST dataset (train_data , train_labels) , (test_data , test_labels) = tf.keras.datasets.mnist.load_data() # Printing out the shapes of our mnist dataset train_data.shape , train_labels.shape , test_data.shape , test_labels.shape ###Output Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz 11493376/11490434 [==============================] - 0s 0us/step ###Markdown Loading our data using `tf.data` and creating a TensorSliceDataset object for our train data ###Code # Turning our train data into a TensorSliceDataset object train_dataset_slices = tf.data.Dataset.from_tensor_slices((train_data , train_labels)) train_dataset_slices ###Output _____no_output_____ ###Markdown Cool! Now we have packed our train images and labels into one whole Dataset. 
To view the labels https://stackoverflow.com/questions/64132847/how-to-iterate-over-tensorslicedataset-object-in-tensorflow ###Code train_dataset_slices.element_spec ###Output _____no_output_____ ###Markdown Let's try the same, but this time with `tf.data.Dataset.from_tensors()` ###Code # Using tf.data.Dataset.from_tensors() train_data_tensors = tf.data.Dataset.from_tensors((train_data , train_labels)) train_data_tensors # Looking into our dataset train_data_tensors.element_spec train_data_tensors.list_files ###Output _____no_output_____ ###Markdown Using `tf.data.Dataset.from_generator()` now; this will help us to create a Dataset object from a data generator object. Useful links- [Converting ImageDatasetGenerator into dataset object](https://stackoverflow.com/questions/54606302/tf-data-dataset-from-tf-keras-preprocessing-image-imagedatagenerator-flow-from-d)- [How to use during fit function]( https://stackoverflow.com/questions/52636127/how-to-use-keras-generator-with-tf-data-api) ###Code # Loading in the cats and dogs dataset # data's url _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' # Extracting from the path path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip' , origin = _URL , extract = True) PATH = os.path.join(os.path.dirname(path_to_zip) , 'cats_and_dogs_filtered') # What's inside PATH? os.listdir(PATH) # Now setting up our train and validation directories (for images) train_dir = os.path.join(PATH , 'train') valid_dir = os.path.join(PATH , 'validation') # What's inside our train_dir os.listdir(train_dir) # Looking into the cats folder os.listdir(f'{train_dir}/cats')[:10] # Using ImageDataGenerator train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1/255.) # Getting the images from our directory train_gen = train_datagen.flow_from_directory(train_dir) # For Validation valid_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1/255.) valid_gen = valid_datagen.flow_from_directory(valid_dir) images, labels = next(train_gen) # Checking their shapes images.shape , labels.shape , images.dtype , labels.dtype train_gen.labels # Inspect our train_gen and collect some info that may help us in converting to a Dataset object print(f'Target size of images: {train_gen.target_size}') print(f'Number of classes: {train_gen.num_classes}') print(f'Getting the class indices: {train_gen.class_indices}') ###Output Target size of images: (256, 256) Number of classes: 2 Getting the class indices: {'cats': 0, 'dogs': 1} ###Markdown Alright! Now for the big step of converting our generator to a Dataset. 
###Code train_dataset_gen = tf.data.Dataset.from_generator( lambda: train_datagen.flow_from_directory(train_dir) , output_types = (tf.float32 , tf.float32), output_shapes = ([None, 256, 256 ,3] , [None , 2]) ) valid_dataset_gen = tf.data.Dataset.from_generator( lambda: valid_datagen.flow_from_directory(valid_dir), output_types = (tf.float32 , tf.float32), output_shapes = ([None , 256 , 256 , 3] , [None , 2]) ) train_dataset_gen , valid_dataset_gen it = iter(train_dataset_gen) # Peek at a single batch; take(1) bounds the otherwise endless generator stream for elem in train_dataset_gen.take(1): print(elem[0].shape, elem[1].shape) unbatch_data = train_dataset_gen.apply(tf.data.experimental.unbatch()) images , labels = next(iter(train_dataset_gen)) class GeneratorLen(object): def __init__(self, gen, length): self.gen = gen self.length = length def __len__(self): return self.length def __iter__(self): return self.gen g = train_dataset_gen h = GeneratorLen(g, 1) print(len(h)) list(h) dum_train = train_dataset_gen.shuffle(buffer_size= 1000).prefetch(buffer_size = tf.data.AUTOTUNE) dum_train len(dum_train) unbatch_data = unbatch_data.padded_batch(32) train_dataset_gen.take(1) len(train_dataset_gen) model.compile(loss = tf.keras.losses.SparseCategoricalCrossentropy() , optimizer = tf.keras.optimizers.Adam(), metrics = ['accuracy']) model.fit(train_dataset_gen , epochs = 5) train_dataset_gen.element_spec ###Output _____no_output_____ ###Markdown Extracting images and labels from our dataset object. Useful link: https://stackoverflow.com/questions/56226621/how-to-extract-data-labels-back-from-tensorflow-dataset ###Code # Extracting images and labels from our dataset object for images , labels in train_dataset_gen.take(1): sample_images = images sample_labels = labels len(sample_images) , len(sample_labels) # Checking the image sample_images[:1] # Checking our labels sample_labels[:10] # Applying the same on the whole dataset #for images , labels in train_dataset_gen.take(-1): # train_images = images # train_labels = labels # train_images , train_labels = tuple(zip(*train_dataset_gen)) # The for loop is taking *infinitely* long time def preprocess_func(image , label): image = tf.image.resize(image , [224 , 224]) return tf.cast(image , tf.float32) , label # Map preprocess function to train and valid train_dataset_gen = train_dataset_gen.map(map_func=preprocess_func , num_parallel_calls=tf.data.AUTOTUNE) #train_dataset_gen = train_dataset_gen.shuffle(buffer_size = 1000).batch(batch_size = 32).prefetch(buffer_size = tf.data.AUTOTUNE) valid_dataset_gen = valid_dataset_gen.map(map_func=preprocess_func , num_parallel_calls=tf.data.AUTOTUNE) #valid_dataset_gen = valid_dataset_gen.batch(batch_size = 32).prefetch(buffer_size = tf.data.AUTOTUNE) train_dataset_gen , valid_dataset_gen model.fit(train_dataset_gen , epochs = 5) train_dataset_gen.class_names # note: datasets built with from_generator carry no class_names attribute from tensorflow.keras import layers from tensorflow.keras.layers.experimental import preprocessing # Create base model input_shape = (256, 256, 3) base_model = tf.keras.applications.EfficientNetB0(include_top=False) base_model.trainable = False # freeze base model layers # Create Functional model inputs = layers.Input(shape=input_shape, name="input_layer") # Note: EfficientNetBX models have rescaling built-in but if your model didn't you could have a layer like below # x = preprocessing.Rescaling(1./255)(x) x = base_model(inputs, training=False) # set base_model to inference mode only x = layers.GlobalAveragePooling2D(name="pooling_layer")(x) #x = layers.Dense(2)(x) # want one output neuron per class # Separate activation of output layer so we can output float32 activations
outputs = layers.Dense(2, activation="softmax")(x) model = tf.keras.Model(inputs, outputs) # Compile the model model.compile(loss="sparse_categorical_crossentropy", # Use sparse_categorical_crossentropy when labels are *not* one-hot optimizer=tf.keras.optimizers.Adam(), metrics=["accuracy"]) model.summary() # Recompile the model with CategoricalCrossentropy, since flow_from_directory yields one-hot labels model.compile(loss = tf.keras.losses.CategoricalCrossentropy() , optimizer = tf.keras.optimizers.Adam() , metrics = ['accuracy']) 2000 / 32 # images per epoch / batch size = 62.5 batches model.fit(train_dataset_gen , epochs = 3, steps_per_epoch = 62 , # steps_per_epoch must be an integer (2000 // 32) validation_data = valid_dataset_gen , validation_steps = 10) ###Output _____no_output_____
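###Markdown The cells above mix several experiments, some abandoned mid-edit. As a summary, here is a minimal, hedged sketch of the full generator-to-training pipeline explored in this notebook; it assumes train_datagen, train_dir and the compiled model defined earlier, and uses floor division because steps_per_epoch must be an integer. ###Code # End-to-end sketch: Keras generator -> tf.data.Dataset -> prefetch -> fit train_ds = tf.data.Dataset.from_generator( lambda: train_datagen.flow_from_directory(train_dir), output_types=(tf.float32, tf.float32), output_shapes=([None, 256, 256, 3], [None, 2])) train_ds = train_ds.prefetch(buffer_size=tf.data.AUTOTUNE) # Overlap data prep with training model.compile(loss='categorical_crossentropy', # one-hot labels from flow_from_directory optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy']) model.fit(train_ds, epochs=3, steps_per_epoch=2000 // 32) # 62 full batches per epoch ###Output _____no_output_____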
rosalind_workbook/dynamic_programming.ipynb
###Markdown Dynamic ProgrammingThe algorithmic notion of building up a solution to a problem by solving it on progressively larger cases.Rosalind link: [Dynamic Programming](http://rosalind.info/problems/topics/dynamic-programming/) Import modules ###Code import os import sys from itertools import permutations import numpy as np import pandas as pd from Bio.Seq import Seq from Bio import SeqIO from Bio.Alphabet import generic_rna print('DONE!') ###Output _____no_output_____ ###Markdown Rabbits and Recurrence RelationsRosalind link: [Rabbits and Recurrence Relations](http://rosalind.info/problems/fib/) ###Code # TODO ###Output _____no_output_____ ###Markdown Mortal Fibonacci RabbitsRosalind link: [Mortal Fibonacci Rabbits](http://rosalind.info/problems/fibd/) ###Code # TODO ###Output _____no_output_____ ###Markdown Longest Increasing SubsequenceRosalind link: [Longest Increasing Subsequence](http://rosalind.info/problems/lgis/) ###Code # TODO ###Output _____no_output_____ ###Markdown Perfect Matchings and RNA Secondary StructuresRosalind link: [Perfect Matchings and RNA Secondary Structures](http://rosalind.info/problems/pmch/) ###Code # TODO ###Output _____no_output_____ ###Markdown Catalan Numbers and RNA Secondary StructuresRosalind link: [Catalan Numbers and RNA Secondary Structures](http://rosalind.info/problems/cat/) ###Code # TODO ###Output _____no_output_____ ###Markdown Finding a Shared Spliced MotifRosalind link: [Finding a Shared Spliced Motif](http://rosalind.info/problems/lcsq/) ###Code # TODO ###Output _____no_output_____ ###Markdown Maximum Matchings and RNA Secondary StructuresRosalind link: [Maximum Matchings and RNA Secondary Structures](http://rosalind.info/problems/mmch/) ###Code # TODO ###Output _____no_output_____ ###Markdown Edit DistanceRosalind link: [Edit Distance](http://rosalind.info/problems/edit/) ###Code # TODO ###Output _____no_output_____ ###Markdown Motzkin Numbers and RNA Secondary StructuresRosalind link: [Motzkin Numbers and RNA Secondary Structures](http://rosalind.info/problems/motz/) ###Code # TODO ###Output _____no_output_____ ###Markdown Interleaving Two MotifsRosalind link: [Interleaving Two Motifs](http://rosalind.info/problems/scsp/) ###Code # TODO ###Output _____no_output_____ ###Markdown Edit Distance AlignmentRosalind link: [Edit Distance Alignment](http://rosalind.info/problems/edta/) ###Code # TODO ###Output _____no_output_____ ###Markdown Finding Disjoint Motifs in a GeneRosalind link: [Finding Disjoint Motifs in a Gene](http://rosalind.info/problems/itwv/) ###Code # TODO ###Output _____no_output_____ ###Markdown Wobble Bonding and RNA Secondary StructuresRosalind link: [Wobble Bonding and RNA Secondary Structures](http://rosalind.info/problems/rnas/) ###Code # TODO ###Output _____no_output_____ ###Markdown Global Alignment with Scoring MatrixRosalind link: [Global Alignment with Scoring Matrix](http://rosalind.info/problems/glob/) ###Code # TODO ###Output _____no_output_____ ###Markdown Global Alignment with Constant Gap PenaltyRosalind link: [Global Alignment with Constant Gap Penalty](http://rosalind.info/problems/gcon/) ###Code # TODO ###Output _____no_output_____ ###Markdown Local Alignment with Scoring MatrixRosalind link: [Local Alignment with Scoring Matrix](http://rosalind.info/problems/loca/) ###Code # TODO ###Output _____no_output_____ ###Markdown Maximizing the Gap Symbols of an Optimal AlignmentRosalind link: [Maximizing the Gap Symbols of an Optimal Alignment](http://rosalind.info/problems/mgap/) ###Code # TODO ###Output 
_____no_output_____ ###Markdown Multiple AlignmentRosalind link: [Multiple Alignment](http://rosalind.info/problems/mult/) ###Code # TODO ###Output _____no_output_____ ###Markdown Global Alignment with Scoring Matrix and Affine Gap PenaltyRosalind link: [Global Alignment with Scoring Matrix and Affine Gap Penalty](http://rosalind.info/problems/gaff/) ###Code # TODO ###Output _____no_output_____ ###Markdown Overlap AlignmentRosalind link: [Overlap Alignment](http://rosalind.info/problems/oap/) ###Code # TODO ###Output _____no_output_____ ###Markdown Semiglobal AlignmentRosalind link: [Semiglobal Alignment](http://rosalind.info/problems/smgb/) ###Code # TODO ###Output _____no_output_____ ###Markdown Local Alignment with Affine Gap PenaltyRosalind link: [Local Alignment with Affine Gap Penalty](http://rosalind.info/problems/laff/) ###Code # TODO ###Output _____no_output_____ ###Markdown Isolating Symbols in AlignmentsRosalind link: [Isolating Symbols in Alignments](http://rosalind.info/problems/osym/) ###Code # TODO ###Output _____no_output_____
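###Markdown Most cells above are deliberate TODO stubs. For reference, here is one possible solution sketch (an addition, not from the original workbook) for the first problem, Rabbits and Recurrence Relations: with F(1)=F(2)=1 and each mature pair producing k new pairs, the recurrence is F(n) = F(n-1) + k*F(n-2), computed bottom-up. ###Code def fib_rabbits(n, k): """Total rabbit pairs after n months, each mature pair producing k pairs.""" prev, curr = 1, 1 # F(1), F(2) for _ in range(n - 2): prev, curr = curr, curr + k * prev return curr fib_rabbits(5, 3) # Rosalind sample case: expected 19 ###Output _____no_output_____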
notebooks/Milestone3_Task1,2,3_73117335.ipynb
###Markdown Task 1 ###Code import pandas as pd import seaborn as sns data = pd.read_csv('Bike-Sharing-Dataset/hour.csv') ###Output _____no_output_____ ###Markdown Show the dataframe headers ###Code data.head() ###Output _____no_output_____ ###Markdown Let's plot the distribution of column 'cnt' ###Code sns.histplot(data=data, x="cnt") ###Output _____no_output_____ ###Markdown Let's plot cnt vs. time ###Code sns.relplot(x="dteday", y="cnt", kind="line", ci=None, data=data) ###Output _____no_output_____ ###Markdown Let's plot a crossplot of cnt and registered ###Code sns.relplot(x="cnt", y="registered", data=data); ###Output _____no_output_____ ###Markdown Task 2 1. Loading ###Code data = pd.read_csv('Bike-Sharing-Dataset/hour.csv') ###Output _____no_output_____ ###Markdown Print out the column data types and check for missing values ###Code data.info() ###Output <class 'pandas.core.frame.DataFrame'> RangeIndex: 17379 entries, 0 to 17378 Data columns (total 17 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 instant 17379 non-null int64 1 dteday 17379 non-null object 2 season 17379 non-null int64 3 yr 17379 non-null int64 4 mnth 17379 non-null int64 5 hr 17379 non-null int64 6 holiday 17379 non-null int64 7 weekday 17379 non-null int64 8 workingday 17379 non-null int64 9 weathersit 17379 non-null int64 10 temp 17379 non-null float64 11 atemp 17379 non-null float64 12 hum 17379 non-null float64 13 windspeed 17379 non-null float64 14 casual 17379 non-null int64 15 registered 17379 non-null int64 16 cnt 17379 non-null int64 dtypes: float64(4), int64(12), object(1) memory usage: 2.3+ MB ###Markdown Show the dataframe headers ###Code data.head() ###Output _____no_output_____ ###Markdown 2. Clean Data Drop column instant since we don't need it ###Code data.drop('instant', axis=1, inplace=True) data.head() ###Output _____no_output_____ ###Markdown 3. Process Data Calculate registered_ratio and casual_ratio ###Code data['registered_ratio'] = data['registered']/ data['cnt'] data['casual_ratio'] = data['casual']/ data['cnt'] ###Output _____no_output_____ ###Markdown 4. Wrangle Data Replace values in column yr ###Code data.loc[data['yr']==0,'yr'] = 2011 data.loc[data['yr']==1,'yr'] = 2012 data.head() ###Output _____no_output_____ ###Markdown Task 3 Step 1 Testing chaining in pandas ###Code # Method chaining begins; use lambdas so each ratio is computed from the freshly read dataframe, not the global `data` import numpy as np df = pd.read_csv('Bike-Sharing-Dataset/hour.csv').drop('instant', axis=1).assign(registered_ratio=lambda x: x['registered']/ x['cnt']).assign(casual_ratio=lambda x: x['casual']/ x['cnt']).assign(yr=lambda x: np.where(x.yr==0, 2011, 2012)) df ###Output _____no_output_____ ###Markdown Moving chaining parts to a function ###Code def load_and_process(url_or_path_to_csv_file): df = pd.read_csv(url_or_path_to_csv_file).drop('instant', axis=1).assign(registered_ratio=lambda x : x['registered']/ x['cnt']).assign(casual_ratio=lambda x: x['casual']/ x['cnt']).assign(yr=lambda x: np.where(x.yr==0, 2011, 2012)) return df load_and_process('Bike-Sharing-Dataset/hour.csv') import project_functions df = project_functions.load_and_process('Bike-Sharing-Dataset/hour.csv') df ###Output _____no_output_____
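###Markdown As a small follow-up illustration (not part of the original tasks), the processed DataFrame can feed further chained analysis, e.g. the average hourly count per year; the column names are those produced by load_and_process above. ###Code # Hypothetical follow-up: mean hourly rentals per year, still in chained style (df[['yr', 'cnt']] .groupby('yr') .mean() .rename(columns={'cnt': 'avg_hourly_cnt'})) ###Output _____no_output_____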
notebooks/tutorials/01 - Code Fundamentals.ipynb
###Markdown Code Fundamentals Code is a language for creating and building things. This course teaches the fundamentals of coding, music and signal processing. In lesson 1, we will learn the fundamentals of coding with python. In order to use MusiCode effectively in this class, you are required to know basic programming which is outlined in this lesson. For a more extended tutorial on python, you can also check out [Learn Python](https://www.learnpython.org/). MusiCode MusiCode is a python library for creating music. It is a digital audio workstation (DAW) controlled with code. You can create musical notes, intervals, chords, progressions, melodies, bass lines, drum beats and full songs! Jupyter NotebooksThe file we are working in right now is a Jupyter notebook or IPython notebook (.ipynb) in the Microsoft Azure system. This is the primary file type you will be using. Code is split into individual cells. You can add a new cell by clicking the plus sign on the top bar. To run a cell of code, make sure it is selected, then use the hot key shift-enter or click the 'run' button on the top bar of Jupyter. For more information on Jupyter notebooks check out: [Jupyter Notebook Beginner Guide](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html) A few helpful tips:1. Start each session by going to the top menu, select kernel -> restart & run all. This will restart the kernel and run every cell of code. 2. Save your work consistently by clicking file - save and checkpoint on the top bar. Or use the save icon on the top menu, directly to the left of the plus sign. 3. When you are finished working, save then select file - close and halt. TroubleshootIf you are experiencing issues with your notebook, try restart and run all, or log out and log back into Azure. ###Code # this is a cell # use a hashtag symbol to create a comment, which does not run as code # comments help other coders understand what your code is doing # using comments to describe what your code is doing is VERY important! x = 1 ###Output _____no_output_____ ###Markdown Key TermsBefore we start coding, it is important for you to know the following definitions:1. variable - named container for data2. data type - defines what operations can be performed on a particular object3. function - a body of code that returns a single value4. object/class - bundle of data and code5. method - a function defined within an object. 6. attribute - data stored within an object Object Oriented ProgrammingWhat is an object? View lesson here: [powerpoint about objects](https://teams.microsoft.com/l/file/44E58E79-5BB9-4E38-9A55-4C9E27E58891?tenantId=8deb1d4d-d0a4-4d04-89ae-f7076cbaa9fb&fileType=pptx&objectUrl=https%3A%2F%2Fduvalschoolsorg.sharepoint.com%2Fteams%2F3107_DAElectronicMusic%2FClass%20Materials%2FSlides%2FObject%20Oriented%20Programming.pptx&baseUrl=https%3A%2F%2Fduvalschoolsorg.sharepoint.com%2Fteams%2F3107_DAElectronicMusic&serviceName=teams&threadId=19:[email protected]&groupId=ed3f13a3-741b-42ef-badc-a300efd4a9cb) Let's talk about object oriented programming (OOP). An understanding of object oriented programming is fundamental to coding. Objects are the building blocks of software. Objects contain data and code (attributes & methods). Objects are sometimes referred to as classes. Once an object is defined, you can create many different instances of that object. In MusiCode we will create many different instances of the wave object. - Method: a function defined within an object. 
- Attribute: data stored within an objectBelow let's look at the two primary objects we will be using: Wave objectIn MusiCode, we are primarily working with wave objects. A wave object is a subclass of the numpy array. The wave object is a 1D (mono) or 2D (stereo) array, designed for signal processing and music making. Every time you create a waveform in MusiCode, it generates a wave object. Then we can use wave methods to modify the waveform. We will see many examples of this concept in the upcoming lessons. MusiCode objectWe use the musicode class to generate waveforms. There are several methods for generating waveforms: create_wave, rest, sequence, chord and arpeggio. All of these methods return wave objects. Then we can call wave methods to transform the signal and create many different interesting sounds. Data TypesBelow we will look at some of the main data types that are built into python. ###Code # string type("hello world!") # integer / whole number type(10) # float / decimal number type(1.753) # list my_list = [1, 2, 3, 4, 5] type(my_list) my_list[0] ###Output _____no_output_____ ###Markdown VariablesA variable is a named container for data. Using variables, we can store many different values and refer to them by name in our code. Here are a few examples. ###Code x = 1 print(x) name = 'Wesley' print(name) # list of strings names = ['Wesley', 'Josh', 'Jarred'] print(names) ###Output ['Wesley', 'Josh', 'Jarred'] ###Markdown Functions Example 1 - Greeting ###Code # define function named greet_user # input: name # output: greeting def greet_user(name): greeting = 'Hello ' + name +'!' return greeting ###Output _____no_output_____ ###Markdown There are a few ways we can use a function once it has been defined. Let's see an example of each way: 1 ###Code # define variable name = 'Wesley' # call function and insert name variable as input greet_user(name) ###Output _____no_output_____ ###Markdown 2 ###Code # call function and insert name greet_user(name='Wesley') ###Output _____no_output_____ ###Markdown 3 ###Code # call function and insert name, no label greet_user('Wesley') ###Output _____no_output_____ ###Markdown Example 2 - Straight Line ###Code # define function named f. this is the equation for a straight line: y=mx+b # input: x, m, b # output: y value def f(x, m, b): # equation y = m*x+b # output return y # define variables x = 1 m = 1/8 b = 4 # call function y = f(x, m, b) ###Output _____no_output_____ ###Markdown LoopsLoops allow you to apply an operation to every item in a list. Here is an example: for loops ###Code # create list of string values names = ['Wesley', 'Josh', 'Jarred'] # print each name in list for name in names: print(name) # create list of integers numbers = [1,3,5,7,9] # add 100 to each value in list for number in numbers: new_value = number+100 print(new_value) ###Output 101 103 105 107 109 ###Markdown while loops ###Code # iteration i = 0 # while this condition is true, loop while i < 5: # increment i = i + 1 # display result print(i) ###Output 1 2 3 4 5 ###Markdown Practice Problems 1)Create a list of string values naming your top 3 favorite genres of music. Then use a for loop to print all the values. ###Code # problem 1 code goes here ###Output _____no_output_____ ###Markdown 2)Write code to calculate: square root of 808. Search Google and figure out how to take the square root of a number in python. Stack overflow and python.org are good resources. 
###Code # problem 2 answer goes here ###Output _____no_output_____ ###Markdown 3)Define a function named add_nums that takes in two inputs, x and y, then returns the sum. Then use the function you built to sum the following numbers: 1731, 8332 ###Code # problem 3 code goes here ###Output _____no_output_____
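###Markdown Possible solutions (one of several valid approaches, added for reference) for the practice problems above: ###Code # problem 1: favorite genres genres = ['jazz', 'hip hop', 'electronic'] for genre in genres: print(genre) # problem 2: square root of 808 using the math module import math print(math.sqrt(808)) # problem 3: define add_nums and use it on 1731 and 8332 def add_nums(x, y): return x + y print(add_nums(1731, 8332)) # 10063 ###Output _____no_output_____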
ML-week/python/Pandas.ipynb
###Markdown Putting Some Pandas In Your Python ###Code import platform print(platform.python_version()) %load_ext watermark %watermark -a 'Gopala KR' -u -d -v -p watermark,numpy,pandas ###Output 3.5.4 Gopala KR last updated: 2018-08-30 CPython 3.5.4 IPython 6.2.1 watermark 1.6.1 numpy 1.15.0 pandas 0.19.2 ###Markdown Command: pip3 install pandas ###Code import pandas as pd import numpy as np df = pd.DataFrame({ 'col-1': ['Item-1', 'Item-2', 'Item-3', 'Item-4'], 'col-2': ['Gold', 'Bronze', 'Gold', 'Silver'], 'col-3': [1, 2, np.nan, 4] }) print(df) ###Output col-1 col-2 col-3 0 Item-1 Gold 1.0 1 Item-2 Bronze 2.0 2 Item-3 Gold NaN 3 Item-4 Silver 4.0 ###Markdown Creating a DataFrame from Dictionary ###Code data = {'Name':['Tom', 'Jack', 'Steve', 'Ricky'], 'Age':[28,34,29,42]} df = pd.DataFrame(data) print(df) # Creating indexed dataframe data = {'Name':['Tom', 'Jack', 'Steve', 'Ricky'], 'Age':[28,34,29,42]} df = pd.DataFrame(data, index=['I-1', 'I-2', 'I-3', 'I-4']) print(df) ###Output Age Name I-1 28 Tom I-2 34 Jack I-3 29 Steve I-4 42 Ricky ###Markdown DataFrame Basic Functionality ###Code import pandas as pd import numpy as np # Create Dictionary of Series (note: the name 'dict' shadows the Python builtin) dict = {'Name':pd.Series(['Tom', 'Jack', 'Steve', 'Ricky', 'Vin', 'James', 'Smith']), 'Age':pd.Series([25,26,25,35,23,33,31]), 'Rating':pd.Series([4.23,4.1,3.4,5,2.9,4.7,3.1])} df = pd.DataFrame(dict) print(df) # Transpose-> returns transpose of DataFrame print(df.T) # Axes-> returns list of row axis labels and column axis labels print(df.axes) # dtypes-> returns the datatype of each column print(df.dtypes) # shape-> returns tuple representing dimensionality print(df.shape) # values-> returns actual data as ndarray print(df.values) # head-> by default head returns first n rows print(df.head()) print('*'*50) print(df.head(2)) # tail-> by default tail returns last n rows print(df.tail()) print('*'*50) print(df.tail(2)) ###Output Age Name Rating 2 25 Steve 3.4 3 35 Ricky 5.0 4 23 Vin 2.9 5 33 James 4.7 6 31 Smith 3.1 ************************************************** Age Name Rating 5 33 James 4.7 6 31 Smith 3.1 ###Markdown Statistics ###Code # sum()-> returns the sum of values for the requested axis. By default axis = 0 print(df.sum()) # axis = 1 -> row wise sum print(df.sum(1)) # mean() print(df.mean()) # std() print(df.std()) # describe() -> summarizing the data print(df.describe()) # include object, number, all print(df.describe(include=['object'])) print(df.describe(include=['number'])) # Don't pass 'all' as a list print(df.describe(include='all')) ###Output Age Name Rating count 7.000000 7 7.000000 unique NaN 7 NaN top NaN Steve NaN freq NaN 1 NaN mean 28.285714 NaN 3.918571 std 4.644505 NaN 0.804828 min 23.000000 NaN 2.900000 25% 25.000000 NaN 3.250000 50% 26.000000 NaN 4.100000 75% 32.000000 NaN 4.465000 max 35.000000 NaN 5.000000 ###Markdown Working with .csv ###Code import pandas as pd df = pd.read_csv('Iris.csv') df.head() df.tail() print(df.shape) print(df.columns) print(df.mean()) print(df.std()) df.describe() ###Output _____no_output_____ ###Markdown Series Data Structure ###Code # pd.Series(data,index) # index-> Unique, Hashable, same length as data. 
By default np.arange(n) import pandas as pd s = pd.Series() print(s) ###Output Series([], dtype: float64) ###Markdown Creating Series from ndarray ###Code import numpy as np data = np.array(['a', 'b', 'c', 'd']) s = pd.Series(data) print(s) ###Output 0 a 1 b 2 c 3 d dtype: object ###Markdown Create Series from dict ###Code data = {'a':0., 'b':1., 'c':2.} s = pd.Series(data) print(s) ###Output a 0.0 b 1.0 c 2.0 dtype: float64 ###Markdown Accessing data using the index ###Code s = pd.Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e']) print(s) print(s['a']) # Retrieve multiple elements print(s[['a', 'b', 'e']]) print(s['f']) ###Output _____no_output_____
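###Markdown Note that `s['f']` above raises a KeyError because 'f' is not in the index. A safer lookup (a minor addition, not in the original notebook) is `Series.get`, which returns None, or a default you supply, for missing labels: ###Code # get() avoids the KeyError for missing labels print(s.get('f')) # None print(s.get('f', 'n/a')) # fallback default ###Output _____no_output_____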
sonstiges/DSP_Python_Matlab/15.11 DFT Exercise.ipynb
###Markdown DFT in Python ###Code import numpy as np z = [1, 0, -1, 0] z = np.fft.fft(z) print(z) ###Output [0.+0.j 2.+0.j 0.+0.j 2.+0.j]
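###Markdown To see what np.fft.fft actually computes, here is a direct (naive, O(N^2)) evaluation of the DFT definition X[k] = sum_n x[n] * exp(-2j*pi*k*n/N), added as an illustration; it reproduces [0, 2, 0, 2] for the input above. ###Code import numpy as np def naive_dft(x): N = len(x) n = np.arange(N) # One output bin per frequency k, straight from the definition return np.array([np.sum(x * np.exp(-2j * np.pi * k * n / N)) for k in range(N)]) print(naive_dft(np.array([1, 0, -1, 0]))) ###Output _____no_output_____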
CamVid2Tiramisu.ipynb
###Markdown Imports ###Code %reload_ext autoreload %autoreload 2 %matplotlib inline from fastai import * from fastai.vision import * path = Path('C:/Users/jpatn/data/camvid_orig/') image_path = path/'images' label_path = path/'labels' valid_path = path/'valid.txt' ###Output _____no_output_____ ###Markdown Original Data ###Code im_fp= get_image_files(image_path) lbl_fp = get_image_files(label_path) def open_im(fp): return PIL.Image.open(fp) im1_fp = im_fp[0] im1 = open_im(im1_fp); im1 get_lbl_fp = lambda x: label_path/f'{x.stem}_P{x.suffix}' lbl1_fp = get_lbl_fp(im1_fp) lbl1_im = open_im(lbl1_fp);lbl1_im ###Output _____no_output_____ ###Markdown Where the labels come from: ###Code np.array(lbl1_im) ###Output _____no_output_____ ###Markdown Label opened as mask: ###Code lbl1_msk = open_mask(lbl1_fp) lbl1_msk.show(figsize=(8,8)) ###Output _____no_output_____ ###Markdown Converting Data ###Code orig_codes = np.loadtxt(path/'codes.txt',dtype=str);orig_codes new_codes = ['Building', 'Car', 'Cyclist', 'Fence', 'Pedestrian', 'Pole', 'Road', 'Sidewalk', 'Sign', 'Sky', 'Vegetation', 'Void'] maps = [(4,1,31,3),(5,14,25,22,27),(2,13),(9,),(16,7,0,6),(8,23),(17,10,11,28),(19,18,15),(20,24,12),(21,),(29,26),(30,)] rev_maps = {c:i for i,m in enumerate(maps) for c in m} def map_codes(c): for k,v in rev_maps.items(): if c==k: return v map_codes(31) def lbl_from_imp(im_path): return open_im(get_lbl_fp(im_path)) lbl_from_imp(im1_fp) def lbl_array(im): return np.array(im) lbl1_arr = lbl_array(lbl1_im) lbl1_arr def convert_arr(arr): v = np.vectorize(map_codes) return v(arr) convert_arr(lbl1_arr) def arr_to_lbl(arr): new_arr = np.asarray(arr,dtype=np.uint8) return PIL.Image.fromarray(new_arr,'L') ###Output _____no_output_____ ###Markdown New label with classes converted: ###Code arr_to_lbl(convert_arr(lbl1_arr)) ###Output _____no_output_____ ###Markdown Convert all labels: ###Code def get_new_labels(im_path): lbl = lbl_from_imp(im_path) arr = lbl_array(lbl) new_arr = convert_arr(arr) new_lbl = arr_to_lbl(new_arr) return new_lbl.save(get_lbl_fp(im_path)) for im in im_fp: get_new_labels(im) ###Output _____no_output_____ ###Markdown New Data ###Code new_lbls = path/'labels' ###Output _____no_output_____ ###Markdown New label: ###Code new_lbl1_fp = get_lbl_fp(im1_fp) new_lbl1 = open_im(new_lbl1_fp) new_lbl1 ###Output _____no_output_____ ###Markdown Labels: ###Code lbl_array(new_lbl1) ###Output _____no_output_____ ###Markdown Label opened as mask: ###Code new_msk = open_mask(new_lbl1_fp) new_msk ###Output _____no_output_____
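###Markdown A performance note (an added sketch, not in the original): np.vectorize in convert_arr is essentially a Python-level loop, so it is slow on full-resolution masks. Assuming every code 0-31 is covered by rev_maps, as the maps tuples above suggest, a NumPy lookup table does the same remapping at C speed: ###Code # Build a 32-entry lookup table once, then index with the whole array lut = np.array([rev_maps[c] for c in range(32)], dtype=np.uint8) def convert_arr_fast(arr): return lut[arr] # Should match the vectorized version on the sample label np.array_equal(convert_arr_fast(lbl1_arr), convert_arr(lbl1_arr)) ###Output _____no_output_____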
models/deprecated/4-2 (1). InceptionV3 Triplet Network.ipynb
###Markdown Load Data ###Code train_df = pd.read_csv('./data/triplet/train.csv') val_df = pd.read_csv('./data/triplet/validation.csv') test_df = pd.read_csv('./data/triplet/test.csv') print('Train:\t\t', train_df.shape) print('Validation:\t', val_df.shape) print('Test:\t\t', test_df.shape) print('\nTrain Landmarks:\t', len(train_df['landmark_id'].unique())) print('Validation Landmarks:\t', len(val_df['landmark_id'].unique())) print('Test Landmarks:\t\t', len(test_df['landmark_id'].unique())) train_df.head() ###Output _____no_output_____ ###Markdown Helper Functions ###Code # training set triplet generator def train_triplet_generator(df, batch_size=74, img_size=(224, 224), seed=42, prefix='./data/triplet/train/'): """ training set triplet generator it will generate 7400 triplet images in total """ # get images with only one training image landmark id and the rest landmark ids np.random.seed(seed) grouped = df[['landmark_id', 'image_id']].groupby('landmark_id').count().reset_index() unique_neg_ids = list(grouped[grouped['image_id'] == 1]['landmark_id'].values) rest_ids = list(grouped[grouped['image_id'] > 1]['landmark_id'].values) size = 7400 * 2 - len(unique_neg_ids) zeros = np.zeros((batch_size, 3, 1), dtype=K.floatx()) while True: # get positive and negative image landmark ids np.random.shuffle(rest_ids) candidate_ids = list(np.random.choice(rest_ids, size=size, replace=False)) pos_landmark_ids = candidate_ids[:7400] neg_landmark_ids = candidate_ids[7400:] + unique_neg_ids np.random.shuffle(neg_landmark_ids) # transform landmark id into image id anc_img_ids = [] pos_img_ids = [] neg_img_ids = [] for i in range(len(pos_landmark_ids)): tmp_pos_ids = df[df['landmark_id'] == pos_landmark_ids[i]]['image_id'].values anc_img_ids.append(tmp_pos_ids[0]) pos_img_ids.append(tmp_pos_ids[1]) tmp_neg_ids = df[df['landmark_id'] == neg_landmark_ids[i]]['image_id'].values neg_img_ids.append(tmp_neg_ids[0]) # iterator to read batch images for j in range(len(pos_img_ids) // batch_size): batch_anc_img_ids = anc_img_ids[j * batch_size: (j + 1) * batch_size] batch_pos_img_ids = pos_img_ids[j * batch_size: (j + 1) * batch_size] batch_neg_img_ids = neg_img_ids[j * batch_size: (j + 1) * batch_size] # get images anc_imgs = [] pos_imgs = [] neg_imgs = [] # iteratively read images for k in range(batch_size): anc_path = prefix + str(batch_anc_img_ids[k]) + '.jpg' pos_path = prefix + str(batch_pos_img_ids[k]) + '.jpg' neg_path = prefix + str(batch_neg_img_ids[k]) + '.jpg' tmp_anc_img = load_img(anc_path, target_size=img_size) tmp_anc_img = img_to_array(tmp_anc_img) anc_imgs.append(tmp_anc_img) tmp_pos_img = load_img(pos_path, target_size=img_size) tmp_pos_img = img_to_array(tmp_pos_img) pos_imgs.append(tmp_pos_img) tmp_neg_img = load_img(neg_path, target_size=img_size) tmp_neg_img = img_to_array(tmp_neg_img) neg_imgs.append(tmp_neg_img) # transform list to array anc_imgs = np.array(anc_imgs, dtype=K.floatx()) / 255.0 pos_imgs = np.array(pos_imgs, dtype=K.floatx()) / 255.0 neg_imgs = np.array(neg_imgs, dtype=K.floatx()) / 255.0 yield [anc_imgs, pos_imgs, neg_imgs], zeros # validation set triplet generator (default prefix needs the trailing slash, since paths are built as prefix + image_id + '.jpg') def val_triplet_generator(df, batch_size=128, img_size=(224, 224), seed=42, prefix='./data/triplet/validation/'): """ validation set triplet generator """ # get images with only one image landmark id and the rest landmark ids grouped = df[['landmark_id', 'image_id']].groupby('landmark_id').count().reset_index() unique_neg_ids = list(grouped[grouped['image_id'] == 1]['landmark_id'].values) rest_ids = 
list(grouped[grouped['image_id'] > 1]['landmark_id'].values) size = 3072 * 2 - len(unique_neg_ids) zeros = np.zeros((batch_size, 3, 1), dtype=K.floatx()) while True: # get positive and negative image landmark ids np.random.seed(seed) candidate_ids = list(np.random.choice(rest_ids, size=size, replace=False)) pos_landmark_ids = candidate_ids[:3072] neg_landmark_ids = candidate_ids[3072:] + unique_neg_ids np.random.shuffle(neg_landmark_ids) # transform landmark id into image id anc_img_ids = [] pos_img_ids = [] neg_img_ids = [] for i in range(len(pos_landmark_ids)): tmp_pos_ids = df[df['landmark_id'] == pos_landmark_ids[i]]['image_id'].values anc_img_ids.append(tmp_pos_ids[0]) pos_img_ids.append(tmp_pos_ids[1]) tmp_neg_ids = df[df['landmark_id'] == neg_landmark_ids[i]]['image_id'].values neg_img_ids.append(tmp_neg_ids[0]) # iterator to read batch images for j in range(len(pos_img_ids) // batch_size): batch_anc_img_ids = anc_img_ids[j * batch_size: (j + 1) * batch_size] batch_pos_img_ids = pos_img_ids[j * batch_size: (j + 1) * batch_size] batch_neg_img_ids = neg_img_ids[j * batch_size: (j + 1) * batch_size] # get images anc_imgs = [] pos_imgs = [] neg_imgs = [] # iteratively read images for k in range(batch_size): anc_path = prefix + str(batch_anc_img_ids[k]) + '.jpg' pos_path = prefix + str(batch_pos_img_ids[k]) + '.jpg' neg_path = prefix + str(batch_neg_img_ids[k]) + '.jpg' tmp_anc_img = load_img(anc_path, target_size=img_size) tmp_anc_img = img_to_array(tmp_anc_img) anc_imgs.append(tmp_anc_img) tmp_pos_img = load_img(pos_path, target_size=img_size) tmp_pos_img = img_to_array(tmp_pos_img) pos_imgs.append(tmp_pos_img) tmp_neg_img = load_img(neg_path, target_size=img_size) tmp_neg_img = img_to_array(tmp_neg_img) neg_imgs.append(tmp_neg_img) # transform list to array anc_imgs = np.array(anc_imgs, dtype=K.floatx()) / 255.0 pos_imgs = np.array(pos_imgs, dtype=K.floatx()) / 255.0 neg_imgs = np.array(neg_imgs, dtype=K.floatx()) / 255.0 yield [anc_imgs, pos_imgs, neg_imgs], zeros ###Output _____no_output_____ ###Markdown Define Triplet Loss Model ###Code # Define base network for triplet network def base_net(input_shape=(224, 224, 3), trainable=False): """ define triplet network """ # load pre-trained InceptionV3 model inception = InceptionV3(include_top=False, weights='imagenet', input_shape=input_shape) inception.trainable = trainable # define sequential model model = Sequential(name='base_net') model.add(inception) model.add(Flatten(name='flatten')) model.add(Dropout(rate=0.5, name='dropout')) model.add(Dense(512, activation=None, name='fc')) model.add(Lambda(lambda x: K.l2_normalize(x, axis=1), name='l2_norm')) return model # Define triplet network def triplet_net(base_model, input_shape=(224, 224, 3)): """ function to define triplet networks """ # define input: anchor, positive, negative anchor = Input(shape=input_shape, name='anchor_input') positive = Input(shape=input_shape, name='positive_input') negative = Input(shape=input_shape, name='negative_input') # extract vector represent using CNN based model anc_vec = base_model(anchor) pos_vec = base_model(positive) neg_vec = base_model(negative) # stack outputs stacks = Lambda(lambda x: K.stack(x, axis=1), name='output')([anc_vec, pos_vec, neg_vec]) # define inputs and outputs inputs=[anchor, positive, negative] outputs = stacks # define the triplet model model = Model(inputs=inputs, outputs=outputs, name='triplet_net') return model # Define triplet loss def triplet_loss(y_true, y_pred): """ function to compute triplet loss margin is predefined 
in the code (hard-coded); manually change it if needed """ # define triplet margin margin = K.constant(0.3) zero = K.constant(0.0) # get the prediction vector anchor, positive, negative = y_pred[:, 0], y_pred[:, 1], y_pred[:, 2] # compute distance pos_distance = K.sum(K.square(anchor - positive), axis=1) neg_distance = K.sum(K.square(anchor - negative), axis=1) # compute loss partial_loss = pos_distance - neg_distance + margin full_loss = K.sum(K.maximum(partial_loss, zero), axis=0) return full_loss ###Output _____no_output_____ ###Markdown Build Triplet Model ###Code # For reproducibility purposes seed = 42 K.clear_session() os.environ['PYTHONHASHSEED'] = '0' np.random.seed(seed) random.seed(seed) session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) tf.set_random_seed(seed) sess = tf.Session(graph=tf.get_default_graph(), config=session_conf) K.set_session(sess) # Define Parameters img_size = (224, 224, 3) # target image size # triplet image generator train_generator = train_triplet_generator(train_df, batch_size=74, img_size=img_size[:2], seed=42, prefix='./data/triplet/train/') val_generator = val_triplet_generator(val_df, batch_size=64, img_size=img_size[:2], seed=42, prefix='./data/triplet/validation/') # Define triplet network model base_model = base_net(input_shape=img_size, trainable=False) base_model.summary() triplet_model = triplet_net(base_model=base_model, input_shape=img_size) triplet_model.summary() ###Output __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== anchor_input (InputLayer) (None, 224, 224, 3) 0 __________________________________________________________________________________________________ positive_input (InputLayer) (None, 224, 224, 3) 0 __________________________________________________________________________________________________ negative_input (InputLayer) (None, 224, 224, 3) 0 __________________________________________________________________________________________________ base_net (Sequential) (None, 512) 48017696 anchor_input[0][0] positive_input[0][0] negative_input[0][0] __________________________________________________________________________________________________ output (Lambda) (None, 3, 512) 0 base_net[1][0] base_net[2][0] base_net[3][0] ================================================================================================== Total params: 48,017,696 Trainable params: 26,214,912 Non-trainable params: 21,802,784 __________________________________________________________________________________________________ ###Markdown Fit Triplet Model ###Code # define learning scheduler def lr_schedule(epoch): """ Learning rate schedule """ lr = 1e-3 if epoch > 80: lr *= 6e-1 elif epoch > 60: lr *= 7e-1 elif epoch > 40: lr *= 8e-1 elif epoch > 20: lr *= 9e-1 print('Learning rate: ', lr) return lr # define optimizer opt = keras.optimizers.Adam(lr=lr_schedule(0)) # Create callbacks checkpoint = ModelCheckpoint(filepath='./models/inception-triplet(1)-ckpt.h5', monitor='val_loss', save_best_only=True) lr_scheduler = LearningRateScheduler(lr_schedule) lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6) callbacks = [checkpoint, lr_reducer, lr_scheduler] # compile the model triplet_model.compile(optimizer=opt, loss=triplet_loss) # fit the model history = triplet_model.fit_generator(train_generator, 
steps_per_epoch=100, epochs=100, validation_data=val_generator, validation_steps=48, verbose=2, callbacks=callbacks) triplet_model.save('./models/inception-triplet(1)-model.h5') base_model.save('./models/inception-base(1)-model.h5') pickle.dump(history.history, open('./models/inception-triplet(1)-history.p', 'wb')) _ = gc.collect() # Visualize the training process train_loss = history.history['loss'] val_loss = history.history['val_loss'] fig, ax = plt.subplots(figsize=(10, 7)) ax.plot(train_loss, label='Training Loss') ax.plot(val_loss, label='Validation Loss') ax.set_title('Loss vs. Epochs', fontsize=16) ax.set_xlabel('Epochs', fontsize=14) ax.set_ylabel('Loss', fontsize=14) ax.legend(fontsize=14) ax.grid(True) plt.show() ###Output _____no_output_____ ###Markdown Extract Features using Triplet Network ###Code train_df = pd.read_csv('./data/triplet/train.csv') val_df = pd.read_csv('./data/triplet/validation.csv') test_df = pd.read_csv('./data/triplet/test.csv') print('Train:\t\t', train_df.shape) print('Validation:\t', val_df.shape) print('Test:\t\t', test_df.shape) print('\nTrain Landmarks:\t', len(train_df['landmark_id'].unique())) print('Validation Landmarks:\t', len(val_df['landmark_id'].unique())) print('Test Landmarks:\t\t', len(test_df['landmark_id'].unique())) # Load trained model base_model = load_model('./models/inception-base(1)-model.h5') base_model.summary() # Define train_imgs and test_imgs train_imgs = np.zeros(shape=(len(train_df), 512), dtype=np.float32) val_imgs = np.zeros(shape=(len(val_df), 512), dtype=np.float32) test_imgs = np.zeros(shape=(len(test_df), 512), dtype=np.float32) # Process training images img_ids = train_df['image_id'].values steps = 20000 for i in range(0, len(train_df), steps): tmp_imgs = [] print('\nProcess: {:10d}'.format(i)) start = i end = min(len(train_df), i + steps) for idx in range(start, end): if idx % 250 == 0: print('=', end='') img_id = img_ids[idx] path = './data/triplet/train/' + str(img_id) + '.jpg' img = load_img(path, target_size=img_size[:2]) img = img_to_array(img) tmp_imgs.append(img) tmp_imgs = np.array(tmp_imgs, dtype=np.float32) / 255.0 tmp_prediction = base_model.predict(tmp_imgs) train_imgs[start: end, ] = tmp_prediction _ = gc.collect() # Process validation images img_ids = val_df['image_id'].values steps = 4000 for i in range(0, len(val_df), steps): tmp_imgs = [] print('\nProcess: {:10d}'.format(i)) start = i end = min(len(val_df), i + steps) for idx in range(start, end): if idx % 50 == 0: print('=', end='') img_id = img_ids[idx] path = './data/triplet/validation/' + str(img_id) + '.jpg' img = load_img(path, target_size=img_size[:2]) img = img_to_array(img) tmp_imgs.append(img) tmp_imgs = np.array(tmp_imgs, dtype=np.float32) / 255.0 tmp_prediction = base_model.predict(tmp_imgs) val_imgs[start: end, ] = tmp_prediction _ = gc.collect() # Process test images img_ids = test_df['image_id'].values steps = 4000 for i in range(0, len(test_df), steps): tmp_imgs = [] print('\nProcess: {:10d}'.format(i)) start = i end = min(len(test_df), i + steps) for idx in range(start, end): if idx % 50 == 0: print('=', end='') img_id = img_ids[idx] path = './data/triplet/test/' + str(img_id) + '.jpg' img = load_img(path, target_size=img_size[:2]) img = img_to_array(img) tmp_imgs.append(img) tmp_imgs = np.array(tmp_imgs, dtype=np.float32) / 255.0 tmp_prediction = base_model.predict(tmp_imgs) test_imgs[start: end, ] = tmp_prediction _ = gc.collect() print('Train:\t\t', train_imgs.shape) print('Validation:\t', val_imgs.shape) print('Test:\t\t', 
test_imgs.shape) # Save to disk np.save('./data/triplet/train_triplet_inception(1)_features.npy', train_imgs) np.save('./data/triplet/validation_triplet_inception(1)_features.npy', val_imgs) np.save('./data/triplet/test_triplet_inception(1)_features.npy', test_imgs) ###Output _____no_output_____
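###Markdown A quick way to sanity-check the saved embeddings is nearest-neighbour retrieval: images of the same landmark should be close in the learned space. A minimal sketch, assuming the `.npy` files written above and that feature rows align with the dataframe rows: ###Code
import numpy as np

train_feats = np.load('./data/triplet/train_triplet_inception(1)_features.npy')
test_feats = np.load('./data/triplet/test_triplet_inception(1)_features.npy')

# L2-normalise so that dot products are cosine similarities
train_unit = train_feats / np.linalg.norm(train_feats, axis=1, keepdims=True)
test_unit = test_feats / np.linalg.norm(test_feats, axis=1, keepdims=True)

# top-5 training neighbours of the first test image
sims = train_unit @ test_unit[0]
top5 = np.argsort(-sims)[:5]
print(train_df.iloc[top5][['image_id', 'landmark_id']])
###Output _____no_output_____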
site/en/2/guide/eager.ipynb
###Markdown Copyright 2018 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Eager Execution View on TensorFlow.org Run in Google Colab View source on GitHub TensorFlow's eager execution is an imperative programming environment thatevaluates operations immediately, without building graphs: operations returnconcrete values instead of constructing a computational graph to run later. Thismakes it easy to get started with TensorFlow and debug models, and itreduces boilerplate as well. To follow along with this guide, run the codesamples below in an interactive `python` interpreter.Eager execution is a flexible machine learning platform for research andexperimentation, providing:* *An intuitive interface*—Structure your code naturally and use Python data structures. Quickly iterate on small models and small data.* *Easier debugging*—Call ops directly to inspect running models and test changes. Use standard Python debugging tools for immediate error reporting.* *Natural control flow*—Use Python control flow instead of graph control flow, simplifying the specification of dynamic models.Eager execution supports most TensorFlow operations and GPU acceleration.Note: Some models may experience increased overhead with eager executionenabled. Performance improvements are ongoing, but please[file a bug](https://github.com/tensorflow/tensorflow/issues) if you find aproblem and share your benchmarks. Setup and basic usageUpgrade to the latest version of TensorFlow: ###Code from __future__ import absolute_import, division, print_function !pip install tf-nightly-2.0-preview import tensorflow as tf ###Output _____no_output_____ ###Markdown In Tensorflow 2.0, eager execution is enabled by default. ###Code tf.executing_eagerly() ###Output _____no_output_____ ###Markdown Now you can run TensorFlow operations and the results will return immediately: ###Code x = [[2.]] m = tf.matmul(x, x) print("hello, {}".format(m)) ###Output _____no_output_____ ###Markdown Enabling eager execution changes how TensorFlow operations behave—now theyimmediately evaluate and return their values to Python. `tf.Tensor` objectsreference concrete values instead of symbolic handles to nodes in a computationalgraph. Since there isn't a computational graph to build and run later in asession, it's easy to inspect results using `print()` or a debugger. Evaluating,printing, and checking tensor values does not break the flow for computinggradients.Eager execution works nicely with [NumPy](http://www.numpy.org/). NumPyoperations accept `tf.Tensor` arguments. TensorFlow[math operations](https://www.tensorflow.org/api_guides/python/math_ops) convertPython objects and NumPy arrays to `tf.Tensor` objects. The`tf.Tensor.numpy` method returns the object's value as a NumPy `ndarray`. 
###Code a = tf.constant([[1, 2], [3, 4]]) print(a) # Broadcasting support b = tf.add(a, 1) print(b) # Operator overloading is supported print(a * b) # Use NumPy values import numpy as np c = np.multiply(a, b) print(c) # Obtain numpy value from a tensor: print(a.numpy()) # => [[1 2] # [3 4]] ###Output _____no_output_____ ###Markdown Dynamic control flowA major benefit of eager execution is that all the functionality of the hostlanguage is available while your model is executing. So, for example,it is easy to write [fizzbuzz](https://en.wikipedia.org/wiki/Fizz_buzz): ###Code def fizzbuzz(max_num): counter = tf.constant(0) max_num = tf.convert_to_tensor(max_num) for num in range(1, max_num.numpy()+1): num = tf.constant(num) if int(num % 3) == 0 and int(num % 5) == 0: print('FizzBuzz') elif int(num % 3) == 0: print('Fizz') elif int(num % 5) == 0: print('Buzz') else: print(num.numpy()) counter += 1 fizzbuzz(15) ###Output _____no_output_____ ###Markdown This has conditionals that depend on tensor values and it prints these valuesat runtime. Build a modelMany machine learning models are represented by composing layers. Whenusing TensorFlow with eager execution you can either write your own layers oruse a layer provided in the `tf.keras.layers` package.While you can use any Python object to represent a layer,TensorFlow has `tf.keras.layers.Layer` as a convenient base class. Inherit fromit to implement your own layer, and set `self.dynamic=True` in the constructor if the layer must be executed imperatively: ###Code class MySimpleLayer(tf.keras.layers.Layer): def __init__(self, output_units): super(MySimpleLayer, self).__init__() self.output_units = output_units self.dynamic = True def build(self, input_shape): # The build method gets called the first time your layer is used. # Creating variables on build() allows you to make their shape depend # on the input shape and hence removes the need for the user to specify # full shapes. It is possible to create variables during __init__() if # you already know their full shapes. self.kernel = self.add_variable( "kernel", [input_shape[-1], self.output_units]) def call(self, input): # Override call() instead of __call__ so we can perform some bookkeeping. return tf.matmul(input, self.kernel) ###Output _____no_output_____ ###Markdown Use `tf.keras.layers.Dense` layer instead of `MySimpleLayer` above as it hasa superset of its functionality (it can also add a bias).When composing layers into models you can use `tf.keras.Sequential` to representmodels which are a linear stack of layers. It is easy to use for basic models: ###Code model = tf.keras.Sequential([ tf.keras.layers.Dense(10, input_shape=(784,)), # must declare input shape tf.keras.layers.Dense(10) ]) ###Output _____no_output_____ ###Markdown Alternatively, organize models in classes by inheriting from `tf.keras.Model`.This is a container for layers that is a layer itself, allowing `tf.keras.Model`objects to contain other `tf.keras.Model` objects. 
###Code class MNISTModel(tf.keras.Model): def __init__(self): super(MNISTModel, self).__init__() self.dense1 = tf.keras.layers.Dense(units=10) self.dense2 = tf.keras.layers.Dense(units=10) def call(self, input): """Run the model.""" result = self.dense1(input) result = self.dense2(result) result = self.dense2(result) # reuse variables from dense2 layer return result model = MNISTModel() ###Output _____no_output_____ ###Markdown It's not required to set an input shape for the `tf.keras.Model` class sincethe parameters are set the first time input is passed to the layer.`tf.keras.layers` classes create and contain their own model variables thatare tied to the lifetime of their layer objects. To share layer variables, sharetheir objects. Eager training Computing gradients[Automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation)is useful for implementing machine learning algorithms such as[backpropagation](https://en.wikipedia.org/wiki/Backpropagation) for trainingneural networks. During eager execution, use `tf.GradientTape` to traceoperations for computing gradients later.`tf.GradientTape` is an opt-in feature to provide maximal performance whennot tracing. Since different operations can occur during each call, allforward-pass operations get recorded to a "tape". To compute the gradient, playthe tape backwards and then discard. A particular `tf.GradientTape` can onlycompute one gradient; subsequent calls throw a runtime error. ###Code w = tf.Variable([[1.0]]) with tf.GradientTape() as tape: loss = w * w grad = tape.gradient(loss, w) print(grad) # => tf.Tensor([[ 2.]], shape=(1, 1), dtype=float32) ###Output _____no_output_____ ###Markdown Train a modelThe following example creates a multi-layer model that classifies the standardMNIST handwritten digits. It demonstrates the optimizer and layer APIs to buildtrainable graphs in an eager execution environment. ###Code # Fetch and format the mnist data (mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data() dataset = tf.data.Dataset.from_tensor_slices( (tf.cast(mnist_images[...,tf.newaxis]/255, tf.float32), tf.cast(mnist_labels,tf.int64))) dataset = dataset.shuffle(1000).batch(32) # Build the model mnist_model = tf.keras.Sequential([ tf.keras.layers.Conv2D(16,[3,3], activation='relu'), tf.keras.layers.Conv2D(16,[3,3], activation='relu'), tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dense(10) ]) ###Output _____no_output_____ ###Markdown Even without training, call the model and inspect the output in eager execution: ###Code for images,labels in dataset.take(1): print("Logits: ", mnist_model(images[0:1]).numpy()) ###Output _____no_output_____ ###Markdown While keras models have a builtin training loop (using the `fit` method), sometimes you need more customization. 
Here's an example, of a training loop implemented with eager: ###Code optimizer = tf.keras.optimizers.Adam() loss_history = [] for (batch, (images, labels)) in enumerate(dataset.take(400)): if batch % 80 == 0: print() print('.', end='') with tf.GradientTape() as tape: logits = mnist_model(images, training=True) loss_value = tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True) loss_history.append(loss_value.numpy().mean()) grads = tape.gradient(loss_value, mnist_model.variables) optimizer.apply_gradients(zip(grads, mnist_model.variables)) import matplotlib.pyplot as plt plt.plot(loss_history) plt.xlabel('Batch #') plt.ylabel('Loss [entropy]') ###Output _____no_output_____ ###Markdown This example uses the[dataset.py module](https://github.com/tensorflow/models/blob/master/official/mnist/dataset.py)from the[TensorFlow MNIST example](https://github.com/tensorflow/models/tree/master/official/mnist);download this file to your local directory. Run the following to download theMNIST data files to your working directory and prepare a `tf.data.Dataset`for training: Variables and optimizers`tf.Variable` objects store mutable `tf.Tensor` values accessed duringtraining to make automatic differentiation easier. The parameters of a model canbe encapsulated in classes as variables.Better encapsulate model parameters by using `tf.Variable` with`tf.GradientTape`. For example, the automatic differentiation example abovecan be rewritten: ###Code class Model(tf.keras.Model): def __init__(self): super(Model, self).__init__() self.W = tf.Variable(5., name='weight') self.B = tf.Variable(10., name='bias') def call(self, inputs): return inputs * self.W + self.B # A toy dataset of points around 3 * x + 2 NUM_EXAMPLES = 2000 training_inputs = tf.random.normal([NUM_EXAMPLES]) noise = tf.random.normal([NUM_EXAMPLES]) training_outputs = training_inputs * 3 + 2 + noise # The loss function to be optimized def loss(model, inputs, targets): error = model(inputs) - targets return tf.reduce_mean(tf.square(error)) def grad(model, inputs, targets): with tf.GradientTape() as tape: loss_value = loss(model, inputs, targets) return tape.gradient(loss_value, [model.W, model.B]) # Define: # 1. A model. # 2. Derivatives of a loss function with respect to model parameters. # 3. A strategy for updating the variables based on the derivatives. model = Model() optimizer = tf.keras.optimizers.SGD(learning_rate=0.01) print("Initial loss: {:.3f}".format(loss(model, training_inputs, training_outputs))) # Training loop for i in range(300): grads = grad(model, training_inputs, training_outputs) optimizer.apply_gradients(zip(grads, [model.W, model.B])) if i % 20 == 0: print("Loss at step {:03d}: {:.3f}".format(i, loss(model, training_inputs, training_outputs))) print("Final loss: {:.3f}".format(loss(model, training_inputs, training_outputs))) print("W = {}, B = {}".format(model.W.numpy(), model.B.numpy())) ###Output _____no_output_____ ###Markdown Use objects for state during eager executionWith TF 1.x graph execution, program state (such as the variables) is stored in globalcollections and their lifetime is managed by the `tf.Session` object. Incontrast, during eager execution the lifetime of state objects is determined bythe lifetime of their corresponding Python object. Variables are objectsDuring eager execution, variables persist until the last reference to the objectis removed, and is then deleted. 
###Code if tf.test.is_gpu_available(): with tf.device("gpu:0"): v = tf.Variable(tf.random_normal([1000, 1000])) v = None # v no longer takes up GPU memory ###Output _____no_output_____ ###Markdown Object-based saving`tf.train.Checkpoint` can save and restore `tf.Variable`s to and fromcheckpoints: ###Code x = tf.Variable(10.) checkpoint = tf.train.Checkpoint(x=x) x.assign(2.) # Assign a new value to the variables and save. checkpoint_path = './ckpt/' checkpoint.save('./ckpt/') x.assign(11.) # Change the variable after saving. # Restore values from the checkpoint checkpoint.restore(tf.train.latest_checkpoint(checkpoint_path)) print(x) # => 2.0 ###Output _____no_output_____ ###Markdown To save and load models, `tf.train.Checkpoint` stores the internal state of objects,without requiring hidden variables. To record the state of a `model`,an `optimizer`, and a global step, pass them to a `tf.train.Checkpoint`: ###Code import os model = tf.keras.Sequential([ tf.keras.layers.Conv2D(16,[3,3], activation='relu'), tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dense(10) ]) optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) checkpoint_dir = 'path/to/model_dir' if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") root = tf.train.Checkpoint(optimizer=optimizer, model=model) root.save(checkpoint_prefix) root.restore(tf.train.latest_checkpoint(checkpoint_dir)) ###Output _____no_output_____ ###Markdown Object-oriented metrics`tf.keras.metrics` are stored as objects. Update a metric by passing the new data tothe callable, and retrieve the result using the `tf.keras.metrics.result` method,for example: ###Code m = tf.keras.metrics.Mean("loss") m(0) m(5) m.result() # => 2.5 m([8, 9]) m.result() # => 5.5 ###Output _____no_output_____ ###Markdown Advanced automatic differentiation topics Dynamic models`tf.GradientTape` can also be used in dynamic models. This example for a[backtracking line search](https://wikipedia.org/wiki/Backtracking_line_search)algorithm looks like normal NumPy code, except there are gradients and isdifferentiable, despite the complex control flow: ###Code def line_search_step(fn, init_x, rate=1.0): with tf.GradientTape() as tape: # Variables are automatically recorded, but manually watch a tensor tape.watch(init_x) value = fn(init_x) grad = tape.gradient(value, init_x) grad_norm = tf.reduce_sum(grad * grad) init_value = value while value > init_value - rate * grad_norm: x = init_x - rate * grad value = fn(x) rate /= 2.0 return x, value ###Output _____no_output_____ ###Markdown Custom gradientsCustom gradients are an easy way to override gradients. Within the forward function, define the gradient with respect to theinputs, outputs, or intermediate results. For example, here's an easy way to clipthe norm of the gradients in the backward pass: ###Code @tf.custom_gradient def clip_gradient_by_norm(x, norm): y = tf.identity(x) def grad_fn(dresult): return [tf.clip_by_norm(dresult, norm), None] return y, grad_fn ###Output _____no_output_____ ###Markdown Custom gradients are commonly used to provide a numerically stable gradient for asequence of operations: ###Code def log1pexp(x): return tf.math.log(1 + tf.exp(x)) def grad_log1pexp(x): with tf.GradientTape() as tape: tape.watch(x) value = log1pexp(x) return tape.gradient(value, x) # The gradient computation works fine at x = 0. grad_log1pexp(tf.constant(0.)).numpy() # However, x = 100 fails because of numerical instability. 
grad_log1pexp(tf.constant(100.)).numpy() ###Output _____no_output_____ ###Markdown Here, the `log1pexp` function can be analytically simplified with a customgradient. The implementation below reuses the value for `tf.exp(x)` that iscomputed during the forward pass—making it more efficient by eliminatingredundant calculations: ###Code @tf.custom_gradient def log1pexp(x): e = tf.exp(x) def grad(dy): return dy * (1 - 1 / (1 + e)) return tf.math.log(1 + e), grad def grad_log1pexp(x): with tf.GradientTape() as tape: tape.watch(x) value = log1pexp(x) return tape.gradient(value, x) # As before, the gradient computation works fine at x = 0. grad_log1pexp(tf.constant(0.)).numpy() # And the gradient computation also works at x = 100. grad_log1pexp(tf.constant(100.)).numpy() ###Output _____no_output_____ ###Markdown PerformanceComputation is automatically offloaded to GPUs during eager execution. If youwant control over where a computation runs you can enclose it in a`tf.device('/gpu:0')` block (or the CPU equivalent): ###Code import time def measure(x, steps): # TensorFlow initializes a GPU the first time it's used, exclude from timing. tf.matmul(x, x) start = time.time() for i in range(steps): x = tf.matmul(x, x) # tf.matmul can return before completing the matrix multiplication # (e.g., can return after enqueing the operation on a CUDA stream). # The x.numpy() call below will ensure that all enqueued operations # have completed (and will also copy the result to host memory, # so we're including a little more than just the matmul operation # time). _ = x.numpy() end = time.time() return end - start shape = (1000, 1000) steps = 200 print("Time to multiply a {} matrix by itself {} times:".format(shape, steps)) # Run on CPU: with tf.device("/cpu:0"): print("CPU: {} secs".format(measure(tf.random.normal(shape), steps))) # Run on GPU, if available: if tf.test.is_gpu_available(): with tf.device("/gpu:0"): print("GPU: {} secs".format(measure(tf.random.normal(shape), steps))) else: print("GPU: not found") ###Output _____no_output_____ ###Markdown A `tf.Tensor` object can be copied to a different device to execute itsoperations: ###Code if tf.test.is_gpu_available(): x = tf.random_normal([10, 10]) x_gpu0 = x.gpu() x_cpu = x.cpu() _ = tf.matmul(x_cpu, x_cpu) # Runs on CPU _ = tf.matmul(x_gpu0, x_gpu0) # Runs on GPU:0 ###Output _____no_output_____
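###Markdown One more advanced-differentiation trick worth noting before moving on: `tf.GradientTape` objects can be nested to compute higher-order derivatives. A small self-contained sketch using only APIs shown in this guide: ###Code
x = tf.Variable(3.0)
with tf.GradientTape() as outer_tape:
    with tf.GradientTape() as inner_tape:
        y = x * x * x
    dy_dx = inner_tape.gradient(y, x)      # 3 * x**2 = 27.0
d2y_dx2 = outer_tape.gradient(dy_dx, x)    # 6 * x = 18.0
print(dy_dx.numpy(), d2y_dx2.numpy())
###Output _____no_output_____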
04c_tfrecord.ipynb
###Markdown Library ###Code import tensorflow as tf import numpy as np import os import glob import pandas as pd import PIL import gc from PIL import Image print(f'Numpy version : {np.__version__}') print(f'Pandas version : {pd.__version__}') print(f'Tensorflow version : {tf.__version__}') print(f'Pillow version : {PIL.__version__}') ###Output Numpy version : 1.18.1 Pandas version : 1.0.3 Tensorflow version : 2.2.0 Pillow version : 5.4.1 ###Markdown Dataset ###Code !ls /kaggle/input # df_train = pd.read_parquet('/kaggle/input/csv-with-cleaned-ocr-text/train.parquet', engine='pyarrow').sort_values("filename").reset_index(drop=True) df_test = pd.read_parquet('/kaggle/input/csv-with-cleaned-ocr-text/test.parquet', engine='pyarrow') df_test ###Output _____no_output_____ ###Markdown Create TFRecord ###Code def _bytes_feature(value): """Returns a bytes_list from a string / byte.""" if isinstance(value, type(tf.constant(0))): value = value.numpy() # BytesList won't unpack a string from an EagerTensor. return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _float_feature(value): """Returns a float_list from a float / double.""" return tf.train.Feature(float_list=tf.train.FloatList(value=[value])) def _list_float_feature(value): """Returns a float_list from a float / double.""" return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def _int64_feature(value): """Returns an int64_list from a bool / enum / int / uint.""" return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _list_int64_feature(value): """Returns an int64_list from a bool / enum / int / uint.""" return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) RESIZE_WIDTH = 512 RESIZE_HEIGHT = 512 TFRECORD_MAX_SIZE = 80 * 1024 * 1024 # 80 MB # TOTAL_IMAGES = len(df_train.index) TOTAL_IMAGES = len(df_test.index) # part 1 : 0:TOTAL_IMAGES // 2 (train) # part 2 : TOTAL_IMAGES // 2:TOTAL_IMAGES (train) # part 1 : 0:TOTAL_IMAGES (test) [CURRENT] START_INDEX = 0 END_INDEX = TOTAL_IMAGES BATCH_IMAGE = 1024 def create_tfrecord(index, df): index = str(index).zfill(3) curr_file = f"test-{index}.tfrecords" writer = tf.io.TFRecordWriter(curr_file) for index, row in df.iterrows(): category_str = str(row['category']).zfill(2) image = f'/kaggle/input/shopee-product-detection-student/test/test/test/{row["filename"]}' img = open(image, 'rb') img_read = img.read() image_decoded = tf.image.decode_jpeg(img_read, channels=3) resized_img = tf.image.resize_with_pad(image_decoded,target_width=RESIZE_WIDTH,target_height=RESIZE_HEIGHT,method=tf.image.ResizeMethod.BILINEAR) resized_img = tf.cast(resized_img,tf.uint8) resized_img = tf.io.encode_jpeg(resized_img) feature = { 'filename': _bytes_feature(tf.compat.as_bytes(row['filename'])), 'label': _int64_feature(row['category']), 'words': _list_float_feature(row['words']), 'image': _bytes_feature(resized_img), 'height' : _int64_feature(RESIZE_HEIGHT), 'width' : _int64_feature(RESIZE_WIDTH) } example = tf.train.Example(features=tf.train.Features(feature=feature)) writer.write(example.SerializeToString()) writer.close() for i in range(START_INDEX, END_INDEX, BATCH_IMAGE): print(f'Create TFRecords #{i // BATCH_IMAGE}') if i + BATCH_IMAGE < END_INDEX: create_tfrecord(i // BATCH_IMAGE, df_test.loc[i:i+BATCH_IMAGE]) else: create_tfrecord(i // BATCH_IMAGE, df_test.loc[i:END_INDEX]) gc.collect() !ls -lah ###Output total 1.1G drwxr-xr-x 2 root root 4.0K Jul 1 09:13 . drwxr-xr-x 6 root root 4.0K Jul 1 09:10 .. 
---------- 1 root root 5.2K Jul 1 09:10 __notebook__.ipynb -rw-r--r-- 1 root root 93M Jul 1 09:11 test-000.tfrecords -rw-r--r-- 1 root root 94M Jul 1 09:11 test-001.tfrecords -rw-r--r-- 1 root root 93M Jul 1 09:11 test-002.tfrecords -rw-r--r-- 1 root root 93M Jul 1 09:11 test-003.tfrecords -rw-r--r-- 1 root root 94M Jul 1 09:12 test-004.tfrecords -rw-r--r-- 1 root root 93M Jul 1 09:12 test-005.tfrecords -rw-r--r-- 1 root root 94M Jul 1 09:12 test-006.tfrecords -rw-r--r-- 1 root root 93M Jul 1 09:13 test-007.tfrecords -rw-r--r-- 1 root root 93M Jul 1 09:13 test-008.tfrecords -rw-r--r-- 1 root root 95M Jul 1 09:13 test-009.tfrecords -rw-r--r-- 1 root root 94M Jul 1 09:13 test-010.tfrecords -rw-r--r-- 1 root root 84M Jul 1 09:14 test-011.tfrecords
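###Markdown To read the records back, a feature description that mirrors the writer above can be used. A sketch; `words` is parsed as a variable-length feature, on the assumption that its length differs per row: ###Code
feature_description = {
    'filename': tf.io.FixedLenFeature([], tf.string),
    'label': tf.io.FixedLenFeature([], tf.int64),
    'words': tf.io.VarLenFeature(tf.float32),
    'image': tf.io.FixedLenFeature([], tf.string),
    'height': tf.io.FixedLenFeature([], tf.int64),
    'width': tf.io.FixedLenFeature([], tf.int64),
}

def parse_example(serialized):
    parsed = tf.io.parse_single_example(serialized, feature_description)
    image = tf.io.decode_jpeg(parsed['image'], channels=3)  # back to (512, 512, 3)
    return image, parsed['label']

dataset = tf.data.TFRecordDataset(tf.io.gfile.glob('test-*.tfrecords'))
dataset = dataset.map(parse_example).batch(32)
###Output _____no_output_____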
Projekt_ZPO.ipynb
###Markdown Downloading the data ###Code !wget "https://chmura.put.poznan.pl/s/MLk1k6RWWQQuOXs/download?path=%2F&files=train.tar.xz" -O train.tar.xz !tar xf train.tar.xz !rm train.tar.xz ###Output --2022-02-21 16:32:32-- https://chmura.put.poznan.pl/s/MLk1k6RWWQQuOXs/download?path=%2F&files=train.tar.xz Resolving chmura.put.poznan.pl (chmura.put.poznan.pl)... 150.254.5.31, 2001:808:201::5:31 Connecting to chmura.put.poznan.pl (chmura.put.poznan.pl)|150.254.5.31|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 251078400 (239M) [application/octet-stream] Saving to: ‘train.tar.xz’ train.tar.xz 100%[===================>] 239.45M 21.2MB/s in 13s 2022-02-21 16:32:45 (18.7 MB/s) - ‘train.tar.xz’ saved [251078400/251078400] ###Markdown Useful conversion functions ###Code def convert_rgb_to_ids(labels: np.ndarray) -> np.ndarray: result = np.zeros(labels.shape[:2], dtype=np.uint8) result[np.where((labels == (0, 0, 255)).all(axis=2))] = 1 result[np.where((labels == (0, 255, 0)).all(axis=2))] = 2 result[np.where((labels == (255, 0, 0)).all(axis=2))] = 3 return result def convert_ids_to_rgb(labels: np.ndarray) -> np.ndarray: result = np.zeros((*labels.shape, 3), dtype=np.uint8) result[labels == 1] = (0, 0, 255) result[labels == 2] = (0, 255, 0) result[labels == 3] = (255, 0, 0) return result ###Output _____no_output_____ ###Markdown Custom dataset object inheriting from torch ###Code class LunarDataset(torch.utils.data.Dataset): def __init__(self, path: Path, file_names: List[str], augment: bool = False): self._file_names = file_names self._images_dir = path / 'images' self._labels_dir = path / 'masks' self._augment = augment self.image_size = (270, 480) self.padded_image_size = ( math.ceil(self.image_size[0] / 32) * 32, math.ceil(self.image_size[1] / 32) * 32 ) self.transforms = A.Compose([ A.Resize(*self.image_size), A.PadIfNeeded(*self.padded_image_size), A.ToFloat(max_value=255), ToTensorV2() ]) self.augmentations = A.Compose([ A.Resize(*self.image_size), A.PadIfNeeded(*self.padded_image_size), A.ToFloat(max_value=255), ToTensorV2() ]) def __getitem__(self, index: int): image_path = self._images_dir / self._file_names[index].replace('.png', '.jpg') labels_path = self._labels_dir / self._file_names[index] image = cv2.imread(str(image_path)) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) labels = cv2.imread(str(labels_path)) labels = cv2.cvtColor(labels, cv2.COLOR_BGR2RGB) labels = convert_rgb_to_ids(labels) if self._augment: transformed = self.augmentations(image=image, mask=labels) else: transformed = self.transforms(image=image, mask=labels) return transformed['image'], transformed['mask'].type(torch.int64) def __len__(self): return len(self._file_names) def _convert_mask(self, mask): # (h, w, 3) -> (h, w) result = np.zeros(mask.shape[:2], dtype=np.uint8) result[np.where((mask == (0, 0, 255)).all(axis=2))] = 1 result[np.where((mask == (0, 255, 0)).all(axis=2))] = 2 result[np.where((mask == (255, 0, 0)).all(axis=2))] = 3 return result def calculate_weights(self): classes_sum = np.zeros((4,), dtype=np.uint64) for file_name in tqdm(self._file_names): labels_path = self._labels_dir / file_name labels = cv2.imread(str(labels_path)) labels = cv2.cvtColor(labels, cv2.COLOR_BGR2RGB) labels = self._convert_mask(labels) histogram, _ = np.histogram(labels.flatten(), bins=4, range=(0, 4)) classes_sum += histogram.astype(np.uint64) weights = 1 / classes_sum weights /= np.sum(weights) return weights ###Output _____no_output_____ ###Markdown Loading and splitting the training data
###Code from sklearn.model_selection import train_test_split base_path = Path('/content/LunarSeg/train') train_names = sorted([path.name for path in (base_path / 'masks').iterdir()]) train_names, val_names = train_test_split(train_names, test_size=0.15, random_state=42) train_dataset = LunarDataset(base_path, train_names, augment=True) val_dataset = LunarDataset(base_path, val_names) ###Output _____no_output_____ ###Markdown Trying the loss function with inverted weights ###Code train_dataset.calculate_weights() weights = np.array([0.07004456, 0.02316237, 0.66572621, 0.24106686], dtype=np.float32) ###Output _____no_output_____ ###Markdown Trying the loss function with dice loss ###Code class MultiClassDiceLoss(nn.Module): def __init__(self, smooth = 1.0): super().__init__() self._smooth = smooth def forward(self, preds, ground_truth): preds = torch.softmax(preds, dim=1) num_classes = preds.shape[1] dice_sum = torch.tensor(0.0, dtype=torch.float32, device=preds.device) for class_id in range(num_classes): class_preds = preds[:, class_id].reshape(-1) class_ground_truth = (ground_truth == class_id).view(-1) tp = (class_preds * class_ground_truth).sum() class_dice = 1 - (2 * tp + self._smooth) / (class_preds.sum() + class_ground_truth.sum() + self._smooth) dice_sum += class_dice return dice_sum / num_classes ###Output _____no_output_____ ###Markdown Segmenter class ###Code class Segmenter(pl.LightningModule): def __init__(self): super().__init__() self.network = Unet(encoder_name='resnet50', classes=4) # self.loss_function = torch.nn.CrossEntropyLoss() self.loss_function = MultiClassDiceLoss() # self.loss_function = nn.CrossEntropyLoss( # weight=torch.from_numpy(weights) # ) metrics = torchmetrics.MetricCollection([ torchmetrics.Precision(num_classes=4, average='macro', mdmc_average='samplewise'), torchmetrics.Recall(num_classes=4, average='macro', mdmc_average='samplewise'), torchmetrics.F1Score(num_classes=4, average='macro', mdmc_average='samplewise'), torchmetrics.Accuracy(num_classes=4, average='macro', mdmc_average='samplewise') ]) self.train_metrics = metrics.clone('train_') self.val_metrics = metrics.clone('val_') def forward(self, x): return self.network(x) def training_step(self, batch, batch_idx): inputs, labels = batch outputs = self(inputs) loss = self.loss_function(outputs, labels) self.log('train_loss', loss) outputs = torch.softmax(outputs, dim=1) self.log_dict(self.train_metrics(outputs, labels)) return loss def validation_step(self, batch, batch_idx): inputs, labels = batch outputs = self(inputs) loss = self.loss_function(outputs, labels) self.log('val_loss', loss, prog_bar=True) outputs = torch.softmax(outputs, dim=1) self.log_dict(self.val_metrics(outputs, labels)) def configure_optimizers(self): return torch.optim.Adam(self.parameters(), lr=1e-3) segmenter = Segmenter() model_checkpoint = pl.callbacks.ModelCheckpoint(dirpath='/content/checkpoints') early_stopping = pl.callbacks.EarlyStopping(monitor='val_loss', patience=10) logger = pl.loggers.NeptuneLogger( api_key='eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vYXBwLm5lcHR1bmUuYWkiLCJhcGlfdXJsIjoiaHR0cHM6Ly9hcHAubmVwdHVuZS5haSIsImFwaV9rZXkiOiIzOWI2ZGJmZi1hNTVjLTQ4NmQtODBmOS00MDdkYWMyM2JhOGYifQ==', project='LunarSeg' ) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=8, num_workers=2) val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=8, num_workers=2) trainer = pl.Trainer(logger=logger, callbacks=[model_checkpoint, early_stopping], gpus=1, max_epochs=100) trainer.fit(segmenter, train_dataloaders=train_loader,
val_dataloaders=val_loader) logger.run.stop() ###Output GPU available: True, used: True TPU available: False, using: 0 TPU cores IPU available: False, using: 0 IPUs LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0] | Name | Type | Params ----------------------------------------------------- 0 | network | Unet | 32.5 M 1 | loss_function | MultiClassDiceLoss | 0 2 | train_metrics | MetricCollection | 0 3 | val_metrics | MetricCollection | 0 ----------------------------------------------------- 32.5 M Trainable params 0 Non-trainable params 32.5 M Total params 130.086 Total estimated model params size (MB) /usr/local/lib/python3.7/dist-packages/pytorch_lightning/callbacks/model_checkpoint.py:631: UserWarning: Checkpoint directory /content/checkpoints exists and is not empty. rank_zero_warn(f"Checkpoint directory {dirpath} exists and is not empty.") ###Markdown Downloading the data for prediction ###Code !wget "https://chmura.put.poznan.pl/s/MLk1k6RWWQQuOXs/download?path=%2F&files=test.tar.xz" -O test.tar.xz !tar xf test.tar.xz !rm test.tar.xz device = torch.device('cuda') segmenter = Segmenter.load_from_checkpoint(model_checkpoint.best_model_path).to(device) # load the best weights from training segmenter = segmenter.eval() import cv2 input_transforms = val_dataset.transforms output_transforms = A.Compose([ A.CenterCrop(*val_dataset.image_size), A.Resize(720, 1280, interpolation=cv2.INTER_NEAREST) ]) test_base_path = Path('/content/LunarSeg/test') predictions_path = Path('/content/LunarSeg/test/predictions') predictions_path.mkdir(exist_ok=True, parents=True) for test_image_path in (test_base_path / 'images').iterdir(): image = cv2.imread(str(test_image_path)) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image = input_transforms(image=image)['image'][None, ...] with torch.no_grad(): prediction = segmenter(image.to(device)).cpu().squeeze().argmax(dim=0).numpy() prediction = convert_ids_to_rgb(prediction) prediction = cv2.cvtColor(prediction, cv2.COLOR_RGB2BGR) prediction = output_transforms(image=prediction)['image'] cv2.imwrite(str(predictions_path / f'{test_image_path.stem}.png'), prediction) # predictions_path = Path('/content/LunarSeg/test/predictions_reverse_weights') predictions_path = Path('/content/LunarSeg/test/predictions_dice') # predictions_path = Path('/content/drive/MyDrive/Automatyka/mgr/predictions') import requests import pickle import zlib from multiprocessing.dummy import Pool as ThreadPool sum_result = 0 def calculate_score(prediction_path: Path): prediction = cv2.imread(str(prediction_path)) prediction = cv2.cvtColor(prediction, cv2.COLOR_BGR2RGB) response = requests.post(f'http://zpo.dpieczynski.pl/{prediction_path.stem}', data=zlib.compress(pickle.dumps(prediction))) if response.status_code == 200: result = response.json() global sum_result sum_result += float(str(result)[6:-1]) return f'{prediction_path.name} {result}' else: return f'Error processing prediction {prediction_path.name}: {response.text}' return None i = 0 with ThreadPool(processes=16) as pool: for result in pool.imap_unordered(calculate_score, predictions_path.iterdir()): i += 1 print(i) print(sum_result / i) from google.colab import drive drive.mount('/content/drive') !cp -r '/content/LunarSeg/test/predictions_reverse_weights' '/content/drive/MyDrive/Automatyka/mgr/predictions_reverse_weights' ###Output _____no_output_____
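###Markdown A cheap sanity check on the two conversion helpers from the top of this notebook: they should be exact inverses on class-ID masks, since every class ID maps to a unique colour (including 0 to black). A short sketch assuming those functions are already defined: ###Code
ids = np.random.randint(0, 4, size=(16, 16), dtype=np.uint8)
assert np.array_equal(convert_rgb_to_ids(convert_ids_to_rgb(ids)), ids)
###Output _____no_output_____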
jupyterhub/notebooks/pmml/pmml_balancescale/04_PredictModel.ipynb
###Markdown Predict with Model Init Model ###Code %%bash pio init-model \ --model-server-url http://prediction-pmml.community.pipeline.io/ \ --model-type pmml \ --model-namespace default \ --model-name pmml_balancescale \ --model-version v1 \ --model-path . ###Output _____no_output_____ ###Markdown Predict with Model (CLI) ###Code %%bash pio predict \ --model-test-request-path ./data/test_request.json ###Output _____no_output_____ ###Markdown Predict Many This is a mini load test to provide instant feedback on relative performance. ###Code %%bash pio predict_many \ --model-test-request-path ./data/test_request.json \ --num-iterations 5 ###Output _____no_output_____ ###Markdown Predict with Model (REST) ###Code import requests # these values must match the model registered by `pio init-model` above model_type = 'pmml' model_namespace = 'default' model_name = 'pmml_balancescale' model_version = 'v1' deploy_url = 'http://prediction-%s.community.pipeline.io/api/v1/model/predict/%s/%s/%s/%s' % (model_type, model_type, model_namespace, model_name, model_version) with open('./data/test_request.json', 'rb') as fh: model_input_binary = fh.read() response = requests.post(url=deploy_url, data=model_input_binary, timeout=30) print("Success! %s" % response.text) ###Output _____no_output_____
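###Markdown The REST cell above prints "Success!" unconditionally. A slightly more defensive variant, using standard `requests` error handling against the same endpoint and payload (a sketch): ###Code
try:
    response = requests.post(url=deploy_url, data=model_input_binary, timeout=30)
    response.raise_for_status()  # raises requests.HTTPError on 4xx/5xx responses
    print("Success! %s" % response.text)
except requests.RequestException as exc:
    print("Prediction request failed: %s" % exc)
###Output _____no_output_____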
src/data_sorting/compare_microarray_RNAseq.ipynb
###Markdown Identify how many promoters are potentially bidirectional in each promoter category (i.e. potentially overlapping promoters where the upstream gene is positioned in the opposite direction and is less than 2000 bp away from the TSS). ###Code overlapping_promoters_bed = f'../../data/output/{file_names}/overlapping_promoters.bed' overlappingproms_df = pd.read_table(overlapping_promoters_bed, sep='\t', header=None) cols = ['chr', 'start', 'stop', 'numberoverlapping', 'gene1', 'gene2'] overlappingproms_df.columns = cols # select genes present in both the Czechowski gene set and the overlapping set czechowski_overlappingproms = czechowski_df[czechowski_df.AGI.isin(overlappingproms_df.gene1) | czechowski_df.AGI.isin(overlappingproms_df.gene2)] len(czechowski_overlappingproms[czechowski_overlappingproms.gene_type == 'constitutive']) len(czechowski_overlappingproms[czechowski_overlappingproms.gene_type == 'variable']) len(czechowski_overlappingproms[czechowski_overlappingproms.gene_type == 'control']) ###Output _____no_output_____
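###Markdown The three `len()` calls can be collapsed into a single call that counts every category at once (same dataframe as above): ###Code
# counts of constitutive / variable / control promoters with a potentially bidirectional partner
czechowski_overlappingproms['gene_type'].value_counts()
###Output _____no_output_____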
.ipynb_checkpoints/predicting_survival_rate_lung_cancer_surgery-checkpoint.ipynb
###Markdown Predicting the survival rate of lung cancer surgery patients (practice exercise) ###Code import tensorflow as tf tf.__version__ ###Output _____no_output_____
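###Markdown The notebook stops after the version check. For reference, a minimal sketch of how such a survival-prediction exercise typically continues; the CSV file name, column layout, and network shape below are all assumptions, not part of the original: ###Code
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# hypothetical dataset: 17 numeric feature columns followed by a binary survival label
data = np.loadtxt('ThoraricSurgery.csv', delimiter=',')  # assumed file name and format
X, y = data[:, 0:17], data[:, 17]

model = Sequential([
    Dense(30, input_dim=17, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, y, epochs=30, batch_size=10)
###Output _____no_output_____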
Machine Learning/8. Logistic Regression(Multi-class Classification) 2.ipynb
###Markdown Exercise Solution (Iris_Dataset) ###Code from sklearn.datasets import load_iris # load_iris is one of the datasets available in sklearn for practice iris = load_iris() # storing the dataset type(iris) dir(iris) iris.data[0:5] # Every individual sample has sepal length/width and petal length/width iris.feature_names iris.target # Target seems to be our desired dependent variable iris.target_names[0:3] iris.filename len(iris.data) ###Output _____no_output_____ ###Markdown Based on the above exploration of the dataset, we can use iris.data as the independent variable and iris.target as the dependent variable Train/Test Split ###Code from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2) X_test y_test len(X_train) ###Output _____no_output_____ ###Markdown Building the Model ###Code from sklearn.linear_model import LogisticRegression model = LogisticRegression() model.fit(X_train, y_train) # Training ###Output _____no_output_____ ###Markdown Results ###Code y_test # This is the target that the model should predict model.predict(X_test) model.score(X_test, y_test) # So, accuracy is 96.66% model.predict([[7.7, 3.8, 6.7, 2.2]]) ###Output _____no_output_____ ###Markdown Confusion Matrix ###Code y_predicted = model.predict(X_test) # Predicted values for all my test data from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_predicted) # Truth values & predicted values as parameters cm import matplotlib.pyplot as plt # needed for the axis labels below import seaborn as sns plt.figure(figsize=(10, 7)) sns.heatmap(cm, annot=True) # Using a heatmap plt.xlabel('Predicted') plt.ylabel('Truth') ###Output _____no_output_____
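###Markdown Beyond the raw confusion matrix, scikit-learn can summarise per-class precision and recall in a single call (same split and predictions as above): ###Code
from sklearn.metrics import classification_report
print(classification_report(y_test, y_predicted, target_names=iris.target_names))
###Output _____no_output_____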
assembly_eda_studies/change_point_bayes.ipynb
###Markdown Bayesian Changepoint Detection in Python This code computes the probability of changepoints in a time series. In this notebook I show how you can use it. First let's generate some data: ###Code from __future__ import division import numpy as np import matplotlib.pyplot as plt import seaborn %matplotlib inline %load_ext autoreload %autoreload 2 def generate_normal_time_series(num, minl=50, maxl=1000): data = np.array([], dtype=np.float64) partition = np.random.randint(minl, maxl, num) for p in partition: mean = np.random.randn()*10 var = np.random.randn()*1 if var < 0: var = var * -1 tdata = np.random.normal(mean, var, p) data = np.concatenate((data, tdata)) return data data = generate_normal_time_series(7, 50, 200) data.shape ###Output _____no_output_____ ###Markdown Let's have a look at what the data looks like: ###Code fig, ax = plt.subplots(figsize=[16, 12]) ax.plot(data) ###Output _____no_output_____ ###Markdown Offline Changepoint Detection Let's compute the probability of changepoints at each time step. We need two things for that. First, a prior for how probable it is to have two successive changepoints at distance `t`. The second is a model of the likelihood of the data in a sequence `[s, t]`, given that in this sequence there is *no* changepoint. For this example we assume a uniform prior over the length of sequences (`const_prior`) and a piecewise Gaussian model (`gaussian_obs_log_likelihood`). ###Code import cProfile import bayesian_changepoint_detection.offline_changepoint_detection as offcd from functools import partial Q, P, Pcp = offcd.offline_changepoint_detection(data, partial(offcd.const_prior, l=(len(data)+1)), offcd.gaussian_obs_log_likelihood, truncate=-40) sum_exp = np.exp(Pcp).sum(0) sum_exp.shape sum_exp ###Output _____no_output_____ ###Markdown The `offline_changepoint_detection()` function returns three things: `Q[t]`, the log-likelihood of data `[t, n]`; `P[t, s]`, the log-likelihood of a data sequence `[t, s]`, given there is no changepoint between `t` and `s`; and `Pcp[i, t]`, the log-likelihood that the `i`-th changepoint is at time step `t`. To actually get the probability of a changepoint at time step `t`, sum the probabilities. What does that look like for our toy data? ###Code fig, ax = plt.subplots(figsize=[18, 16]) ax = fig.add_subplot(2, 1, 1) ax.plot(data[:]) ax = fig.add_subplot(2, 1, 2, sharex=ax) ax.plot(np.exp(Pcp).sum(0)) ###Output _____no_output_____ ###Markdown That works pretty well, but is somewhat slow. It's possible to speed that up by truncating a sum in the algorithm. However, that sometimes leads to $\infty$ values. Set the `truncate` parameter to e.g. `-10` to test that out. To understand what is happening, have a look at the following papers: [1] Paul Fearnhead, Exact and Efficient Bayesian Inference for Multiple Changepoint problems, Statistics and Computing 16.2 (2006), pp. 203--213 [2] Xuan Xiang, Kevin Murphy, Modeling Changing Dependency Structure in Multivariate Time Series, ICML (2007), pp. 1055--1062 Online Changepoint Detection Let's assume the data points come in one after another and not as these nice batches. During the process you want to know if the new point has the same hyperparameters or different ones. You need online changepoint detection. Happily there is one, although its interface is kind of suboptimal so far, in that it expects batches of data still and just assumes they drop in over time... I will change that at some point.
###Code import bayesian_changepoint_detection.online_changepoint_detection as oncd from functools import partial R, maxes = oncd.online_changepoint_detection(data, partial(oncd.constant_hazard, 250), oncd.StudentT(0.1, .01, 1, 0)) ###Output _____no_output_____ ###Markdown The online version computes slightly different things. For each time step it returns the probability distribution over the length of the last sequence. E.g. `R[7, 3]` is the probability at time step `7` that the last sequence is already `3` time steps long. It also returns the MAP estimate at each timestep for convenience. To plot the distributions we use a grey-scale colormap: black is zero, white is one. We also plot the probability at each time step for a sequence length of 0, i.e. the probability that the current time step is a changepoint. Because it's very hard to correctly evaluate a change after a single sample of a new distribution, we can instead "wait" for `Nw` samples and evaluate the probability of a change happening `Nw` samples prior. ###Code import matplotlib.cm as cm fig, ax = plt.subplots(figsize=[18, 16]) ax = fig.add_subplot(3, 1, 1) ax.plot(data) ax = fig.add_subplot(3, 1, 2, sharex=ax) sparsity = 5 # only plot every fifth data point for faster display ax.pcolor(np.array(range(0, len(R[:,0]), sparsity)), np.array(range(0, len(R[:,0]), sparsity)), -np.log(R[0:-1:sparsity, 0:-1:sparsity]), cmap=cm.Greys, vmin=0, vmax=30) ax = fig.add_subplot(3, 1, 3, sharex=ax) Nw = 10; ax.plot(R[Nw, Nw:-1]) ###Output _____no_output_____ ###Markdown Well, not bad, considering how much faster it is (if you can afford to wait for those extra `Nw` samples). To understand the whole algorithm, look at: [1] Ryan P. Adams, David J.C. MacKay, Bayesian Online Changepoint Detection, arXiv 0710.3742 (2007) There you also find a Matlab version, which this code is based on. ###Code data = generate_normal_time_series(7, 50, 200) %timeit Q, P, Pcp = offcd.offline_changepoint_detection(data, partial(offcd.const_prior, l=(len(data)+1)), offcd.gaussian_obs_log_likelihood) %timeit R, maxes = oncd.online_changepoint_detection(data, partial(oncd.constant_hazard, 250), oncd.StudentT(10, .03, 1, 0)) ###Output 1 loops, best of 3: 474 ms per loop
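###Markdown To turn the offline result into concrete changepoint indices, threshold the summed probabilities computed earlier (a sketch reusing `Pcp` from the offline run; the 0.5 cut-off is an arbitrary choice, not from the library): ###Code
cp_prob = np.exp(Pcp).sum(0)            # probability of a changepoint at each step
detected = np.where(cp_prob > 0.5)[0]   # indices that cross the chosen threshold
print(detected)
###Output _____no_output_____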
RL-Quadcopter/notebooks/RL-Quadcopter.ipynb
###Markdown Project: Train a Quadcopter How to FlyDesign an agent that can fly a quadcopter, and then train it using a reinforcement learning algorithm of your choice! Try to apply the techniques you have learnt, but also feel free to come up with innovative ideas and test them.![Quadcopter doing a flip trying to takeoff from the ground](images/quadcopter_tumble.png) Instructions> **Note**: If you haven't done so already, follow the steps in this repo's README to install ROS, and ensure that the simulator is running and correctly connecting to ROS.When you are ready to start coding, take a look at the `quad_controller_rl/src/` (source) directory to better understand the structure. Here are some of the salient items:- `src/`: Contains all the source code for the project. - `quad_controller_rl/`: This is the root of the Python package you'll be working in. - ... - `tasks/`: Define your tasks (environments) in this sub-directory. - `__init__.py`: When you define a new task, you'll have to import it here. - `base_task.py`: Generic base class for all tasks, with documentation. - `takeoff.py`: This is the first task, already defined for you, and set to run by default. - ... - `agents/`: Develop your reinforcement learning agents here. - `__init__.py`: When you define a new agent, you'll have to import it here, just like tasks. - `base_agent.py`: Generic base class for all agents, with documentation. - `policy_search.py`: A sample agent has been provided here, and is set to run by default. - ... TasksOpen up the base class for tasks, `BaseTask`, defined in `tasks/base_task.py`:```pythonclass BaseTask: """Generic base class for reinforcement learning tasks.""" def __init__(self): """Define state and action spaces, initialize other task parameters.""" pass def set_agent(self, agent): """Set an agent to carry out this task; to be called from update.""" self.agent = agent def reset(self): """Reset task and return initial condition.""" raise NotImplementedError def update(self, timestamp, pose, angular_velocity, linear_acceleration): """Process current data, call agent, return action and done flag.""" raise NotImplementedError ```All tasks must inherit from this class to function properly. You will need to override the `reset()` and `update()` methods when defining a task, otherwise you will get `NotImplementedError`'s. Besides these two, you should define the state (observation) space and the action space for the task in the constructor, `__init__()`, and initialize any other variables you may need to run the task.Now compare this with the first concrete task `Takeoff`, defined in `tasks/takeoff.py`:```pythonclass Takeoff(BaseTask): """Simple task where the goal is to lift off the ground and reach a target height.""" ...```In `__init__()`, notice how the state and action spaces are defined using [OpenAI Gym spaces](https://gym.openai.com/docs/spaces), like [`Box`](https://github.com/openai/gym/blob/master/gym/spaces/box.py). These objects provide a clean and powerful interface for agents to explore. For instance, they can inspect the dimensionality of a space (`shape`), ask for the limits (`high` and `low`), or even sample a bunch of observations using the `sample()` method, before beginning to interact with the environment. 
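For instance, a standalone sketch of probing a `Box(6,)` space (the ±25 limits here are illustrative assumptions, not the task's actual bounds): ###Code
from gym import spaces

action_space = spaces.Box(low=-25.0, high=25.0, shape=(6,))
print(action_space.shape)       # (6,)
print(action_space.low, action_space.high)
print(action_space.sample())    # one random action within the limits
###Output _____no_output_____ ###Markdown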
We also set a time limit (`max_duration`) for each episode here, and the height (`target_z`) that the quadcopter needs to reach for a successful takeoff.The `reset()` method is meant to give you a chance to reset/initialize any variables you need in order to prepare for the next episode. You do not need to call it yourself; it will be invoked externally. And yes, it will be called once before each episode, including the very first one. Here `Takeoff` doesn't have any episode variables to initialize, but it must return a valid _initial condition_ for the task, which is a tuple consisting of a [`Pose`](http://docs.ros.org/api/geometry_msgs/html/msg/Pose.html) and [`Twist`](http://docs.ros.org/api/geometry_msgs/html/msg/Twist.html) object. These are ROS message types used to convey the pose (position, orientation) and velocity (linear, angular) you want the quadcopter to have at the beginning of an episode. You may choose to supply the same initial values every time, or change it a little bit, e.g. `Takeoff` drops the quadcopter off from a small height with a bit of randomness.> **Tip**: Slightly randomized initial conditions can help the agent explore the state space faster.Finally, the `update()` method is perhaps the most important. This is where you define the dynamics of the task and engage the agent. It is called by a ROS process periodically (roughly 30 times a second, by default), with current data from the simulation. A number of arguments are available: `timestamp` (you can use this to check for timeout, or compute velocities), `pose` (position, orientation of the quadcopter), `angular_velocity`, and `linear_acceleration`. You do not have to include all these variables in every task, e.g. `Takeoff` only uses pose information, and even that requires a 7-element state vector.Once you have prepared the state you want to pass on to your agent, you will need to compute the reward, and check whether the episode is complete (e.g. agent crossed the time limit, or reached a certain height). Note that these two things (`reward` and `done`) are based on actions that the agent took in the past. When you are writing your own agents, you have to be mindful of this.Now you can pass in the `state`, `reward` and `done` values to the agent's `step()` method and expect an action vector back that matches the action space that you have defined, in this case a `Box(6,)`. After checking that the action vector is non-empty, and clamping it to the space limits, you have to convert it into a ROS `Wrench` message. The first 3 elements of the action vector are interpreted as force in x, y, z directions, and the remaining 3 elements convey the torque to be applied around those axes, respectively.Return the `Wrench` object (or `None` if you don't want to take any action) and the `done` flag from your `update()` method (note that when `done` is `True`, the `Wrench` object is ignored, so you can return `None` instead). This will be passed back to the simulation as a control command, and will affect the quadcopter's pose, orientation, velocity, etc. You will be able to gauge the effect when the `update()` method is called in the next time step. AgentsReinforcement learning agents are defined in a similar way. Open up the generic agent class, `BaseAgent`, defined in `agents/base_agent.py`, and the sample agent `RandomPolicySearch` defined in `agents/policy_search.py`. They are actually even simpler to define - you only need to implement the `step()` method that is discussed above. 
It needs to consume `state` (vector), `reward` (scalar value) and `done` (boolean), and produce an `action` (vector). The state and action vectors must match the respective space indicated by the task. And that's it!Well, that's just to get things working correctly! The sample agent given `RandomPolicySearch` uses a very simplistic linear policy to directly compute the action vector as a dot product of the state vector and a matrix of weights. Then, it randomly perturbs the parameters by adding some Gaussian noise, to produce a different policy. Based on the average reward obtained in each episode ("score"), it keeps track of the best set of parameters found so far, how the score is changing, and accordingly tweaks a scaling factor to widen or tighten the noise. ###Code %%html <div style="width: 100%; text-align: center;"> <h3>Teach a Quadcopter How to Tumble</h3> <video poster="images/quadcopter_tumble.png" width="640" controls muted> <source src="images/quadcopter_tumble.mp4" type="video/mp4" /> <p>Video: Quadcopter tumbling, trying to get off the ground</p> </video> </div> ###Output _____no_output_____ ###Markdown Obviously, this agent performs very poorly on the task. It does manage to move the quadcopter, which is good, but instead of a stable takeoff, it often leads to dizzying cartwheels and somersaults! And that's where you come in - your first _task_ is to design a better agent for this takeoff task. Instead of messing with the sample agent, create new file in the `agents/` directory, say `policy_gradients.py`, and define your own agent in it. Remember to inherit from the base agent class, e.g.:```pythonclass DDPG(BaseAgent): ...```You can borrow whatever you need from the sample agent, including ideas on how you might modularize your code (using helper methods like `act()`, `learn()`, `reset_episode_vars()`, etc.).> **Note**: This setup may look similar to the common OpenAI Gym paradigm, but there is one small yet important difference. Instead of the agent calling a method on the environment (to execute an action and obtain the resulting state, reward and done value), here it is the task that is calling a method on the agent (`step()`). If you plan to store experience tuples for learning, you will need to cache the last state ($S_{t-1}$) and last action taken ($A_{t-1}$), then in the next time step when you get the new state ($S_t$) and reward ($R_t$), you can store them along with the `done` flag ($\left\langle S_{t-1}, A_{t-1}, R_t, S_t, \mathrm{done?}\right\rangle$).When an episode ends, the agent receives one last call to the `step()` method with `done` set to `True` - this is your chance to perform any cleanup/reset/batch-learning (note that no reset method is called on an agent externally). The action returned on this last call is ignored, so you may safely return `None`. The next call would be the beginning of a new episode.One last thing - in order to run your agent, you will have to edit `agents/__init__.py` and import your agent class in it, e.g.:```pythonfrom quad_controller_rl.agents.policy_gradients import DDPG```Then, while launching ROS, you will need to specify this class name on the commandline/terminal:```bashroslaunch quad_controller_rl rl_controller.launch agent:=DDPG```Okay, now the first task is cut out for you - follow the instructions below to implement an agent that learns to take off from the ground. For the remaining tasks, you get to define the tasks as well as the agents! 
Use the `Takeoff` task as a guide, and refer to the `BaseTask` docstrings for the different methods you need to override. Use some debug print statements to understand the flow of control better. And just like creating new agents, new tasks must inherit `BaseTask`, they need be imported into `tasks/__init__.py`, and specified on the commandline when running:```bashroslaunch quad_controller_rl rl_controller.launch task:=Hover agent:=DDPG```> **Tip**: You typically need to launch ROS and then run the simulator manually. But you can automate that process by either copying/symlinking your simulator to `quad_controller_rl/sim/DroneSim` (`DroneSim` must be an executable/link to one), or by specifying it on the command line, as follows:> > ```bash> roslaunch quad_controller_rl rl_controller.launch task:=Hover agent:=DDPG sim:=> ``` Task 1: Takeoff Implement takeoff agentTrain an agent to successfully lift off from the ground and reach a certain threshold height. Develop your agent in a file under `agents/` as described above, implementing at least the `step()` method, and any other supporting methods that might be necessary. You may use any reinforcement learning algorithm of your choice (note that the action space consists of continuous variables, so that may somewhat limit your choices).The task has already been defined (in `tasks/takeoff.py`), which you should not edit. The default target height (Z-axis value) to reach is 10 units above the ground. And the reward function is essentially the negative absolute distance from that set point (upto some threshold). An episode ends when the quadcopter reaches the target height (x and y values, orientation, velocity, etc. are ignored), or when the maximum duration is crossed (5 seconds). See `Takeoff.update()` for more details, including episode bonus/penalty.As you develop your agent, it's important to keep an eye on how it's performing. Build in a mechanism to log/save the total rewards obtained in each episode to file. Once you are satisfied with your agent's performance, return to this notebook to plot episode rewards, and answer the questions below. Plot episode rewardsPlot the total rewards obtained in each episode, either from a single run, or averaged over multiple runs. ###Code import os os.getcwd() # TODO: Read and plot episode rewards import pandas as pd df_stats = pd.read_csv('../out/stats_2018-02-13_23-50-28.csv') df_stats[['total_reward']].plot(title="Episode Rewards") ###Output _____no_output_____ ###Markdown **Q**: What algorithm did you use? Briefly discuss why you chose it for this task.**A**: Deep Q algorithm**Q**: Using the episode rewards plot, discuss how the agent learned over time.- Was it an easy task to learn or hard?- Was there a gradual learning curve, or an aha moment?- How good was the final performance of the agent? (e.g. mean rewards over the last 10 episodes)**A**: hard to learn Task 2: Hover Implement hover agentNow, your agent must take off and hover at the specified set point (say, 10 units above the ground). Same as before, you will need to create an agent and implement the `step()` method (and any other supporting methods) to apply your reinforcement learning algorithm. You may use the same agent as before, if you think your implementation is robust, and try to train it on the new task. But then remember to store your previous model weights/parameters, in case your results were worth keeping. 
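One lightweight way to keep those weights around, assuming your agent's networks are Keras models (the attribute and file names here are hypothetical; adapt them to your implementation): ###Code
# save after training on the takeoff task...
agent.actor.save_weights('out/takeoff_actor.h5')
agent.critic.save_weights('out/takeoff_critic.h5')
# ...and restore before fine-tuning on the hover task
agent.actor.load_weights('out/takeoff_actor.h5')
agent.critic.load_weights('out/takeoff_critic.h5')
###Output _____no_output_____ ###Markdown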
States and rewards Even if you can use the same agent, you will need to create a new task, which will allow you to change the state representation you pass in, how you verify when the episode has ended (the quadcopter needs to hover for at least a few seconds), etc. In this hover task, you may want to pass in the target height as part of the state (otherwise how would the agent know where you want it to go?). You may also need to revisit how rewards are computed. You can do all this in a new task file, e.g. `tasks/hover.py` (remember to follow the steps outlined above to create a new task):```pythonclass Hover(BaseTask): ...```**Q**: Did you change the state representation or reward function? If so, please explain below what worked best for you, and why you chose that scheme. Include short code snippet(s) if needed. **A**: Implementation notes **Q**: Discuss your implementation below briefly, using the following questions as a guide:- What algorithm(s) did you try? What worked best for you?- What was your final choice of hyperparameters (such as $\alpha$, $\gamma$, $\epsilon$, etc.)?- What neural network architecture did you use (if any)? Specify layers, sizes, activation functions, etc. **A**: Plot episode rewards As before, plot the episode rewards, either from a single run, or averaged over multiple runs. Comment on any changes in learning behavior. ###Code # TODO: Read and plot episode rewards ###Output _____no_output_____ ###Markdown Task 3: Landing What goes up, must come down! But safely! Implement landing agent This time, you will need to edit the starting state of the quadcopter to place it at a position above the ground (at least 10 units). And change the reward function to make the agent learn to settle down _gently_. Again, create a new task for this (e.g. `Landing` in `tasks/landing.py`), and implement the changes. Note that you will have to modify the `reset()` method to return a position in the air, perhaps with some upward velocity to mimic a recent takeoff. Once you're satisfied with your task definition, create another agent or repurpose an existing one to learn this task. This might be a good chance to try out a different approach or algorithm. Initial condition, states and rewards **Q**: How did you change the initial condition (starting state), state representation and/or reward function? Please explain below what worked best for you, and why you chose that scheme. Were you able to build in a reward mechanism for landing gently? **A**: Implementation notes **Q**: Discuss your implementation below briefly, using the same questions as before to guide you. **A**: Plot episode rewards As before, plot the episode rewards, either from a single run, or averaged over multiple runs. This task is a little different from the previous ones, since you're starting in the air. Was it harder to learn? Why/why not? ###Code # TODO: Read and plot episode rewards ###Output _____no_output_____ ###Markdown Task 4: Combined In order to design a complete flying system, you will need to incorporate all these basic behaviors into a single agent. Setup end-to-end task The end-to-end task we are considering here is simply to take off, hover in-place for some duration, and then land. Time to create another task! But think about how you might go about it. Should it be one meta-task that activates appropriate sub-tasks, one at a time? Or would a single combined task with something like waypoints be easier to implement?
There is no right or wrong way here - experiment and find out what works best (and then come back to answer the following). **Q**: What setup did you ultimately go with for this combined task? Explain briefly. **A**: Implement combined agent Using your end-to-end task, implement the combined agent so that it learns to take off (at least 10 units above ground), hover (again, at least 10 units above ground), and gently come back to ground level. Combination scheme and implementation notes Just like the task itself, it's up to you whether you want to train three separate (sub-)agents, or a single agent for the complete end-to-end task. **Q**: What did you end up doing? What challenges did you face, and how did you resolve them? Discuss any other implementation notes below. **A**: Plot episode rewards As before, plot the episode rewards, either from a single run, or averaged over multiple runs. ###Code # TODO: Read and plot episode rewards ###Output _____no_output_____
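###Markdown The TODO cells above all need the same utility, so here is a small sketch of a reward-plotting helper, assuming the stats CSV format used in Task 1 (one row per episode with a 'total_reward' column): ###Code
import pandas as pd
import matplotlib.pyplot as plt

def plot_episode_rewards(csv_path, window=10):
    """Plot per-episode rewards and a rolling mean from a training stats CSV."""
    df = pd.read_csv(csv_path)
    df['total_reward'].plot(alpha=0.4, label='per episode')
    df['total_reward'].rolling(window).mean().plot(label='{}-episode mean'.format(window))
    plt.xlabel('Episode')
    plt.ylabel('Total reward')
    plt.legend()
    plt.show()
 ###Output _____no_output_____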
jupyter notebooks/Explore classes and properties of Schema.ipynb
###Markdown How to use the biothings_schema package to explore classes and properties ###Code # load python package from biothings_schema import Schema # load schema se = Schema("https://raw.githubusercontent.com/data2health/schemas/biothings/biothings/biothings_curie_kevin.jsonld") ###Output _____no_output_____ ###Markdown Find all classes defined in the Schema ###Code se.list_all_classes() ###Output _____no_output_____ ###Markdown Access class using different ID formats 1. Access using URI ###Code scls = se.get_class("http://schema.biothings.io/Gene") scls ###Output _____no_output_____ ###Markdown 2. Access using CURIE ###Code scls = se.get_class("bts:Gene") scls ###Output _____no_output_____ ###Markdown 3. Access using label ###Code scls = se.get_class("Gene") scls ###Output _____no_output_____ ###Markdown Find the URI of a specific class ###Code scls = se.get_class("Gene") scls.uri ###Output _____no_output_____ ###Markdown Find the label of a specific class ###Code scls = se.get_class("Gene") scls.label ###Output _____no_output_____ ###Markdown Find the CURIE of a specific class ###Code scls = se.get_class("Gene") scls.name ###Output _____no_output_____ ###Markdown Response if class is not defined ###Code scls = se.get_class("dd") scls.uri scls.name scls.label ###Output _____no_output_____ ###Markdown Find all parents of a specific class ###Code # find parents of "Gene" class scls = se.get_class("Gene") scls.parent_classes ###Output _____no_output_____ ###Markdown Find all direct children of a specific class ###Code # find direct children of "MolecularEntity" class scls = se.get_class("MolecularEntity") scls.child_classes ###Output _____no_output_____ ###Markdown Find all descendants of a specific class ###Code # find descendants of "MolecularEntity" class scls = se.get_class("MolecularEntity") scls.descendant_classes ###Output _____no_output_____ ###Markdown Find properties specifically defined for a class ###Code # find properties specifically defined for "Gene" class scls = se.get_class("Gene") scls.list_properties(group_by_class=False) ###Output _____no_output_____ ###Markdown Find all properties related to a class (including the parents' properties) ###Code # find all properties related to "Gene" scls = se.get_class("Gene") scls.list_properties(class_specific=False) ###Output _____no_output_____ ###Markdown Explore where a class is used ###Code # find where "GenomicEntity" class is used scls = se.get_class("GenomicEntity") scls.used_by() ###Output _____no_output_____ ###Markdown Explore all information related to the class Including: 1. Related properties 2. Parent classes 3. Direct child classes 4. Where the class is used ###Code # explore all information related to "GenomicEntity" class scls = se.get_class("GenomicEntity") scls.describe() ###Output _____no_output_____ ###Markdown Access property using different ID formats 1. Access using URI ###Code sp = se.get_property("http://schema.biothings.io/ensembl") print(sp) ###Output bts:ensembl ###Markdown 2. Access using CURIE ###Code sp = se.get_property("bts:ensembl") print(sp) ###Output bts:ensembl ###Markdown 3.
Access using label ###Code sp = se.get_property("ensembl") print(sp) ###Output bts:ensembl ###Markdown Find all parents of a specific property ###Code # find parents of "ensembl" property sp = se.get_property("ensembl") sp.parent_properties ###Output _____no_output_____ ###Markdown Find all children of a specific property ###Code # find children of "identifier" property sp = se.get_property("identifier") sp.child_properties ###Output _____no_output_____ ###Markdown Find property description ###Code # description of "ensembl" property sp = se.get_property("ensembl") sp.description ###Output _____no_output_____ ###Markdown Explore information about a property Includes: 1. ID 2. Description 3. Domain (which class(es) use this property) 4. Range (the value type) 5. Parent properties 6. Child properties ###Code # explore "ensembl" property sp = se.get_property("ensembl") sp.describe() ###Output _____no_output_____
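###Markdown The lookups above can also be combined into a quick programmatic sweep. A sketch, assuming the elements returned by `child_classes` expose the same interface as the class objects demonstrated earlier: ###Code
# summarize each direct child of "MolecularEntity" with its own properties
for child in se.get_class("MolecularEntity").child_classes:
    print(child.name, child.list_properties(group_by_class=False))
 ###Output _____no_output_____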
03CodingExercise7Dictionaries.ipynb
###Markdown Create a dictionary where all the keys are strings, and all values are integers. For example {'Monday' : 19,'Tuesday' : 20} Just write the dictionary on a single line, don't assign a variable name to the dictionary. ###Code {'Coursera' : 1,'Udemy' : 2,'EdX' : 3} ###Output _____no_output_____
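###Markdown For reference (not part of the exercise), such a dictionary can also be bound to a name and iterated over: ###Code
courses = {'Coursera': 1, 'Udemy': 2, 'EdX': 3}
for name, rank in courses.items():
    print(name, rank)
 ###Output _____no_output_____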
Section 4/Trading_system.ipynb
###Markdown Trading System with Moving Average Signals ###Code import numpy as np import pandas as pd import pandas_datareader as pdr import matplotlib.pyplot as plt %matplotlib inline gld = pdr.get_data_yahoo('GLD', '2013-01-01') gld.drop('Adj Close', axis=1, inplace=True) gld['9-day'] = gld['Close'].rolling(9).mean() gld['21-day'] = gld['Close'].rolling(21).mean() gld['Change'] = np.log(gld.Close / gld.Close.shift()) gld.tail() with plt.style.context('ggplot'): plt.figure(figsize=(8,6)) plt.plot(gld.Close[-120:]) plt.plot(gld['9-day'][-120:]) plt.plot(gld['21-day'][-120:]) plt.legend(loc=2) gld['position'] = np.where(gld['9-day'] > gld['21-day'], 1 , 0) gld['position'] = np.where(gld['9-day'] < gld['21-day'], -1, gld['position']) gld.tail(10) gld['system'] = gld['position'] * gld['Change'] gld[['Change', 'system']].cumsum().plot() ###Output _____no_output_____
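###Markdown The cumulative plot above can be summarized numerically. A sketch (not part of the original notebook) comparing buy-and-hold against the crossover system, using the log returns already computed: ###Code
returns = gld[['Change', 'system']].dropna()
total_return = np.exp(returns.sum()) - 1                 # cumulative simple return
sharpe = np.sqrt(252) * returns.mean() / returns.std()   # annualized Sharpe ratio
print(total_return)
print(sharpe)
 ###Output _____no_output_____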
content/python/pandas/.ipynb_checkpoints/Pandas_basic-Copy1-checkpoint.ipynb
###Markdown --- title: "Pandas" author: "Palaniappan S" date: 2020-08-11 description: "-" type: technical_note draft: false --- ###Code import pandas as pd df = pd.read_csv('glass.csv') df df.head(5) df.tail(5) df.info() df.shape df.columns df.isnull() df.isnull().sum() ###Output _____no_output_____
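###Markdown Once the missing-value counts are known, two common follow-ups are dropping or imputing; a sketch, not part of the original note: ###Code
df_dropped = df.dropna()          # drop rows containing any missing value
df_filled = df.fillna(df.mean())  # or impute numeric columns with their means
df_filled.head()
 ###Output _____no_output_____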
9_not_in_final/ETL_trinity.ipynb
###Markdown make data db ###Code # load file pickle plateDB = pickle.load(open(os.path.join(dir_save, 'file_summary_mwt.pickle'),'rb')) # get paths with trinity.id.dat pMWT = plateDB.index[~plateDB[('filepath','trinity.id.dat')].isna()].values del plateDB # make dataframe MWTDB = pd.DataFrame({'mwtpath':pMWT}) # take a look at the db to see if any trinity pickle is missing # instantiate report_capture = np.zeros(len(pMWT),dtype='bool') for plateid, pPlate in enumerate(MWTDB['mwtpath']): # get expected path to trinity data pfile = os.path.join(pPlate, 'trinity_all_worms.pickle') # see if the file exists if os.path.exists(pfile): report_capture[plateid] = True else: print(f'{plateid} does not exist', end='\r') # report result print(f'{np.sum(report_capture)}/{len(report_capture)} files exist') # delete the plates that failed to concatenate trinity MWTDB.drop(index=MWTDB.index[~report_capture].values, inplace=True) ###Output _____no_output_____ ###Markdown Create MWTDB ###Code # add paths to trinity files MWTDB['trinity_path'] = list(map(lambda x: os.path.join(x,'trinity_all_worms.pickle'), MWTDB['mwtpath'])) # reset index MWTDB.reset_index(drop=True, inplace=True) # extract experiment features df = MWTDB['mwtpath'].str.split(pat='/', expand=True) MWTDB['expname'] = df.iloc[:,4] MWTDB['groupname'] = df.iloc[:,5] MWTDB['platename'] = df.iloc[:,6] # get number of rows per pickle file # note some trinity files may not be converted to pickle files. Instead of checking availability, # randomly choose 1.1M numbers and then use only the first 1M rows that have files pickle_rows = np.zeros(MWTDB.shape[0], dtype='int') for i, p in enumerate(MWTDB['trinity_path']): if i%5==0: print(f'getting row numbers from {i}th file', end='\r') df = pd.read_pickle(p) pickle_rows[i] = df.shape[0] MWTDB['rows'] = pickle_rows # define dropbox save folder, mkdir if not exist pDropbox_home = '/Users/connylin/Dropbox/MWT/db' pReplace = '/Volumes/COBOLT' # replace path MWTDB['mwtpath_dropbox'] = list(map(lambda p: p.replace(pReplace, pDropbox_home), MWTDB['mwtpath'])) MWTDB['trinity_path_dropbox'] = list(map(lambda p: p.replace(pReplace, pDropbox_home), MWTDB['trinity_path'])) # save database pickle.dump(MWTDB, open(os.path.join(dir_save, 'MWTDB_trinity_N2400mM.pickle'),'wb')) ###Output _____no_output_____
MWTDB['trinity_path'][:10]]` Issues:* Each csv is ~100MB * 800 = 80GB csv. My computer won't be able to open this file. Can I predict which tap number the worm is reacting to by its behavior before and after the tap?* for wildtype* for ethanol vs non-ethanol* for mutants? ###Code # look at behavior and see if we can predict which tap it is MWTDB = pickle.load(open(os.path.join(dir_save, 'MWTDB_trinity_N2400mM.pickle'),'rb')) ###Output _____no_output_____ ###Markdown approach 1 random 10 plates ###Code # choose 10 plates of 0mM and 10 plates of 400mM to look at np.random.seed(318) ind_0mM = np.random.choice(MWTDB.index[MWTDB['groupname']=='N2'].values, 10, replace=False) ind_400mM = np.random.choice(MWTDB.index[MWTDB['groupname']=='N2_400mM'].values, 10, replace=False) # combine index from 0mM and 400mM i = np.hstack((ind_0mM, ind_400mM)) # get trinity file paths from random samples ptrinity = MWTDB['trinity_path'].iloc[i].values # load data df = pd.concat([pd.read_pickle(p) for p in ptrinity]) df.info() ###Output <class 'pandas.core.frame.DataFrame'> Int64Index: 11894033 entries, 1 to 1579776 Data columns (total 17 columns): # Column Dtype --- ------ ----- 0 mwtid_trdb int64 1 etoh int64 2 time float64 3 speed float64 4 bias float64 5 tap float64 6 loc_x float64 7 loc_y float64 8 morphwidth float64 9 midline float64 10 area float64 11 angular float64 12 aspect float64 13 kink float64 14 curve float64 15 crab float64 16 wormid int64 dtypes: float64(14), int64(3) memory usage: 1.6 GB ###Markdown approach 2, random 1 million rows from each group* 20 plates gives 11,894,033 rows of data. 800/20 = 40*12M = 480M rows of data* 300s*20 frames per sec = 6000 time points. 1M rows would have 1000/6 = 500/3 = 166 samples per time point. Will start with this and see how it goes. ###Code # get number of rows per trinity file df = pd.read_pickle(os.path.join(dir_save, 'fileinfo_trinity_N2400mM.pickle')) # get sum row_total = df['row_number'].sum() print(f'total number of rows: {row_total}') # randomly choose between those numbers # get the data MWTDB['trinity_path'] ###Output _____no_output_____
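###Markdown A sketch of the sampling plan above: draw a target number of rows per group in proportion to each file's row count, then read only those rows. Column names follow the MWTDB frame built earlier, and the 1M-row target is the estimate from the notes: ###Code
def sample_group_rows(db, group, n_target=1_000_000, seed=318):
    rng = np.random.default_rng(seed)
    sub = db[db['groupname'] == group]
    # probability of picking a file is proportional to its row count
    weights = (sub['rows'] / sub['rows'].sum()).values
    counts = rng.multinomial(n_target, weights)   # rows to draw from each file
    parts = []
    for (path, n_rows), k in zip(sub[['trinity_path', 'rows']].values, counts):
        frame = pd.read_pickle(path)
        keep = rng.choice(int(n_rows), size=min(int(k), int(n_rows)), replace=False)
        parts.append(frame.iloc[keep])
    return pd.concat(parts, ignore_index=True)
 ###Output _____no_output_____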
KanchiTank_ML_Classification.ipynb
###Markdown Mushroom Classification Using Different Classifiers In this project, we will examine the data and create a machine learning algorithm that will detect whether a mushroom is edible or poisonous from its characteristics like cap shape, cap color, gill color, etc. using different classifiers. The dataset used in this project is "mushrooms.csv" which contains 8124 instances of mushrooms with 23 features like cap-shape, cap-surface, cap-color, bruises, odor, etc. and is made available by UCI Machine Learning. Importing the packages ###Code import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import os from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from sklearn.metrics import classification_report, confusion_matrix from sklearn.tree import export_graphviz import graphviz ###Output _____no_output_____ ###Markdown Checking the files in the directory ###Code print(os.listdir("C:/Users/Kanchi/PycharmProjects/Mushroom-Classification")) ###Output ['.ipynb_checkpoints', 'images', 'KanchiTank_ML_Classification.ipynb', 'Mushroom-Classification', 'mushrooms.csv'] ###Markdown Reading the csv file of the dataset Pandas read_csv() function imports a CSV file (in our case, ‘mushrooms.csv’) to DataFrame format. ###Code df = pd.read_csv("mushrooms.csv") ###Output _____no_output_____ ###Markdown Examining the Data After importing the data, to learn more about the dataset, we'll use the .head(), .info() and .describe() methods. ###Code df.head() df.info() df.describe() ###Output _____no_output_____ ###Markdown Shape of the dataset ###Code print("Dataset shape:", df.shape) ###Output Dataset shape: (8124, 23) ###Markdown Visualizing the count of edible and poisonous mushrooms ###Code df['class'].value_counts() df["class"].unique() count = df['class'].value_counts() plt.figure(figsize=(8,7)) sns.barplot(count.index, count.values, alpha=0.8, palette="prism") plt.ylabel('Count', fontsize=12) plt.xlabel('Class', fontsize=12) plt.title('Number of poisonous/edible mushrooms') #plt.savefig("mushrooms1.png", format='png', dpi=900) plt.show() ###Output _____no_output_____ ###Markdown The dataset is balanced. Data Manipulation The data is categorical so we’ll use LabelEncoder to convert it to ordinal. LabelEncoder converts each value in a column to a number. This approach requires the category column to be of ‘category’ datatype. By default, a non-numerical column is of ‘object’ datatype. From the df.describe() method, we saw that our columns are of ‘object’ datatype. So we will have to change the type to ‘category’ before using this approach. ###Code df = df.astype('category') df.dtypes labelencoder=LabelEncoder() for column in df.columns: df[column] = labelencoder.fit_transform(df[column]) df.head() ###Output _____no_output_____ ###Markdown The column "veil-type" is 0 and not contributing to the data so we remove it. ###Code df['veil-type'] df=df.drop(["veil-type"],axis=1) ###Output _____no_output_____ ###Markdown Quick look at the characteristics of the data The violin plot below represents the distribution of the classification characteristics. It is possible to see that the "gill-color" property of the mushroom splits into two parts, one below 3 and one above 3, which may contribute to the classification.
###Code df_div = pd.melt(df, "class", var_name="Characteristics") fig, ax = plt.subplots(figsize=(16,6)) p = sns.violinplot(ax = ax, x="Characteristics", y="value", hue="class", split = True, data=df_div, inner = 'quartile', palette = 'Set1') df_no_class = df.drop(["class"],axis = 1) p.set_xticklabels(rotation = 90, labels = list(df_no_class.columns)); #plt.savefig("violinplot.png", format='png', dpi=900, bbox_inches='tight') ###Output _____no_output_____ ###Markdown Let's look at the correlation between the variables ###Code plt.figure(figsize=(14,12)) sns.heatmap(df.corr(),linewidths=.1,cmap="Purples", annot=True, annot_kws={"size": 7}) plt.yticks(rotation=0); #plt.savefig("corr.png", format='png', dpi=900, bbox_inches='tight') ###Output _____no_output_____ ###Markdown The variable most strongly correlated with the class (here, negatively) is usually the most informative one for classification. In this case, "gill-color" has a correlation of -0.53 with the class, so let's look at it closely. ###Code df[['class', 'gill-color']].groupby(['gill-color'], as_index=False).mean().sort_values(by='class', ascending=False) ###Output _____no_output_____ ###Markdown Let's look closely at the feature "gill-color". ###Code new_var = df[['class', 'gill-color']] new_var = new_var[new_var['gill-color']<=3.5] sns.factorplot('class', col='gill-color', data=new_var, kind='count', size=4.5, aspect=.8, col_wrap=4); #plt.savefig("gillcolor1.png", format='png', dpi=900, bbox_inches='tight') new_var=df[['class', 'gill-color']] new_var=new_var[new_var['gill-color']>3.5] sns.factorplot('class', col='gill-color', data=new_var, kind='count', size=4.5, aspect=.8, col_wrap=4); #plt.savefig("gillcolor2.png", format='png', dpi=900, bbox_inches='tight') ###Output _____no_output_____ ###Markdown Preparing the Data Setting the feature matrix X and the target y, then splitting the data into train and test sets. ###Code X = df.drop(['class'], axis=1) y = df["class"] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.1) ###Output _____no_output_____ ###Markdown Classification Methods 1. Decision Tree Classification ###Code from sklearn.tree import DecisionTreeClassifier dt = DecisionTreeClassifier() dt.fit(X_train, y_train) os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/' dot_data = export_graphviz(dt, out_file=None, feature_names=X.columns, filled=True, rounded=True, special_characters=True) graph = graphviz.Source(dot_data) #graph.render(filename='DecisionTree') graph ###Output _____no_output_____ ###Markdown Feature importance By all the methods examined so far, the most important feature is "gill-color".
###Code features_list = X.columns.values feature_importance = dt.feature_importances_ sorted_idx = np.argsort(feature_importance) plt.figure(figsize=(8,7)) plt.barh(range(len(sorted_idx)), feature_importance[sorted_idx], align='center', color ="red") plt.yticks(range(len(sorted_idx)), features_list[sorted_idx]) plt.xlabel('Importance') plt.title('Feature importance') plt.draw() #plt.savefig("featureimp.png", format='png', dpi=900, bbox_inches='tight') plt.show() ###Output _____no_output_____ ###Markdown Predicting and estimating the result ###Code y_pred_dt = dt.predict(X_test) print("Decision Tree Classifier report: \n\n", classification_report(y_test, y_pred_dt)) print("Test Accuracy: {}%".format(round(dt.score(X_test, y_test)*100, 2))) ###Output Test Accuracy: 100.0% ###Markdown Confusion Matrix for Decision Tree Classifier ###Code cm = confusion_matrix(y_test, y_pred_dt) x_axis_labels = ["Edible", "Poisonous"] y_axis_labels = ["Edible", "Poisonous"] f, ax = plt.subplots(figsize =(7,7)) sns.heatmap(cm, annot = True, linewidths=0.2, linecolor="black", fmt = ".0f", ax=ax, cmap="Purples", xticklabels=x_axis_labels, yticklabels=y_axis_labels) plt.xlabel("PREDICTED LABEL") plt.ylabel("TRUE LABEL") plt.title('Confusion Matrix for Decision Tree Classifier') #plt.savefig("dtcm.png", format='png', dpi=900, bbox_inches='tight') plt.show() ###Output _____no_output_____ ###Markdown 2. Logistic Regression Classification ###Code from sklearn.linear_model import LogisticRegression lr = LogisticRegression(solver="lbfgs", max_iter=500) lr.fit(X_train, y_train) print("Test Accuracy: {}%".format(round(lr.score(X_test, y_test)*100,2))) ###Output Test Accuracy: 94.96% ###Markdown Classification report of Logistic Regression Classifier ###Code y_pred_lr = lr.predict(X_test) print("Logistic Regression Classifier report: \n\n", classification_report(y_test, y_pred_lr)) ###Output Logistic Regression Classifier report: precision recall f1-score support 0 0.96 0.94 0.95 433 1 0.94 0.96 0.95 380 accuracy 0.95 813 macro avg 0.95 0.95 0.95 813 weighted avg 0.95 0.95 0.95 813 ###Markdown Confusion Matrix for Logistic Regression Classifier ###Code cm = confusion_matrix(y_test, y_pred_lr) x_axis_labels = ["Edible", "Poisonous"] y_axis_labels = ["Edible", "Poisonous"] f, ax = plt.subplots(figsize =(7,7)) sns.heatmap(cm, annot = True, linewidths=0.2, linecolor="black", fmt = ".0f", ax=ax, cmap="Purples", xticklabels=x_axis_labels, yticklabels=y_axis_labels) plt.xlabel("PREDICTED LABEL") plt.ylabel("TRUE LABEL") plt.title('Confusion Matrix for Logistic Regression Classifier') #plt.savefig("lrcm.png", format='png', dpi=900, bbox_inches='tight') plt.show() ###Output _____no_output_____ ###Markdown 3. 
KNN Classification ###Code from sklearn.neighbors import KNeighborsClassifier best_Kvalue = 0 best_score = 0 for i in range(1,10): knn = KNeighborsClassifier(n_neighbors=i) knn.fit(X_train, y_train) if knn.score(X_test, y_test) > best_score: best_score = knn.score(X_test, y_test) best_Kvalue = i # refit with the best k so the report below uses the selected model knn = KNeighborsClassifier(n_neighbors=best_Kvalue) knn.fit(X_train, y_train) print("Best KNN Value: {}".format(best_Kvalue)) print("Test Accuracy: {}%".format(round(best_score*100,2))) ###Output Best KNN Value: 1 Test Accuracy: 100.0% ###Markdown Classification report of KNN Classifier ###Code y_pred_knn = knn.predict(X_test) print("KNN Classifier report: \n\n", classification_report(y_test, y_pred_knn)) ###Output KNN Classifier report: precision recall f1-score support 0 1.00 0.99 1.00 433 1 0.99 1.00 1.00 380 accuracy 1.00 813 macro avg 1.00 1.00 1.00 813 weighted avg 1.00 1.00 1.00 813 ###Markdown Confusion Matrix for KNN Classifier ###Code cm = confusion_matrix(y_test, y_pred_knn) x_axis_labels = ["Edible", "Poisonous"] y_axis_labels = ["Edible", "Poisonous"] f, ax = plt.subplots(figsize =(7,7)) sns.heatmap(cm, annot = True, linewidths=0.2, linecolor="black", fmt = ".0f", ax=ax, cmap="Purples", xticklabels=x_axis_labels, yticklabels=y_axis_labels) plt.xlabel("PREDICTED LABEL") plt.ylabel("TRUE LABEL") plt.title('Confusion Matrix for KNN Classifier') #plt.savefig("knncm.png", format='png', dpi=900, bbox_inches='tight') plt.show() ###Output _____no_output_____ ###Markdown 4. SVM Classification ###Code from sklearn.svm import SVC svm = SVC(random_state=42, gamma="auto") svm.fit(X_train, y_train) print("Test Accuracy: {}%".format(round(svm.score(X_test, y_test)*100, 2))) ###Output Test Accuracy: 100.0% ###Markdown Classification report of SVM Classifier ###Code y_pred_svm = svm.predict(X_test) print("SVM Classifier report: \n\n", classification_report(y_test, y_pred_svm)) ###Output SVM Classifier report: precision recall f1-score support 0 1.00 1.00 1.00 433 1 1.00 1.00 1.00 380 accuracy 1.00 813 macro avg 1.00 1.00 1.00 813 weighted avg 1.00 1.00 1.00 813 ###Markdown Confusion Matrix for SVM Classifier ###Code cm = confusion_matrix(y_test, y_pred_svm) x_axis_labels = ["Edible", "Poisonous"] y_axis_labels = ["Edible", "Poisonous"] f, ax = plt.subplots(figsize =(7,7)) sns.heatmap(cm, annot = True, linewidths=0.2, linecolor="black", fmt = ".0f", ax=ax, cmap="Purples", xticklabels=x_axis_labels, yticklabels=y_axis_labels) plt.xlabel("PREDICTED LABEL") plt.ylabel("TRUE LABEL") plt.title('Confusion Matrix for SVM Classifier') #plt.savefig("svmcm.png", format='png', dpi=900, bbox_inches='tight') plt.show() ###Output _____no_output_____ ###Markdown 5.
Naive Bayes Classification ###Code from sklearn.naive_bayes import GaussianNB nb = GaussianNB() nb.fit(X_train, y_train) print("Test Accuracy: {}%".format(round(nb.score(X_test, y_test)*100, 2))) ###Output Test Accuracy: 92.62% ###Markdown Classification report of Naive Bayes Classifier ###Code y_pred_nb = nb.predict(X_test) print("Naive Bayes Classifier report: \n\n", classification_report(y_test, y_pred_nb)) ###Output Naive Bayes Classifier report: precision recall f1-score support 0 0.94 0.92 0.93 433 1 0.91 0.94 0.92 380 accuracy 0.93 813 macro avg 0.93 0.93 0.93 813 weighted avg 0.93 0.93 0.93 813 ###Markdown Confusion Matrix for Naive Bayes Classifier ###Code cm = confusion_matrix(y_test, y_pred_nb) x_axis_labels = ["Edible", "Poisonous"] y_axis_labels = ["Edible", "Poisonous"] f, ax = plt.subplots(figsize =(7,7)) sns.heatmap(cm, annot = True, linewidths=0.2, linecolor="black", fmt = ".0f", ax=ax, cmap="Purples", xticklabels=x_axis_labels, yticklabels=y_axis_labels) plt.xlabel("PREDICTED LABEL") plt.ylabel("TRUE LABEL") plt.title('Confusion Matrix for Naive Bayes Classifier') #plt.savefig("nbcm.png", format='png', dpi=900, bbox_inches='tight') plt.show() ###Output _____no_output_____ ###Markdown 6. Random Forest Classification ###Code from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=100, random_state=42) rf.fit(X_train, y_train) print("Test Accuracy: {}%".format(round(rf.score(X_test, y_test)*100, 2))) ###Output Test Accuracy: 100.0% ###Markdown Classification report of Random Forest Classifier ###Code y_pred_rf = rf.predict(X_test) print("Random Forest Classifier report: \n\n", classification_report(y_test, y_pred_rf)) ###Output Random Forest Classifier report: precision recall f1-score support 0 1.00 1.00 1.00 433 1 1.00 1.00 1.00 380 accuracy 1.00 813 macro avg 1.00 1.00 1.00 813 weighted avg 1.00 1.00 1.00 813 ###Markdown Confusion Matrix for Random Forest Classifier ###Code cm = confusion_matrix(y_test, y_pred_rf) x_axis_labels = ["Edible", "Poisonous"] y_axis_labels = ["Edible", "Poisonous"] f, ax = plt.subplots(figsize =(7,7)) sns.heatmap(cm, annot = True, linewidths=0.2, linecolor="black", fmt = ".0f", ax=ax, cmap="Purples", xticklabels=x_axis_labels, yticklabels=y_axis_labels) plt.xlabel("PREDICTED LABEL") plt.ylabel("TRUE LABEL") plt.title('Confusion Matrix for Random Forest Classifier'); #plt.savefig("rfcm.png", format='png', dpi=900, bbox_inches='tight') plt.show() ###Output _____no_output_____ ###Markdown Predictions Predicting some of the X_test results and matching them with the true (y_test) values using the Decision Tree Classifier. ###Code preds = dt.predict(X_test) print(preds[:36]) print(y_test[:36].values) # 0 - Edible # 1 - Poisonous ###Output [0 1 1 0 1 1 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 1 0 0 0 0 1 1 1 0 0 0 1] [0 1 1 0 1 1 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 1 0 0 0 0 1 1 1 0 0 0 1]
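###Markdown Since all six classifiers were scored on the same held-out split, their test accuracies can be collected into one comparison chart. A sketch using the fitted estimators from above: ###Code
results = {'Decision Tree': dt.score(X_test, y_test),
           'Logistic Regression': lr.score(X_test, y_test),
           'KNN': knn.score(X_test, y_test),
           'SVM': svm.score(X_test, y_test),
           'Naive Bayes': nb.score(X_test, y_test),
           'Random Forest': rf.score(X_test, y_test)}
pd.Series(results).sort_values().plot(kind='barh', color='purple', figsize=(8, 5))
plt.xlabel('Test accuracy')
plt.title('Test accuracy by classifier')
plt.show()
 ###Output _____no_output_____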
notebooks-1/8-mlp-scratch.ipynb
###Markdown Implementing a Multilayer Perceptron from Scratch ###Code import d2l from mxnet import np, npx, gluon npx.set_np() batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) ###Output _____no_output_____ ###Markdown Initialize the model parameters. ###Code num_inputs, num_outputs, num_hiddens = 784, 10, 256 W1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens)) b1 = np.zeros(num_hiddens) W2 = np.random.normal(scale=0.01, size=(num_hiddens, num_outputs)) b2 = np.zeros(num_outputs) params = [W1, b1, W2, b2] for param in params: param.attach_grad() ###Output _____no_output_____ ###Markdown The activation function. ###Code def relu(X): return np.maximum(X, 0) ###Output _____no_output_____ ###Markdown Define the model. ###Code def net(X): X = X.reshape((-1, num_inputs)) H = relu(np.dot(X, W1) + b1) return np.dot(H, W2) + b2 ###Output _____no_output_____ ###Markdown Training. ###Code loss = gluon.loss.SoftmaxCrossEntropyLoss() num_epochs, lr = 10, 0.5 d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size)) ###Output _____no_output_____ ###Markdown Prediction. ###Code d2l.predict_ch3(net, test_iter) ###Output _____no_output_____
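###Markdown As a cross-check on the d2l helpers, test accuracy can also be computed directly from the trained `net`; a small sketch using the same MXNet ndarray API as above: ###Code
acc_sum, n = 0.0, 0
for X, y in test_iter:
    y_hat = net(X)
    # count correct predictions per batch
    acc_sum += float((y_hat.argmax(axis=1).astype(y.dtype) == y).sum())
    n += y.size
print('test accuracy:', acc_sum / n)
 ###Output _____no_output_____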
archive/Chris_clstm_best_model_training_and_retrospective_analysis.ipynb
###Markdown First, load data ###Code import numpy as np import pandas as pd import matplotlib.pyplot as plt from keras.preprocessing.text import Tokenizer from sklearn.model_selection import train_test_split from keras.preprocessing import sequence from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM from keras.layers.convolutional import Conv1D from keras.layers.convolutional import MaxPooling1D from keras.layers import Dropout from keras.layers.embeddings import Embedding import random #import ndac import sklearn %matplotlib inline # read original data from /gscratch/pfaendtner/cnyambura/NovoNordisk_Capstone/dataframes data = pd.read_csv('/gscratch/pfaendtner/cnyambura/NovoNordisk_Capstone/dataframes/DF_prest.csv', index_col=0) data.head() #check shape of data data.shape ###Output _____no_output_____ ###Markdown Setup nt doc and classify expression ###Code #remove sequences that are not divisile by three def nt_seq_doc(nt_sequence): if 'GACAAGCTTGCGGCCGCA' not in nt_sequence: return None true_nt = nt_sequence.split('GACAAGCTTGCGGCCGCA')[1] if len(true_nt) % 3 != 0: return None return ' '.join([true_nt[i:i+3] for i in range(0, len(true_nt), 3)]) # split quantiles def assign_class(conc): if conc <= low_cut: return 0 elif conc >= high_cut: return 1 return data['nt_seq_doc'] = data['nt_seq'].apply(nt_seq_doc) data = data[pd.notnull(data['nt_seq_doc'])] # identify high and low classes by conc_cf quantiles low_cut = data['conc_cf'].quantile(0.25) high_cut = data['conc_cf'].quantile(0.75) data['class'] = data['conc_cf'].apply(assign_class) data = data[pd.notnull(data['class'])] # check shape print('data shape: ', data.shape) ###Output data shape: (22364, 8) ###Markdown Model Training and Data Pre-Processing ###Code #only keep proteins that have <5 PrESTs per protein low_num_uniprots = data.groupby('uniprot_id').count().aa_seq[data.groupby('uniprot_id').count().aa_seq < 5].index.tolist() #len(low_num_uniprots) data_filtered = data[data.uniprot_id.isin(low_num_uniprots)] #data_filtered #X = data['nt_seq_doc'] #y = data['class'].values # Get the number of prESTs per each uniprot uniprot_counts = data_filtered.groupby('uniprot_id').count().prest_id # Add all uniprots with a single prEST to the training set training_uniprots = uniprot_counts[uniprot_counts == 1].index.tolist() len(training_uniprots) # Randomly pick 70% of other uniprots and add them to training set random.seed(10) other_uniprots = uniprot_counts[uniprot_counts > 1].index.tolist() k = int(len(other_uniprots)*0.70) training_uniprots += random.sample(other_uniprots, k) len(training_uniprots) # Add all remaining uniprots to test set testing_uniprots = list(set(uniprot_counts.index.tolist()) - set(training_uniprots)) len(testing_uniprots) print('Total number of proteins:', len(data_filtered.uniprot_id.unique())) print('Number of training proteins:', len(training_uniprots)) print('Number of testing proteins:', len(testing_uniprots)) # Add all prESTs in training uniprots to training set nt_train = data_filtered[data_filtered.uniprot_id.isin(training_uniprots)] nt_train.shape # Repeat for test set nt_test = data_filtered[data_filtered.uniprot_id.isin(testing_uniprots)] nt_test.shape # define sequence documents docs_train = list(nt_train['nt_seq_doc']) # create the tokenizer t = Tokenizer() # fit the tokenizer on the documents t.fit_on_texts(docs_train) # integer encode documents X_train = t.texts_to_sequences(docs_train) y_train = nt_train['class'].values # repeat to test set docs_test = 
list(nt_test['nt_seq_doc']) # fit the tokenizer on the documents t.fit_on_texts(docs_test) # integer encode documents X_test = t.texts_to_sequences(docs_test) y_test = nt_test['class'].values # create test-train split #X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) # fix random seed for reproducibility #np.random.seed(7) # repeat to test set docs = list(data['nt_seq_doc']) # fit the tokenizer on the documents t.fit_on_texts(docs) X = t.texts_to_sequences(docs) # load the dataset but only keep the top n words, zero the rest top_words = len(t.word_index) + 1 # truncate and pad input sequences seq_lengths = [len(seq) for seq in X] max_seq_length = max(seq_lengths) X_train = sequence.pad_sequences(X_train, maxlen=max_seq_length) X_test = sequence.pad_sequences(X_test, maxlen=max_seq_length) #X = sequence.pad_sequences(X, maxlen=max_seq_length) # create the model using parameters from grid search embedding_vector_length = 16 drop = 0.5 recurrent_drop = 0.5 model = Sequential() model.add(Embedding(top_words, embedding_vector_length, input_length=max_seq_length)) model.add(Conv1D(filters=200, kernel_size=5, padding='same', activation='selu')) model.add(MaxPooling1D(pool_size=4)) model.add(LSTM(150, dropout=drop, recurrent_dropout=recurrent_drop)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) # record training progress history = model.fit(X_train, y_train, epochs=35, batch_size=64, validation_data=(X_test, y_test)) # Final evaluation of the model scores = model.evaluate(X_test, y_test, verbose=0) print("Accuracy: %.2f%%" % (scores[1]*100)) history.history # plot loss vs. epoch # https://machinelearningmastery.com/diagnose-overfitting-underfitting-lstm-models/ plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model train vs validation loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper right') plt.show() y_pred = model.predict(X_test) len(y_pred) nt_test.loc[:,'act_prob'] = y_pred nt_test_final = nt_test[['prest_id','uniprot_id','conc_cf','aa_seq','nt_seq','class','act_prob']].sort_values('uniprot_id',ascending=0).reset_index(drop=True) print('Original total number of experiments:',len(nt_test_final)) print('Original total number of proteins:',len(nt_test_final.uniprot_id.unique())) print('Original Number of passed experiments:',len(nt_test_final[nt_test_final['class']==1])) print('Original Pass rate: %.2f%%'%(np.true_divide(len(nt_test_final[nt_test_final['class']==1]),len(nt_test))*100)) print('Number of proteins with >1 high expression:',len(nt_test_final[nt_test_final['class']==1].uniprot_id.unique())) len(nt_test_final[nt_test_final['class']==1]) # count each confusion-matrix cell with a combined boolean mask print('True positive:', len(nt_test_final[(nt_test_final['act_prob'] > 0.5) & (nt_test_final['class']==1)])) len(nt_test_final[nt_test_final['class']==0]) print('False positive:', len(nt_test_final[(nt_test_final['act_prob'] > 0.5) & (nt_test_final['class']==0)])) #print 'False Positive:',len(DF_retro_final[(DF_retro_final.ens_score > .5) & (DF_retro_final.expressed==False)]) print('True negative:', len(nt_test_final[(nt_test_final['act_prob'] < 0.5) & (nt_test_final['class']==0)])) len(nt_test_final[(nt_test_final['act_prob'] < 0.5)]) print('False negative:', len(nt_test_final[(nt_test_final['act_prob'] < 0.5) & (nt_test_final['class']==1)])) #print 'False
Negative:',len(DF_retro_final[(DF_retro_final.ens_score < .5) & (DF_retro_final.expressed)]) #grab only the top expressing proteins n = 5 np.random.seed(0) output_df = pd.DataFrame(columns=['prest_id','uniprot_id','class','act_prob','nt_seq']) remaining_df = nt_test_final.copy() for i in range(n): print('Iteration',i) new_output_df = remaining_df.sort_values(['uniprot_id','act_prob'],ascending=[1,0]).drop_duplicates('uniprot_id') output_df = pd.concat([output_df,new_output_df]) pred_pos_proteins = set(output_df[output_df['act_prob'] > 0.5].uniprot_id) true_pos_proteins = set(output_df[output_df['class']==1].uniprot_id) print('Total number of proposed experiments:',len(output_df)) print('Total number of expressed proteins:',len(true_pos_proteins)) print('Overall pass rate:',np.true_divide(len(true_pos_proteins),len(output_df))) # Prepare for next iteration remaining_df = remaining_df.drop(new_output_df.index) remaining_df = remaining_df[remaining_df.uniprot_id.isin(true_pos_proteins)==False] print() print('Percent saved experiments:',(1 - np.true_divide(len(output_df),len(nt_test_final)))*100,'%') remain_df = nt_test_final.copy() n = 5 np.random.seed(0) # initialize the accumulator before concatenating into it oput_df = pd.DataFrame(columns=['prest_id','uniprot_id','class','act_prob','nt_seq']) n_output_df = remain_df.sort_values(['uniprot_id','act_prob'],ascending=[1,0]).drop_duplicates('uniprot_id') oput_df = pd.concat([oput_df,n_output_df]) pred_pos_proteins = set(oput_df[oput_df['act_prob'] > 0.5].uniprot_id) true_pos_proteins = set(oput_df[oput_df['class']==1].uniprot_id) print('Total number of proposed experiments:',len(oput_df)) print('Total number of expressed proteins:',len(true_pos_proteins)) print('Overall pass rate:',np.true_divide(len(true_pos_proteins),len(oput_df))) # Prepare for next iteration (use this cell's frames) remaining_df = remain_df.drop(n_output_df.index) remaining_df = remaining_df[remaining_df.uniprot_id.isin(true_pos_proteins)==False] remaining_df ###Output _____no_output_____
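###Markdown The manual confusion-matrix counts above can be cross-checked in one call with scikit-learn: ###Code
from sklearn.metrics import confusion_matrix

y_true = nt_test_final['class'].astype(int)
y_hat = (nt_test_final['act_prob'] > 0.5).astype(int)
tn, fp, fn, tp = confusion_matrix(y_true, y_hat).ravel()
print('TP:', tp, 'FP:', fp, 'TN:', tn, 'FN:', fn)
 ###Output _____no_output_____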
Kinematic_Bicycle_Model.ipynb
###Markdown In this notebook, you will implement the kinematic bicycle model. The model accepts velocity and steering rate inputs and steps through the bicycle kinematic equations. Once the model is implemented, you will provide a set of inputs to drive the bicycle in a figure 8 trajectory. The bicycle kinematics are governed by the following set of equations:\begin{align*}\dot{x}_c &= v \cos{(\theta + \beta)} \\\dot{y}_c &= v \sin{(\theta + \beta)} \\\dot{\theta} &= \frac{v \cos{\beta} \tan{\delta}}{L} \\\dot{\delta} &= \omega \\\beta &= \tan^{-1}(\frac{l_r \tan{\delta}}{L})\end{align*}where the inputs are the bicycle speed $v$ and steering angle rate $\omega$. The input can also directly be the steering angle $\delta$ rather than its rate in the simplified case. The Python model will allow us both implementations. In order to create this model, it's a good idea to make use of Python class objects. This allows us to store the state variables as well as make functions for implementing the bicycle kinematics. The bicycle begins with zero initial conditions, has a maximum turning rate of 1.22 rad/s, a wheelbase length of 2m, and a length of 1.2m to its center of mass from the rear axle. From these conditions, we initialize the Python class as follows: ###Code from notebook_grader import BicycleSolution, grade_bicycle import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg class Bicycle(): def __init__(self): self.xc = 0 self.yc = 0 self.theta = 0 self.delta = 0 self.beta = 0 self.L = 2 self.lr = 1.2 self.w_max = 1.22 self.sample_time = 0.01 def reset(self): self.xc = 0 self.yc = 0 self.theta = 0 self.delta = 0 self.beta = 0 ###Output _____no_output_____ ###Markdown A sample time is required for numerical integration when propagating the kinematics through time. This is set to 10 milliseconds. We also have a reset function which sets all the state variables back to 0. With this sample time, implement the kinematic model using the function $\textit{step}$ defined in the next cell. The function should take speed + angular rate as inputs and update the state variables. Don't forget about the maximum turn rate on the bicycle! ###Code class Bicycle(Bicycle): def step(self, v, w): # ================================== # Implement kinematic model here # ================================== # clamp the steering rate to the bicycle's limit in both directions w = np.clip(w, -self.w_max, self.w_max) self.xc += v * np.cos(self.theta + self.beta) * self.sample_time self.yc += v * np.sin(self.theta + self.beta) * self.sample_time self.theta += v * np.cos(self.beta) * np.tan(self.delta) / self.L * self.sample_time self.delta += w * self.sample_time # the sideslip angle is an algebraic relation, not an integrated state self.beta = np.arctan2(self.lr * np.tan(self.delta), self.L) ###Output _____no_output_____ ###Markdown With the model set up, we can now start giving bicycle inputs and producing trajectories. Suppose we want the model to travel a circle of radius 10 m in 20 seconds. Using the relationship between the radius of curvature and the steering angle, the desired steering angle can be computed.\begin{align*} \tan{\delta} &= \frac{L}{r} \\ \delta &= \tan^{-1}(\frac{L}{r}) \\ &= \tan^{-1}(\frac{2}{10}) \\ &= 0.1974\end{align*}If the steering angle is directly set to 0.1974 using a simplified bicycle model, then the bicycle will travel in a circle without requiring any additional steering input.
The desired speed can be computed from the circumference of the circle:\begin{align*} v &= \frac{d}{t}\\ &= \frac{2 \pi 10}{20}\\ &= \pi\end{align*}We can now implement this in a loop to step through the model equations. We will also run our bicycle model solution along with your model to show you the expected trajectory. This will help you verify the correctness of your model. ###Code sample_time = 0.01 time_end = 20 model = Bicycle() solution_model = BicycleSolution() # set delta directly model.delta = np.arctan(2/10) solution_model.delta = np.arctan(2/10) t_data = np.arange(0,time_end,sample_time) x_data = np.zeros_like(t_data) y_data = np.zeros_like(t_data) x_solution = np.zeros_like(t_data) y_solution = np.zeros_like(t_data) for i in range(t_data.shape[0]): x_data[i] = model.xc y_data[i] = model.yc model.step(np.pi, 0) x_solution[i] = solution_model.xc y_solution[i] = solution_model.yc solution_model.step(np.pi, 0) model.beta = 0 solution_model.beta=0 plt.axis('equal') plt.plot(x_data, y_data,label='Learner Model') plt.plot(x_solution, y_solution,label='Solution Model') plt.legend() plt.show() ###Output _____no_output_____ ###Markdown The plot above shows the desired circle of 10m radius. The path is slightly offset which is caused by the sideslip effects due to $\beta$. By forcing $\beta = 0$ through uncommenting the last line in the loop, you can see that the offset disappears and the circle becomes centered at (0,10). However, in practice the steering angle cannot be directly set and must be changed through angular rate inputs $\omega$. The cell below corrects for this and sets angular rate inputs to generate the same circle trajectory. The speed $v$ is still maintained at $\pi$ m/s. ###Code sample_time = 0.01 time_end = 20 model.reset() solution_model.reset() t_data = np.arange(0,time_end,sample_time) x_data = np.zeros_like(t_data) y_data = np.zeros_like(t_data) x_solution = np.zeros_like(t_data) y_solution = np.zeros_like(t_data) for i in range(t_data.shape[0]): x_data[i] = model.xc y_data[i] = model.yc if model.delta < np.arctan(2/10): model.step(np.pi, model.w_max) else: model.step(np.pi, 0) x_solution[i] = solution_model.xc y_solution[i] = solution_model.yc if solution_model.delta < np.arctan(2/10): solution_model.step(np.pi, model.w_max) else: solution_model.step(np.pi, 0) plt.axis('equal') plt.plot(x_data, y_data,label='Learner Model') plt.plot(x_solution, y_solution,label='Solution Model') plt.legend() plt.show() ###Output _____no_output_____ ###Markdown Here are some other example trajectories: a square path, a spiral path, and a wave path. Uncomment each section to view. 
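###Markdown For reference, the time needed to ramp the steering to the target angle at the maximum rate is\begin{align*} t_{ramp} &= \frac{\delta}{\omega_{max}} = \frac{0.1974}{1.22} \approx 0.162 \text{ s},\end{align*}or about 16 of the 10-millisecond time steps, so the rate-input trajectory differs from the directly-set case only during this brief initial transient.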
###Code sample_time = 0.01 time_end = 60 model.reset() solution_model.reset() t_data = np.arange(0,time_end,sample_time) x_data = np.zeros_like(t_data) y_data = np.zeros_like(t_data) x_solution = np.zeros_like(t_data) y_solution = np.zeros_like(t_data) # maintain velocity at 4 m/s v_data = np.zeros_like(t_data) v_data[:] = 4 w_data = np.zeros_like(t_data) # ================================== # Square Path: set w at corners only # ================================== w_data[670:670+100] = 0.753 w_data[670+100:670+100*2] = -0.753 w_data[2210:2210+100] = 0.753 w_data[2210+100:2210+100*2] = -0.753 w_data[3670:3670+100] = 0.753 w_data[3670+100:3670+100*2] = -0.753 w_data[5220:5220+100] = 0.753 w_data[5220+100:5220+100*2] = -0.753 # ================================== # Spiral Path: high positive w, then small negative w # ================================== # w_data[:] = -1/100 # w_data[0:100] = 1 # ================================== # Wave Path: square wave w input # ================================== #w_data[:] = 0 #w_data[0:100] = 1 #w_data[100:300] = -1 #w_data[300:500] = 1 #w_data[500:5700] = np.tile(w_data[100:500], 13) #w_data[5700:] = -1 # ================================== # Step through bicycle model # ================================== for i in range(t_data.shape[0]): x_data[i] = model.xc y_data[i] = model.yc model.step(v_data[i], w_data[i]) x_solution[i] = solution_model.xc y_solution[i] = solution_model.yc solution_model.step(v_data[i], w_data[i]) plt.axis('equal') plt.plot(x_data, y_data,label='Learner Model') plt.plot(x_solution, y_solution,label='Solution Model') plt.legend() plt.show() ###Output _____no_output_____ ###Markdown We would now like the bicycle to travel a figure eight trajectory. Both circles in the figure eight have a radius of 8m and the path should complete in 30 seconds. The path begins at the bottom of the left circle and is shown in the figure below:![title](figure8.png)Determine the speed and steering rate inputs required to produce such trajectory and implement in the cell below. Make sure to also save your inputs into the arrays v_data and w_data, these will be used to grade your solution. The cell below also plots the trajectory generated by your own model. 
###Code sample_time = 0.01 time_end = 30 model.reset() t_data = np.arange(0,time_end,sample_time) x_data = np.zeros_like(t_data) y_data = np.zeros_like(t_data) v_data = np.zeros_like(t_data) w_data = np.zeros_like(t_data) # ================================== # Learner solution begins here # ================================== model.delta = np.arctan(2/8) w_max = 1.22 w_max_in_step = w_max * sample_time desired_angle = np.arctan(2/8) desired_time_steps = int(np.floor(desired_angle / w_max_in_step)) last_desired_w = desired_angle - desired_time_steps * w_max_in_step print(desired_time_steps) w_data[0:desired_time_steps-1] = w_max w_data[desired_time_steps] = last_desired_w half_quarter = int(1/8 * t_data.shape[0]) w_data[half_quarter - desired_time_steps : half_quarter + desired_time_steps - 1] = -w_max w_data[half_quarter*5 - desired_time_steps : half_quarter*5 + desired_time_steps] = w_max v_data = np.ones_like(t_data) * np.pi * 16/15 print(w_data[half_quarter]) for i in range(t_data.shape[0]): x_data[i] = model.xc y_data[i] = model.yc model.step(v_data[i], w_data[i]) # ================================== # Learner solution ends here # ================================== plt.axis('equal') plt.plot(x_data, y_data) plt.show() ###Output 20 -1.22 ###Markdown We will now run your speed and angular rate inputs through our bicycle model solution. This is to ensure that your trajectory is correct along with your model. The cell below will display the path generated by our model along with some waypoints on a desired figure 8. Surrounding these waypoints are error tolerance circles with radius 1.5m; your solution will pass the grader if the trajectory generated stays within 80% of these circles. ###Code grade_bicycle(t_data,v_data,w_data) ###Output Assessment passed! Your trajectory meets 82.5% of the waypoints. ###Markdown The cell below will save the time and vehicle inputs as a text file named $\textit{figure8.txt}$. To locate the file, change the end of your web directory to $\textit{/notebooks/Course_1_Module_4/figure8.txt}$. Once you are there, you can download the file and submit it to the Coursera grader to complete this assessment. ###Code data = np.vstack([t_data, v_data, w_data]).T np.savetxt('figure8.txt', data, delimiter=', ') ###Output _____no_output_____ ###Markdown Congratulations! You have now completed the assessment! Feel free to test the bicycle model with different inputs in the cell below, and see what trajectories they form. For example, try moving in an equilateral triangle. You'll find that it's rather difficult to generate desired trajectories by pre-setting the inputs. The next module on vehicle control will show you an easier and more accurate method. See you there! ###Code sample_time = 0.01 time_end = 30 model.reset() t_data = np.arange(0,time_end,sample_time) x_data = np.zeros_like(t_data) y_data = np.zeros_like(t_data) v_data = np.zeros_like(t_data) w_data = np.zeros_like(t_data) # ================================== # Test various inputs here # ================================== for i in range(t_data.shape[0]): # record the position before stepping so the plot reflects your inputs x_data[i] = model.xc y_data[i] = model.yc model.step(v_data[i], w_data[i]) plt.axis('equal') plt.plot(x_data, y_data) plt.show() ###Output _____no_output_____
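###Markdown As a sanity check on the inputs above: the figure eight is two circles of radius 8 m, so the path length is $2 \times 2\pi(8) = 32\pi$ m, and covering it in 30 s requires\begin{align*} v &= \frac{32\pi}{30} = \frac{16\pi}{15} \approx 3.35 \text{ m/s},\end{align*}matching the `np.pi * 16/15` used for `v_data`. Likewise the steering target is $\delta = \tan^{-1}(2/8) \approx 0.2450$ rad, which takes $0.2450/1.22 \approx 0.201$ s, i.e. about 20 of the 10-millisecond steps, to reach at the maximum rate, consistent with the printed `desired_time_steps` of 20.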
conll_2003baseline.ipynb
###Markdown CoNLL 2003 Named Entity Recognition. Dataset link: https://huggingface.co/datasets/conll2003 ###Code from google.colab import drive drive.mount('/content/drive') ! pip install transformers import numpy as np import pandas as pd from transformers import AutoTokenizer from tqdm import tqdm import tensorflow as tf import matplotlib.pyplot as plt def load_sentences(filepath): final = [] sentences = [] with open(filepath, 'r') as f: for line in f.readlines(): if (line == ('-DOCSTART- -X- -X- O\n') or line == '\n'): if len(sentences) > 0: final.append(sentences) sentences = [] else: l = line.split(' ') sentences.append((l[0], l[3].strip('\n'))) return final base_path = './drive/MyDrive/berkeley/capstone/demo/conll2003/' train_samples = load_sentences(base_path + 'train.txt') test_samples = load_sentences(base_path + 'test.txt') valid_samples = load_sentences(base_path + 'valid.txt') samples = train_samples + test_samples schema = ['_'] + sorted({tag for sentence in samples for _, tag in sentence}) # dataset quick view train_data = [] sentences=[] with open(base_path + 'train.txt', 'r') as f: for line in f.readlines(): if (line == ('-DOCSTART- -X- -X- O\n') or line == '\n'): if len(sentences) > 0: train_data.append(sentences) sentences = [] else: l = line.split(" ") # materialize the tuple; a bare generator expression would stay lazily bound to the loop variable sentences.append(tuple(l[i].strip('\n') for i in range(4))) l[0].strip('\n') ###Output Streaming output truncated to the last 5000 lines. -DOCSTART- -X- -X- O (this single line repeats for the remainder of the truncated stream; repeats elided)
###Markdown Model ###Code from transformers import AutoConfig, TFAutoModelForTokenClassification MODEL_NAME = 'bert-base-cased' config = AutoConfig.from_pretrained(MODEL_NAME, num_labels=len(schema)) model = TFAutoModelForTokenClassification.from_pretrained(MODEL_NAME, config=config) model.summary() tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) def tokenize_sample(sample): seq = [ (subtoken, tag) for token, tag in sample for subtoken in tokenizer(token)['input_ids'][1:-1] ] # wrap with the tokenizer's actual [CLS]/[SEP] ids rather than hard-coded values return [(tokenizer.cls_token_id, 'O')] + seq + [(tokenizer.sep_token_id, 'O')] def preprocess(samples): tag_index = {tag: i for i, tag in enumerate(schema)} tokenized_samples = list(tqdm(map(tokenize_sample, samples))) max_len = max(map(len, tokenized_samples)) X = np.zeros((len(samples), max_len), dtype=np.int32) y = np.zeros((len(samples), max_len), dtype=np.int32) for i, sentence in enumerate(tokenized_samples): for j, (subtoken_id, tag) in enumerate(sentence): X[i, j] = subtoken_id y[i,j] = tag_index[tag] return X, y X_train, y_train = preprocess(train_samples) X_test, y_test = preprocess(test_samples) X_valid, y_valid = preprocess(valid_samples) EPOCHS=5 BATCH_SIZE=8 optimizer = tf.keras.optimizers.Adam(learning_rate=0.000001) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy']) history = model.fit(tf.constant(X_train), tf.constant(y_train), validation_data=(X_test, y_test), epochs=EPOCHS, batch_size=BATCH_SIZE) plt.figure(figsize=(14,8)) plt.title('Losses') plt.plot(history.history['loss'], label='Train Loss') plt.plot(history.history['val_loss'], label='Validation Loss') plt.ylabel('Loss') plt.xlabel('Epochs') plt.legend() plt.show() ###Output _____no_output_____
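###Markdown A short inference sketch (not part of the original notebook): run the fine-tuned model on one held-out sentence and map the predicted label ids back to tag names through `schema`, reusing `tokenize_sample` from above. The padding length is assumed to match the preprocessed matrices: ###Code
def predict_tags(sample):
    subtokens = tokenize_sample(sample)            # [(subtoken_id, tag), ...]
    x = np.zeros((1, X_test.shape[1]), dtype=np.int32)
    for j, (tid, _) in enumerate(subtokens):
        x[0, j] = tid
    logits = model(tf.constant(x)).logits
    pred = tf.argmax(logits, axis=-1).numpy()[0]
    return [schema[i] for i in pred[:len(subtokens)]]

print(predict_tags(test_samples[0]))
 ###Output _____no_output_____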
periodicHill/sst_iddes_06/plots.ipynb
###Markdown Plot Periodic Hill Results for SST-IDDES model Run sst_iddes_06 with dynamic forcing everywhere, new code ###Code %%capture import sys import os sys.path.insert(1, '../utilities') import utilities import plotter import matplotlib.pyplot as plt from matplotlib.lines import Line2D import numpy as np import pandas as pd import copy figsize=(15,6) # Setup directories # Reference data refdir = os.path.abspath("../marchdf.periodicHill/refdata") basestyle= {'lw':1.25, 'ls':'-', 'marker':None, 'color':'r', 'mfc':plotter.cmap[-1], 'mec':plotter.cmap[-1], 'ms':3,} # Put all Nalu-Wind directories to plot here fdirlist=[ # Directory Input file Legend suffix Linestyle #['../sst/', 'periodicHill_sst.yaml', '', basestyle], ['../sst_iddes_05/', 'periodicHill_sstiddes.yaml', 'OLD CODE', {**basestyle, **dict(color='b')}], ['', 'periodicHill_sstiddes.yaml', 'NEW CODE', {**basestyle, **dict(color='r', ls='-', lw=2)}], ] # Define the data, styles, and the legend expdict = {'data':plotter.read_exp_data(os.path.join(refdir, "exp")), 'label':"EXP", 'lw':0, 'marker':plotter.markertype[2], 'color':plotter.cmap[-1], 'mfc':plotter.cmap[-1], 'mec':plotter.cmap[-1], 'ms':3, 'xoff':1.0} lesdict = {'data':plotter.read_les_data(os.path.join(refdir, "les")), 'label':"LES", 'lw':2.5, 'marker':None, 'color':plotter.cmap[2], 'mfc':plotter.cmap[-1], 'mec':plotter.cmap[-1], 'ms':0, 'xoff':1.0} v2fdict = {'data':plotter.read_cdp_data(os.path.join(refdir, "cdp-v2f")),'label':"CDP-v2f", 'lw':2.5, 'marker':None, 'color':plotter.cmap[2], 'mfc':plotter.cmap[-1], 'mec':plotter.cmap[-1], 'ms':0, 'xoff':0.0 } tamsdict = {'data':plotter.read_cdp_data(os.path.join(refdir, "cdp-tams")),'label':"CDP-v2f-TAMS", 'lw':2.5, 'marker':None, 'color':plotter.cmap[3], 'mfc':plotter.cmap[-1], 'mec':plotter.cmap[-1], 'ms':0, 'xoff':0.0 } # Comment out the data series you don't want to plot data2plot = [expdict, #lesdict, #v2fdict, #tamsdict, ] ###Output _____no_output_____ ###Markdown Plot velocity profiles ###Code legend_elements=[] # Loop through and plot each dataseriese for ds in data2plot: grouped = ds['data'].groupby(["x"]) xoffset = ds['xoff'] legend_elements += [ Line2D([0], [0], lw=ds['lw'], marker=ds['marker'], color=ds['color'], mfc=ds['mfc'], mec=ds['mec'], markersize=ds['ms'], label=ds['label'], ), ] #print(ds['data'].columns.tolist()) for k, (name, group) in enumerate(grouped): idx = group.y.values >= utilities.hill(group.x.values) if ("u" in ds['data'].columns.tolist()): plt.figure("u", figsize=figsize) p = plt.plot(group[idx].u + xoffset*group[idx].x, group[idx].y, lw=ds['lw'], color=ds['color'], marker=ds['marker'], mec=ds['mec'], mfc=ds['mfc'], ms=ds['ms'], ) if ("v" in ds['data'].columns.tolist()): plt.figure("v", figsize=figsize) p = plt.plot(group[idx].v + xoffset*group[idx].x, group[idx].y, lw=ds['lw'], color=ds['color'], marker=ds['marker'], mec=ds['mec'], mfc=ds['mfc'], ms=ds['ms'], ) # Nalu data for i, fdirentry in enumerate(fdirlist): fdir = fdirentry[0] yamlfile = fdirentry[1] suffix = fdirentry[2] style = fdirentry[3] yname = os.path.join(os.path.dirname(fdir), yamlfile) u0, rho0, mu, turb_model = plotter.parse_ic(yname) model = turb_model.upper().replace("_", "-") legend_elements += [ Line2D([0], [0], lw=style['lw'], ls=style['ls'], color=style['color'], label=f"Nalu-{model} "+suffix, marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms']) ] ndf = pd.read_csv(os.path.join(fdir, "profiles.dat")) ndf.loc[ndf.u > 5, ["u", "v", "w"]] = 0.0 grouped = ndf.groupby(["x"]) for k, (name, 
group) in enumerate(grouped): idx = group.y.values >= utilities.hill(group.x.values) plt.figure("u") p = plt.plot(group[idx].u + group[idx].x, group[idx].y, lw=style['lw'], ls=style['ls'], color=style['color'], marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms']) plt.figure("v") p = plt.plot(group[idx].v + group[idx].x, group[idx].y, lw=style['lw'], ls=style['ls'], color=style['color'], marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms']) x = np.linspace(0,9,1001) # Set up the U-velocity plot plt.figure("u") ax = plt.gca() plt.plot(x, utilities.hill(x), lw=1) plt.fill_between(x, utilities.hill(x),0, color='gray') plt.xlabel(r"$\langle u_x \rangle + x$", fontsize=22, fontweight="bold") plt.ylabel(r"$y / h$", fontsize=22, fontweight="bold") plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold") plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold") plt.xlim([-0.5, 9.5]) plt.ylim([0, 3.5]) legend = ax.legend(handles=legend_elements, loc="lower left") plt.grid() plt.title('Horizontal velocity', fontsize=18) plt.tight_layout() # Set up the V-velocity plot plt.figure("v") ax = plt.gca() plt.plot(x, utilities.hill(x), lw=1) plt.fill_between(x, utilities.hill(x),0, color='gray') plt.xlabel(r"$\langle u_y \rangle + x$", fontsize=22, fontweight="bold") plt.ylabel(r"$y / h$", fontsize=22, fontweight="bold") plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold") plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold") plt.xlim([-0.5, 9.5]) plt.ylim([0, 3.5]) legend = ax.legend(handles=legend_elements, loc="best") plt.grid() plt.title('Vertical velocity', fontsize=18) plt.tight_layout() ###Output _____no_output_____ ###Markdown Plot surface & time history of Nalu-Runs ###Code # Nalu data maxt=-1.0E100 for i, fdirentry in enumerate(fdirlist): fdir = fdirentry[0] yamlfile = fdirentry[1] suffix = fdirentry[2] style = fdirentry[3] yname = os.path.join(os.path.dirname(fdir), yamlfile) u0, rho0, mu, turb_model = plotter.parse_ic(yname) model = turb_model.upper().replace("_", "-") legend_elements += [ Line2D([0], [0], lw=style['lw'], ls=style['ls'], color=style['color'], label=f"Nalu-{model} "+suffix, marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms']) ] h = 1.0 tau = h / u0 dynPres = rho0 * 0.5 * u0 * u0 ndf = pd.read_csv(os.path.join(fdir, "profiles.dat")) ndf.loc[ndf.u > 5, ["u", "v", "w"]] = 0.0 grouped = ndf.groupby(["x"]) cf = pd.read_csv(os.path.join(fdir, "tw.dat")) cf["cf"] = cf.tauw / dynPres plt.figure("cf", figsize=figsize) plt.plot(cf.x, cf.cf, lw=style['lw'], ls=style['ls'], color=style['color'], label=f"Nalu-{model} "+suffix, marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms']) inlet = pd.read_csv(os.path.join(fdir, "inlet.dat")) plt.figure("u_inlet", figsize=figsize) plt.plot((inlet.t-inlet.t[0]) / tau, inlet.u, lw=style['lw'], ls=style['ls'], color=style['color'], label=f"Nalu-{model} "+suffix, marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms']) plt.figure("tke_inlet", figsize=figsize) plt.plot((inlet.t-inlet.t[0]) / tau, inlet.tke, lw=style['lw'], ls=style['ls'], color=style['color'], label=f"Nalu-{model} "+suffix, marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms']) plt.figure("sdr_inlet", figsize=figsize) plt.plot((inlet.t-inlet.t[0]) / tau, inlet.sdr, lw=style['lw'], ls=style['ls'], color=style['color'], label=f"Nalu-{model} "+suffix, 
marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms']) # Get the simulation time if (max((inlet.t-inlet.t[0]) / tau) > maxt): maxt=max((inlet.t-inlet.t[0]) / tau) # Plot the LES cf ldir = os.path.join(refdir, "les") cf = pd.read_csv(os.path.join(ldir, "hill_LES_cf_digitized.dat"), delim_whitespace=True) plt.figure("cf") plt.plot(cf.x, cf.cf, lw=2, color=plotter.cmap[2], label="LES") # Format figures plt.figure("cf") ax = plt.gca() plt.xlabel(r"$x$", fontsize=22, fontweight="bold") plt.ylabel(r"$c_f$", fontsize=22, fontweight="bold") plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold") plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold") plt.hlines(0.0, 0, 9.5, linestyles='dashed', linewidth=0.5) legend = ax.legend(loc="best") plt.title('Skin friction', fontsize=18, fontweight="bold") plt.tight_layout() plt.figure("u_inlet") ax = plt.gca() plt.xlabel(r"$(t-t_0) / \tau$", fontsize=22, fontweight="bold") plt.ylabel(r"$\bar{u} (x=0)$", fontsize=22, fontweight="bold") plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold") plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold") plt.hlines(1.0, 0, maxt, linestyles='dashed', linewidth=0.5) legend = ax.legend(loc="best") plt.title('Inlet velocity', fontsize=18, fontweight="bold") plt.tight_layout() plt.figure("tke_inlet") ax = plt.gca() plt.xlabel(r"$(t-t_0) / \tau$", fontsize=22, fontweight="bold") plt.ylabel(r"$\bar{k} (x=0)$", fontsize=22, fontweight="bold") plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold") plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold") legend = ax.legend(loc="best") plt.title('Inlet TKE', fontsize=18, fontweight="bold") plt.tight_layout() plt.figure("sdr_inlet") ax = plt.gca() plt.xlabel(r"$(t-t_0) / \tau$", fontsize=22, fontweight="bold") plt.ylabel(r"$\bar{\omega} (x=0)$", fontsize=22, fontweight="bold") plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold") plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold") legend = ax.legend(loc="best") plt.tight_layout() ###Output _____no_output_____
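###Markdown An optional extra (not in the original notebook): the named matplotlib figures built above can be written to disk in one pass; the output filenames are illustrative. ###Code
# Save every named comparison figure created above.
for name in ["u", "v", "cf", "u_inlet", "tke_inlet", "sdr_inlet"]:
    plt.figure(name)
    plt.savefig(f"{name}_comparison.png", dpi=150)  # illustrative filenames
###Output _____no_output_____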
titanic_competition.ipynb
###Markdown [kaggle notebook kernel](https://www.kaggle.com/allieubissetroinuxsa/titanic-competition-v1?scriptVersionId=29609674) Imports ###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
###Output _____no_output_____ ###Markdown Loading Data ###Code
train_URL = 'https://raw.githubusercontent.com/Data-HAWKS/titanic-competition/master/titanic-competition/train.csv'
test_URL = 'https://raw.githubusercontent.com/Data-HAWKS/titanic-competition/master/titanic-competition/test.csv'
sub_URL = 'https://raw.githubusercontent.com/Data-HAWKS/titanic-competition/master/titanic-competition/gender_submission.csv'
train_df1 = pd.read_csv(train_URL, index_col='PassengerId')
test_df = pd.read_csv(test_URL, index_col='PassengerId')
sub_df = pd.read_csv(sub_URL)
###Output _____no_output_____ ###Markdown Data Exploration ###Code
test_df.head()
#test_df.shape
train_df1.head()
train_df1.shape
train_df = train_df1.copy()  # work on a copy of the labelled training data
full_df = pd.concat([train_df1, test_df], axis=0)  # combined view of train and test
print(full_df.shape)
full_df.reset_index().head()
###Output (1309, 11) ###Markdown Train ###Code
train_df.head()
train_df.dtypes
train_df.shape
train_df.info()
train_df.isnull().sum()
sns.heatmap(data=train_df.isnull(),yticklabels=False,cbar=False,cmap='viridis')
###Output _____no_output_____ ###Markdown Imputing NaN values (method: mean). Survived countplot: Survived = 1, NOT Survived = 0 ###Code
sns.set_style('whitegrid')
sns.countplot(x='Survived', data=train_df,palette='RdBu_r')
# Survived based on Sex, so we set the parameter hue = 'Sex'
sns.countplot(x='Survived', hue='Sex' ,data=train_df,palette='RdBu_r')
###Output _____no_output_____ ###Markdown From the above: more females survived than males. ###Code
sns.countplot(x='Survived', hue='Pclass' ,data=train_df,palette='RdBu_r')
sns.distplot(train_df['Age'].dropna(), kde=False, color='darkred',bins=30)
train_df['Age'].hist(bins=30,color='darkred',alpha=0.7)
###Output _____no_output_____ ###Markdown The Age distribution is close to normal. ###Code
sns.countplot(x='SibSp', data=train_df)
plt.figure(figsize=(12,7))
sns.boxplot(x='Pclass',y='Age',data=train_df, palette='winter')
def impute_nan_age(cols):
    Age = cols[0]
    Pclass = cols[1]
    if pd.isnull(Age):
        if Pclass == 1:
            return int(train_df.groupby('Pclass')['Age'].mean()[1])
        elif Pclass == 2:
            return int(train_df.groupby('Pclass')['Age'].mean()[2])
        else:
            return int(train_df.groupby('Pclass')['Age'].mean()[3])
    else:
        return Age
###Output _____no_output_____ ###Markdown applying the function to impute the Age using Pclass ###Code
train_df['Age'] = train_df[['Age','Pclass']].apply(impute_nan_age, axis=1)
sns.heatmap(data=train_df.isnull(),yticklabels=False,cbar=False,cmap='viridis')
###Output _____no_output_____ ###Markdown For now, I will drop Cabin since it has many null values ###Code
train = train_df.drop('Cabin',axis=1)
train.head()
sex = pd.get_dummies(train['Sex'],drop_first=True)
embark = pd.get_dummies(train['Embarked'],drop_first=True)
train.drop(['Name','Sex','Embarked','Ticket'], axis=1, inplace=True)
train = pd.concat([train,sex,embark],axis=1)
train.head()
train.reset_index().head()
###Output _____no_output_____ ###Markdown Building Classification Models ###Code
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
###Output _____no_output_____ ###Markdown Train Test Split ###Code
x = train.drop('Survived',axis=1)
y =
train['Survived']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.30, random_state=101)
###Output _____no_output_____ ###Markdown Training & testing the model: KNeighborsClassifier ###Code
classifier = KNeighborsClassifier( n_neighbors = 5, weights = 'distance', metric = 'minkowski', p=2 )
model = classifier.fit(x_train,y_train)
y_pred1 = model.predict(x_test)
###Output _____no_output_____ ###Markdown Random Forest Classifier: [scikit-learn Random Forest Classifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) ###Code
model_2 = RandomForestClassifier(n_estimators=100)
model_2.fit(x_train,y_train)
y_pred2 = model_2.predict(x_test)
###Output _____no_output_____ ###Markdown accuracy_score ###Code
print(f'Random Forest Classifier accuracy score : {accuracy_score(y_true=y_test,y_pred= y_pred2)}')
print(f'KNeighbors Classifier accuracy score : {accuracy_score(y_true=y_test,y_pred=y_pred1)}')
###Output Random Forest Classifier accuracy score : 0.8059701492537313 KNeighbors Classifier accuracy score : 0.7126865671641791 ###Markdown Submission ###Code
sub_df.head()
y_final_pred = y_pred3  # y_pred3 comes from the XGBoost cell in the extras section below; run that cell first
submission = pd.DataFrame( { 'PassengerId' : y_test.reset_index()['PassengerId'], 'Survived': y_final_pred } )
submission.to_csv('titanic_submission.csv', index = False)
submission.head()
###Output _____no_output_____ ###Markdown extras
```python
# importing required libraries
import pandas as pd
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score

# read the train and test dataset
train_data = pd.read_csv('train-data.csv')
test_data = pd.read_csv('test-data.csv')

# shape of the dataset
print('Shape of training data :',train_data.shape)
print('Shape of testing data :',test_data.shape)

# Now, we need to predict the missing target variable in the test data
# target variable - Survived

# separate the independent and target variable on training data
train_x = train_data.drop(columns=['Survived'],axis=1)
train_y = train_data['Survived']

# separate the independent and target variable on testing data
test_x = test_data.drop(columns=['Survived'],axis=1)
test_y = test_data['Survived']
```
**Create the object of the XGBoost model** You can also add other parameters and test your code here. Some settings are: *max_depth* and *n_estimators*
```python
model = XGBClassifier()

# fit the model with the training data
model.fit(train_x,train_y)

# predict the target on the train dataset
predict_train = model.predict(train_x)
print('\nTarget on train data',predict_train)

# Accuracy score on train dataset
accuracy_train = accuracy_score(train_y,predict_train)
print('\naccuracy_score on train dataset : ', accuracy_train)

# predict the target on the test dataset
predict_test = model.predict(test_x)
print('\nTarget on test data',predict_test)

# Accuracy score on test dataset
accuracy_test = accuracy_score(test_y,predict_test)
print('\naccuracy_score on test dataset : ', accuracy_test)
```
[XGBoost Model in Python using scikit-learn](https://hackernoon.com/want-a-complete-guide-for-xgboost-model-in-python-using-scikit-learn-sc11f31bq) [xgboost read the docs](https://xgboost.readthedocs.io/en/latest/index.html) ###Code
!pip install xgboost
from xgboost import XGBClassifier
param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}  # defined for reference but not passed to XGBClassifier below
model_3 = XGBClassifier()
model_3.fit(x_train,y_train)
y_pred3 = model_3.predict(x_test)
###Output _____no_output_____ ###Markdown linear SVC ###Code
from sklearn import svm
clf = svm.LinearSVC(max_iter=3000, random_state=101)
clf.fit(x_train,y_train)
y_pred4 =
clf.predict(x_test) print(f'Random Forest Classifier | accuracy score : {accuracy_score(y_true=y_test,y_pred= y_pred2)}\n') print(f'KNeighbors Classifier | accuracy score : {accuracy_score(y_true=y_test,y_pred=y_pred1)} \n') print(f'xgboost Classifier | accuracy score : {accuracy_score(y_true=y_test,y_pred=y_pred3)} \n') print(f'LinearSVC Classifier | accuracy score : {accuracy_score(y_true=y_test,y_pred=y_pred4)} \n') ###Output Random Forest Classifier | accuracy score : 0.8059701492537313 KNeighbors Classifier | accuracy score : 0.7126865671641791 xgboost Classifier | accuracy score : 0.8283582089552238 LinearSVC Classifier | accuracy score : 0.7985074626865671
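###Markdown A follow-up sketch (not in the original notebook): the submission above scores the validation split, but a real Kaggle upload needs predictions for the held-out `test_df`. The cell below assumes the same preprocessing steps and fills the one missing Fare with the median; the output filename is illustrative. ###Code
# Apply the same cleaning pipeline to the real test set and predict with the XGBoost model.
test_clean = test_df.copy()
test_clean['Age'] = test_clean[['Age','Pclass']].apply(impute_nan_age, axis=1)
test_clean['Fare'] = test_clean['Fare'].fillna(test_clean['Fare'].median())
test_clean = test_clean.drop(['Cabin','Name','Ticket'], axis=1)
sex_t = pd.get_dummies(test_clean['Sex'], drop_first=True)
embark_t = pd.get_dummies(test_clean['Embarked'], drop_first=True)
test_clean = pd.concat([test_clean.drop(['Sex','Embarked'], axis=1), sex_t, embark_t], axis=1)
kaggle_pred = model_3.predict(test_clean)
kaggle_sub = pd.DataFrame({'PassengerId': test_clean.index, 'Survived': kaggle_pred})
kaggle_sub.to_csv('titanic_submission_test.csv', index=False)
kaggle_sub.head()
###Output _____no_output_____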
doc/ipynb/executed/demo_cachedjit.ipynb
###Markdown IPython + cachedjit decorator. We can define a cachedjit function in IPython (and Jupyter, which uses IPython). The only limitation is that the type variables have to be defined in the same cell as the function. ###Code
import numpy as np
from fluidpythran import cachedjit

# pythran import numpy as np

@cachedjit
def laplace_pythran(image):
    """Laplace operator in NumPy for 2D images."""
    laplacian = (
        image[:-2, 1:-1] + image[2:, 1:-1] + image[1:-1, :-2] + image[1:-1, 2:]
        - 4*image[1:-1, 1:-1]
    )
    thresh = np.abs(laplacian) > 0.05
    return thresh

from skimage.data import astronaut
from skimage.color import rgb2gray
image = astronaut()
image = rgb2gray(image)
laplace_pythran(image)
###Output INFO  write Pythran signature in file /home/users/augier3pi/.fluidpythran/__cachedjit__/__ipython__2823fa65cda0fe3f58b60391a9d2e13b/laplace_pythran.pythran with types ['float64[:, :]']
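###Markdown Not in the original demo, but a quick way to see what the JIT buys: time the compiled function against a plain-NumPy twin (the twin below simply reuses the same body without the decorator; timings will vary by machine). ###Code
from timeit import timeit

def laplace_numpy(image):
    """Same operator without the cachedjit decorator, for comparison."""
    laplacian = (
        image[:-2, 1:-1] + image[2:, 1:-1] + image[1:-1, :-2] + image[1:-1, 2:]
        - 4*image[1:-1, 1:-1]
    )
    return np.abs(laplacian) > 0.05

laplace_pythran(image)  # warm-up call so the compiled version is actually used
print("numpy  :", timeit(lambda: laplace_numpy(image), number=100))
print("pythran:", timeit(lambda: laplace_pythran(image), number=100))
###Output _____no_output_____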
01-pandas-ipywidgets/jupyter-widget-ecosystem/notebooks/10.11-ipywebrtc.ipynb
###Markdown ipywebrtc. Using master (7b364018501957fb9c778c0ee3d850b88e52f83f); this will not work with v0.2.0. ###Code
import ipywebrtc as webrtc
import ipyvolume as ipv
import ipywidgets as widgets
video = webrtc.VideoStream.from_file('big-buck-bunny_trailer.webm')
video
camera = webrtc.CameraStream()
camera
fig = ipv.figure(render_continuous=True)
back = ipv.plot_plane("back", texture=video)
right = ipv.plot_plane("right", texture=camera)
ipv.show()
right.texture = fig
room = webrtc.chat(room='scipy2018', stream=fig)
back.texture = room.streams[1]
recorder = webrtc.MediaRecorder(stream=fig, filename='record')
recorder
recorder.data[:1000]
room.close()
camera.close()
video.close()
###Output _____no_output_____
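###Markdown A possible follow-up (not part of the original notebook): `recorder.data` holds the recorded bytes, so they can be written straight to a WebM file, assuming a recording was made before the streams were closed; the filename is illustrative. ###Code
# Persist the recording to disk.
if recorder.data:
    with open('record.webm', 'wb') as f:
        f.write(recorder.data)
###Output _____no_output_____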
06_change_detection/04_map_to_image_change.ipynb
###Markdown Introduction - Map-to-Image Change Detection. Change detection is a *hot* topic within the Earth Observation (EO) community, particularly since the release of the Landsat archive and, more recently, the availability of the ESA Sentinel-1 and 2 data. There are several approaches to change detection and these can be categorised as:
- Map-to-Map
- Image-to-Image
- Map-to-Image
- Dense Timeseries
These approaches have different advantages and disadvantages, and this tutorial series demonstrates the implementation of a method for each type. What is Map-to-Image Change? Probably the least explored approach: an existing classification at the first date is required but, rather than undertaking a second independent classification, a method compares the image and the map to identify the change pixels and then classifies them to update the existing map. These methods **have few assumptions** but generally **expect the area of change to be small** compared to the unchanged regions. When a change occurs there is also an **assumption that the change alters the EO signal (reflectance, backscatter etc.) enough to be identifiable when compared to the original class**. What are the steps? Again, this is quite simple. We need a classification and a new input image from a different date, and we will try to use that input image to find the changes between the two dates. 1. Define Imports ###Code
import os
import shutil
import rsgislib
import rsgislib.imagecalc
import rsgislib.imagecalc.calcindices
import rsgislib.changedetect.pxloutlierchng
import rsgislib.imageutils
import rsgislib.rastergis

# Imports for data visualisation
import rsgislib.tools.plotting

# Import the matplotlib plotting module and set it to work within
# a notebook environment.
%matplotlib inline
import matplotlib
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
###Output _____no_output_____ ###Markdown 2. Define Input Images and Directories ###Code
cls_2018_dir = "2018_cls"
if not os.path.exists(cls_2018_dir):
    os.mkdir(cls_2018_dir)

cls_1997_dir = "baseline_cls"
if not os.path.exists(cls_1997_dir):
    os.mkdir(cls_1997_dir)

map_to_img_chg_dir = "map_to_img_chg"
if not os.path.exists(map_to_img_chg_dir):
    os.mkdir(map_to_img_chg_dir)

# File path to the 1997 classification
cls_1997_img = os.path.join(cls_1997_dir, "base_1997_class_img.kea")
# File path to the input Landsat 5 image from 1997.
input_97_img = (
    "../data/chg_data/LS5TM_19970716_vmsk_mclds_topshad_rad_srefdem_stdsref_subset.tif"
)
# File path to the Landsat 5 valid data mask from 1997
vld_97_img = os.path.join(cls_1997_dir, "LS5TM_19970716_vmsk.kea")
# File path to the input Landsat 8 image from 2018.
input_18_img = (
    "../data/chg_data/LS8_20180608_vmsk_mclds_topshad_rad_srefdem_stdsref_subset.tif"
)
# File path to the Landsat 8 valid data mask from 2018.
vld_18_img = os.path.join(cls_2018_dir, "LS8_20180608_vmsk.kea")
###Output _____no_output_____ ###Markdown 3. Calculate NDVI for 2018 ###Code
ls_2018_ndvi_img = os.path.join(map_to_img_chg_dir, "LS8_20180608_ndvi.kea")
rsgislib.imagecalc.calcindices.calc_ndvi(input_18_img, 4, 5, ls_2018_ndvi_img)
###Output Image: ../data/chg_data/LS8_20180608_vmsk_mclds_topshad_rad_srefdem_stdsref_subset.tif Variable 'red' is band 4 Variable 'nir' is band 5 New image width = 1281 height = 3659 bands = 1 Calculating Image Pyramids. ###Markdown 4.
Identify Mangrove Change Features ###Code out_mng_chng_msk = os.path.join(map_to_img_chg_dir, "mng_2018_map2img_chngs.kea") plot_mng_thres_file = os.path.join(map_to_img_chg_dir, "mng_2018_map2img_chngs_plot.png") rsgislib.changedetect.pxloutlierchng.find_class_kurt_skew_outliers( ls_2018_ndvi_img, cls_1997_img, out_mng_chng_msk, -1, 1, 0.35, True, 10.0, False, img_mask_val=1, # Specify mangrove class img_band=1, gdalformat="KEA", plot_thres_file=plot_mng_thres_file, ) ###Output 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 133/133 [00:00<00:00, 769.83it/s] ###Markdown 5. Identify Water Change Features ###Code out_water_chng_msk = os.path.join(map_to_img_chg_dir, "water_2018_map2img_chngs.kea") plot_water_thres_file = os.path.join(map_to_img_chg_dir, "water_2018_map2img_chngs_plot.png") rsgislib.changedetect.pxloutlierchng.find_class_kurt_skew_outliers( ls_2018_ndvi_img, cls_1997_img, out_water_chng_msk, -1, 1, 0.35, False, 10.0, False, img_mask_val=3, # Specify water class img_band=1, gdalformat="KEA", plot_thres_file=plot_water_thres_file, ) ###Output 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 133/133 [00:00<00:00, 931.89it/s] ###Markdown 6. Update 1997 Classification to 2018 ###Code chng_2018_cls_img = os.path.join(map_to_img_chg_dir, "chng_map_2018_map2img.kea") band_defns = [] band_defns.append(rsgislib.imagecalc.BandDefn("vld_18_msk", vld_18_img, 1)) band_defns.append(rsgislib.imagecalc.BandDefn("cls_1997", cls_1997_img, 1)) band_defns.append(rsgislib.imagecalc.BandDefn("mng_chng", out_mng_chng_msk, 1)) band_defns.append(rsgislib.imagecalc.BandDefn("wat_chng", out_water_chng_msk, 1)) exp = """(vld_18_msk==1)&&(cls_1997==1)&&(mng_chng==2)?3: (vld_18_msk==1)&&(cls_1997==3)&&(wat_chng==2)?1: cls_1997 """ rsgislib.imagecalc.band_math( chng_2018_cls_img, exp, "KEA", rsgislib.TYPE_8UINT, band_defns ) rsgislib.rastergis.pop_rat_img_stats( chng_2018_cls_img, add_clr_tab=True, calc_pyramids=True, ignore_zero=True ) ###Output Image: 2018_cls/LS8_20180608_vmsk.kea Variable 'vld_18_msk' is band 1 Image: baseline_cls/base_1997_class_img.kea Variable 'cls_1997' is band 1 Image: map_to_img_chg/mng_2018_map2img_chngs.kea Variable 'mng_chng' is band 1 Image: map_to_img_chg/water_2018_map2img_chngs.kea Variable 'wat_chng' is band 1 New image width = 1281 height = 3659 bands = 1 Get Image Min and Max. Get Image Histogram. Adding Histogram and Colour Table to image file Calculating Image Pyramids. ###Markdown 7. 
Create Change Image ###Code # Define output image path chng_map_img = os.path.join(map_to_img_chg_dir, "chng_map_1997_2018_map2img.kea") # Define the input image bands band_defns = [] band_defns.append(rsgislib.imagecalc.BandDefn("cls_1997", cls_1997_img, 1)) band_defns.append(rsgislib.imagecalc.BandDefn("cls_2018", chng_2018_cls_img, 1)) # Expression to define change classes exp = """(cls_1997==1)&&(cls_2018==1)?11: (cls_1997==1)&&(cls_2018==2)?12: (cls_1997==1)&&(cls_2018==3)?13: (cls_1997==2)&&(cls_2018==1)?21: (cls_1997==2)&&(cls_2018==2)?22: (cls_1997==2)&&(cls_2018==3)?23: (cls_1997==3)&&(cls_2018==1)?31: (cls_1997==3)&&(cls_2018==2)?32: (cls_1997==3)&&(cls_2018==3)?33:0 """ # Run band_math rsgislib.imagecalc.band_math(chng_map_img, exp, "KEA", rsgislib.TYPE_8UINT, band_defns) # Populate stats and pyramids rsgislib.rastergis.pop_rat_img_stats( chng_map_img, add_clr_tab=True, calc_pyramids=True, ignore_zero=True ) ###Output Image: baseline_cls/base_1997_class_img.kea Variable 'cls_1997' is band 1 Image: map_to_img_chg/chng_map_2018_map2img.kea Variable 'cls_2018' is band 1 New image width = 1281 height = 3659 bands = 1 Get Image Min and Max. Get Image Histogram. Adding Histogram and Colour Table to image file Calculating Image Pyramids. ###Markdown 8. Colour and Name Change Classes ###Code class_info_dict = dict() class_info_dict[11] = { "classname": "Mangrove_Mangrove", "red": 25, "green": 200, "blue": 25, } class_info_dict[12] = { "classname": "Mangrove_Terrestrial", "red": 200, "green": 100, "blue": 25, } class_info_dict[13] = { "classname": "Mangrove_Water", "red": 200, "green": 25, "blue": 200, } class_info_dict[21] = { "classname": "Terrestrial_Mangrove", "red": 125, "green": 100, "blue": 125, } class_info_dict[22] = { "classname": "Terrestrial_Terrestrial", "red": 125, "green": 125, "blue": 125, } class_info_dict[23] = { "classname": "Terrestrial_Water", "red": 125, "green": 125, "blue": 100, } class_info_dict[31] = { "classname": "Water_Mangrove", "red": 25, "green": 255, "blue": 200, } class_info_dict[32] = { "classname": "Water_Terrestrial", "red": 62, "green": 62, "blue": 200, } class_info_dict[33] = { "classname": "Water_Water", "red": 25, "green": 25, "blue": 200 } rsgislib.rastergis.set_class_names_colours(chng_map_img, "class_names", class_info_dict) ###Output _____no_output_____ ###Markdown 9. Visual Change Result ###Code sub_bbox = [523000, 550000, -877000, -854000] # Get the image data using the get_gdal_thematic_raster_mpl_imshow function. ( img_msk_data, img_msk_coords, lgd_msk_patches, ) = rsgislib.tools.plotting.get_gdal_thematic_raster_mpl_imshow( chng_map_img, bbox=sub_bbox ) # Create the matplotlib figure fig, ax = plt.subplots(figsize=(10, 10)) # Use the imshow function to display the image data within the plot # the extent option defines the x and y axis values. ax.imshow(img_msk_data, extent=img_msk_coords) ###Output Image Data Size: 900 x 767
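###Markdown 10. Summarise Change Classes. This step is an added sketch (not in the original tutorial): since `img_msk_data` already holds the thematic class codes for the plotted subset, a quick pixel count per change class can be made with NumPy and the `class_info_dict` defined above. ###Code
import numpy as np

# Count pixels per change class within the plotted subset.
vals, counts = np.unique(img_msk_data, return_counts=True)
for val, count in zip(vals, counts):
    name = class_info_dict.get(int(val), {"classname": "no data"})["classname"]
    print(f"{name}: {count} pixels")
###Output _____no_output_____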
NLP Project (Solutions).ipynb
###Markdown ___ ___ Natural Language Processing Project. Welcome to the NLP Project for this section of the course. In this NLP project you will be attempting to classify Yelp Reviews into 1 star or 5 star categories based off the text content in the reviews. This will be a simpler procedure than the lecture, since we will utilize the pipeline methods for more complex tasks. We will use the [Yelp Review Data Set from Kaggle](https://www.kaggle.com/c/yelp-recsys-2013). Each observation in this dataset is a review of a particular business by a particular user. The "stars" column is the number of stars (1 through 5) assigned by the reviewer to the business. (Higher stars is better.) In other words, it is the rating of the business by the person who wrote the review. The "cool" column is the number of "cool" votes this review received from other Yelp users. All reviews start with 0 "cool" votes, and there is no limit to how many "cool" votes a review can receive. In other words, it is a rating of the review itself, not a rating of the business. The "useful" and "funny" columns are similar to the "cool" column. Let's get started! Just follow the directions below! Imports **Import the usual suspects. :) ** ###Code import numpy as np import pandas as pd ###Output _____no_output_____ ###Markdown The Data **Read the yelp.csv file and set it as a dataframe called yelp.** ###Code yelp = pd.read_csv('yelp.csv') ###Output _____no_output_____ ###Markdown **Check the head, info, and describe methods on yelp.** ###Code yelp.head() yelp.info() yelp.describe() ###Output _____no_output_____ ###Markdown **Create a new column called "text length" which is the number of characters in the text column.** ###Code yelp['text length'] = yelp['text'].apply(len) ###Output _____no_output_____ ###Markdown EDA Let's explore the data Imports **Import the data visualization libraries if you haven't done so already.** ###Code import matplotlib.pyplot as plt import seaborn as sns sns.set_style('white') %matplotlib inline ###Output _____no_output_____ ###Markdown **Use FacetGrid from the seaborn library to create a grid of 5 histograms of text length based off of the star ratings. Reference the seaborn documentation for hints on this** ###Code g = sns.FacetGrid(yelp,col='stars') g.map(plt.hist,'text length') ###Output _____no_output_____ ###Markdown **Create a boxplot of text length for each star category.** ###Code sns.boxplot(x='stars',y='text length',data=yelp,palette='rainbow') ###Output _____no_output_____ ###Markdown **Create a countplot of the number of occurrences for each type of star rating.** ###Code sns.countplot(x='stars',data=yelp,palette='rainbow') ###Output _____no_output_____ ###Markdown **Use groupby to get the mean values of the numerical columns; you should be able to create this dataframe with the operation:** ###Code stars = yelp.groupby('stars').mean() stars ###Output _____no_output_____ ###Markdown **Use the corr() method on that groupby dataframe to produce this dataframe:** ###Code stars.corr() ###Output _____no_output_____ ###Markdown **Then use seaborn to create a heatmap based off that .corr() dataframe:** ###Code sns.heatmap(stars.corr(),cmap='coolwarm',annot=True) ###Output _____no_output_____ ###Markdown NLP Classification Task. Let's move on to the actual task.
To make things a little easier, go ahead and only grab reviews that were either 1 star or 5 stars. **Create a dataframe called yelp_class that contains the columns of yelp dataframe but for only the 1 or 5 star reviews.** ###Code yelp_class = yelp[(yelp.stars==1) | (yelp.stars==5)] ###Output _____no_output_____ ###Markdown **Create two objects X and y. X will be the 'text' column of yelp_class and y will be the 'stars' column of yelp_class. (Your features and target/labels)** ###Code X = yelp_class['text'] y = yelp_class['stars'] ###Output _____no_output_____ ###Markdown **Import CountVectorizer and create a CountVectorizer object.** ###Code from sklearn.feature_extraction.text import CountVectorizer cv = CountVectorizer() ###Output _____no_output_____ ###Markdown **Use the fit_transform method on the CountVectorizer object and pass in X (the 'text' column). Save this result by overwriting X.** ###Code X = cv.fit_transform(X) ###Output _____no_output_____ ###Markdown Train Test Split. Let's split our data into training and testing data. **Use train_test_split to split up the data into X_train, X_test, y_train, y_test. Use test_size=0.3 and random_state=101** ###Code from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.3,random_state=101) ###Output _____no_output_____ ###Markdown Training a Model. Time to train a model! **Import MultinomialNB and create an instance of the estimator and call it nb** ###Code from sklearn.naive_bayes import MultinomialNB nb = MultinomialNB() ###Output _____no_output_____ ###Markdown **Now fit nb using the training data.** ###Code nb.fit(X_train,y_train) ###Output _____no_output_____ ###Markdown Predictions and Evaluations. Time to see how our model did! **Use the predict method off of nb to predict labels from X_test.** ###Code predictions = nb.predict(X_test) ###Output _____no_output_____ ###Markdown **Create a confusion matrix and classification report using these predictions and y_test** ###Code from sklearn.metrics import confusion_matrix,classification_report print(confusion_matrix(y_test,predictions)) print('\n') print(classification_report(y_test,predictions)) ###Output [[159 69] [ 22 976]] precision recall f1-score support 1 0.88 0.70 0.78 228 5 0.93 0.98 0.96 998 avg / total 0.92 0.93 0.92 1226 ###Markdown **Great! Let's see what happens if we try to include TF-IDF into this process using a pipeline.** Using Text Processing **Import TfidfTransformer from sklearn.** ###Code from sklearn.feature_extraction.text import TfidfTransformer ###Output _____no_output_____ ###Markdown **Import Pipeline from sklearn.** ###Code from sklearn.pipeline import Pipeline ###Output _____no_output_____ ###Markdown **Now create a pipeline with the following steps: CountVectorizer(), TfidfTransformer(), MultinomialNB()** ###Code
pipeline = Pipeline([
    ('bow', CountVectorizer()),  # strings to token integer counts
    ('tfidf', TfidfTransformer()),  # integer counts to weighted TF-IDF scores
    ('classifier', MultinomialNB()),  # train on TF-IDF vectors w/ Naive Bayes classifier
])
###Output _____no_output_____ ###Markdown Using the Pipeline **Time to use the pipeline! Remember this pipeline has all your pre-process steps in it already, meaning we'll need to re-split the original data (Remember that we overwrote X as the CountVectorized version.
What we need is just the text.)** Train Test Split **Redo the train test split on the yelp_class object.** ###Code X = yelp_class['text'] y = yelp_class['stars'] X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.3,random_state=101) ###Output _____no_output_____ ###Markdown **Now fit the pipeline to the training data. Remember you can't use the same training data as last time because that data has already been vectorized. We need to pass in just the text and labels** ###Code # May take some time
pipeline.fit(X_train,y_train)
###Output _____no_output_____ ###Markdown Predictions and Evaluation **Now use the pipeline to predict from the X_test and create a classification report and confusion matrix. You should notice strange results.** ###Code predictions = pipeline.predict(X_test) print(confusion_matrix(y_test,predictions)) print(classification_report(y_test,predictions)) ###Output [[ 0 228] [ 0 998]] precision recall f1-score support 1 0.00 0.00 0.00 228 5 0.81 1.00 0.90 998 avg / total 0.66 0.81 0.73 1226
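###Markdown **A quick follow-up (not part of the original solutions): dropping the TF-IDF step is one way to confirm it caused the degenerate predictions; the bag-of-words pipeline below should recover scores close to the first model.** ###Code
simple_pipeline = Pipeline([
    ('bow', CountVectorizer()),       # strings to token integer counts
    ('classifier', MultinomialNB()),  # Naive Bayes on the raw counts
])
simple_pipeline.fit(X_train, y_train)
predictions = simple_pipeline.predict(X_test)
print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))
###Output _____no_output_____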
examples/mnist_nn.ipynb
###Markdown Digit recognition using TT neural networks. The TT layer is applied to the MNIST dataset. Imports: ###Code
import torch as tn
import torch.nn as nn
try:
    import torchtt as tntt
except:
    print('Installing torchTT...')
    %pip install git+https://github.com/ion-g-ion/torchTT
    import torchtt as tntt
from torch import optim
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader

device = tn.device('cuda' if tn.cuda.is_available() else 'cpu')
###Output _____no_output_____ ###Markdown Download the dataset and store it in the subfolder 'downloads'. ###Code
train_data = datasets.MNIST(root = 'downloads', train = True, transform = ToTensor(), download = True)
test_data = datasets.MNIST(root = 'downloads', train = False, transform = ToTensor())
###Output _____no_output_____ ###Markdown Create 2 dataloaders, for the training set and the test set. ###Code
dataloader_train = tn.utils.data.DataLoader(train_data, batch_size=1000, shuffle=True, num_workers=10)
dataloader_test = tn.utils.data.DataLoader(test_data, batch_size=100, shuffle=True, num_workers=10)
###Output _____no_output_____ ###Markdown Define the neural network architecture. It contains 2 hidden TT layers (with ReLU activation functions) followed by a linear output layer. A log-softmax is applied at the output. ###Code
class BasicTT(nn.Module):
    def __init__(self):
        super().__init__()
        self.ttl1 = tntt.nn.LinearLayerTT([1,7,4,7,4], [8,10,10,10,10], [1,4,2,2,2,1])
        self.ttl2 = tntt.nn.LinearLayerTT([8,10,10,10,10], [8,3,3,3,3], [1,2,2,2,2,1])
        self.linear = nn.Linear(81*8, 10, dtype = tn.float32)
        self.logsoftmax = nn.LogSoftmax(1)

    def forward(self, x):
        x = self.ttl1(x)
        x = tn.relu(x)
        x = self.ttl2(x)
        x = tn.relu(x)
        x = x.view(-1,81*8)
        x = self.linear(x)
        return self.logsoftmax(x)
###Output _____no_output_____ ###Markdown Instantiate the model and choose the optimizer and the loss function. ###Code
model = BasicTT().to(device)
loss_function = nn.NLLLoss()  # the model outputs log-probabilities, so NLLLoss is the matching criterion
optimizer = optim.Adam(model.parameters(), lr = 0.001)
###Output _____no_output_____ ###Markdown Start the training for 30 epochs. ###Code
n_epochs = 30
for epoch in range(n_epochs):
    for i,(input,label) in enumerate(dataloader_train):
        input = tn.reshape(input.to(device),[-1,1,7,4,7,4])
        label = label.to(device)
        optimizer.zero_grad()
        output = model(input)
        loss = loss_function(output, label)
        loss.backward()
        optimizer.step()
        print('Epoch %d/%d iteration %d/%d loss %e'%(epoch+1,n_epochs,i+1,len(dataloader_train),loss))
###Output _____no_output_____ ###Markdown Compute the accuracy over the test set. ###Code
n_correct = 0
n_total = 0
for (input,label) in dataloader_test:
    input = tn.reshape(input.to(device),[-1,1,7,4,7,4])
    output = model(input).cpu()
    n_correct += tn.sum(tn.max(output,1)[1] == label)
    n_total += input.shape[0]
print('Test accuracy ',n_correct/n_total)
###Output _____no_output_____
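###Markdown To see the compression the TT layers give, the cell below (an addition, not in the original example) counts their parameters against the dense layers they replace; the dense counts are just rows x columns plus bias, and it assumes `LinearLayerTT` registers its cores as ordinary parameters. ###Code
tt_params = sum(p.numel() for p in model.ttl1.parameters()) \
          + sum(p.numel() for p in model.ttl2.parameters())
# Dense equivalents of the two TT maps: 784 -> 80000 and 80000 -> 648
dense_params = (784 * 80000 + 80000) + (80000 * 648 + 648)
print(f"TT-layer parameters: {tt_params}")
print(f"Dense equivalent:    {dense_params}")
###Output _____no_output_____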
datasetHandle/gov/spider.ipynb
###Markdown Scraping public data from government websites. Scraping historical weather data for Zhangzhou ###Code
import requests
header = { 'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36', }
res_list = []
for year in range(2020,2022):
    for month in range(1,13):
        url = 'https://lishi.tianqi.com/zhangzhou/{}{:0>2d}.html'.format(year,month)
        # print(url)
        res = requests.get(url=url,headers=header)
        res_list.append(res)
from lxml import etree
# weather conditions for each day of 2020
day = 1
csv_str = ""
for month in range(1,13):
    dom = etree.HTML(res_list[month - 1].text)  # res_list[0] is January 2020
    values = dom.xpath('/html/body/div[7]/div[1]/div[4]/ul/li')
    for one in values:
        high_temp = int(one.xpath('./div[2]/text()')[0][0:-1])
        low_temp = int(one.xpath('./div[3]/text()')[0][0:-1])
        weather = one.xpath('./div[4]/text()')[0]
        # print(day,high_temp,low_temp,weather)
        line = "{},{},{},{}\n".format(day,high_temp,low_temp,weather)
        csv_str = csv_str + line
        day += 1
head_str = "day,high,low,weather\n"
csv_str = head_str + csv_str
# print(csv_str)
with open('weather.csv','w',encoding='utf-8') as f:
    f.write(csv_str)
###Output _____no_output_____ ###Markdown Fujian: scraping data from the Fujian Provincial Department of Ecology and Environment ###Code
import requests
import time
header = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'}
url = 'http://sthjt.fujian.gov.cn/was5/web/search'
params = { 'channelid':280067, 'sortfield':'-s4', 'classsql':'(dockind=10)', 'r':'0.3624881561901028', 'prepage':100, 'page':1 }
res_list = []
for i in range(1,177):
    params['page'] = i
    res = requests.get(url=url,params=params,headers=header)
    res_list.append(res)
    time.sleep(0.1)
print('done')
import json
docs_list = []
for one in res_list:
    doc = json.loads(one.text.replace('\r','').replace('\n',''))
    docs_list = docs_list + doc['docs']
print('done 1')
csv_str = ""
for i in range(len(docs_list)):
    one = docs_list[i]
    try:
        line = "{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(one['s1'],one['s2'],one['s3'],one['s4'],one['s5'],one['s6'],one['s7'],one['f1'],one['f2'],one['f3'],one['f4'],one['f5'],one['f6'])
    except Exception as e:
        # print(e)
        # print(i)
        continue
    csv_str = csv_str + line
    # if i == 2:
    #     break
# print(csv_str)
print('done 2')
head_str = '水系,点位名称,断面名称,年,周,起始时间,结束时间,pH,DO,COD,TP,氨氮,总氮\n'
csv_str = head_str + csv_str
with open('water.csv','w',encoding='utf-8') as f:
    f.write(csv_str)
print('done')
import pandas as pd
import numpy as np
fujian_df = pd.read_csv('data/network/gov/fujian/fujian.csv')
print(fujian_df.describe())
fujian_df['年'] = fujian_df['年'].astype(int)
for year in range(2004,2022):
    year_df = fujian_df[(fujian_df['年'] >= year) & (fujian_df['年'] < year + 1) ]
    print("{}年记录总数:{}".format(year,len(year_df)))
year_df = fujian_df[(fujian_df['年'] >= 2017) & (fujian_df['年'] < 2018) ]
print("记录总数:{}".format(len(year_df)))
site_list = year_df['断面名称'].unique()
print("站点总数:{}".format(len(site_list)))
for site in site_list:
    one = year_df[year_df['断面名称'] == site]
    print("{},{}".format(site,len(one)))
###Output 记录总数:3520 站点总数:58 姑田溪(龙岩-三明交界断面),55 尤溪(三明-南平交界断面),55 干流(对照),55 翠江河(宁化-清流交界断面),55 干流(周宁-福安交界断面),55 饮用水水源地,330 水库、湖泊,110 富屯溪(三明-南平交界断面),55 黄潭河(上杭-永定交界断面),55 西溪(龙岩-漳州交界断面),55 干流(上杭湖洋乡涧头村),55 干流(寿宁-福安交界断面),55 九龙溪(清流-永安交界断面),55 沙溪(三明-南平交界断面),55 富屯溪(邵武-顺昌交界断面),55 北溪(控制断面),55 西溪(安溪-南安交界断面),55 濉溪(建宁-泰宁交界断面),55 黄潭河(新罗-上杭交界断面),55 鱼塘溪(明溪-三元交界断面),55 建溪(武夷山-建阳交界断面),55 干流(宁德-福州交界断面),55 干流(仙游-城厢交界断面),55 北团溪(龙岩-三明交界断面),55 北溪 (龙岩-漳州交界断面),55 干流(宁德-福州交界断面),55 干流(南安-丰泽交界断面),55 北溪(新罗-漳平交界断面),55 北溪 (华安-芗城交界断面),55 北溪(厦门-漳州交界断面),55 桃溪(永春-南安交界断面),55 干流(罗源-连江交界断面),55
干流(屏南-蕉城交界断面),55 沙溪(永安-三元交界断面),55 沙溪(梅列-沙县交界断面),55 金溪(泰宁-将乐交界断面),55 富屯溪(光泽-邵武交界断面),55 富屯溪(顺昌-延平交界断面),55 建溪(建阳-建瓯交界断面),55 省界(浙-闽),55 建溪(政和-建瓯交界断面),55 均溪(大田-尤溪交界断面),55 干流(南平-宁德交界断面),55 建溪(浦城-建阳交界断面),55 建溪(松溪-政和交界断面),55 建溪(建瓯-延平交界断面),55 干流(闽侯-福州交界断面),55 大樟溪(泉州-福州交界断面),55 干流(闽江入海口),55 北溪(长泰-龙文交界断面),55 西溪(平和-南靖交界断面),55 西溪(南靖-芗城交界断面),55 干流(长汀-上杭交界断面),55 干流(上杭-永定交界断面),55 文川河(龙岩-三明交界断面),55 干流(连江-马尾交界断面),55 大樟溪(永泰-闽侯交界断面),55 干流(闽清-闽侯交界断面),55 ###Markdown Shandong: scraping data from the Shandong Provincial Department of Ecology and Environment. http://sthj.shandong.gov.cn/ ###Code
import requests
import json
url = 'http://fb.sdem.org.cn:8801/wryfb/ajax/map.ashx'
param = { 'Method':'SelectSubList', 'stcode':'0', 'type':'WasteWaterGis', 'isall':'0' }
header = {
    'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36',
    # 'Referer':'http://fb.sdem.org.cn:8801/wryfb/WebGis/WasteWaterGis/HistoryData_New.aspx?SubId=29941%20&SubName=%u79D1%u6CD3%u79D1%u6280%u96C6%u56E2%u6709%u9650%u516C%u53F8',
    # 'X-Requested-With':'XMLHttpRequest',
    # 'Origin':'http://fb.sdem.org.cn:8801',
    # 'Pragma':'no-cache',
    # 'Cookie':'ASP.NET_SessionId=y5zc211bfydoziryjxzimesx; ASP.NET_SessionId_NS_Sig=oenCV6md0Dtq6Bby',
    # 'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8'
}
res = requests.post(url=url, data=param, headers=header)
site_list = json.loads(res.text)['items']
print("站点总数:{}".format(len(site_list)))
import time
url = 'http://fb.sdem.org.cn:8801/wryfb/ajax/WasteWaterGis/WasteWaterHandler.ashx'
param = { 'Method':'GetHisChart_New', 'strID':'2326', 'strTime':'2021-07-01' }
header = { 'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36', }
month_data_list = []
for year in range(2010,2021):
    print("爬取{}年".format(year))
    for month in range(1,13):
        str_time = '{}-{}-01'.format(year,month)
        param['strTime'] = str_time
        month_res = requests.post(url=url, data=param, headers=header)
        month_data = json.loads(month_res.text)
        month_data_list.append(month_data)
        time.sleep(0.05)
        # break
    # break
# print(res.text)
# doc = json.loads(res.text)
# print(doc)
###Output 爬取2010年 爬取2011年 爬取2012年 爬取2013年 爬取2014年 爬取2015年 爬取2016年 爬取2017年 爬取2018年 爬取2019年 爬取2020年 ###Markdown China National Environmental Monitoring Centre: weekly automatic water-quality monitoring reports. http://www.cnemc.cn/sssj/szzdjczb/ ###Code
import requests
import time
page_list = []
for i in range(25):
    if i == 0:
        page = requests.get(url='http://www.cnemc.cn/sssj/szzdjczb/index.shtml')
    else:
        url = 'http://www.cnemc.cn/sssj/szzdjczb/index_{}.shtml'.format(i)
        page = requests.get(url=url)
    time.sleep(0.05)
    page_list.append(page)
print('done')
# print(page.text)
from lxml import etree
import requests
import time
url_base = 'http://www.cnemc.cn/sssj/szzdjczb'
for page in page_list:
    dom = etree.HTML(page.text)
    for i in range(1,21):
        xpath_str = '//*[@id="contentPageData"]/li[{}]/a/@href'.format(i)
        line = dom.xpath(xpath_str)[0]
        if line.split('.')[-1] == 'shtml':
            url = url_base + line[1:]
            page = requests.get(url=url)  # fetch the download page
            doc_page = etree.HTML(page.text)
            # collect the href of every <a> tag
            all_link = doc_page.xpath('//a/@href')
            found = False
            for link in all_link:
                last_str = link.split('.')[-1]
                if (last_str == 'doc' or last_str == 'pdf') and link[:2] == './':
                    found = True
                    # build the download link
                    download_link = "/".join(url.split("/")[:-1]) + link[1:]
                    doc_file = requests.get(url=download_link)
                    # download and save
                    dir = 'data/network/gov/cnemc/'
                    filename = dir + link[2:]
                    with open(filename,'wb') as f:
                        f.write(doc_file.content)
                    # print(download_link)
            if not found:
print('{}可以直接下载'.format(line)) # break import os #用于获取目标文件所在路径 path="E:\\project\\mvp\\Graph-WaveNet\\data\\network\\gov\\cnemc\\" # 文件夹绝对路径 files=[] for file in os.listdir(path): if file.endswith(".doc"): #排除文件夹内的其它干扰文件,只获取".doc"后缀的word文件 files.append(path+file) from win32com import client as wc #导入模块 word = wc.Dispatch("Word.Application") # 打开word应用程序 for file in files: doc = word.Documents.Open(file) #打开word文件 doc.SaveAs("{}x".format(file), 12)#另存为后缀为".docx"的文件,其中参数12指docx文件 doc.Close() #关闭原来word文件 word.Quit() print("完成!") # import docx from docx import Document docFile = 'data/network/gov/cnemc/2018-51.docx' document = Document(docFile) #读入文件 table = document.tables[0] #获取文件中的表格集 table.cell(2,0).text ###Output _____no_output_____
resnet_binary_model.ipynb
###Markdown ###Code
# git clone repository
!git clone https://github.com/yasohasakii/binary_categorical.git
!mv ./binary_categorical/* ./
!rm -r ./binary_categorical ./sample_data
!pip install tensorflow-gpu
!nvidia-smi
# import modules
from __future__ import print_function
import numpy as np
import os, glob, time
from keras.layers import Input
from keras.models import Model
from keras.preprocessing import image
from keras import regularizers, optimizers
from keras.callbacks import ModelCheckpoint
from keras.layers import GlobalAveragePooling2D, Dense, Dropout
from keras.applications.resnet50 import ResNet50
os.environ['CUDA_VISIBLE_DEVICES']='0'
np.random.seed(1337)
# set parameters
batch_size = 32
EPOCHS=30
train_count = len(glob.glob('./PetImages/train/*/*.jpg'))
valid_count = len(glob.glob('./PetImages/test/*/*.jpg'))
# define build_model
def build_model(trainable=True):
    input = Input(shape = (224,224,3),name="kfb_image")
    base_model = ResNet50(include_top=False, weights='imagenet', input_tensor=input)
    x = base_model.output
    # for layer in base_model.layers:
    #     layer.trainable=False
    x = GlobalAveragePooling2D()(x)
    if trainable:
        x = Dropout(0.5,name="dropout_1")(x)
        x = Dense(256, activation='relu', kernel_regularizer=regularizers.l1(1e-4),name="dense_1")(x)
        x = Dropout(0.5,name="dropout_2")(x)
        x = Dense(64, activation='relu', kernel_regularizer=regularizers.l1(1e-4),name="dense_2")(x)
        x = Dropout(0.5,name="dropout_3")(x)
    else:
        x = Dense(256, activation='relu' ,name="dense_1")(x)
        x = Dense(64, activation='relu', name="dense_2")(x)
    prediction = Dense(1, activation='sigmoid',name = "dense_3")(x)
    model = Model(inputs=base_model.input, outputs=prediction)
    # model.summary()
    return model
# define train
def train(train_count,valid_count):
    train_datagen = image.ImageDataGenerator( rescale = 1./255, rotation_range=40, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.1, horizontal_flip=True, vertical_flip=True, fill_mode='nearest')
    valid_datagen = image.ImageDataGenerator( rescale = 1./255)
    train_generator = train_datagen.flow_from_directory( './PetImages/train/', target_size=(224, 224), batch_size=batch_size, class_mode='binary')
    validation_generator = valid_datagen.flow_from_directory( './PetImages/test', target_size=(224, 224), batch_size=batch_size, class_mode='binary')
    model = build_model()
    model.compile(loss="binary_crossentropy", optimizer=optimizers.Adam(lr=1e-4,decay =1e-6), metrics=["accuracy"])
    models_save_path = "./models"
    if not os.path.exists(models_save_path):
        os.makedirs(models_save_path)
    checkpoint = ModelCheckpoint(filepath=os.path.join(models_save_path, 'resnet-{epoch:02d}-{val_acc:.4f}.h5'), monitor='val_acc', mode='max', save_best_only=True, save_weights_only=True)
    print("Train files: {}, valid files: {}".format(train_count,valid_count))
    print('-----------Start training-----------')
    start = time.time()
    history = model.fit_generator(train_generator, steps_per_epoch=train_count // batch_size, epochs=EPOCHS, initial_epoch=0, validation_data=validation_generator, validation_steps=valid_count // batch_size, callbacks=[checkpoint], use_multiprocessing=False)
    end = time.time()
    print("train finished, took {} hours".format(round((end - start) / 3600.0,3)))
    return history
# define plot_train_history
def plot_train_history(history):
    import matplotlib.pyplot as plt
    plt.figure(figsize=(20, 10))
    plt.subplot(1,2,1)
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'],
    loc='upper left')
    plt.subplot(1,2,2)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.savefig("history.png")
# define test
def test(model_path,image_dir):
    model = build_model(trainable=False)
    model.load_weights(model_path, by_name=True)
    for layer in model.layers:
        layer.trainable=False
    images = glob.glob(image_dir+"/*.jpg")
    images.sort()
    cat=0.0
    dog=0.0
    for imgs in images[:2]:  # note: only the first two images are scored here
        print(imgs)
        im = image.load_img(imgs, target_size=(224, 224))
        im = image.img_to_array(im)* 1. / 255
        im = np.expand_dims(im, axis=0)
        out = model.predict(im)[0][0]
        print(out)
        if out>=0.5:
            dog += 1
        else:
            cat += 1
    print(dog+cat)
    if dog>cat:
        print("{}: dog, score {}, image nums {}".format(os.path.basename(image_dir),dog/(dog+cat),len(images)))
    else:
        print("{}: cat, score {}, image nums {}".format(os.path.basename(image_dir),cat/(dog+cat),len(images)))
if __name__=="__main__":
    history = train(train_count,valid_count)
    plot_train_history(history)
test('./resnet-04-0.9722.h5','./PetImages/test/Cat')
from google.colab import files
uploaded = files.upload()
for fn in uploaded.keys():
    print('User uploaded file "{name}" with length {length} bytes'.format( name=fn, length=len(uploaded[fn])))
###Output _____no_output_____
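###Markdown A follow-up sketch (not in the original notebook): score each image just uploaded through Colab with the trained weights; the weight file name repeats the one used in the test() call above. ###Code
# Classify the uploaded images with the saved weights.
model = build_model(trainable=False)
model.load_weights('./resnet-04-0.9722.h5', by_name=True)
for fn in uploaded.keys():
    im = image.load_img(fn, target_size=(224, 224))
    im = np.expand_dims(image.img_to_array(im) * 1. / 255, axis=0)
    score = model.predict(im)[0][0]
    print('{}: {} (score {:.3f})'.format(fn, 'dog' if score >= 0.5 else 'cat', score))
###Output _____no_output_____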
docs/python/jupyter/findmybgc.ipynb
###Markdown Run as usual. If you're following along, change the filepaths in the first code block. Using `FindMyBGC()` ###Code
from socialgene.classes.findmybgc import FindMyBGC

hmm_filepath = "/home/chase/Documents/socialgene_data/test/long_cache/HMM_HASH/socialgene_all_hmms.hmm"
gbk_filepath = "/home/chase/Documents/socialgene_data/mibig_gbk_2.0//BGC0001848.gbk"

findmybgc_object = FindMyBGC()
findmybgc_object.parse(gbk_filepath=gbk_filepath)
findmybgc_object.annotate_with_pyhmmer(
    hmm_filepath=hmm_filepath,
    use_neo4j_precalc=False,
    cpus=0,
)
###Output _____no_output_____ ###Markdown Run the comparison (all-vs-all) ###Code
findmybgc_object.compare_proteins()
###Output _____no_output_____ ###Markdown The results could be accessed now, but they're not super useful in this form (showing the first five results below) ###Code
findmybgc_object.protein_comparison[0:5]
###Output _____no_output_____ ###Markdown So convert the results into a pandas dataframe ###Code
findmybgc_object.protein_comparison_to_df()
###Output _____no_output_____ ###Markdown And view the results ###Code
findmybgc_object.protein_comparison
###Output _____no_output_____
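###Markdown To pull out the strongest hits one could sort the dataframe on its score column; since the column names aren't shown above, the last column is used here as a stand-in (a hedged sketch, not from the original docs). ###Code
df = findmybgc_object.protein_comparison
df.sort_values(df.columns[-1], ascending=False).head()
###Output _____no_output_____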
generate-backgrounds.ipynb
###Markdown Generating backgrounds ###Code %matplotlib inline from pathlib import Path import matplotlib as mpl from thesis_cover import * mpl.rcParams["figure.dpi"] = 300 options = sorted(Path("data").glob("*/*.pickle")) def generate_cover( learner, save_fname: str, npoints_interp=1000, dpi=300, resolution=(4096, 2160), cmap=None, ): data = list(learner.data.items()) x_size, y_size = xy_size = (resolution[0] / dpi, resolution[1] / dpi) fig, ax = plt.subplots(figsize=(x_size, y_size)) fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None) ax.set_xticks([]) ax.set_yticks([]) cmap = cmap or get_cmap("inferno", 0.15, 0.95, 1.15) npoints_tri = len(data) // 4 if len(data) > 4000: npoints_tri = max(npoints_tri, 4000) im, line1, line2 = get_new_artists( npoints_tri, learner, data, ax, xy_size, npoints_interp, cmap ) ax.axis("off") ax.set_xlim(-x_size / 2, x_size / 2) ax.set_ylim(-y_size / 2, y_size / 2) print(f"Saving {save_fname}") if save_fname is not None: fig.savefig( save_fname, format=save_fname.suffix[1:], pad_inches=0, dpi=dpi, ) else: plt.show() plt.close(fig) folder = Path("background-defense") folder.mkdir(exist_ok=True) ###Output _____no_output_____ ###Markdown Pick one using a widget ###Code import adaptive from ipywidgets import Dropdown, IntText, interact def get_background(fname, cmap, x_resolution, y_resolution): learner = load_learner(fname) cmap = get_cmap(cmap, 0.10, 0.85, 0.85) generate_cover( learner, None, cmap=cmap, npoints_interp=2000, resolution=(x_resolution, y_resolution), ) interact( get_background, fname=Dropdown(options=options), cmap=Dropdown(options=plt.colormaps(), value="inferno"), x_resolution=IntText(value=4096), y_resolution=IntText(value=2160), ) ###Output _____no_output_____ ###Markdown Generate all ###Code for i, fname in enumerate(options): learner = load_learner(fname) print(f"cover {i+1}, npoints: {learner.npoints}") cmap = get_cmap("inferno", 0.10, 0.85, 0.85) generate_cover( learner, fname_out(folder, fname).with_suffix(".png"), cmap=cmap, npoints_interp=2000, dpi=300, ) print() ###Output _____no_output_____
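###Markdown The same function covers other targets too: for example a portrait phone wallpaper (an illustrative extra, not in the original notebook; the learner choice, resolution, and filename are arbitrary). ###Code
learner = load_learner(options[0])
generate_cover(
    learner,
    folder / "wallpaper-portrait.png",
    cmap=get_cmap("inferno", 0.10, 0.85, 0.85),
    npoints_interp=2000,
    resolution=(1080, 2340),  # a typical phone screen
)
###Output _____no_output_____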
Illyuvieva_Tafintseva-2.ipynb
###Markdown Hi, our names are *Illyuvieva Alice* and *Tafintseva Albina* and we decided to write a tutorial on the subject of the ***ARIMA model***. In this tutorial we want to illustrate how the ARIMA model can be applied in Python, how we can measure its efficiency and make predictions. We have chosen COVID-19 data; it contains information about countries from 2020-01-22 to 2021-02-01. The data is cumulative: all indicators are summed with the previous ones, so each day does not show the statistics of that day but the overall statistics for all previous days since 2020-01-22. For each day and each country we can see the cumulative number of people who had confirmed COVID-19 (up to that day), the number of people who recovered (from 2020-01-22 to that day) and the number of people who had died from 2020-01-22 to that day. In the next cell below you can see all the libraries which are going to be used in order to upload data, to modify data, to construct the ARIMA model, to extract train and test parts from the data, to get predictions, to estimate the model, to visualize and so on. ###Code
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_model import ARIMA
from matplotlib import pyplot
from math import sqrt
from sklearn.metrics import mean_squared_error
from statsmodels.regression.rolling import RollingOLS
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.statespace.sarimax import SARIMAX
from pandas import DataFrame
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.metrics import mean_absolute_error
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.stattools import adfuller
from sklearn.metrics import accuracy_score
import sklearn.metrics as sm
###Output _____no_output_____ ###Markdown ...Uploading data ###Code
csv_url = 'time-series-19-covid-combined.csv'
data = pd.read_csv(csv_url, index_col=0, parse_dates=True, squeeze=True)
data
###Output _____no_output_____ ###Markdown Now you can see the loaded data. Therefore, it is time to tell you what task we want to solve and how we will need to modify the data in order to complete it. So, we decided that we want to explore statistics about deaths in Afghanistan and make predictions about the number of deaths in the future. We say that: H0 (null hypothesis) = the number of deaths in Afghanistan is going to decrease.
1. We will take only data concerning Afghanistan (trim the number of rows in the dataset)
2. We will drop all columns in the dataset resulting from step 1, apart from dates and number of deaths
3. We are going to create a column 'deaths per day' in which deaths per day will be calculated (smart, right?) - the algorithm is pretty easy and understandable from the code
That's all for the modifications to the dataset, and below you can see the code for everything described above. Enjoy!
###Code
# Keep only the death counts: drop the other indicator columns,
# then keep the first 377 rows, which are the Afghanistan records.
data = data.drop(['Country/Region', 'Province/State', 'Confirmed', 'Recovered'], axis = 1)
data = data.iloc[0:377]
data

# Turn the cumulative 'Deaths' column into deaths per day:
# each day's value is today's cumulative count minus yesterday's,
# and the very first cumulative value is kept as the first day's count.
new_data_deaths = list()
old_data_deaths1 = list(data['Deaths'])[1:]
old_data_deaths2 = list(data['Deaths'])[:-1]
new_data_deaths.append(list(data['Deaths'])[0])
new_data_deaths.extend(np.array(old_data_deaths1) - np.array(old_data_deaths2))
data['Deaths_per_day'] = new_data_deaths
data

data = data.drop(['Deaths'], axis = 1)
data

data.plot()
###Output
_____no_output_____
###Markdown
**Examine whether the data is stationary or not:**
1. H0: the time-series data is non-stationary.
2. Apply the adfuller() function to our time series.
3. Draw a conclusion: if the p-value of the test is below the significance level, reject H0.
###Code
result_of_adfuller = adfuller(data['Deaths_per_day'])
print('p-value: %f' % result_of_adfuller[1])
###Output
p-value: 0.286124
###Markdown
The p-value is greater than the significance level, so the process is non-stationary. Therefore we have to difference the series and see at what order of differencing we achieve stationarity. This is also vital because when we specify our model we will have to pass order = (p, d, q), where d stands for the order of differencing. First, look at the original data:
###Code
plt.rcParams["figure.figsize"] = (20,3)
print(plt.plot(data['Deaths_per_day']))
###Output
[<matplotlib.lines.Line2D object at 0x7ffa483d1d10>]
###Markdown
Now our data with the first order of differencing:
###Code
plt.rcParams["figure.figsize"] = (20,3)
difference_order_1 = data['Deaths_per_day'].diff()
print(plt.plot(difference_order_1))
###Output
[<matplotlib.lines.Line2D object at 0x7ffa48457890>]
###Markdown
Now our data with the second order of differencing:
###Code
plt.rcParams["figure.figsize"] = (20,3)
difference_order_2 = data['Deaths_per_day'].diff().diff()
print(plt.plot(difference_order_2))
###Output
[<matplotlib.lines.Line2D object at 0x7ffa43b99810>]
###Markdown
We decided to take the differencing order d = 1, as orders greater than 1 do not change the picture much. Now we specify our model. We do not need the date columns, as we are constructing the ARIMA model on the time series of deaths itself. Let's start by finding the parameters of the model:
1. the AR term (p),
2. the MA term (q),
3. the differencing term d, which is already found above.

AR term: the required number of AR terms can be found by inspecting the Partial Autocorrelation (PACF) plot.
###Code
print(plot_acf(difference_order_1.dropna()))
print(plot_pacf(difference_order_1.dropna()))
###Output
Figure(1440x216)
Figure(1440x216)
###Markdown
[Screenshot of the ACF and PACF plots of the first-differenced series ("Снимок экрана 2021-03-03 в 12.14.35.png", i.e. "Screen Shot 2021-03-03 at 12.14.35.png"); the inline base64 image data is omitted.]
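###Markdown
To make the next step concrete: once p is read off the PACF plot and q off the ACF plot, the model can be specified with the ARIMA class imported at the top of the notebook. The cell below is a minimal sketch only: the orders (1, 1, 1) are assumptions for demonstration, not necessarily the values chosen later in this tutorial.
###Code
# A minimal sketch of specifying and fitting an ARIMA(p, d, q) model with the
# legacy statsmodels API imported earlier. The orders here are illustrative
# assumptions; d = 1 matches the differencing level chosen above.
from statsmodels.tsa.arima_model import ARIMA  # already imported at the top

p, d, q = 1, 1, 1  # assumed orders, for illustration only
model = ARIMA(data['Deaths_per_day'], order=(p, d, q))
fitted = model.fit(disp=0)  # disp=0 silences the optimizer's convergence messages
print(fitted.summary())

# The legacy forecast() returns (point forecasts, standard errors, confidence intervals).
forecast, stderr, conf_int = fitted.forecast(steps=7)  # 7-day-ahead forecast
print(forecast)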
ye9K0Vvtk6fLg+jsesBYjPkchaE9VO4DMC8843sCmmQJ6w/MQXF+9hiTWPcHzE14JftbaonWMj2xIwBx6mPbnJCpJDgBWj2JeSXW9sPJQBY7+JdnztRJ+PtJUwFr2FfaXvdSSO1kI9FFRjW6/7t/3eusfktUAJS4+8wS18WwlEIUqqbOwEUhCAIZ6TgBLEgdfCkDsy84aA+aSn2bmfxvriYLrgCjOFGNTJdl+ZVQ04RIYoeap/fHt2idpO+cjACuowidZf/oJg8y+9Pken/mvAsIAbGhtxT6JrvGR8rKvalSrGpxz8jFiCtpfNp37W7TMYp80cKr2SVcimuREHbHckMA9Tky+G4vEgLn8PemiAMxh07OnXHTtv73sQe4WJirs3RNk58Lyhg7OheGNBOE9An1hk77/4SJnQjwccVi/YD6GARkm5MIWCmm49ouuvd1aPrlsaa6vQP9VM4AC+i9bOye813D9oX9hTnL5hOh6wCWw7hytZFmqBF02gDkK8GjLS1ITNqRshAIHQPu0pfNlYgN8SDu/rp3aBXWVvE/HkoGtdLHmOMzsPs8JfxUWExjesG8oaiNxm62QsL1XAGwKAt4Xiy7rzkcoaByoIixY02AHS5Rs5p/t9Vq7JEm5LwoE08UbEs+V7ndAmgA2YQdcoAIsX/anNi2aWovJfdTpALs7UQACfiwQB/EsCrt8nlf4fYCzPCviWTDRWDxL8S2St5srnvXPf25q+39BXIf4TvjNzH/yDJd9usoAABQM+AjxJZgdacFGXCYRHOTz/eTPYDc/+eyL1hIWts7vpJOihHWKDdS5Q2trk5br+aPO5fNeNoA54nS1qu9s7FowbGUjCwTIZL8ZIyDUG7Pneh2CsUJ/017uivNOCXZWp4ZMdQJFr+998JHpuQfUEtjXNwWIg32O3k5VTIZ9ClMna/Haf9/nvWbQcQAAL5IugCGZvT3TmGfi4D03frK1XEQXfPb5msS30v7OnBykdrOwrjcTIIY2haGgX1bpOEMffMyOG74e9ZPrB0BLAR6tQxuoXSkxhmwEEApr7I77HjX9/cNPP3kXCvsA5igWo7gaEBE/faRB3VqmQ2DTRM/lKvj+6JBJApGyFlyMi4DkADB1VGEVe36ylHTAHHPuA+3R2MQz1BrcZ/+hSwBr6WgxaZ114lG2jrIBnoZjScEWa3miAPbYxK6CrfB7AJP6H9bV1lEisDZbwBwxKmzUIbL1jxNIkjmdaVEk18YYfvbFWhvP626710Ci4TVH/aS4n7VMW3r86i20jrO1mb/XOn5Ha+wqMdy9o+IcH0BveG0uwBzxKoCN9+mZEfv0le5izMOPIX6fDRAw+TyTBQgcOWa8GPzetSKN5PeT/2ZPwcY9sG2rlF1qsgXMASCl2ISiSAr7ySfGEo9APALxCMQjEI9APALxCBT3EYgBcymeUEkHzHFLBF1mydG9WiCB99T+A2CVrxAgIQhUfqdyRh9OAHqbMmWsbQRgOoKPlSvuJBBdFQOT8dlsgw+J11QcAXM4mIALqNLC8cwmcERlJ8xyVKdBqe7jbDPOBD+o3iLZkGp8OQ4tFi6/cai1JCPQ6Kq05NmW22E7S2KQ3AKUQsV1tkIyZuEnS43WHMeQ63EFV8JzlSTAHODTCVNesxaFVBr6CEE/Ko6ZNwRUMxGeIxXvtJqjDRyMkT//7G7nSzChcsUKckoHKwDexgI2jHOUAHIY+dw4C469riSbizkPfVC5UvnglGMHBFSjbSwSA+aye9IkuqiKfUhtwqjo9BFap5AIYA4fcmA7YwnJRvdyLoK1rKXHR5FEmmpBaN/k5IYGzFEdS+KDVioAeDJN1nFvAHkAzLHXw27hs780EkiONrDsLSRgk4UA8TxVNFNFDxDPF6xWEERsoeO2N/aVTTaBOTT56O6/uQeSpLPUpuMJBe5nqQ0OzCU+EgPmfEYp/WdIMFeX3cP6I/lBZfxWW26ZluU28Uiwv5AoJln+wcLFiW+l/X0nrfVWOs+BOh/ttDJJ2FLlz9qmhSP75KdiV/Kp4oYZZhvd5/mnHicGpI5m28LsEiVrxdzCPjlhynSBAd92rgns4kqqfKdSHCDMxiIhYAdbF5YhX11b2OODXsL+hW3z0rNPVHHQX8EnhX09qc5XFIC5j2XPz3jzHSWkHzcmxVTXlfwaflLbffcJuirRwzomOZLJOk4+HnMGmwE98ujTz9ua9tnXWjRtZHqL5G1RPE/AMoANhw4bYUzUyfeV6m8AMz0O7mAsnvu3bpExaD75mNgEFPh8vvpLJfZ/cDLMAZAjuct1pGO4zQYwx7EAYWHftMgyiY+vRauup0a/ZP5J8r2m+hv/BN+WpCOAMpjAkmWlWlVhT9BizRfYRDEAcxybtIfsG1jKUoELk8+V7m/mM3N8yvRZwWNPP2dsTLRqdAnjCpDEkqpih0+UbOYfOrBtq2bGcGixBjGo5bJ2AepwX7Q6e0xrF5vU575gNwM0RqtE2hInCvt8GEOhc8Lar75yzuvw+8ROAE3CwlROiXae2zZqL8k82UlxFuJc7NMAbGEmMyb4JOa48Fiun6w7EvB3P/SkAQFcn+d9fJ1WzRqLTaaWFaoKsufztbSfAUjxnooW5n/wcTBfICUXYJEx2UfswbCVDejdNadnn/aiMngjG8DczlUq2j6OLde1U/sMzrb+o7SvA4hy36Mjjdl6/TvpfwNYWk3gS1hzKCwhNpqpTsC3Wr3mq+AJdaG4WsyEMIf7CCxEMD+jB9AJyULccaHaqcNA/uCIZ5LfTvs37TUP2G9f25NqVq/q3RY83QFh7MOPYx8HfPV///e7/qX7dMHrrAliZrCuspcnxj8B3c1V62R0y2i10fURgEowcwLoApACyCdbHUdsHd907IQpBnJE1/kyEvoA5j5c9InZDrRx9G1rje6C8ZTCI+KOucr369ZJhyw0Vs/5WhMulvCqlSuYnwgrYSpwfEkHzC1W0TsxCFq407HAR9BJ2AvEr4jj52oT4+dS0EzXEeY+MRofm5gYE7486wggbCjZAubIF1TWHBt8RK/gCLXu9I0NhOcNf3Lt2Mow9j0g/eQbV99FbHas5a4CebdrvY/p22xjg6xj9mzAoa+89qZYEN/zGlPuwQWYI2bP+oVdE2ZAX2kpfxTGRkgrsgEiJp+HttGs5QUffWxA4+T3k//mvhhfnm0q5sBsAHPo2irKGZAPge2wvvYr3y4CydcX/x2PQDwC8QjEIxCPQDwC8QgU5gjEgLkUo10aAHPcFu0VYcSAVh0671yEIBBgK6pCqKKspUBK/Tq1rRKYQOR2YtOg3UqZrbc0ByobB6Y4AuYI4uLsEiQncZyNEACjde15V9yk9qkTjBXFBUqi4h/Kc6p9aDORqmqVIAZBvnMuu9GSuP/VeVwONO0/eG59exxkSYVsnlPiGHA+2IgAlMEE9Inae7oqmsPvlwTAHPdHsP7Rkc8btfkCBY98wacErlo3b2KtEgDeZCQWUPw/a2MAA8KXar3oW/1GIuDU4wZa8rLuLjVSMiwkXgvBvrFi3hovQCBtSVytBgCkbKtznD
HkyODEow43IEW2wcfE6yjuv2cKmGNtoTcJDKRK2m2o++WcAG7btGxiQfx8BFBzuVaqpNHtBLd9WUJIwMMSQuCGtZNtBWl43YB4YfckiUeLAqp1fYRzw3xxmBIxsD+kExggvxKghkT57ao89xHWDHsL/2htQTA9U0E/EfSj6vhc7S8rBH71ASw3VCuxtkqM9FKAm1abyULSan1L6EnW2iH5M6n+JkF+9OGHBrvWq60EZe4VpAT7AGGNVNu0l1Up6yMxYM5nlP7+GUBkgLwISBOwJfkB6yEJZJedULBP/p8SSBO1Vz6nRMsigef91hiMpa1bNBb7SwM7n+tcf7nyP/ZJ2CtorbdSexksOz5CAuPUwQPNvmKdc59RQgDc9kmBUye9OiOA3TFKsNlod3jcgN7WQm+zTamE3yTqK6XivUwBczxvdCHzz1o25oYX8B5DTsM+2URto0l2p0o6ex9sA32wKABzM8VKNEWglCcFUkL/+gig6xOO6hekYofy+X7yZ0iwfqMWRmPUzo32hrQS8mlZWV/MMexnxwikSuKnsIUE4LSZb9nY+SZW0bEwXQIaJvGdkf4rpBvMBjAHc+0pg48wv5lEfjaCzU1C80613sMG8BH0OCxTXZSopjCAPS1ZAHNTvMEc923xiX12wpH9BLrY18CYubCwhdcDuxTs88xxYjQ+RVHY8zB1kNBMBidkM/92qVnN7gvQHADDXOcf/iPtx2Cc577QYS6WQ8ajpgpG9tqjQXBUv0MNOBeOUfizgOF1tAEc3577vpPhNfxeqp/sN/gUMOcynsSzYCPcVS3oDVinokL2oq0FtmT++Pq3MCJy3zC6AOrxEbO5tD5g6PzXFoD2c9sAGX/AFGsVM/Bpycp6webD3jtNcYOitlGyAcyxd3cTEApWa5Ly2QggbeYYbGwAdLFpXQIQkzlLizn2HN82sInH5XmxtwFsO+/Km2VX/uIEKPF9mMQOaNvSWNjZQ5IFljB03JgXX/YGlnGMwWJt6qO2nuiCxBaOycf3/Rs2ttVrCtoUG8O77s8VYyKGhY7jmcLQRew5FMBL+KZjxk2xuEL4etRPfMITjz5cQOcOtsZT7QlR30/13jvzWOtvqaDrBQOWpfpM8ms+gLnXVawwedoMYz71BRHtsN22xjJYduut/wIuTD6/79/YX7R3X6P1QIzZ9bxg/AJcc/ThPa39evJ5Sjpg7s133rMYN3oBAKiP7LlbfdnEfQMAUNiluQpF1DBGUvTJvkpxuo9NjG8LaA79BKgyFLQbMSsKEi6/YaixwYbvRf2E9WxPtf8dqCLPnuoAka0Aomcdw9j+rHQURQw+0lIdGU48up/FKGAVzVWI52MDPj9+ihVnuuZ6eD4XYI64+vRZc6xLAoWnvkI76iqVKqqIRfECFZzmKvjj7G2sY5+WrNjqLcVeTFelZAZjriUbwNymsrX2EFiTIt3BA3oZu2+u9xV/Px6BeATiEYhHIB6BeATiESiMEYgBcylGubQA5taqPRVBOxKJj6vKxScAlGI47CVLaimxRaXfFgpekNyisoiEYPUqlS3gRvtWAGY4hwTdMg28FkfAHJXOBKZxNAmmZiOMO07GZTfcETzz/ISAyk9ADlFCiwmcFlpl0PYSBoBkIbC2dPlKC7IBdPJ5vgRsAH5QbUarnHwIDiaVo7fdO9zm2wqxvPhISQDMAT4h2H+z2r0NUxUciQBXK7bw3kmYA1Ckxc1WCqxmI4APCVr993//9a6oh2UOSnWCuFQ+b69gWpQQeHl56kwLxBA0cAUMSB7gAJMMIwhJhfrGUC2WKWCONiI8CwKJDQRSLSzh+cCgAMNKNzFh8HyKUghGURULkwcgbh8hAXL1hacLSNNELIllM95Lks9B+6vFagsLsBcmKpg4fIRg44YEzA3Q3tJNQXz2GZg/sxHA16+qOvZy7S8LxeZHcMwlJFho6UWb8cQgavi9NV9+ZWyoMNfBrOnb/gN9cNZJRwcE8Jn7uQqsXiQN7iJhLnYvH4kBcz6j9NfP0GYFdhUq0GEEIhkH4xDJVHS9Swjms0/SMoa25RQI+CT/OS7sD7SuKVNGCWrZlNkI9hXJTvZmFxtCeHz0JAwWBYDV1k7A6jfffvfHPjlNrZ2nOJlbClrXbCoAQA8DxKCHsQVKu2QKmAO4gK4AMAGQN5sWzlmNqXAJJGbZn0nMFjWwPNU9FAVgDl+C4gnY3Wgv6iP4FFecf2pQT/slwI9cBV+GQiOSg9fedp+YIz/zSvbAytBoNwG1jhuQsjVYrtfl+j5gQxKAL0x4NXhfoCyX4CPTJvK6S89Sy6w9DKDj+k5RvJ8NYI49BObGPZRcBRSdjfyodnck+ZgDvixJsHIM6tvNmF1IFKcCQr/z3vtWPMEc9217V71qZZvjHfZrYTa1L4gq6r6Z47SCBKQDCxMtxV1+PGOJXUrisZ9iA4mS6fzju+jcK84/xVgA8+ErkIj/TX4ziXie23x1OMCOcwmxiXq1awYnqaUz8Ylk+UHAG0CFL0x61VoxAgrLVgySprVHOzf8dGxG4lkA5KpZQejOwa6KZ+2usdlDwAfAkeznLqFdH/Yy/o4vwy77H/E0/EWfc7iuIdADwB77r5jmsIdc7H6AvmB/oZX1BacfZ0BC5zk24AeyAcyhQwepTTGgY2KQ2QgxOWzWC6+5NRgmNnTiIK61CPCKoi5iLT0POUDdGspmfGrWy//pXE8rNniZ2nAC4ABk5pKmAvq3a9U86KPi11TAhmUrVlkhFUy/FIr5yqXnnCSbtafpuFRxR9/jhJ/D94axDB03Wu21V8u3dAF9WId1pQsYU1ovsj5CgbXpJekAbBTWm49QsHil7BNAofimubTGDM9HZwZA8bfd87DpuvD1qJ8+gDnaoNJZ4kXdI+fwkc2kO1jHFnf30FOuY4IV/e23/+k5Kf756y9aB9HfYI/FfobZdYgKJ5KlpAPmYEEbL2Zx7AVfECPx+yvOO9X2kHwAT80mll4nVksL08UqZqHlp0toPY5+OFU2MboqFB5pNoA5WCdbqRD8sC4dxXDZKjxcxj/Rr+iml16eZrFB3yJWOrawlunIQOF2roKu5Zk+rcL0W+99xDt+4ALMEWMk5okPQ4Gdr7COActt8o9NcsXO2yl/l/7FHoDYgHXoEgCR3Bu2AEWMyZINYA79xN4MwydkDejBWOIRiEcgHoF4BOIRiEcgHoGSMAKlHzAnppT7h48Mbhj6gPfzIOnbr+fBxjiQTQDE+0R/fBBQ21mX3mBtLHwqcQkIEUjo0KZl0HTv3dOeDkDVVwq+PDN2ogUYaY2RS5Ax1YlIpkKfTxvImtWr/BlghHKZNhf/EtDLN7BcHAFzgCWOH9THqh1rqfInW4GB6DoFjGBgWaWWAq4WdySdYEqAdhzAXqrAFeAt2h1ect3tVqnlc20kJQ/tcqBVcMNgly8hcATtOK2cfEExJQEwB7iRoBVU/FQX4ni6gqj5GtNsj0NwH/AqrbGYu6zDqCT012LOeeud+dZ+cbjYgVyAufC6mJdUGcImlW1CLDxWSfiZKWCOqmgqztsTUBcwo
7CEJCx6l1ZDAKPyAVzK5dqnvv6WKkgnBdNUjU37Zpeg60hUXX/JWQbcyUdwmzULQxQtAqmmRW/6rOMNCZiDCZD1eagCj7vvWjerZEs4lq+JPY/9BQZM9nmXAGqnTRprmEBWssAONkp2wwQlWmDMcAG8SfqReDz9hIHBuacMVoJx07wk3mgnRcL8utvvVxLrGXtmruB9DJhLfpp//9vAXKpeBky90447qOK3ohU6UDlOkLR2jWoZMYdii9AOFVD5g489E/z2+2/ORO3fr6pwX0FPAipuq7Z2rENYVqL2SVri0H7pRSVNhotxlnaHPkJhAuAGCi42hkBxpoA5inDKaR7uK2YYxsrXX/AZ+6jP6PGbzgKozD4JeK64SVEA5mBRJDGIPwZLQpSwhtD7MPtcd8mZYkirnJdkdHhOAO53PPCY7Ws+hTi0xGugve28Uwf/JTkYHm9D/+R6GT/aRi5assx5OgpNmoqd7/pLzzY2NGyC4ijZAOZo5329gIAN6u6SNRCQBN8vYl+6VEASQPM+Nhsglv6HdTHAHC16UwHmKPCC9WOy5jiANZego+rUqmFznP0CH4u5nw9ZJFuUOU5bcR+7lCKsqpUrBRR2HiVGn0TJdP6xdvcWmyxrt/Eeu+WFnSi8HubM0AdGWCs7mOddAlivquwQ2BZ7de30t49TvAaYCr10z8NPGvMgf+dbYB2lkwIJeXxbmIJol1pTBQVbAGyTrZtOGH+KzrjGRWLCKgnC3AZEhC8AEAFbviglG8AcDKcAdUj047NlI+gWwIXnXXlT8MCjT6tI6H/OWAg2AwUXVpyoLhSpdI3vtYwRA/s1t95rey5xJ5c00bpt22ofi1UDek0WbIfnxr0ssNwM6zKS/H7y3wA2AZBSqHaUWML5PV+2GLHoodJx3OPCxUud3RLw4XZWwQ6+8fnayxMLMmkvOEpxVNYYLSpdQvxg+223NVB6Lx1vM+4rD7o7jL9SCM2699mbfABz6A98DPYmWAJLggDyIdZFEe0ZJxz5t0su6YC5cZOnWREEz8RVRIJdgE5tq3as12tfxa/MBxttOKh0SsBeIG9Em3KXlJd/D2sYNjFgs1CyBcwRO+osgDB6DxsvWyHOzDqmuASmRoo0o4Q1u7lig4Dpr1NsECZYgFi5CsWgxJgeHzXW7EyKGHzWsgswR4HumJcmCyz3usUdc73Owvo+tsDee+4qW+A0Y/FLPm82gDnmP12a2CcP0rxhTsYSj0A8AvEIxCMQj0A8AvEIlIQRKNWAOaoFv1IFzn3DnwpuuMMfMEeVFFUQJFHyUS3vmgiAjc68+DprzUGQxiUA5gA9wQRCBXWUYPhPnjbTWkRQfezbYifqmMnvhYFjflKdAliOak/YgQDT+To1BEAAN859/yMvBiACY80a7xEMUZsUAmZRAgsP1VQEpS+/cah3MBOwC5VZJFap1MpWaN91692PBKMFHKHFDAC6KFkPmGunQGa3lIA5Wm588NHi4Opb7/F2yGBuOlbtagDLwSaTL/nw408MFMNcIxnhIyUBMEfQm7YWMAMyP0uCEFiAZaFd630s+AnQMyrIT4UdeoEKO1gCfRl6OohevUvHdmoLsq8AFtmDSUvCmHKNmQLmSNxRdUuLSnRIYQv6ONTNhX3uxPNRXQnYFMYJdJ9LCLzSjoXkTeM9o/W661iJ7xO0naE98KxLrheod7EzGcJ32WM2FMMc1eC0hOzdrVNQR+fJJdliDJ+qdMeWALjkkvp1ahoYkSQZbaOThUA9rLQE+zi2q80rCQ7AV7TRPVOtmvMlBFVhKAAUPvTBx+x3VzAzBsy5Rx+bDGAqledNZEPS2mlX2bvoK5JkmeoNQOWvaZ+EQQJmhJIi6GhsaJKEsJSQNEwn+BOfLP80GDv+FdsnfZgcORYtwgjy2xhnyVKc7pqK4+uZAuYKGG4qmM91/mkw3GxSqLdVXPbJVDddFIC5kbIDARNR/OICXwOuwFc5SMmQqy84LaiYZ+YAWLdoVTdDLLU+jE1cC/bu5eeeHHRUwUhhC+xS+Aow3yz9dGXk6YFb7Si2MEBdl+l6U7EERR6gEN/MBjAHsOy2qy8w28Y3BpDuli646lZjBmbvd+3/6PS+Kng85MC2YtHdOyUQljgANinPCQCHSyic5PlcdeFpdkzX5zN5HyaoEU8/bwCJWXPmOe8P9iXiKrSRJlaVKJnMP74H880+jRvZ2qVoI59CTOCxZ54PKJjxYfHD/uW+LjnrRAM8proWnv27anv6hJ7ddD3DeYoV5VsMBvmH7wSYuoaYBQ8XABNfl+tjv0onmY5/uuMU5uvEBiiqovjthsvPiYwVFMZ1ZQOY20cM3fgejQUigwEpF7ngqlsEmHtGrVF/tgLJqGOhF2iLzP7XQfHYMprD2Qog9ZvufNC6RsA06RIXYI7WkbScBsz1llpKugS9wv55+XmnBAN7d3N9PKP3KQRHFwAmf/PteQGtqKMEgAVrrZd8Y/Zy2B9DgR308WdeCF5Tu8N5As+5BL1CXPoq2ScwCedL8EsB8Z916fXGdve7ioSkniLFBzCXif0VebJCfBNQIs/oLO1J5502+G9nLumAudHW1niS2cQuECP+Mx1i2ouJFpu4uoDW+ZS35y3QWhobAJyb/4Eb7I+OoiCOdUTHmlCyBcyRN4CpEVZp9G22QmyHGNPzaq1MbBBmzSghX0CMqXvnDlrLp9rvUZ/3fQ+bgpg3xeLEBinQ8CkadwHmMtW/vte7oT+HLUCRwI2yBbjHZMkGMEdhRu/unc0mJ56aTevy5OuI/45HIB6BeATiEYhHIB6BeAQKYwRKNWAOVhSMOyqTr7z5Lu/xpAVmTzEd7COGr/I7lvP+XjYfBKwyW8GMC6+5zdpN+BjqgAmooAaQhmHrEpx6jPcp02dZ8vv9hYtEke/HjuE6dvL7BF1wamAIgv2urYBZtWvurIpdNwVz8QXMDfwDMFcx+Xa9/yb4dts9jwhU9rLovz/1BMzt8QfDXGrA3KovVgdUW940dJgF/n0upmGDOmIBGhS0bLqXscj4fMfnMwCuAMaQLIddwUdKAmAOsMijcqRx6EkAlAQhAUzin5Ydpx8/yBgEqNZNJ+hJkv9PjXkpuPKWu63VnItRimPRyqypHOrjB/axc22iwHt+OBfSXan79TChRkWzC+QTHg2A4T8VjCGpF1UJmg1grmqlisHR/Q8NTjt+YHi6je7nOLVceFQJwXfmvR8s92jxVk3AZPbec08+1tp55WvAAE2v32s/DNatc7e9KVmAuUcEmPsgL4C5lZ99ETz85Ghr/0GAlqrbKCkIylYKjhvY29i6oj6byXvher5GLUhgFfn++x+c7WFLE2AO8NBmSh5hh5YTkDRz+YcxPgGOIxBaZqutDJgJ+yRJJI5bscKOYoaqZHYbYINshMQ1++RrYnD0SWJlc44N8R10fgOxyPtKk3UAAEAASURBVLBP0pqYtnvpwIKwy7JP0m7tKvkTtIB1tZbimmGqAYzH2thPIBJYpNKdY0PcY7pjsrawS32KdDiG4NeyK7RPKjnGuKW7h4wBc5qXVcVQ
DcilOLSESzdeRfF6UQDmHtNeDfMD7C3YPFGCbqqmZBzMAeeKwSLfzAGwaMC4QpvY2e+62WTwPylsuu7iM43JIOraN8R7AAKeUBKQsVuhPTRKWD81lEyF4fVstTGHuaO4StaAuWsuCOrWqhEJRPa556wAc2J1RqenYo6ksGv4U8/Zc/Ip4gBIDhPbeacc6yxS9LmfxM/A4sgcB2z1sq7LFQNiLwcEAmMMIKFEyXT+sVZaaYxYuzAz5lNglbP7UtEF7NIuoe0i93WVwOtHquAynVAo+PHiZVb8OP2N2Vb84mKnSXcs1+uwhHNNMLbvtfuu2r+bBrvVq2MtTFMxsWUy/q5zF9b7+L+bak9nzG+75sISC5g7DcBco93M3spl7NA1Dzz2dPCTilqx+aIEv6fHH4A5ADIUKmYrBYC5YX8A5qLZljiHCzBHUdgjT40WsHSOsVG5rgtwKH7A2ScdExzWtaPr4xm9j52MLqA9IS0lfcAxrLs+3Q8KrtVezr4eCizPDz8xWrFsgeg9YnIwUcHyda50dye1BMyXYD8DnrpIsftxaqFKTN2lu30Ac5nYX/m6l1yPA0gMn4AxvvisIX87XEkHzAGup2Uydp2riAQfiX2VQmLshaoRsde/DZTHCws+WmRriXg7BSUuwe+vsNOOwbViuyOvFUq2gDlAVEeL2ba5OhRAKpGt/CbA3CNax7BOAqh3sWoCuqJdetdO7WWvHBtsJ92bL4FZlPg37bgB86L7XeICzGWqf13nK6z3yR2Qv7tNtjtdKJIlG8AcdhRsyACWAVmmssmTzxP/HY9APALxCMQjEI9APALxCBSHESjVgDmcV0ATtIWAoSRMvroGHqrpgxVspc1Mvp2dxHNzPZ+vXmtVS9eqnZpPBT3f3weHRSxhzVVV6Rtkh3b69VnvKMj4lgUsVqz6LPhODj7AGF9GqcRrj/qdRACU5LCZUX1JpTnXDKCG4Fw6iQFz60emgGEuGjAHEJKE0i13P+zN6kbLtXPkxDN3cmHMW3+lBb8t+3SV0arDTDF+yvTkt1P+XZwBc6xN1gZU/DcOfdAYD2npWJKE9ddHVV0wURLciBLulyTpFTcJCLDGr3Uz4AqquQkKcY5tt9mm0Nlhku8JoNz369YFKz9bbeCG5PdT/U0gppyqmXdUS6AdFDROp6FiwFyq0XO/RnuRR54aY4Bwn7ZqtINs2WwvA7EA8M2nEPCkvRdV6rRhcMnGCphbseoLa4FKcoM9xgWYo41D7ZrVDBwKo2K+5Xq1ZL1X7LP/EWOwq515aQLMkRxHzzYR0yLArkykQI8VtGfeSgkngpS0c6P1GXs/7dkJjubC6GX7pGxsgHKwOJPAWuvBjpHJfWzozxKEJzFHgoMilCgblfudKADAldonlyxbEfznW7cOIdkHMJEgP0xzJFlJLhWlsIYIfAOM/Vpryke4ZtsntUcyh9K164oBcz6j6feZogDMPSKg9DNqxw1IytXyENuJgigAcxTi7CTGtHwKLcbxJ14S6J4EuUvQ/VUEvrzx8rOtJafr8/l+n+QfjHgwvfq0s6XoBMYFCiqwNYqrlDbAHMy5Dz3xrD0nfFeXsFdSAHiG5vjeal2aTyEGxBynsIR57gJdAFIlCX7h6cfbmku8lkznHzZbG+15ZwgwXlf+Yj4FhkVAgNwTgFeXoEtovUh7WJcNyT6MLT9ddgfgR+yO779fZ+wwgDPyLRRS0aa1m+JZB4hZHSAmzyB5D2T8Yf+Z67H+832NuRyP+ziqX8/gjusuNlB8LsfK9bvZMszFgLm/jjytpoeNGCUmNjFRebSdBlSzi/QBbPjEbfMpxJ8nmI5jL5+q9ovRDHr4JDCW9VGnlxsvO+cvgDkY6rivN9+Zp0JwNzsoYGfYs7FP6IqQT/lCuvtStWQd9/J0A/y4Yuk+gDkK1UYJnPWu/G6X/ZXPe8nlWMT8+Xe+2OUuO+fkvx2qpAPmYEIbOWac2cSuuYufxL56YLt9LYbF/MunQH4Q2sT43S4BrARb442XnW0tjsPPZwuYg7X3pGMOty4QNavvHB4u458A5ljHdN1hv3T50tj26Cd8aArc8KPzKU8/P95ig2tVJLTOg1DCBZhD/z6o+zMmQA/9m897yeVYrGPYlO+4/mIjVkg+VlaAOT27k44+POghwCYgy2wLM5OvJf47HoF4BOIRiEcgHoF4BOIR2NAjUKoBc4qpCST3uxjmngguk1P7y6+/OhO/DHgLsW9Br99PjAc4PhtKCPpRhTJ1xpvGnkJyxEcI1p18TH+rePVtPwC7zn+++TagdRZMP7MUdCABsUYsGd8q0JhvIQC3TdmtLTFLMpIAKEYyCeB0EgPm1o+MD2COQDuBsBtuf8Dahq7/dvrfdlVLMBwXWA18wZbpj7b+nUWfLLPA+DgFxXyqyflmcQbMURFL1foYAQAZ32+++84AdOvvuPj/RoCfdnu9unaydmeuK2b9PfjYMxaoW7o8up0Ux6LCnkBM/0O7BJ3atzbmBZIIRSkwHc5d8FEwXhW/voyAsC3t36q5QIV7WmtnAgapJAbMpRoV92skzUaMGiuGmPlerbZhI22m5ORFZ5wQNNq9gfsEnp+ginTW23ODcy+/yVpZuIBXHHZjBcyR7IcxbJISnW+9M192U3QSkr2dZMCQo/opmNrf84m4P4aNhB13xY13BncOGxHQFtPF+lCaAHOA3GBIgP2jiwLFmQqaDFsMXc1+C0AOhjDYXGgtFiY6Mj1u+HmAlOyTABAAzK3UvPFhXQu/Xxx+0h6kaaPdg+4HdwgGaZxhgIsSqvof0D5Jm6uPZfe4hLEHNMc+eUjHtrZP5jvY77qG5Pdh+gSMREEAyQofoaK/XevmFkSHyQWAQyqJAXOpRiW714oCMAdD2mgBP0hMu5KD6BMYKgHMXXzmENNV2d1p6m/BPDFq7ITg1dffNHBT6k+tfxVWmuo7V7YWy53bt1n/RiH9BvvkSDFVzHr7Pfnan0WeFd0Ly+e+Kg64QLYOSf3iKqUNMEebQoCNb+o5LV663Dns+AiwhDLHaf+YT6E4YdQLE4JJr7weTFVBI3ZqlLB3b6uWb+eICerkY/9qa2U6//ARKU7hvogN5FPYG1m7MM7T+tYlgPrZY64471S1B+3m+rgVRMHySjyLvZi2zSs++3yDgEw22YTCgy0snkUhLXYufkqyv0txEG0wuZ5PVdBYEgQ9tJlswyMFmIsZ5oKgtDDMWUtkscVOFbujT0tkimmY07D89u1xcF6nLrH3UQLh04Fi6ozZKtKILj4FdLStrofWj9eIcRLdEApAWXxTdAo2rEso7qhRvYriCUNUCL+f6+Pe7+Obrvp8TXDuFTdZvOlnsTUTZ48SH8Ac7WYBEb357nslpvgIHxP/EoY5WmonS0kHzD0jACN72ZvS68TdowR/j331gLYtbV/NZ2E6531PbO6spSmyYVgLLimIzZSXTXzaX4CwzNSvBA6je8rlNwwNYK7zkdbKHZxy7BEWm8ul3SyFAbRBpcie/dJVuGWgdcWYYCm7+MwTrH20z/X6fAab63HFKIkN0vUJgK9
LXIA5CDAeHfm8kVS8ozbyJUVYxwUMcxfmjWEOm/UUMcDCcFh3lxqKh2TXyaCkjGF8nfEIxCMQj0A8AvEIxCNQekagVAPmwsd0/6NPB9fceo8F2GAgckm9XWoJUFQASst3EDHx3DjXVAhBrf30c+O8A2ztWu1jLVx2U4CzkgLumQjVIavEvkRbVphjYGLCAYQin2o2nJZf/+sHLHSdl8TjZmKaO6hDm+CIw7oGeyrgHMXYFwPm1o+oD2AOxrOPPl5irGAwB/oIjAZH9OoW7K/EJ6CUfAmBqyeVKJqqxJaPE895izNg7juBSEkS0UZi2IhnnaxG+RrHfB4Hp7RyxZ2UfOhuIBYS3Ix5OiFxTgKdatnZc+en+9hfXidhQOCAaspeCm5WEJNOFCj2L1/O8x8EMAki8cxIiC1e4k6EcQmAjgH0tteaoO1SDJjL74NBr498brwxqfokJ8uWKaNgXH1LejcT4JNgbK7C3PhRe9/rb74TXHD1rdKbnwRUuLpkYwXMfb56jVpUjAsmKolLhayrvTG6pawA8rQIO+vEo421LN06co154vsELmmPceXNdwf3PvKUseHyLKOkNAHmtldinAr1M4YcqX27a9RtF8l7tPSETQl2nGGPj3IGvYvkIh0nJZhbqUJ57V+dzK6G5SaKAY5EJIBs9krYO3wEYBEsy9hdnAdQDwn4ohDWz0yBCygGYJ983zNRAgPJ0f17inm7tbELxIC5Df/0igIw9+wLk4LnJ0xRK7fZzuQgezMgtc4qmLhaiW10VT7263BkZ7z5tvTKswa29wGn0lZul5rVLWlLwrKwBSatZ198WWP3VvCJGChdAnAWvXCVkpl77Fovr2PnOncm75c2wBw2zTNKgE/THPcprCHpDDMGc5yEMUnxfAktYR/SHEcXk1h12TesN3QxrIS0+U6UTOcfx8J/47723K2+2W2Jx8vld2ICD8kmeG3WnOC9Dz52Hoq1UEFAA5jzKFb1EVjgKe74+JOlZofAFrjqi9XWSpr4CPEsYn7/9UiAu86HPbu54lmsV4D1jFsyKyTss88KzIBd4NPqF13JPlpOoH2OXRTCNQBG6HnIgcGFZxyf17mdzf3EDHMrrQjFNXaulqzslwB9YGCc6cnOij644rxTgkF9VDgilrd8+HDcB8Vp6DhiM+g44mtRwpqoUL6cWsN2sr08EZhKnIr2iQAB8T1cAlMdRUfouB4qimG+5+O+KN5aKn1zwVW3GIMm7HLRnmlgRWUwynbrvP9f2L4S7wFgFuMEe+YX6vbgEu6H8dlBwMBN9cyKQtgPYT4dPKBXcKIKspOlpAPmxo5/JXhu/GSzF2DljhLA1WW0jognXn3hGRZfdBVhRR0v+T3AZfjas+bAsLgk+e2//Q37P8C2S9Qq9+AD2/75fvaAuSbGQom9gB+brQCYI8Y0Vr4G9hjMblHCHEM/AZhjLe8oW5+5n6uwbn/86RcVUDxvYGnAvS6WX87pAswtXLxUeb3xwWTFP31a56KTYLengI/7LCphHdevU1N2WOqC5WwY5oixnHrcQAPM1akNYG49ALqo7jM+bzwC8QjEIxCPQDwC8QjEI+AzAhsFYI5qtJvvesicz2+/+945LrRPatKoYXDJ2SdaYsj5hSw/gFH+nBxjqu6owqUVoo8ATrn07JOM/Q5nKBMhGEs1zW+//2asMZ+pQm6xAvtzBJAhkEEw4hu1mcIozofI/g/q1Kph43lM/8OM2SzdcWPA3PqR8QHMUQkFKOjCa24LGDsfqVa1ctClYztLcNGCLF8CSO7e4U8ZK8XHi5d5HbY4A+YIVD2rFqUTXnnN2C0IzJc0MQdcAYVj+h8anHfq4GB76YooRxVGjPfeXxg8MOIZa6Xjc78ELEgm7deyaXDmkKOsDTNJy8KWAr32uzH/DH3gsQDmge89aPW5ThJhVFW3UquBykr4pgumxgxz2T1VAr9UkaKjfAJ8zCmA6jdfca7YV/bOSyKJvZY1DbD46lvu9WIV4W43VsDc6rVfBmNeor32a/bcXAlH1gz/zjzxKEt4AprdNA9JZdYwa/mmO4cFtAr0kRgw5zNK+fkM9jQAG8BjsEBhk5Q0CfdJmFgvP/dkaytZpkz6CujPv1gTzNM+iV/xrJggfIRzsE/iV7BP7iXmzJ12zG/7Sp/rCO1/Eo7X33G/fA4xTHv4RBx758oVtU8eb8wJ7JPp1nfMMOfzJPw+UxSAOVoo0jYNsLQrOchdsF/jk950+blBDSXm8gn8AAB0y90PBx8p+cS6cwmMbbvV30XA12OCtvs2c3087+8DwHpJLHP4DSTMXAIoYe89dg1uvvL8oLGKNaKAuq5jbcj3SxtgjqTzC2IDY47PE2OLS9DftWrsbHOcRDj2Da/lQ5gnzHGAZTB7uwBzxHxqVKsSnCCWs0EqhkqUTOcfCdpGu+8a3HTFOdqbdheIOzVraOI5fH+H1Yr7miNmaRhNXcJ+WEMJePZHwFu+gm3/22+/G/MwHRNgdiOexfnnzv9I9uOXToCO77l44ux97N8UHgLASRRAPC9MfEVFsDMNxJf4XqrfWe/ldthex6sv/3ybVB/Z4K+hvzcXQKn1Po2DvgIq8ndRSgyYyw9gboniui8otgzDO2Bcl6DP+Hf9JWcFgwf2EpBzi7wBaCmsQReMFph8qQDCP6oIKkooWsGWOLTLgQVFLGLDDoWuGsQTXp460+KN4evpfnJPAFBuUjyBbidbCJiajznOPAVsTTGXb8GyD8McLJUwhcLu7tPWnWKcKrJ79tqjgTGHpxuHDfk6YCZ0CKy+xJeTpaQD5gBCvzRpmuyF14JlK1Yl397f/mZ+ERPFJoZRK11x0d++6PECc+2Wux8Ru+LHzvnB4WCArle7pq2jA9u1+vMMxQEwR9EWc30K+S/t0y5hXGnJylqGxQ+gd67y8y+/GkAZZu2rtJZdHQzC87kAc3Q7eV5AS+YMdoFLDHSqtbxnw3pBbdmaRSWs42pVKpstkOo6YsBcUT2Z+LzxCMQjEI9APALxCMQjUBQjsFEA5sZOeMWq2977YKGXg0GrKhLlJ4t2unWLxkF1gYzy4WAnPmCqWEgu3ifWlOfGTVE1+qfOZCPXwLUdoiqhK84/xarVcmWp4BoAyH0mljkAezgtBAK+EuMcFT8wzwEUoTUPNPq/qFUlFTmZCJXDMMudc9LRVuEEaAfnIFliwNz6EfEBzNH6bPmKz4JzLr8xwKH3qYoqr6B0M7WUgRq7/2Fd1p8wy99wun/XfKCimpZssJUAcPCR4gqYg3mKYOPt9w0XCOAtC/D5sFH53HNRfIaqwoG9uykxt1tQrWqltJdA1S9r/fb7H1WLoInBd9+tc1LTE4xkLeNYt913H2vNCgPm5mr7l2qNpz15jm/QbvoTtbQeoaDH4/pHgNYH5Ig+3UsJS4ASJCwBNaSTGDCXbmSiX6e123i1aibgyB7sEuYUrdBpyUrQkSBzrvsvQbD5OjeBufuGj/Rmc91YAXNrv/raksmAJwD0+7bZBBSP3VSlUvkgH20nPxNIgrb1DwrES5LER2LAnM8o5f4Z7A
32evZJgJUwqvjOk9zPnv8jsG/BNMo+gP5JJz/IZsY2fuCxp62lDPvmLwq6uwR7p2qlikFbnaeT2rrRxpxq7nTAM9fxsnmfa4X5ChYLrv8nVdb7tJ/Bz6hTu7rtk/u1bGb7JHo6lcSAuVSjkt1rRQGYA/iKPzFGbVlhbPKRVmLdOl9FGbTyqaj9OldBtzAv8d2vvuWegH3ApwACOxSW4BOPOtxaTeZ6HZl+HyDWy2L0Yezme7BqsYYA+F2iArgWTRoFFOulWVaZXkpeP1/aAHPzFnwkNuo3rPXwbLWc95Gdq1S0wiMKzYhn5ENvs18C2GOOW1sy+TEugYWN7gJHqYVm7+6d//LxTOcfX6Zg6PzTBltxCveVTq//5UQRfwD4Y+3OEJsz90U3g2++dReqcu5d69UOTjiyr1onto04Q/q3AOJ8rz3u8zVrgy9WF8Szwu4J6+NZX6vVoWJasnHZtwFzZCIwt+9Ybnu7Tp7B1ltv+WfinvZ2tKcnUe7D3IpfU6v6zmqH2sNijjCxFLbwvPHVARvX26Vmzs8/1+uPAXP5AcwBHH1FRWpjJ7xqAHjf53LOyccEA3p3NR2Xj9Z5sJNTlHH1LXcbYO5r6ThXbIY4DDquuwCp7OX4dKEAMAaQDtAGFlwfgWXugtOOCw7t2tF801zj5pxzkYqVadF5z8NPGijY5zp8AHMwUk2UPwXLL/Fdl8BeBuj5yH7draVuPkHPrnOH7/9D18B1AORGnyVLSQfMAWanE9Bo2XW+nSsAVFGoTEEEujVXYV8l/wIAi30V4J6LqZFz8kwaiQ1uiJj/EotIigNgjoIFYoMUL2Dj+0j7Ni1sLdcTCxr2cq7yreLc2CgUyLOWAd/7iAswx9ql4CcELbuOafuw1tAAxeopQELn5cPGdJ03+X1sAQDG2AKpYngxYC55xOK/4xGIRyAegXgE4hGIR6A0j8BGAZiD7pnA+8vTZgQfLPzE63kSlOxxUAe1H2oVkEij2g6q7XzJWgHQln66Mrjj/scsmEGViwvwRHKt4k7lgm4HtTeaekBVG0IIaFCpu0zXh1NG0mSJKp8JwACuI3Hxww8/WZsL32ocKOMvPnOIUexTGZSqbWMMmFv/NH0AczjQq8QQeK4AczidtAigzW+U4AjhQFNpCXU+1ZZUKGYrOJfffv+9VXted9u9mi+fGVjJ53jFFTCHQwi45vIb7zTae9pJuKr+0Q04mv/cRP82cFsE1hyJFtc1hc8AQNj+rfcJDj2ko4EBwteTf3I8/t390BPBk6NfUkBwmQKd0a0zwmMAPCunlhBUqA/o0830VNmyZQzolD+tGZ5t/U+uF/AxwctXBIZiHbzqUc3HEQhQoItgMbvozBOM1Wz9kf/+WwyY+/uY+LxCQnDajNlqyzrOu10zybMj1faItmqNxcqUC/NKOEcIzk3Q/CDQDgDcRzZWwBwAVBKeBDLRBawxHwHM37fHwdauAnBuLslX9rIPFi429l3sN9rz+UgMmPMZpdw/Q3IaG/byG4Za2ycf+4P5QBCYPXJDA6qxTUjcu+zqcCRgtWzXqpntk/vus3f48t9+hvvkcDHMwXq46JPlAe3ffAS7t9x225kNf9ygPtYqartttjHfIpe14jo3VuGv8jFg+iGJCsCRpKOv0NKqUcMGtk/uo4KLKIkBc1Gjk9l7RQGYgy0a0Bzze9Eny7wumHai/Q87JGgjMCXsS7nOZfYbfFASlNf9+z6BYH7wYn7gOmiZ2V/tqwG+FrYAkpmhdsfDnxrjbesA8jt2QG+z0WlzlevYoZ/wYfBbACbwd5RwPnTxVltuISBs6lZQpQ0wR7tCwGXMcV/QBcVmFJkBdG7euFGwhRKaufg2PJc1X35lPiZznBamgEtcQhs0zt+nx0EClu33l49nM/84HvfVYb+WQXOBNnNtH0dB5Wrd1yvTZ9naJYb0v/+5iyzr1Kpu5+daiLnlSwAVUAC69I9Y1nL9JJ4Fmx/xLN5fp/XC/ugz/uF1nXxM/2CIWP4ACIeFVvg6r4lRm6ItOjb4yG716wSXnSPAbNO9jN1Wy3Gjlhgwlx/AHIzqs9+db62nR6r1oa/QDhnmRHRBAYA7twmJL7l85efB9bffZ0VzrDHXnlQQ/9wz6NpxfzE5dv1LvBifA9+Ultqw5/kIdvcAMULCTtWs8R7B9jnEzrl2/r2uawBERUtJHzZZrtMHMDdLBYbEwmG8wv7zEYpY0CEN6tay1qw+3ynMz5R0wBytf2F2G/7UcxaT8Bk7ngV7GYXEMIvnatfB8s+++oIAsNfffr8VN/vkXwCe4s9iEzdP8N2wCr9S4Rcgb3z4BbJdfaR18zy1ZNUaIqYDqOzJ0eMCOpz4SOM9Gxqgt5UYURs2qJPTuLKO0ZOwVcKqTZzJN17gAswBAGRsR4uBnwJ0XyEWfaSA+OXL7fAXoLDv9zf052LA3IYe4fj48QjEIxCPQDwC8QjEI1CcRmCjAMzhzMJy87CSWzMVzPYRACAEEkkAExSjzSCv5UMw0nH4MaKpXPrw4yVmpPN6lBCUgyWqS8e2wVFiwgD8tCGEJOOvSlj8+OPPRp3/408/WQCeKsFPV31hYKIZb71j7XoIbvkISWwCjD3EbFa/Tq0/A4yJ340Bc+tHwwcwx6dheMF5Hq+WaCs/X+2s3ASkRrUltPnHDext1YDllQzNVn5UoBmnkATsSLX5InHsW6FVXAFzC5VMman1effDT1iVuM/9UBlOy1MqsrYRUGxDSkEA8jNv0BygsLqqFjtb7RKh5I8K3KCDCPjTDgL9RFLBRwBAQI/fUAwjBGW6dt7fEuxbS2flmoCJOj/AQdg5uWYD+S1dLvYAPzAU7Yhat2giUNa+lnyqViU9+x7XEAPmop5E+vdImL23YGFwl4CYU2e8mf6DCe+whho1rG8B7qPFWgbgOmreJnz1b7+yn7F33fnACKvaXvapP6h3YwXMAXpZqNbaMOXQ4vgngbF9hKQnweFjB/QyxhwYNLJ5biHDEK3q7xz2eLBw0RLvCuQYMOfzpHL/DCysc+YtEMD6SQMf+DAP05qG1mPYN6mql3O/qvVHIBEOQOxnsY3+/nu0bc23dlKAGpv/jCFHCjR3oHPezppTkNhinwTY6SOsB/ZJqreb7r170E0JPABoMHlgD20owYZZsvxTA2c8OfpFY20kUeArJE/bt2luhRZce5TEgLmo0cnsvaIAzNGGFX/5hjse9GpXyR3RTpHEHAlCEu4AsHJhhcXWguWOVs8A3GGCcvnHXAdAG2zPDm1aGmMTrxWmsKZo03bj0Ae92uBxbRSaoAO6q0APZp9cxo3jATaENQ2dtELPEl8+SmjBtMP225q9TuI9VWFgaQPMwTYGcApWdJKkPoLfzBw/+MD95Dv3CbbTPpaLb4ONAxsZvjNznPZ7PnOcJDFzBb8lMQHOPWQz/7aVrb2rWA5hnR+s+8q1ZeFPAtKzdile4r6IEfncV5O9GgbdO+u+BNxrJNBtvgSwBj5AQTzrJ4tpkfRlnyKehW1JXBBdS4cFn2vl2mCCoaiHeBaMc8iKP+Jjt907PJj+h
h/7Ve0a1YLjBvU23QXgNxt72U5eSv6LAXP5AcxR2LxCRc4PPTHafDjf6QHjJGAU5iRFJLmyHAFMRh+gC94ToNTHFq+w047Sce0FTm4jfdDCmJjD6ye+Q0wf1nFiPj7CPTTctW5AO+3BAqdXk52fre7GzyHuBNiaf7SY/c833/lchhdgDr9q5ux3A3QIbTd9hOIACnDYD9BHxU1KOmCOvRkANPYCIFQfIWeEvdBHBYQDVUTMHMzFtvuPwN3YC2YTT5xqORmfvQqAWzdsYu2rrO1Qihowx7WzjmkjP1SxOQq0fQTQKUzW7L891bKZdZztnsk6xl4fqhgTOTnWss+Ycp0uwBysusSmH5WOuFedpHzlmCMOC3p36xxgC2CXFzeJAXPF7YnE1xOPQDwC8QjEIxCPQDwCG3IENgrAHO3FcEJvunOYVaRRleMyigkak9jaV4GDQX27W/IeqvF/io0LoEW2AsCIdhEEDx5VtdIqVaEAgPERAutUyHXu0MYom9MB+KjmXb5ylQUqf7UK83QU02L7UJJuG2Mdq6qk3ZaRl4FzQUD2AzkYswSSIiA4+90F1tLCVZVDhd8RSqZ00fXTeoaEabLEgLn1I+ILmPv6P98GwxQ4Gjd5ekAVHK0ofQRQw0Ed9jMGRZhDADxl6nQyHwgwP/7si+bIE1AgWO4rxRUwR8UbSQzYlXyr3qDcb7rX7ta+ORcAos/YoctYe7ATfCfmDZcA5iu3w/bBhWccbwmJbbcpE8kqSML07XnvB3cpiMAzpW2zS1+G18C8pR0jc4ugK4kY9JZLt4Tfz+QngIjPV6+1gAuMKOgPXvO5VvQ7zIqDlPDo3rm9sZEAeIySGDAXNTrp3yOYTHuCa267R1Wc07zA4Ztttmmwg55Hx/1bByeJCbOa2qJnGzwCSPyhmGXvG/6UJbEBf7n2q/BuNlbAHAlG5jttKq5S+491Sr74tKUGxF+lUgWxl/YzcG6VihWCzcVimqkYe5mCl+hg2mQQLPZpe8l5YsBcpqOd3eenC6QMwwPP6ONPlnodBFBaUyXGa8qWJvC9IWXlZ6sFpJ5thQQAZl2CjVpGwDXa6AD82Ub7JD5AOmHvgdEHIDCV8t+v+9FbrwAIhrGos/ZJ2k7TlhE2j5CpJt05s3md/Zu1DEMC7Kvsk1/KJ/JJXGITkpCAzeiwrp1sn4RtLkpiwFzU6GT2Hm3mZyh5CkPM5GkznV/mee0oW+/26y4yAEymNj0nYL7QOv0KMSy/+fZ7xlTmsqkAwjKn+/Y8KBjUp3uws4oPUvl4zhvQB0j0wxj88BPPys6dY4k0n6IVjk2iifaGsDZRKFLYwr6F/XzFTXeaT2asmwJGRQnt4bjWbrJDaWdeTkkyCk2yERLT7JVPjHrRfAR0oAswh86hBTVtoGCHTZXULW2AOWzAb5XQ5DnBfAwbn2uOAQJljtPWjCJKgMPZzjH8dPYkwCwvTHzFWEp9Wg4zJ1o22ysYcmQ/MS+rdbjAVomSzfyDvZn7Iq7EfQEaZ5/ORohvESN6SGsXkMwiFZ/5FlsAAByidqwAW6rL3k8U1hT7LXZplO3OM2I9waKP/xAl6DSulRblxLMoXoUJm3P4gP9Zr726dbJ4FozYiIH05etcefPd1jqPY7l0J3OIQraDxBbY5cB2WcViwvuk5pZrJ+bp67cTh4FZEh99Q/jp4bX5/owBc/kBzDH31skmBVh2450PWgcKVytUntE2ZcoE9evWki7oa+z77OWp9gTX8+T86LQXBEh+7Jmxpgso8PUR1v8JOj/tFxtqL2ddhwJgg/lNd5b7h4+0+e7qrIEdtJ2KWdGZtKbcWy1Ms22T+bWKgmGofEQssthlxDx9mSl9GOaIl8xd8GFwlXQI4CzG0SXsA+zfB6vAH1Ag95uN7cd50FfEdhlj9il0ikvw8bEj0B+p8hIlHTD3g2zijwTuwl7AJsWuc+l1xoS1RBvgY/ofajaxa09KN87ocpgVsYlfff0ts4l91jLHA1x/rEBY2MSJc77IAXO6NvxQCs9oMTtf4FCffZd4NsV2A+Vn4KcTb8qGPILnR0E44NS7FWOiwCST/IULMEfXKHJ7w0aMCm6+6yFvuwJgYyfFPSE1wBbLdh3z7LlHWHS/hNhCv3ssZVvHrGUKRFIV8cWAOUY2lngE4hGIRyAegXgE4hHYWEZgowDM4axR7XHxtf8ORo2dGPz4808yzKOD2EwADNUqAsLgaPQ79BAFtNqYEZlteziM15Wff2EtxsYL4ESlENcRFQBMnIg427C0EVyD7psWrakE527k8+Mt2UG7zN/StcPQ/eHg1qtdIzhcjkdykDL52Fw/SXOcRQIhOBkPPvaMAnM/mIOd/PnEv3EeDz6grY1hZ1UNpgrIxoC59SPmC5gjQAv4cpyAKLTY8g2605Kgkub22Scdbe3BttziXxaoXX8F7t8A6y1UovwmsSlMVaD5J60rnyRseOTiCpi7T0E4kigffLTYkk/h9Ub9BCx30jGHB3uopdLOlaOZyqKO4/Pe7Hffs4TEuwKzEex3CXoMhq6TpDt6HHRAUK9ODa379Ak5ghaA8u4QsxQJ9iWqkvPRl1wHgVX0I4wjTTQmRxzWNdhzt3oCPOV/TBYuWmrMRiNGjTWAH468ry7lGrfUmFx2zsmW5N22bFnp000jhzIGzEUOT9o3AaiTnDz/ypuDp9VGhWCs6zlpygqYvokBNA7t0jFoq1aJAHuzkamaw6NffNkAI1S0so/5Sl3tjW1aNBVYpGNkmygSlbS3GDpsRHD7fY96HR6Q8qmDByrJ3ymoo/NkE/QLT/TG7LnBbfc8ItD0B9Y6PXw93c/6dWpayx3a3VCBnCy21+u5jVI7iQuuusXA9z5tWdE1BNtCYD9sIdkAJ0isPD9+igGyJk+faXPG97nFgLnkp7lh/h6hJNijTz9vgWZfVk8YK2gFv4/aItXYueqGubA/jvr+Rx8bIAG2LNa9S5i7/AM00Fct6+tpjUSx4LEnUjDAPglwkGpydJuPANjeRO3bAc3tbu0suxjgvqYS/VxDPgUbAVuBfRJg3w9ijvZJTHAN2GiAoc45+RhjjQRYwd9REgPmokYns/d4djA4PyUmFQo5fARmi9uvuSjoccgBWbEvoGcBrAFwh/1h9ZqvPPbrgrXTSu1QO6tlJQUT2TKdABLkninYmP/Bx97JaNYNuuUsMSmTmMS+K2xh7L5Wkuza2+61/YuxcyW8uW5AqfsLHEBik7asAH6yERKrsApxflrQk7Rz2Vo7qKClmbFdtlfbri4pwRGlDTDHcwLkcM2t92ptvfj/7L0HnFzVla+7ESiBAkkBJCSCCCIKEDkIMMlgTLYBAwbnNA4z12/sCff5N/Pu83jG9tgGh3HCGBNsTA4mmpwRIKIEAoRAgayIEAL5rm+XDmq3u6tOdVUn6dt2063uqjrnfDuv/V9rhXDq9Syaq8Wc/Q22F2wYCMxwCupIIV0XaWFp48wdZdIUch3aCoep//aNL+c2wnjcsnSs/fG5fXIq5eK5cKjrSKHtIbbluW6/Z3J2puSeahWei3Uwz7Vh
iPVYQ7Ysl197Ux7/5i1YFHPsOy3/9Fc/0+eZU085/qjIxrDtX/2trX8wh7+z7J2wm7yVnUO+HdH6F4SQsYzIj2iWRBuE2bjNxuaPx/5BlP3//R8/zP2ffU+ttHnY4DbdZFQW3xHdFpF+RwRK3ADX59D/zpjnEVgs/0vYOWvghzXOwDglbTJqZNPXH21xr/Y7BXPNEczR7/giwtH3fnJOFodiL6xVcAjH6bMYCxjnqjmOtPd5CDWI2kh6YkQj2OHLOF0xFrDv/rd//HKkeJ+YMzfwu6Iwn7F+JYrrDyIKG321jGCNPrVpCFBw2j4sbOiI8TpSSF3J3pQ0jvdNfiQzLjHE5UuVEczxfOxX/vU/fpTuDHHWvBiPao2h6w4dnMcQMs8QoYr1RMfHkOWx358fqeUnp7khUq4lRuTBiBpejCFtOUL1dsEc/IlWzroKxxXWC7X2ULRZvvacuFM6YsV6gQwcHSkIuxBPEm3/oUefWDGv1v4krv/JyNDwT1/9TO5Ha8dcUxSmhe5Mycp9MDc+Eo7+2JgejOALb5Vw+OeZ2D/Tfz8UAnNsTJtvuknxWKW/08/oxzjxVuq0fHRZLlJLMEebYT5GsPztH/xP7sdlBHmcwxFxmrXAjmE772iEz+L6rMfuDxsI/bjWOMJzbTZ2dNpszOg4xxjZpuOOgjkoWSQgAQlIQAISWF0IrBaCORaJLFR/EQvXK2OBPOWJaaWFRRizML6TLmL3XXbKB/jjwhsbr9CWXm/VGgzeyxgqHnlsat7s3B/pUp565tksSqn2vpZ/Q+DEAcRXP/fxbKQl2k57G2JEU6SfZRPw/MxZEZllacuP+qufMYRwiJkX53Fwx/PWKhWDxfIc+QVx0dyImIehvlrhOocdFF60YXz5UHjOEE2jdVEwt5JIWcEcHoBEAUMwh1c3IrYyhfrAuHzIAXuHx/weaY9ddswecBg+ahXaF4IGUi1wAIuoig097aKe0tMEcwhC2AziDXZhRM3D8F0rqhF9EIMzxvNvfOUzYTgf0+kpWadGCpnrIt0NG328HcsUBKsfyoLVSZHmYr82BastPwejNf3x2rgOHsKMn2WMnXwGBg0O1TcaOSwf/iE43narLfL4hUEeXmXHzpb3RORMomdwKAMDovs8/tQz+YCGMagesSb3wbj32UhjcfCkfbJYrlbkUAVzLWuj/M/Mv6RD+s5Zv0iXXH1DjsCC6LpMYZ7daovNsmBuvzicJKUEgoD25j4+My6XDYqIWZjrb4vx6c933puvWzZ1SnFvq6tgrsLxL/mQ8ns/PicLo4nwUaYgRiViDQeWB4W3+87hzc/apU8IBKv1MeYPDKhPRJ9+8JHH042xfnk06g9DdT1zi4K5MrXU8dcgAGGe/BkOE3EQxjxZyxDMnMC4j5GZCG6IqDsipKznrmfEgTGRYpkny4qN+HzEPkR7QfhTRI5p77qse5knc0TamCffnD+/tLiczySqHc4jO0X6aSJpbLt1zJPjNk9jQzADr44459BXGOdII0RUBOZJvhgLX5o9p655ksMvBCIcxBFRh/upNvbyTArmoNCcQmQl6o3IEkRyLFNYw//bN76SU5qxdyWSbr2FqKznRgSVG2+5Kz0QYzEHnmUK0R6YMxn3OSgkddLgEPWUOfQhYjL7CFIj3RUpEu9/aEpEUX6jzGWzIxtpET97+keyaI40o21FRyj1YQ2+iP0R7HBkgh3RWcsUDpwRKnGYv1dEEaPv4ehSprA2J+IIzBiPGPOImMFYwHqoWqHOjo0oJOwLOIRsq3+vaoI5eMCMerrsmptivfFY6b0zAsPN41Bxv70mhjPFxNzGR0Zd0caZ56qVhYsWRRufldc3iJqI4Dj9+ReqveX9vzH20sY/FFGEvvnVz2ZhWFtOkx1tf9hkOCxlL7vvHrvk5+J6ZcYP5hsEWojT6b/3hW2gbHR29opc56PHfDB98yufTQPXHvA38x5pxHF6wZ61IJxA2yvc67D4rK997ozclhEIIB6pVtifsO5ElPcfP/h56awPONLQZ4gEw5hXFGx9RJ1l30y6N5xpqhXuGWeZg4L7qZGSmXUA4rWOFOYLHMmuCWdcrl3mgJxxBhHmzrFe33LzsTXbcEfuq573KJhrjmCuYE5qZJxbEKTMiDTUZQrzDmMBjqAH7btnFm6wpyuTVpJ5iLGAiFF33js5R5BC5FO2ECF35x3H57GA+ZC5vPWwSrtGiIIQjzEBcV6ZgsAYG91eu+2c+xu2qU3CmZMxotrYjTiKcRUBPxF/2U8g7C+7Jy7ujb7GuEGKTBwB2ytE5GcMYS6nH9cSZ2HfYwzBUY0ou6QCHRG2k44U1mCIbLApzwxbW5kxBKakz2Xsakvs39sFc3DE5s16Acd0+lIZB0Let9GI4Wnc5mNyP9p79wkJ0dy6Q4aUWp9ic6QvkTL8rvsfyjb3sm2OSH/Mq2eeclz66mc/nm2yLdfEPUEwR9tCHPq9sLnfEeuhGdGXy7Q3uBJ9DXsgERU5m6MvrxNrh7bWr7yewmcz19PGH4s2juj13lizYGMiOm49pZZgrvisy665Mf38t3/I/bhM3Q2OaK+0F5xZ9yf6fPTljtjLiWCMw9WtkX0FW1pZrgTkYF275eabhsPgXztk8EwK5oqa9bsEJCABCUhAAqsDgdVCMEdF4oF2R4hLSBPxx4hyM3suAovyAh+EZKPC44L0fXjRcxCAUYGFLF8YEogYw6IUQyyeq2xy+cIA9GIYsi4NsQAe+2yGy6bOLBohB86k4vhcpOLgsLFaYWP/mwsx2N+SDbO1xGx8NpHGeC68WzAcVDMe8Iyw47AUg8ULYYTBC65ayYK5MC7iafWhww5QMFfDiFpWMEe7xguQTfx3fvSLNCfaNRvCsoU0lByIYqyeOGGHSPGySTaQs7GmXSNwYGNNO0awxNesuS/nzeaVYRBmw8lheRkPz9b31NMEc0SG4JAZz9WLr7iu1AaTqGRElCP6JEKAtoSgrZ+70X9zsIhgjHQ+pISgP9baDFOXpH37wH57xRhyUs1Q7xiDiI6B0ezsiJpF6hrGlY4UDGiMl4Sa33Wn7UJsPCwbNjjsqYyfCGkqBks8BxG+4RnPGLOccTS+06YxaDw3Izwt43ALA8DTYWjhgKCewrjGOL3nrhPC0/jAdHAcEJb1+lQwVw/pv34twtPzLr4iXXndLflQrayBu/gUDPcYmvF253CKtAwcNFGXa0akJsIo5Hk3xifGIlIVPz716SzQwzN36tPPFR9V13eutTpGmCsgwY5om6xbEK/VUzDY74YAIA7jOJDj8HJACITyoUuutz5/tVZiTbRo8ZJoIzfn6DikpWHtVG9RMFcvsfpejyGWNcDZvzw//fr8S2rOPXw6RmxEyojzvxHzZEc8wuu7y5QjPTFPXhARLrjPsh7WiLtZCzNPIqquthbmIIhoA7eEIPfsX/4uG+Lnl4ji0dazIM7jegdP2ivmp52yU86gOARDKJHXYzHOMT8SDSjPk8z7MVe+t/y9PFcWew6iSpEyiqhyzJMIZ1ij11PyPBl1NmGH8ZV5MubuiTuVizq
kYK4e0tVfS1osov0Ssen3l/+p+otX/JVDYSKRI+TYOvZ2pNZpfeBc64NwvCGFL6KvSyLKKE5ftdaYxWdyeDshBKCVg+EPZNEp4s9iT1HM16zzWNfRh5aFCPfJSJtOWsQbQqSHYJq/lb0mcw2R2UjRdFKkD+7OgjMA8yVCBdhRh2Weg37eL+ZHbAxE99llp23zWrl/8GT8bOsgkM/NAuaYN98Ise4foo0gjnglDnfLrNf5TEQypFCftPfuWdje1ni3KgrmaH+0cdrbJVdfHwKvcuKAom1xWLt7rEuPPuIDacdwNkSw0HJPEyN13sdkO1CsS98JIcmsyDBwR4hIbrrtniy6oI2XtUNhg0JYTb/+9GkfaTcicUfbX/FcRDXZa+KEEHUcnKOsDs59N+agFTaucImqPBf2gLh/2h8H0HdG38UegBNXPc+FrQOb1nEfOjR9OgSvbQncLowIm3+IrAmTH3kiR3Ut7rWt70SF/fqXPpHFI2NGj8przrbadPFe+hBtAXvZ9yMSF3Mne+xahcNsRPVEuWkpmEMgd3WIm6+NLBLXh10Gu0KZgh2Og2o+b6/dJuTUhoyZ1e69+NxcD7FnJ71dIZhFaFOr8Nnc+9996rQczYufy1yv1uc28ncFc+Xa38SwpRwQIsuTjzuyqg2DfdQtIZpgLpo85YnS4w11yJp9p+1iDRhjDqmoBw1aO6c8LsYC5g/6DmtQMpm8E3M5AnHm8VvuuC/s7XdFOsA3s/2wbJtgLKVvffq0E/N81N77WJtgU2c8LSvOLT4LMQg2BWxAOAuzRmLdUtij6APZ9sT6JL4QiWBrQlTL3A7TstGki2vyvaxgjvHn6utvzWuI6yJSbLWomi0/Hyc16gmndMbUASGaqiUGLN6P3YRnYi/DOI7oHufUWoW9yB4hWCKdPPY9hJaty6ogmGPfS91jE/3jVdeXTnkNC/oLczf7ymNiXsUmmtfEzKnY2lfYsP56TfxuFpMVa2JE6O9FHZWJ+Mc1C0cs1sOnfeRofvVXpScI5rgh1l3YmFiHsYYo+3y8lzM4zi0Q+TNGjRi2QQ4EUBmfYr/MmVwenyrncdghF731VhbyEl2O6HZlRcRcr2UpK5hD7MhYdf0td4Tj2vRSewEcnXDIwHmG52JPx96gmsNpcW+sZ7Dfsyb78x33JrJZYQeoVRjz+CJKN22GtUBbEfYVzNUi6d8lIAEJSEACEliVCKw2gjmMkkScwUvnv396bkR4ey6RurRsYQFORK6NI40lnnbbbjUu4ZlNBBw8WvHk2SA8j5eG8RBDNSmq8lcYy/Eg5uAOoRzCEwxqGFHrKSd8+PB0fBgUMaSx6a5WEJZccd2f88buhlvvqul9h0cfn4vn8olxnf79ESMgQmi7sOlYEJ7SP/7VBekX4TnD89bytsIYcdyRh+S0rHixEyWodcHQ9/OIWPfIE1Ozp1Hrv7f+Nwv63SO11+dDRFgrhQibMMKPs+H91n+elVMOtf68tv6N99KXP3N69l7CeNTRghiAlHmXhJc0KS5reR2XFczxXBjI2VT/98/OzWlEaedlC0LGIUMG5ZSZeDLtvH0c0kRkMNr1iPA4R3zAJp5DH6J4zZrzSngwP5ceIvVfeGUhqqIt17PJLe6NNvaDSB11xsnHZoNCdxtq6aN4/XMweXu0kzIFowfGPSInnhieneuGsb6zC/XNofxZv/hdeK79PtrSkhxdotp1YUub2idEt//8tc+l8VHXiI7aK4yXHLw/Hl7CRKrEaEdEwTIHf60/k4MeNt6k+WDsYtzcNIS5m0bYd363QUTLXDtSxK4dxr2BA/uHAHNpNmrMD69hhLgYqhg3MVhODe/5mbOi3a04EKwV2aj1vRDRh3H6mDjk+szpH833A5cyRcFcGUptvwaDKQY/jGLnRRpHhKn1FAxIRPFgjCJVIgbvPPdG+iWir9IuOZjioBgx/NSI4MqBEfM8Y9fCENB1pDDHrM6COQzmHA5UBPi31oVwQKwjiHZEuipE2UQKxFCMUZMoLesNHZwjRnJARn/moHLa9OfSUyGcePaFF/O6pSOHEgrm6qqmul+MIZb1Bul6ORAuUxCWT4p5EvEka8xaa9gyn1nrNaxJ50cKt3NCWP79n56T58ky7Yn0RohvmCcxitOe2lubMO4wHz8d6fVIHUwaRAQDHZkn2V9gGKd/MLaNjnmSSDOkR0EQtGGsmVvOk6zDMV7nOXLFXEk/mh1iRuZJDgL4NxFv6vWeR/jBvIzB/nNnIrAflefNWsz5u4K5MpTKvQZnp9difPxOpAokgnSZwriLcxV1d+LRh+e2w/61vUJbbd2+aVuzX34lUhb9OfYt5+SIhbX2eMXnc4hOH2KvvHVESyT9N2M/7ZpDPPZ+7BkWxrMxX8+Z+0p6PtoqovbHnpoWDkCvxfXm17WnQORDRIZJIapH5NmdhWebHU5MiBRgx7zGGFGrcBjGAd/GI0dkEdt+e+2adonorNuGo8v6Q4f+TXok6o06Ie0dYvZ7Hnw4UrI/ledP1u6suWoVBLlc45uRsovD7rYOyPiMVVEwBz8ifhBhhL05EZHq2U8wVmM7YT269bjNsrhj44gmU9nTrJujVlf675uxT34tj8fPPPtCbuMcEjM2cw98lSk4YxGBDOcDBNXsZdoqHW1/xWfRBuirxTPRf0eEk9Pw3HfXy2Jtngv7FjYBIjwx/xGxhXX9q2Fnqee5mFtOi76LQxVRKVuPRdwXaWuvCkEbkZ2wo1UrOGUQQerQA/bJ4x8OWqw/2isIRdgf/PHK69O3fxip02IurWWf4bMYX4nohD2rpUiEzyP6FKKTs0JEz76VOqlVaE/YEieF4AXh3J4hWmQMzRG2qrwZ1kTiRBgPJ5wnSflbJv0ma44J249P//oPn882tPb6f5XLN/1PCuaaK5iDJ1Gq/vtnv8mRkN/G/hz2nTIFQcrQiIa11RZj8zhH9GPWpNgGN4i2OiTmj4WLF+f1wSthd3whxjXmu8djHmdewj7IHFV2jOOeTjn+QzmdPGNcNQfUJ5+enu65/5EcaY4sG/WUQRHFiSwtReYD9vk4rDDusWfFBk42g9fCdsAzML5NXbE3fSlEz/Mi1XJZoXPL+yormGO9Pj3sxAj0sPNxVlGmzrj3UbF+QEi5X4izGEN4Tuy81Qr1wzj1RIwhOEITlR87CrbFagWxHKI87OT/8g9fyA7XCMdbl1VBMMfekfUC0TtZ1zF/s04uUyr218Gx72VNzFqBNcPmf7Umpg7eXxPH2ptoa0QIZ17FER771l+i35ZbLaQcfY01MWIy1natS08RzDHfPhw2JiLI/uq8i0u18+JZELhiG+RsBgcG2GJvoh9zJkd/IGUz+wn6MfYsItqR4emp2G9wbfbQHSllBXOsiZ4MWyT9mDMu1gK1xkPOywrB44GxHtgz1jPbj98y9e/bLzsJt3e/fC7tlH58d5x1sn97+tnns/28vfcUv69kHeqfoxefduLR+VyorXFDwVxBzO8SkIAEJCABCawOBFYbwRyViafYk+Hh8bNzL8ppIxDIdKRwGDA6okqx+a0I5t
bPh1kcBCyNxWoWzMVGuxDNEe55RhgsiM5VZtPb8p7wfGPRj7fdSccemTcCRGqpVogGQCh8jGcXRQQqBAPVCikxuHeEAccfdWgaM2qjbPRF6MLhC8bGigfrsvCqWpw3jc8+/2I2MOKVywaglmAKI+bHTzo2Gxjx7GvLMKdgbmUtlRXMFe8gVcAFkUb01vCYf2hK+fQHvD/OaeKrTxhaKpt5hClZMDdsw+ydTd1m43gYxGbNfjkb/qc/90KHosoV98v3niSYi0fMESgR89wbaY2IXlarwG39ddeNdn1MTuO2a0RTwsDY2YVNMSJGUlLwxQECorJahYNMIn98/UufzEKAWunm+DxEYs+FeAUPwGtuuC0biWoZ0ardB+MJhwKbxBiDeKY4XGKcgx1pBBCXcuiOUXLeggW5qHurAABAAElEQVRpfnyfG22PQwAOmspEzmjrHjBaYYBl/DkmUlCdfPyRlcgdVGSJomCuBKR2XoJxGWPjnfc9lKM4IIzCkFVPIaIcYkfaLSkLirmX+TEsUDml1stxcMQB/LQwcBPNg0hzZQ6r2ruP1V0wVxz4nvWL89L5F1+Vhayk3KmnFIcTRLIhjQaHLevHegOhI6m8SOmJ8Zk10jPPzci/66gRk/tSMFdP7dT3WubJyVMeT7+NeZLUawiqaxWGV8b9Yv2Hc0Nb679an1Pv35kn+SI6DQfXzJNvRFurVZgncYr5+hc/maM1YIznd9UK7Zh5kvR+RL0gQkRH5ymug6Eagz9jXRbMrTgAeH+ejLlypWCuMkfOizUAzhKMs8yT9UbxLJ6PeZK+yTxJNGiidjEv12JQvF/BXEGi8e+MtcyT//bdH6ef/ebCHMWl1l6LQyTmxp1DOEaUqM1inUUkafZxRBthD8qhCgfZ7CU59OZwpqUAh36D8ANnCQ562Cuz/qqncOjD4SnrvC3jEIu1HtdB+PluRA7moL0QzCHufCn2FlyDa5cttEnuH6eVr0TaKQ4iETF1Z+H2OSB+MMZJ5k2i5dUS+bS+X7gh2uWQjKiTrJnZE1b64RrZlkD9McZwmE8KNRxaWKMyZ5cp9HMOGYlI8/kzT84iPX7XVlkVBXM8J20coeaPop7uum9yFnvU3f4Gr5MPwlkrYhNij4E9BdEx9hLqhHGZA/CZs+ZGG3+59EF7URc4ZrF+oo0jPOdAmH7eVmlG+2NvPnjQoJh/KimWR4YtYNiK50Ik0VIwN2Pm7MigMCfNDmc6eNZThsQ1dtx+65wyjvmG52qrDZLq9ebb7w171jX5wLvaNRjjNogxAOHsCWHPoo0jPMv2rJjH+sU4yBhKH124KATJUT/PhjgFexnRGYmYV0YMgwPrRyMSC/fN5xeF9sP69Z5I3fi9n/wmr5Fq2eCK9/LsjGH0/X322DWnj2QdMDDWT4zPfDHmcX+M4YUYACcGnBiJZsN4U6YNc61NNxmVox79/efPyILPsnN8cb+d8V3BXHMFc9huifrGGEdUJSLWlhGEtqxbIsttsN56WTiH7SYL5qKPDYmxD5EPghTmcpxoZ8QXY0EZwWbLa+DAyZjzd58+NTufjhwe0aKiv7ZXuCZz339FOsfb7rovxqQlpfpty89jnYszCmMrjpyMP6yV6GfzYk2P+JexG/Ef+1PW2GVFUi2vU/xcVjCHzYIxhOhQ3wtxFut5nrdMYd3FmEe0OcYQ7PmsufL4EbZ+vhP9tLIODKebEFBmlmHzfOjRpyKowOTSkbDYp7C+IyIuY8jwmCeYH1qXVUEwx5jK/IYz9Y9iTfzE1KfzmrX1s1b7N2tw5rwxYQehzdGPhoXTJ2tWsmowH7F3ZA/HegERI2viMvNRcV34048IgsB6gTS5w+MarQurbIII3PPgI+lb3zkrMjI80/olbf57vz0nRhCB0/IchT2n0cI6ljn4kojaR19m3CjjYNLyuth8WB+zVyZLDv04C+bC0QR7MvtzbEwvxTrl6Ti7YKwqYzNveY3WP5cVzPEs2Li+F9FrEaNybZ65VmF+ZrzA2XTv3XeJ/rxd3gewNyj6Mv2PtkG75OyRZ+KshrUA+4G74hwQ5/oy6wHaIesB0vceG8EtaEdtrcUUzNWqOf8uAQlIQAISkMCqRGC1EsyxQcAL7p4HHskHWxjfOlIwKr2fJimMlkX45yIlaxFWm00vXyxmOfgos2htfT948+0aG99T4tCKTSnhu2uFZSa9DdF1CIdPKh82X9UKi2LunagvGP7wFsNDl40wGw8ONzGwcNiIgeKBhx5LN0eoZwwIhZd0tc/nbxg+/v4LZ+QoeZuN3aRNYZGCuZUU6xXMUcek4Lrg0qtzOtGVn1T+JwQppP/BEI/BnHZdbJgwupFyYVn+Wtbh9tzybnqKYI4DBvomG/Z//95PsjdWmc00G0o2tP/v//pSOjwizHFIUi2SR8tnb8bP14anIwf0RE0sm3INY8JHIhLewQfsnfYNY1qtksevGLtIZY3xjj46s0SahvY+tzLWrJnHz3yAG22MsSenZA2ejG0cZuBJiUEPY0CRag7BMWNpPcajlvdB3ZB+9YufPCWPb+NCFFG075ava+9njDqkBeJg5bZ7Hqhp1OGQa/RGI9MnPnZ8GK5Ob+9jV5vfYzQlAsp/nf2rRKrPeg/gc9uJuZdxCqNwHqdoOzmdRaSNJi1MjFO0Ew6TuB5thXm/o2V1F8wV/Y9IlhwmPhuHLWXGxpa8WS9xiNk36iyvm4o+H/W2PNdZJeUya6TKYSVpyjpeawrmWtJv7s/UC1EimSc5ZC5ziMM8yVj4v//XFxORkpkn2/Jcbu6drvy0Sjqq6/M8yfq1TGEdzDx52EH75tQotNtqhbmKdfdtd1VS9tx8xz2loxi39bmVebIy1lX2GpX00y3nSdYs1EfuQyvmSsY+1mrvRl/injpSmCeJmPPFSOt5wL67x8HOpnWtaxTMdYR62++hjvn69+/+JP341xfkSIGsgWoV5kj6HAdlzGFEUhkaB0qs75eEQxUpV0mVxgHJtltvEdFIJmYxW8vP5bqkEGLth2MUYox6Cm242CtX5uuYB/LYH0KfFW03t9V4Hp4pt90Sz9byHjg4IurN4ZEaEdEXz9uekKjl+zr7Z9ghAiTiJKnNbgiG9RS4FYdi1Bmi80rUvhFZ8LMoDtNfj/pjT84BFvtzHOVYK5ddH3MNovkcHSlg9479PgeM7ZVVVTBHPRElragnUmeV5VewgiN7WeY0+h3jJ3ua2Fzkz6rsXyo2oKKdc916CofrpL0j2ufOERGQ61Urjba/St9do7LW5rlW9Nv8XEVK1hVr7fxMMd/Ql+t5Lq5B391/74mR/vzktNW49ucZ5u17w27341+fHxGPp1Z79Pw3xgCEtBtvNDzv9Yiygz2LCIDYVipiuYrYlM8jdRk2s1khUKglSC4u/slTT0if/NgJafOwZ2WnneIP8Z02RJRXHM34bA6vyxbGSsR9G4a4ePOYh4mqPSKEKIxtHGrzdw7HGcPnRCSih0PkQhQZBMeIkMuKDWi3hx64T7YrkvYNwVBPKArmmiuYY6h5Z9k76YaYi0gTftX1t5ZKO
dyyLdBW6Putxzh+zxq02HuzhyvGuHrHUdbcjAefPeOjORMJYxxjRHuF9S2RO3/08/MiquLteW/Kvr+ewr4Um3rL5yLKK3ao9+1OMa6xtufZuGY9Y1zreykrmON98Ls/7CS/D6d3InrjFF2mwIwxAmENc/pWsX5HdIOQm/EDpwX2ZEuXLstrQMR4zO/s7Rn/WEuUEfRwL1yDSJsfDPvnUYcdFGcFg9q8xVVBMMeDUfdEKGO9cPWNt2bxXJsP3M4vi3mVPR2Cetp4XhMXKVnj87GzM5fmNsfPda6JsavSjw6JCKufO+OkLHpva03MCqQnCOZgit2OFOY/+vlv0zMhTp1b48yqNV64Mj71XStsTPH8LdcrzOekVy72Gtk2GNerd3xqfc2ygjmeD2E7/Zj1JVmuythNuF5eV4bIFcHrRjE+shYYO3rj9/syewKeB8cZ7NOIHnGeQWjJPEpfLvucROmmL7MW2COie7ZXFMy1R8bfS0ACEpCABCSwKhJYrQRzVCALSMRFGLBJvUh6TERfPa1geGUxTEj1E8KTlehVHHyUKWzq2QiRXhJPKKIDlFmgs9EgEgjX4cAMAR0bYlIlcpDNQhnvJzbYz4QBEy/jMhtrPpPIQP/0tc/myE5D47naOoBUMLeydusVzBV1c9Gl14aw4YrcphuJcLLyTjrvJzaDPSElK+mL8Pq6+Irr0n+e/cvczjFW1CoYo8ZvuXn6f/7uUzk6Y/bcrGLgq/V59f6dFEJE+TnnwkvTIyFEKlMwluERTyRJwvWXLYgjHnlsarr8TzdlD3Y8iTng7A0FYwpj28Sdts/RGY790CHZE5EUOPUUBXP10Gr7tbQbjEYY7klZFCZIzs57bGEuXJ1TshYVg1c1Avyrrv9zIrpoR8U4xed15ncFc51DlwMj5kkOCv7zrF/mtWCZAypSAmHk/XrMk6QuJ6IxB21dVTjgIEIN82RZ0Q9OIrvHPEnKNSLjMbeXKUSSeiKM1syTd8cBP+NdIxEpylyzWa9hnuSLyBSI6ZknWd8wT/L7skXBXFlS5V/3wzhI+vX5l+SDkLIRxAqR8rCIXkEUFdLvcZDEgTYpv9gzcDBC6q5D46CEqHStC2seIicSpfHq62/JEVeI2NATCofCrGePjWjB3P++8RwITXpKQVhO5CfGAvZlRJkqW3etn4EDfeY1xiUOBt+Jw276GfYMDhvLinyKz103Poc0m0SuP/KQSfkwjgg/7ZVVVTDH85Lu7rmZL2WHoN/+/vI8xxGVoycU7EBE56eOjjz0wBiXdyktbGpm+2s2C/ZjOFEee8QheU1A360m2EQchqAtRwKMA2cOg2sdBDPW0WeIgrNliEZIMT4k2j2RzBHDLA2BKalMSZn2TKxpiQJTRmxGX+Qzvvipj6XPnv7RECIPznaz1oyITkWqZPo/6V5pZ2XsZXwO6yPWSQjxGJeZgxFAk0KZ52LvTaR36pjDccZpIn3VYlLcI5+9zjoD06ei/yOY3XqLzbKtr/h7d35XMNdcwRx1yX7t+ZjHcRZnjHty2rNZLNWd9Vxcm7bOGLfHrhPyXM5YsF2kIS9TiJx3Uzhx4sx5ZexNO5oqtcy1mvGaegRzXI++jQDm95f9KURad9QV4R2RFGJ7xlUEt4MjSiDCe8YR1vMIiBiT3ojoW1yHM4J6bMbY9REkf+rUE0OctXceQ9rbJ60qgjnqBCcF0g7jPHt5OCwzzjeSdYPPbFZhbiJCImtinL3oS+3ZOXuKYK54duzZN992T47Cdm/s15nLGhGnFp/bWd/LCua4Pn2NtcCtkeqYTEBE+WT9Xraw11ln7bXzWoAUtEVfZk/H+h9nGSJsIqDHEXlRRCosu9ZgnBg4sH86ZNI+eT+wTez5q4nnFcyVrTVfJwEJSEACEpDAqkBgtRPMFZX2UETjwkvoTzffFmHIn8wGqJ6yOGczi1CNw/oPhzELL6FBcRDQlpdQ8Tytv/Msj4ZH2s9/+4c4JHwoGxu76/nwatti0zHpG1/5dPZmbX2vxb8VzBUkUvaC3nPijunD4bV36olHlz6wRYxyURxu4ZnIYW131fnKJ2n/p54imHtz/oL0SHhpX3rNDelXv/tjaTEIoe73iQOMM085Pqe/av9JO+cvb4SBa1p4sRPth4348tg419IecSiGp+lpHz0mfTP6I8bKMuIF2hEHCoSUv+HWu9Iddz+YQ+cjpO3phUMTDh4+cfLx6Yg4eMIggNGw3qJgrl5if/t6DoxfjAiFpAo/+5e/y2ISDvB7alEwV6kZ2j7eq9+LlBkIgTD29dS5RcFc5/QmDleIEHlVRJtinix7uEIfwrj8iZgnmS+7unCo/GIcCBGlC9FPmSgRGKiZJ48/6rD0L3//uTxflJkneTb6xhV/+nOOzIXQlEheZYSFXc2l9fU47EJw9PFYGxwTQgbmSZx26i08/x/C+eCaWIvefs+DNSNSYvQfPXJEOum4I9M3v/qZuvY59d5bb309hyw4eD386BNNc/CiPR95yAFZjEOENg5zWxfGeL4ujMjV1CmOGYhMygozWn9eM/9NGqutI5LG1z53ek7Jyp65bB9t5n1U+yw4EQn6vD9cGZGxnsrOZt3JDtsC0WXGbbZpIjX2x086JjuRVHsG/rYqC+Z4Pto4kUgRVROpnWg7Rdvn791VOOwmzd0XIrLcyTE+IpjiULxs6Wntr7hvHAI3imhv9F1E6fRdxIHtFepi7iuvZXsWES8fD9tW2QPh9j6zo79HtErk8C+FYI7o4e0VDrIRy1985XXpf879fU7LzB6aZ+nuQpRfUmt+/UufCP4fyOxrZa7oqntWMNd8wRx1R7vDeYQxjqwEj0ZkxRzNv5vbI8IrBF3HHnlwjAcfz6Jw1oRlCsJRnLhJHf+9H5+TXpg1Oy0JEXlPLfUK5hCaMYb84ryL0/mXXJXHEFJWdvcYgvvM8BDyImz8+pc+mfYLBxvG7/Yca1YlwRzs+fpjZCTB1k6EUoIwdOe6rmjvBCbYPM5avhYZLT4waa88r7Z3btTTBHO0axzy/vtn5+Y5E2FWvdH1Cg5d8b0ewRxDLOuVKbEHOCvsnvdHlqaecj6D+B/hKxH16cuI6dtrM3BVMNcVrctrSEACEpCABCTQUwistoI5PFbxLL0gNqFETpkRHsYsBHtCQcyxeaROPPnYI/NBAKGSicjW3ma0vXvmYIMDDjZ2v4/0swgT8KztqkJQCu6ZdD8YRT+w/14RGn6Ldi+vYG4lmnojzBXvJKLBw7EpIxoFQklCgfeEjXxxfy2/9xTBHN6Vl1x1Qz7gvuXOe4NXOYM2aQjw5Ju0z+45tVXLZ+uKnzmEfynGsO9GVDzGMA4UahkY2AjjzXtibI7/4Qtnvp9yucz9suEn7cujT0zNB+H3PPBwmhJec4UBqcxndOVrGHv4mjhhu3TQfnulgyItzw7bbpU9LqsdzrR3jwrm2iNT/ve0T+ZZ0hSREp2ohaQu66lFwVylZjDazw7v1UuvviFHBqTvE5mzJxYFc51TKwi/Lo15kuiQiAtYW5QppPU8PqIkHxDz5FYRyaSrC6Lu+fMX
5lTQl197U54nax22I7zhIO+IQ/bPRuQxESEPz+4yhUO8l+bMzeks7wjB2N0PPJSjsrKu6O6Drrbuv7JO75N2GL9lOijW6MyTu+y0XZ4n6xFnFJ+tYK4g0bzvHMrdEWngz73o8ixcbkY7KiOY4wm4FhGYSKN+3h+uyA5miGW7a1/Bmo7oeYeQPiiibu23164RMXhU1QPb5tVEfZ8EuxfCcYnoGbC7PRxNFi5alB306vuk5ryawzAifh4a6bpOOf6onAaTVHi1yuogmCMyx1MRjZ/U82QgWLBwcU6LVotNZ/ydNk7/JP3qMewxI23phO23qQib4vdlS09rf8VzkQKYyGb77TUxp4teKyLO1RJs4WyD6PTaG2+PKFmXJRzdujrKOfe/w/it8r0TVala6jLYI84nMh4RbhE83x/fWY+UtTGUreeyryvGzn1C4HJcRJCFP1Fk14g2VT6GbNmrdex1CuY6RzBHbdBnyDhyZTh0/O6PV6aFMcZ1Z8RYxjiEm4xxB8fak2i3OKtUE2y0bFX0MaJCMr+yN0UIiAN8Ty31CuaKdJLMv3fcOzn9IVI8Pzlteowh73bbXoJxuk/YEQ8/aL8YBw/KYwgRxKnL9sqqJJjjGWl3iOofCgeW38a67oEQQC0MwVd3Rd4vxvUD99szO9mzJt5iszFVz416mmCOCPZESmOPjrPV3WFj6onZn4o2Xo9gjvdw9oa4l33UVeG498cQ0rMW6Oo1THH/fGcfhW3juMg4c8ikvfP4y9hLe2qvKJhrj4y/l4AEJCABCUhgVSSw2grmqEwWsBz+EdL93licY9hGSNddBUMBaSq22XKzHJHjpBDMITDDA6TaAra9+0VQsyAONliY//J3F68Iu76gvZc3/fdrR7SKkcM2yMbFj51wVBq98cgcLry9CymYW0mmo4I5DoIJEf/r8/+YrosIik+HRykbnM4+3KLtrjNwYPaiIk1QmdITBHNsVqdOfy6d9Yvz0p1hkCoiC1S7fwxDpEU545Tj0pknHxvpizdJ64XHfHcUxqtzLrwkDhFuS5PDUFjWu/bQA/eJez8u7Rzp1zYbM7quW8ebEgMlXr1EtkNwSBQfPOrDjtTthbESQdzQIYNyaHkMAUcddmCOckm0y44WBXMdJfe373vq6eeyQeyqiFhI+gUO4GuJPf/2U8r/BvtPvzgspt8yFtLvy0S2UzC3kjFCmCkh3Lgp0mYgwueAuWyUsZWfUt9PzBEc8ufDx1ivUWe15jIFc/UxLvNqxvYXZ83NKdEY90kpUqv/EMlozTXXCs/lw9PnIkLO5mM2qZp2rcx9dPQ1GOLPueDSiPx2cz7kmL+gXMo9IuJ98mMn5DTmjAX1FOYL5klE+KSfRtxOOh/2HbTnnlBIjUeaplHhYT5p790iyschEX1qbJvRxsrer4K5sqTKv45owlNjziQ16133TY5ov/NrjoO1Pp11bK0Ic8VnsJecNfflLCZizffE1Ok5nVdXRhjmCKdPpCNdb+jQOGQfmU748OH5oJ2UhR2JGFw8W2d/5yBwQYjkzv/jVem6sDfA7s1Ig1ZLtNvM+yrWP0TyQXj1oRAaMi73jX0ba6JaZVUXzPH8zGdLo51fGNEcST3/ZNTTK5FG953YU3flcE2/xOaDkAQRyakfOTqNiZ8ROnak9IT2x33zXESSo+8eefAB6ZSwCdF3y0YxZQ2CiPHWu+5LP/71Bdm20ZUH6ojHRwzfIB2w9+7pjNg7b7nF2BwZq1adIPTjPs8Nkd8NYZMhnR+/43m6srAvXieccTeJ6Hjsh0+P6JLDwz43NKIY9qSiYK7zBHMIenB+uvmOe9IFf7w6i6+we/O7WvuqZraRQnQ1JsaCXXbaPp124ofTTjEvMR50xNaNsyh7U8btq8MWNi/m184WAjIesH5G/EpfZv6ota6vVzBXMGe/8uyMmXkMufOeyTGGvJjXD13pAM+9UDeM10TY5Hzi+BDaMIYQmbpaWdUEczwra1/GddZ1nF08GRH4OW/p6mjiiJ5IC77Jxhvl/dsJERV95AhS8K5TrUpyNpLXY496z4OPpG9956zsiFP1DSv+SACEL3/mtLTTdtvkdOdl3lPPa0hdiliOc6u8Vg6Rb61+Vc/nt34t6xJsTLRtxkD6cZm5uV7BHNelbSBavi7SK5/3h8sjTfZL4cD3er5eZz5j62dmP7DGGn3ymdzEGH/PPOXYGH/H56j6/K1aUTBXjY5/k4AEJCABCUhgVSOwWgvmMIIi9Jj+/AuJVJa33X1/uifSjXXlwrVlgyJNAoZRNqKHHbRvFntwEMCCviOF52BDff9Dj+boWdfceGt6LFJZdFXZdJNR+Tl4FiI89QvjQjXPQQVzK2umo4I56pzIP0/E5h2ev/395VnQ1JnRgNhorh+h4LeIqIgYEF4oGTGqJwjmFi16K02e8nj61n+dnVPxlImag/CGfkn6iC+ceUpCoMGzdEfB8M64hdc9BoZ5MZ6VKTtuu3U6kMg/YVyp5iXf1mdhUMCrF0/lyY88nsPnY6xcHELJrjS6tnVv/I7xkrQ5PONxRx2SdpuwQ04vh1GkkXpSMNce8fp/j7ATIzDeuVdHikeiMJCSobMK4h1SLCLixmiF0Iu5v1ZRMLeSEH0bMTTj5cWRno/UrMwznVkw+g4ftn4WODI2EymsltBAwVzzawQj6bQY75kniZpGXdRaJ/eNAyXmyU+ddmL6xy99Ks+THDB1R0E0wDyJofriK69Pc0P0XaZsPW6zHBnvuBUR8sq8p3gNoty3l76dpj83Mx/mkZrtvoemhAPD26UM8sXndOZ31pmMcXiY7zVxQto+Uiz1j8h6RLTuaFEw11Fy7b+PAxwOhUnNekNEE6YdccjdSGGdVFYwR1+nz7O2J7ok0dKIDNuVDmbM4f379U+7RvTDjxxzeNpjl53S+K23yG21o3vkRviVfS/s3o36g929D07JokPmTSLAd1Up1j+w+/hJx0bksvH5sIy9G1+1yuogmKOe+EJAQlrW8yLSHBFkSBPWlRHB2KeMGb1RtPEPxtwT0T7DqSnvMUsIG9uqx57Q/mhi9N2txm2aU38RSWpCRM/rG/vmajahls/Dc7AGZd/JGHhNpDpmLdJVZb11h6TDIqol9iyiK5Eut0ykcu4ZIeZjT05Lt0WUUNL4IZp7q4szWqwZYuPNw7mPtcxB++6Z9tptQr5/ft+TioK5zhPM4abxl2iP2Omen/lS+l3M4zeGAwxjXGfaCFu3L/YBOId/NMa4Dx48KY9xG26wXofXndiksCmwvr/smptyFKcXZ81pfdmm/ht7PY6y9G32R/MXLMj29moX6ahgDjs+NotHn5oWzjd35THk1Vg/lLFXVrufev7GGN6nz5ohlAq7Wowh+0d0SiKgYlOrtf5aFQVzzEfYIljXETn4vIuvSM8+PzO99sa8erA29FrWbv0jYwjiNVJqMqZvv82WiYitCOmqFcaCniiYox+zt2C/TCaKBx5+vFOdacm4MixsgziOUJ/YBenPtUpHBHO0GfZyz8XYizDwwlgL3B5rgrcWR/rZ+H1XFfYDrF2
I/H9EjL84Bo4YFgxK7PsVzHVVLXkdCUhAAhKQgAR6AoHVWjBHBbCARWTyxFPP5Eg3GOCefvb5nHqQTR4h0TuzsOEZGN7EozcakUi9umccWnFwte3WcXAVhtNmGLOIBPPMczMizPVt6c6ITjA9NnWLFi+uubnv6HNjAMUwsNvO24dB5IjwaB8fUbhqR7FSMLeSeEcFc3wCBloiGDwZIhQiGtx9/8OJAw/C+JfxnFp5F9V/KtouG3TEkRi8SIdwb3islSndLZij7xOBDyP22b88P/r9jJoiAJ6LKGUcpBN5BnErHDAmdUdB/IPh87qb70g/+J9zsyG0TB3jyTsuQvZ/+TOnR9q5SdlYwHPUUyppredm4cyUSJUzLfjNfHF2jjyFJzN8u7pgxBw1ckSOnDcxxh/GUsbW9Uqm06t2vwrmqtGp728Vw1FFzH3nfQ+FEfjONC36IikLmt1sEMmRTnH/iKCEgRsBOUbOMgfWCub+tl5ZTyCQ5aDltog2xMHE/CaLHZkbhgwalLYLAQ/19lisz0gFzcFOrQimCub+ts4a/Q0HAaQVO/tX5+c5vszYzpjLPEl04U+desKKebK+OabR+y7ez3yENzfRsX4Y8ySRZMvMk6xpOGT+/Bkn5TTma3I4VOc8OS9Eni+/+lplHRbro2mxFn9h5qz04uw5aXkchHX2HqNg0PI74+DI4cNinhyfdp1QmSdZw60f82ejNaRgriXp5v2McwTr6+sjStHvIzXXaxH9qpED03oEczxFIZh+JtZ5d8Q+8u77STX8RHo9UjXXGpMbpcCYvkE45ew6Ybu07+67hgPWHmlURCwvmya50es3+n7GSyLezHxpTo5kfVewwzmPNNedHaUV55GREVlur90npH123yUOunfLkXxgWrasDoK5ggUHtnNfeTWnwKOe2D8jMCnjYFF8Rke+E7Fo4MD+IR7ZLu25606J9GrbxPzJYS4HnI2U7mx/2LEGDVo7hK4xz8SBPmItnOs6Gu379YjSSoTbG0I4cnNEO8bhFZHVe8s7L8I5h8lbR+rSk8Ketc8eO0e6u7F1iXvgz31Pj9TWt4Zw/94HpsTYGWn8wmGP6LedWbAhIlbcafuwL0a7OjREf1uP2zxHle3M63b0sxXMdZ5grqgThCEINknziU2YMQ47EgKazrTaIEoZEPZu9gUTQ7x9UIxxiLhxZkP402jBhlBZI92RhTbsTWs5WNV7TQQ2g0Msu+fEEOxHBpj7Jz+WzwywXdRKr9hRwRz3yHkEkYUR27OPYT+GA/zCsOV3dlQzbPoIhBHL7bvHrjGG7JMzUwyPcbFMWRUFczw3a2LWdc8+/2L0pQdzP7o/Um6yJi4juirDrr3XDAjHpnVDxE0/2jvWdfQlhPask8uUniqY495xnp0S9p7b4zzu+hDBPh/7ZWw/zS60aVKSkvKe6LXYBl9+5bVSjucdEcwV98+an7U/9jP2UqT07YqMDdj4EcWx/iKi56EH7JMd5rGRcw5ZpiiYK0PJ10hAAhKQgAQksKoQWO0Fcy0rkkM0Fq5XXHdTNmpzQIH32PLw/Gi2EYGFKx5AeAJhzMbDA6/VD3/woIjE1remx1bL+y7zM+HD7w5hwp8jOsDV19+SZoYh4a0lpOqseFWX+Yxqr6mIhkjBRTqRgTmlLJvqk487Mg3boFwaRAVzKwk3IpgrPoVN2UuzX86Rx35z0WXZ4M/BFpv8Mgfexee0/l7Udf9I54PB5NQTPpwFnhimroq2ddk1N7Z+S5v/7m7BHByuv+XO9Kebbs/3zYa1TOEQ47Doqx869IAs5ijzns56DfWIsQzxyr/8/z9IM8LoWeYQlU0zaX/+/ZtfSaccf1SkVBhQ2tO/5bNw2E+0uWeefSHGlntzFJ/7IorGkjgEWBb31dnCOY6RSNO1Zni+YtTbNiKO7B6RR0gVQWSGgXEoWMv7teXzVPtZwVw1Oh37G30QIcvv/nhlNgJj7CYa1PImHH5xyFgRbw+PKEpj02dO/2iOhPmjSGs3OQ76STFXqyiYa58QaSYZ74ncRYoYDgsYjxqZW7gadUZksk3HjIoISJNyvREplRRCL82Zm9NZtX9XKUdjmRSigMM/sF/2ukb01FMKnIjOR5QADqtqFYRNo8KY+bXPn5FOjbRF3VVIK/qnEGVT3xxWlynUH2ta6pBD2u4urE+Ym/7l2z/M0V7KiHw44OsX65x/+tpnox2eFBEqB9R1UF48M33i7bcjGsFLs7LXPH2miNTH2rwr0rSy32Cu5JnGbb5pPmghstzeu+2c50n6XTOKgrlmUPzbz6gczr2Tbo093A9/fm44Qb2QRd8IPzsy5tYrmCvuiHF+aazvLrv2ppyamwNcot81a/wvrsN32iztkjF8mxCsfPJjx4foZucs+GpWe215vc7+mX6+9J2lOa35+ZdclR31ENERXaLRfVnLe2ePRv3S17cIxxic1j524lFpn2CHMKHeNfHqJJiDI3VBBEcEc7+LSHOPhINAscZpZj1xrWKdirAR4ciZJx8bKTMPym287CEmn1OmdFX7415oY/TR9eNQf3SIWz8RffeQOKDFWQsRXSOF8W7ylCdydJarr781p5NDAMRY2CwBOvVCRCX60C47bZuFptizGIc6WliDvBnpta+NtJG/ufCyEM3PDeHzm7n/80wdGcfbu5fCxkgbIgPAGRFZ8oMH7xd7oc1qplBs7zO74vcK5jpfMFfUI1HlEJwyxt0Z+5HHwzGpSEnY9LYYa09SdyIAPipsZ2dGWuMR4bRRNiVzcc+1vjMOXP6nm2O/cnsIUh5Ib8yb9/7apNZ7q/0996cYC3DK3CycS06LVNmT9tktHHDOC/vbXWnWnJez7aLaZzQimCs+F0E3Y8hF4TTBXvLFWXND4EN0u2L/Xbyy8e+V1LnUW+zHN9k4R6c9YJ/d8xhST7TwVVUwVxBm/cZ6gf3xxVf8Kc9H2JPfi7Uy81Ez+1KxrqMdjtt8bPrkKcen/SLaH/NqmQhhxT33ZMFccY+PPPZU7stEtWYNyh6jGeuvfPYWNnBsFAhfsQ3iMI9tkLMx5qBapRHBXPHZ9GNEr7++4JIs8H0pnOh4xs6wm/PM2DJIy45d5vSPHpOjyzMm1VMUzNVDy9dKQAISkIAEJNDbCSiYa1GD2esjjFfPhAGBtA8Px2KdwwBSURGdi4V6swqbHYyIeN3itYXHHYv3MaM2zgZUNufNLNw7HnAY5596+rnsnYZnYVlvmlr3gjCFiCLbRbQx0r1goEe8QmQ5PKHKFAVzKyk1QzCH4Qvj0bTpMyIi0FPZ+E+bfiGigOEV19HChmtYHGDttvMO2WN81x23z4cweE9fcvUNvUYwx+HBT8+5IB/6PTl1eukoSXvvvnNOxbpLePbhqdXdhed44OFH00/OuTA9GOHrn50xs+YtcRjQd62+6fNnnpzFZRwC4G1XbwlbUDbULVi0KM15+dUcOYfrP/zoUxHhcHr2DFwcwmPaYrMLYyQGACLjIGzaIVKwIozaYtMxaWz8Dk9LBLzNGksVzDW7BisRXhGm02Y4lMRr+sFI80vkx0
bmXIxDRLDYfvyWeZwi7TDe4K/G/P7ds34V/eUxBXMRfRER1n57TuxQxWIQRjz1QNQXnrmkm+NwAgFtR0o2zke9sYbA+3WvMGTuFOsI6u1nv7kwnXthpBdXMNcRtA2/55wLLo10kCHwmDa9lDGZC1KHX4j5BYEL69vuLhihmed/GvMka1+iUdQq+ZAs2uSZpxyXTj7+Q2n8llvE4fPQWm9r8+8caDHW5Xky0v4hFCb13+Oxx5gRHvTzFy6seeDW5gfX+GV+hpgHx44elbaM+ZFU5cyXRJhlnmScRBTQrHlSwVyNCungnzl0Y603O4Tej8YhCxHe7olozrTjMgc8rS/LwVvZlKwt38sBIE5kpK5EtMeBD/sKBCxEvaONN1poixzIbjJqo7RDzOGk/mI8YZ1KtCfSuTWrvTZ6r/W8v6jDOS+/ktc8jz7xdF73PBTsEPA3Iy098yh7NNgh9NllBbstIlLmsGDH2qhedqubYC7XEzaTiGqCI+XjTz2do+rSxmn32IqaYQ+ijbNP2X78VmEz2SZHxqaNj42oJwidGJebWbqi/eW+G4fRONPtwHPtsE18xTounmvURiPzc9EGGy1EaJkz99WwZz2bxx4i/FA37NMaLdQL0SvHRSQ5orBiz+JZsGchbOxoYQ3yTojmXgw7XLbLRBQdIunwRcRrxDDNKtwn9gnscbvtskPc/9YRLXd06VSyzbqPej9HwVzXCeZYT2C7YR/3ZKztH420wYxx2L+JuLQsnDkaLYxhRDNljMPWvXM4Mm639bi8J2CMw0G8mQXBycxZs/MzYA+7b/KjsUd9LPctnFM6Ulgr4cSFPYGIeGSCIbMGv/uvs3+VU8F2lWAOcRZjCHWGLb8yfkzL3+dHNOtmRqzEzrt5jCETJ+wQX9vH3mGrfG6BrRAmZcuqLpirzKvvZQE0dnDW54i96Eucu7BeaLQUa2IyaNCXKvNSZU1MtHDWxNh1y5beIJgji8nMmNMnP/pEttlhY+LfOLvBvN4CQ9otNuMdtt0y92P68/itxkVa3QdyX2bsKLOfaoZgrhDQsxbIa8y8FpiWs0CxFsBm0IzCeot+TOaq3WMtwHqGNjQ0+nG9ThkK5ppRI36GBCQgAQlIQAK9hYCCuTZqig0nxisOgdlws+l5c/6C2PQsymG2WcjyVdZzCOMbHrV41+Gpxfex4a1Fiof9Irw5qVjxoG/U67aNR/mbXxGNitQiPBtRTp6MDTdRLzjkYBPCYTcbkWqbETYcGEGK52LzPGL4BrEJGZ0X44gTWJyXDQ1e3KSCuYJECq/LIbGZ2zF9ODzNTz3x6IbSFdCeFyxYlKMLknoBoxgpwtjEI2bCy/QvYchtb/vJJpPNOO2W1CobjxieD1sP2HePHBkRo8rrb8yPFEMPp4uvvC5ded2fVz5IOz+xrSe64g/+zz+nM8KjviOHOO18dKlfYzhbvHhJ+j///dP0h8v/lFPY1jKm0e4Rfx7+gX3TP3/t8zmU+5DB65S6Xme/CO+4q2+4NafgvS02/mUKz0OUPFKy4nFWr6dZW9dgDMFrjnRTHKJOnf5cei3GUlI2stFGwElkS1hj2CxbuFeiFSDMJTJe0R5JSbBtGDs4kNkjDAEbRzSmzkrTpWCubG117HWkT3ksDiZvvfP+nK7otfDynJfn3Ug5UqO95Dkp0iWuE9FNBw1aJ1KvDo45aEykqtg5Ug7smNNcMWdhyPyPH/w8z39GmGtMMEcts55A7IjIkYhzOXXGazG3xGEL/RyDIFFs2ytEGc0pu+LggQM+5j3SKO8Zqbv22GXHWFeE13TU2w//57fp1+dfomCuPZCd9HuiPbJ++P5Pz8n86Y+sF6oV1gsckO0bUZOZJ3GcaOSQudq16v0bDiPMk0SfuCGiy5YtpC454tBJ6YMH7Z8dW8q+r73Xsc5m/YXgCbEMB5RzY002P9ZpzJF5rsz9h+hz5SOIVSJCtJgnY65kTCSF1LYhPEV0hKGf1D0brt85kRcVzLVX6835PaILDiA5CL7jngeiDU2JlFAzs+CSdsWYTHrC9sZd+ieOBqyhiP542IH7pQP33T0LXOq9Q8b36XFtDgdJn/R8RNBEEEr7LdpwERWi2mfHLcV+snJP7DMGxxzOOm6baLNEgOBQi8jOa4YQhza+KhT22LBjzUO0SYSPHLrTf9ibVNbJtR31cn3Cjr4e7NiPs5ZHaL5/RB7hkB/hTyNldRPMtWaFuBkHINr4k1OfzeLGbA9aUU85E0H0y1qF9Q79jij866wzMLdxxHG08YnhgLZrpGNlX9wMQVmte2lW++M62IIGYCPI6++183NhA8LpgZTfO223TV7HsU5vZkHwgz0LsQ/z+ZQnpmUR76JIU7j4LexZceiMbaPK4Tr9h3rpG2MLYxB7+g3WWy/P84jN2EPQfzYeObxpt87YzLxO32efjO2ECIbYHfMemX1y3Hsl2nbtdkV74d6JgDsobIwIeUhZjUCJCLKkwkVE2xU2xkYhKZjrOsFcy7oiSizz0Z0hPCXVOnMRtpw8H2XbMNkpqtuG+TxEO4wF2GmwdxNBjiwjpANm7YmIG+dq9nWdWebFuMC6iEjeRKiaG+sSUmWy7iYLA2uX9koeE7A3xTPQl9iX0v/3jn6EWI7n4Ploq9/+4c/TtTfd1mUR5op7RrCNvY0xhDMKbG5EqMIpsGJnq5xTFNFri/e19x37Wr8YxxknGEP4jsCWsZsMOGRsoD4Z6+stq7pgriUPnIOnPzcz5iLWxJNzG8RBAlt7sSZetiJSWsv3tf65WBNjc6YuBscXa+KtYh2c59UItsDemnrryLyKVYYUzOxBv/Wds7LjVut7aOvfOFh++TOn5XbBOVZXFBzKnpj2TLYxEQVz7iuvZpsy51ZE9qsmLMNpmsh7g9dZJ/bB2AaHZKexvWJeRzy2ZURbZyzi3OLbYRvsSsEc7FibMK4iBKQvUx9TwnmYtcCCGMMWhx2A80bsMGXGX8auvB5b0W7oy0OHkE55m1hjbh8OsrvmsYy+3JFCG6bdfP+nv0k/DUfWMgW7/Zc/c3o67shDwmlv07xPKfM+XyMBCUhAAhKQgAS6m4CCuTZqgEXpsneXZRHZG2/OCy/jN9LTz83IHqFPP/t89ux64cVZlYV6CSMpESk2Cg8gojqw2SmiyW2y8UbZOIfxFCNpVxwEsDhnA70ojAZvzluQD7ox2GFwxJOaRTsbvva8p1mMY3TLBxrxXIQBR6yCxwreZxvEIRx/GxBpX+oJD041KJhb2RibKZijPbOhJILJ3JdfS9OiDVc85qdlQdNLkRakPcMs9c2GE48sNpZ4JVHPO8QXIk8OZzA0024wSl1+7c3Z23Llk7T9Ext8UroimPv4ScfUHfGg7U8t/1uMSjOiD3//J7/OYfQ51KtmVOeTafebjBoZ6XIOTP/whU/kSDMYK3pCeTUiezw57dlE6kLSa5Qp1C2HaqRY+OInTsnh/cu8r9prGDdguTAOLBYteistisM/2hcHToj6nolxFKMs4jMMl7WYF9figJdxhXZIJM7NI1oGkeTw+GcMYowdFAYRv
81jx2qsVWRugg2G6Pwev2GL0w+nUz9jU5sQ9uQhBwP/lh+/g5H3whYBFkLHf0XzHM0V2jlyqeXg1zX4/Y+enm9SUJOdk8mTPVVttz0+Lhd3fGNLev9WJYcBOCY2+ODTk/16xa0vpubg6whC4CsuMt5e27cs1hQKQDBmKD5GB08Oh7Oda76SqYzhU3p+KnmxBt7tfnEpCABCQgAQlIQALTJ9BvwxzxLEmOp9OxDRMTnaxJhNE9gOiwxocJ/8ogpsSMRqX3l1OAQpdlYuFuibB6lP00zNH5eHT5qcT+VLvnEEbnWfaZOW5OvE+sS6c3jHMzHRwDhjlMemgOOn1TwV751O2vW5Su2zGpfT034m9LfNxepvbUu9AVJJb+la5w/+e1reVY9iTe5h0w5nxcka4AX0oCjA7OdI6YG71RkyJ1Pzy+HuMZGuXJxOgv7jrY1oYjLNgOXQAuTIIUA9r3kvSh60I3fdbcJs+ZI8f7aPTm49n237OP3Unesb06mOt0OsyRuHw5cyXJ+2BMmjuPtM8f2y1dP5JooePHd1KEdXnMgjNZTqjO1UcJSEACEpCABCRwphDgHi9xOkXjdCR7LcUjxGkJ3UYH3ZQwzFyae7/X5Z7vNfkiVm/GcqNvnuITDGPEseyX58398pxuwRcm5izxZ7o4XTSOeaefhjkOnX2jYTAX/fG9XWWZWoyEzfxFRYR2wQh1W2JtOmRdk3h7aY8NB/ptmGN+NEjAxPW7mJMw+lFQUqPu+shpaudf0sUvWoxlJW+7IPmXkeIg8hPjDeL6fhnmyA+9EC305+iEf2zdX/IQ6A4G35kGncYuW7aoLNeJga0X/VE2MME3NATd+F7aeXDUCMU1VAfP0F2cV7qSYdhER3XLpdCBjEKxp6JH4M1mOAT+btakEOxzKxdnyc61ravziBYZn2x7770a5tjeT69YX7YLl8lGmVO+PZf5okExKMIfY18dPON6pjECpjVMc5gVO7u9cQ3AC4714/x7QB6ns8MiPMiroS8xcpILfCNFdfxtN1nMz3kmx3ZrWMOLhhIT3ROocyZ3tS96mA5zNAV5O3+zdMjkGq5HxjU1nQ5zLCVLToz8IE0v0OsMtkfh2NosyYxh7rvRxuQIe1lyt87bRwlIYCwBDXNjefiTBCQgAQkMiQDB5HsRelTAEKi+kGCZMDWx6uggqCSQJ7DHCEUbaqo7qOyZycAw914Ccpb7eTit3hGf+yPm2CpfBNoEzVSRkeCg49hNSaKdv3D+acmdfhvmOC72vzsVTu8fPFJa0T+WpUOpyOrsrEZwTDUJXdFoD/+NGL0wuNENr8mRbTYH2382vBGviBMENyIDgQJblnm6KZVLP063tk0RhL0s9dqLYW51RBpBPIY5hHsvlTokEdn2r9/cXqrqSOYhdJrHxznjZgnVVrTMJqE43iXyUarGEEV0gMCsyXEjNdgGXetIlH07IgPDIF0g+nHzJZt1SEACEpCABCQgAQl0IdBvwxy7IEZ+e38q11NoQRX5WylEIfGG1iDmYyScbCdpoi0uTFfpey9ZUxJhV2Y5VpIT9X3lzR3fuEGPlqFb8cPRMv/JzXcSWsSN9XPE6JdnCZwfXb629YXEqZemG0a3UTRJqtxZkobO27UbXn0vMS3xOUkSCmSIT4lXZzpqkuHR6AyKl16KLkJ/dHZxphiHjtP3JWHx9WgNqtZZiqgOOB7NjXsSEP/39a1lKaCaFOO1j5OcILl5bZaIujefvz6PfL4Zy9dt0enu+Zyzv0SfsMQrCTfOJUzZFo8UNLHs1Hc3rk2XhIXpDHi6Pqvb45HPca5ILpAIeyLnjO4LLGfTTMTxvqka5vgMCd5nt7cLwP6Ya4HO3fUaIGFDd3S6FPzgcpbJmV8SGfmYQwISkIAEJCABCUggBOhIxkorv3t7Z+uRGOa2ZjWY5kowxFvcx6eY/saYwerSo8RZ/bhnyz1nOsNR/MBSop+U++PtU8O+ie7pdkVx9g8T11MI0u0+OfeX+9Vhrr33mHwSZxKfY/KhEOiFxMks/dhpMCKmRXtglLtp3dLW3ensfH70TTNmr9vsfOy3YQ5m6Ix/xwj21w8T08eERnHQCYxNea2qCJ4T52MI25DVczBGsdoN5qjJ5s32+2GYY/8sCVpMVNEer0SLncx5rIPrDrarks9Aw7QLdhb0xTCHgWtXrntyM+hAzHO1S3fdP4+Yx0puKmzQFHRBRx82B/P+R+b/aDTUK1mpiG515Dq4Trh2MYFhAMNsekmKjzo+3txUed6rYe6q5Ot+khwP+hQj6WSDY2bbj8X8hf58btv+MDg+RhdCH+PXhsyTVZ/II81Pfq42NMghleuLgi2MmaxKxN8ex4SpDl7kxZodKNkm1wyd52nGwOpF5AQ5t5UF78Fohx4kv/T95IYwyk5mjmQ+mOW2Jd/0YP4Ne+qjfekaeKRoRLbPdhlca1M1zLFttOX9b20vhrnXc62iPRkcD+eW88mytd+OqbKXJhrlw36TgAS6EtAw1xWLv5SABCQggUETwAhFUoj2ywSpHyaYJEztDPoxy30xS2ySYKG9eA2QZzq/XUma0cXg92/vSoInrZhH9o9wI5glYCawvSpB/w1FbK4s+0eA1mCaOQzCMMd2CYARSn9+P8uQJrHz2p6DuYFwnJfKYA5lnnkyJwE9ibhbEtB/KV904sP01i0RxYf53Kthz9JJv3t7R+ud3Bwg0VWjeJZJug4heNnq0skCkdI8ZrbROSYyzCHQaJ2/IoKHRB83GNZFbE7UTps5Mh/m+XwqvR6J6Hsj5+vgSHv8Oh/OEeZGkoh0A4QDN1G6DbZJlc8Lud4eioihMqfsp72rkhy9KIZD5kfl4MYI9n7cfOk2F38nAQlIQAISkIAEJNBKp+fjSU4cbP3s9W0tluTcVwxHbU3AjWU6DnAD/pu5CczNYCq9u1XWN1kS3x2I1ngjcd/DMcxhnKNgAqFRY0i2TZdhzHJXJt7nhjxV8nRvGy+GrvvgBnW/DHPtIpZ2goMuyNzwr4NnaB9u/pNE4uuSLKszUQxdPzvZI9smbUU8TIU/JrUPUkSTQxtlxHtIWHHznTibL4x/nIPmYM7E7L9MsociKHTBKOe8RuLhspgHWSrmS+nityjH0y3Gpvs4SxKRMH08+qe5TCxzYaApiNFJqNFB4ws5d2gBNEHn4FhKQi1mRsx4f35vT0lk0sUPrVUTdnyO7U/VMMe2SVrWJXLKMjnRKjU5gm4kYUPHjJ9sWlsSgiRxHBKQgAQkIAEJSEACbQKsKsL9XgoP6AxNUQNGMQpGavxHrEfnMVYE+WLuV1Pgwt3506O/qVMl/nwrhTbcJ6bTMffH63Kwdf/cdy/7TyzM/WI6fXXuexCGOfQKq9C8tudwKSahC9/WGHMOxqDD0TfnQPxJN7Ero2dY3eWqMGJp08lGvw1z7A9u70dXkMv447u7W89Fb5AHglHNuyA6eN9lOa8YwliOlXv6mKO66QS2W0eJ7w8dL0z+dzpco6GI7eHBNslDYHK7JLrlv2Po+mrpkj02TwJbCmrodsZyrOSoaCgwetFlO2
glzv3mmCTJuWBKQitOpkXrPCd6ZP+HIkYpvKFJAKZCzm3neS1FWCOM6CK2JrkVftcc5HVeilGOHA+mSnRqkZSBgXEMU9V9+Swd1T9X/naanz79+WSGORijUfmb+E6KmPib3BytN9mggO2DGGL/xCpC+Vv/MM/Z1xgZl5NIkdF1mevd0VBfDneuh/oezjNGOc4XzN7JuUczBmcx2tFI4otrl7TWp+kFhrL6N4LN7N1wosjroZxvirM4Dka9bvg3ZXE+w9/QHWlMcX2KtGjKMN7gHJIXZB5w/3vMjxgWWdqZf0OYc90H752qYQ6DIfdKWCXpyRjx3s2/U/VeAcvAosnJW6I178nXwnSc67w2xpu7v5eABE4noGHudCb+RgISkIAEBkyAYJEglZbnv45hbksxQp0Yk8QiqETs3ZKkCsIGQxRGqBroznSKVKu9m25lj6StOUEyVWSIklHhlh0QbF+cpBSB8rciihDEVDo1EzKDMsyxbwRgaVOdaiwqjmCGCGBUDrDki0QiAu7OEdPY2ghDTHPdBu9vtxxPN4siyg6VYL68Ny+ezMYJuOlYd2PMilTPccx1n922OaFhLh9A3CxKggjzI4H8ZUl0UZE3/7zzilENocW8EBB0k0C0co7+mWojzg8mNzp31JsWzIX3k8QjaXhXjvubG1ZlaabxK70Qi/+JcPl7lqL9a4yIVJFxnbEdvhZFWJCA+6+IaSrHmF+m5ZCABCQgAQlIQAISGBCBQRjmmCpxNMaoJxNDP50bzMSTJJyIMxnJHZQb/VdnmdMbEp/ekaIOikQwp00W/rHtmRrmiHm54f1IKtwpjsFotis3xNuzK1MsnQGIT+nSjKHvm4nNqbjvpUtzewsTf2dfW2JQo+Pab4omOzxatV4ZoAFItKDJWK4WbdCpyUhAvJVEwUMxJz6Tqn0SNvXzJKzoCrc+xsSfbCJhtaK1IrE7Xfw6B8mPHYn3SVrRBZsCp5JEGXkj8+VTq5Ko+lz0CYVCN6ewCS1AgoBkKq/zPtjyWboP0GHw5cyJ5YpIaPB7zmFzBnxmqoY5dCDJP/Tk3xrL5KB72B5zolv6PTlvGOY4l5N1zMjHHBKQgAQkIAEJSOCsIECc+GbudbMCCAUIxMMssUi4XmM6Hpck1rtm9eIs0UiBC0awyQu7ewVIsfrOxJzkJ1j6lGJ1THM1TiSmY9DV+IuJOzHS3BpzF/FeM5wl9ux3hzn2S8xKJzIKizCfUWhUiv7rxHjTyCDOvDBsMKDRaQ4jEzE399zHG4MwzLGv/YnB0XmY/OiORwE7cTnnloEWI1b+QkxJdLG+JrmXUrAfruPPtv1ZmMy0wxxablvmR77l19EeH8UsR1x/zsj8eOAL3UXHMc757dFCdBybiGd7hpN/Z9tcM2gUzKI0NaADGsdej5/3sK/V0X+Y3Sjwh1Fn8dK26Kc3o3foVIfmpWM5jDgATj3dwtEjXzp/Wev61UtHNdN4s0QrweJXyRmhy/anoK0aEutncpqKJvzS+hWZ25LSgRFWXIPMueaROEY005GPk4fL3/rLycU8mbwMS9GS46KArR4v2+R63ZwVpu5IodxN0Z1XRfPxewY8yA2xHfJF5HgwZjJfXlsWs9ulyetgvsRUuzGGyfp3yutcj6wyRS6MDnccE5q8mT1DT6J1P5/rErMef0McE/OqGg+0mNkOwSlsyCnS4a9tejyW89ruApddlnnxOB3DHP82UdD2sxjmWJ0LnQxPcGCYw7xJ102WYv5acqfkAftxbTJfhwTORgIa5s7Gs+4xS0ACEphlAgSWdFOgmoTOYSyTScvz5iBAnXvuea17N6SbwSUrs+TOolKh33zPTJ5jvNoeEUzLdboB/DXLERGiE3TyxRwJZlcsmFMqcejoQDKN7m3N4HNQhrl6bFQ3UTXD0rVUwVBtxNxGtEIJvPmZ5B4JtDsj3OnccG1uJCyO8Bxv+dpdCej/E5Hy8zeydFK2yw2BctzZMaLq8lQJfSXBNkktWnbPOWdsJVadX32czDDHthFLF6W6jWomqmow4hHc07WCuXKj5HiU0p5jx1tbI2JYogpBQFeQ3UfbnSBGlUa2x/lZngQcyzwhrjH40bWhm1GQq4tr7J+57v6QSqbntx1ovRORdE6uMwZSBjMkc/pfV9BOfElJ5lVRVt7kNwlIQAISkIAEJCCBvhIYlGGOSWLAouIezfGnJMDoYEHXCvoZEDOuSvcvYl06BrCkDN3ARkLDCY+RWHmmhjlu0FMcQmKDpUIpZmkux1pi1+xn1YJ5rU2JTykMuStFJ3Q0aGqRCSfaw4vbjxxrd4d7Y3vr5dzsp/K/aiI+TqRMPEwBEYmaezMPugkQ19fBXJk/XTn+FvMfSc/6MvE6yZNl0VQY5u7IMawfpzMDZsYjYfLbdPj443s7E6sfLefr1J7aSQcq51mO6PokZ27MnEgcrQkninN4L5pif849S/yQRCGZQtU/Bjwq/nm9c/CbqRrmKPChK8lv083h70m60B2F66ImU5hj1Skszct54zWHBCQgAQlIQAISONsJ7XI5+gAAQABJREFUEHsRU2M2eSirv7yQ1UUowKhRGhETqQKMVRfEBMbqLxSQbEhhOzF8vwYGFAw3Dyaee7jEn0dKp+qmbYvQkQKQixNz0mXsW5euPi0mZzuDMMzBgxgZYxv3yMljPBOzEDE2X2Mjy3Nyj70df9J84BsXr4656rwSI4/Ha1CGubZeOlnMcpiJ/rltb/I/WW43MTo6ArMcOQLMaN+KvlgfQ+Jky1/WY2DbMzXMoRNLUX1i+D8mT7A3GoHiqsoT7uhC8g7fSE7o1pgQ6YBHAUy/wnmuK/IpmOXaDR32Z//suT3qHBak2QBNHO5jJaCVS9Lxe0F9S3lkWVLY/vz1rcV4ujd5Fa5HBrpndbTXrbkebo3u5Tiq+au8ocu3XgxzfAzthZGUpgM35u8TYx7mPoxr6B7OM6YvzHfbMj9WeEInos8xf3H8dbSP9Zzy2RtiAvvBZetKswUaQjB4nWN5MUa7p3I98bdAURTakeuB5g/8W8H+mQ+NN9D51ezGNvaH065o319E91K0tic6DuNbU5+xH44LZrU4i9zVsrlzShEZr8MWrYlR8e3MgZWzaPTAtY0WbA7ez+Bvdaod5kr39fyb+Mst21L41e7Ez7FyjfLIkrG3R1t/JXlAOq/Du5d7GWVCfpOABE4joGHuNCT+QgISkIAEBkmAgA6hhwHsr3RTSEC5l8RMjSCzc56SxFqZxMd3NrL80srW2gS8iKl+jUyjBOz/SWUW3cYeijAmcXUiv69d5njP/Dm0NJ+XIHt5KkuWl6QMwXYVR4M2zCHgEBVUCP0zAorOCATgVfjAA14ExJjmPpcqHLq4UV1C17nxlmyiCoYlj36RKhUq+D5IkqtuiyB+fT6LGLsnFT0cN0snTZScm8gwVzY88o2qwFWYEHOzg8C+iA4McznfCJ9jGOZyPWBmROy9E+FBlddRKgyr2BvZ1rwcNAmyL0UE0fWCVtnjJaK47hB8T6SK6YFUDb4dwUHSFG7w45g3xHh3bcTvD7Jc1
BVpJc71hghxSEACEpCABCQgAQkMhsAgDXPE6RjkWI7137lBvy1L9xTDXOI+YlKWLcIotykxIPF+L93loEBcOVPD3M7EuhSHVLMViQS22Rl7tmPdFUWLUOHeTj7071ygNUiU/f6dXUlk7SuFOjm8MTfbiZWJ2+nqRqKSeJlkXDO5wA19dB1Luz4SnUeqgPiawZzRVN/Nkj23p5PfFTHcUTDTOXh/wvXonr3pDLivdHxAozR1D58hGcA2z0/XOjTFupw7CodIbtAfkO3Q1WJPCoRIxqAp0BMkwdhWJ2O2yUynaphj+5zDB9Kl4B+ZM4k7to++YDCn63LOKGiiuId5d9t3+91+l4AEJCABCUhAAmcPAeJp4lCML3R8wvxC9yhiJb6IzbhPzD34zyeewrxC8QjxO6alfg1uNVNUz/3iv6b4g/vv3IcvExjZCXNhHiti1LsnRf3fjGnugnQRbsazxICDMMwxBeaIVqCzFoY5vug8ha6pvHgf86SwBOMZ8Tqmoc+noJ4lOYnbu8WhgzLMMRdicuZM9zPMaZxfDFTMpZitknvg/jtF6xSawLiXwbUzE8Mcn0eL0WXsqbDE0EfOgN/Xaw8tg95heVBMkizFiy6DbzeOvcy723vQwuRn6NT9ePJkFFR16hV0D/v+cnJTmKNo6MDZDMYy0DjoEnI85NpKHiW/Y/A3xHV6da6D22Igo9kAfz8TLd3Zi2GO84vxjvMII64xNFnbMJdu4nGvoX0oVqqGOa41/s65bukuN4ZjfmBeaHOK2e5MPorlZ8lHMbiWOD90oqRr4ctp/vBRlnTld+3FfcvbynzoQElejIIzzLXknBhobfJhvw9rOrzT0Y+5MOpcOC54L8hcMOdiTlyVeSyNYY7rk9fJa/I5mk/QpZBGIDS4ADnzrNtiu7yfwe+napijE/y/YxD8fYrJWOqVLn38W8Dg3NPU41sb1pRuknDjd819t9/pdwlIoFcCGuZ6JeX7JCABCUigLwQI4hHEBPGPppqDDmoE4s2AjkCXSo7L0lXuO6naouU1QWkzKdOPybDfDxPQEiT/MvNBXBCw12RGDWoJrAlqv4awSMC+OIKJ9zAGbZgjEGZOb+8/HAG3v1Q9sbwUVTFjmbWDZUQQpjm6P3w+hjcCe7rMNd/LvBEUJHh+nZsStLF+NcbB/KoE8gTxVM4heEiK3ZnEFp32JhKuvRjm4EkCCXaY0RDDJJKW5pGfOe+IDtpZI6Z4pA0+vzslMdrPqBpCsHAD4JsRB5sjsKlqGjktHOKYQUt/tsfyTj9LV73daWePSIcLW4cHbeBvXre8dW/EI0IP4eeQgAQkIAEJSEACEhgcgUEa5mqMty8xIIYuEiO1w9iSxPPEktzUXz5v7pSMaMSNMzHMMS9i5+dSKf6XdL6jIIYuy/UGO68ThZJouCYJpG9hUktcz01xYt1+RqgkZDgHf0+SkATE35I0Kkmakf0wF2J0inBY4um+aLOb0tWtM1amyp5uAQ/HLPdgilOO5oY+26kjK8QkybmqJC9uTpIH9t06YaNH3k0yheWJfjeSHED3kOxpDroIEKvTWRotgV5bmEQGbHgvcf7BaCgeOVecszLyhpPt/FFzc0UPTNUw92GSNK+lq9z9VP1vP9A6nGNmN8yBL7os3B6zHMsCkeR1SEACEpCABCQgAQm0CVC0TocmTEv/k3vyFDkQt3W7J0+HYjpk3ZwYlBi+3/kB4jcMXRSqPxwzDQamGseW2DJTZp/EnnTpujv3jbnnfn5Mc3Xw/kEZ5tgH98bpbvxcdANxO/fyMSA1QtwSz5Y4NHO9OMUuV2aOGKS+mK7MC9OlrBu3QRnmKhfO8/7c4/8wuQzuy3OOWVWIDtR0iKaontwAc2PuvQzi+pkY5g5nTu+HHVoDjhinqlao55ufyyo5KRhiKWDyDyyF2o1hL3Me7z00RdgdLnQ4ZBUmjFeHR/4ORj8TNmhWVuy5J00l7s7fw9xoIDgy0JAYwTCeViMY2+VVXpufnAvHgmHuB+l6zXFMZDrt1TDH9jO1MhcKzzCUYtbkb5S/Ff6HAbB0dhvp9E1HOOaEMmvPvp2XIedENzn+1m+OKZBufnRWHznEcn7oBseyxCwTS2e3slpS3Ui2l80WJuhCDHM/3LS2NJOoHSl5nfP6dAySLBNMF3ryksei4ZgNm2JeDI6LOXGdYpaDF9ocOxwGW5YX5lrm+PiZRWHZPl/NUX/kmKdimONzz+Xfoyc/2ldWxnonecHR/eQ1DI9XJv/3w8vX5dpsL2fc3K/PJSCBqRPQMDd1Zn5CAhKQgARmQKDdTeFI6fJFRRSiqSRlRrZZA0kSQrecv6wEuHRTmIpw6nV6iE26D5Acor0xSwkRbFdxznbIp8xLQIzx7NYkOxAWJHgI2hmDNszBg4CYyhXEO8KHVs9UZzUTR5UbgolkD+YvKsn4Yq6d5i/evz1ile2RGKOiiuCdwTHzGSqpMMzdHZMgwgqD23ijF8Pc6GcjOjifpdtEmVs1Q5IohGlMckka8sjPzKvqHyQVx0jSjmOkquqWmNyoMKwVQ6P7aTxBxLx/8EgRn7+KgER8so/2Ebff+LWIqa/m68vZ3vowrKKssRmfSkACEpCABCQgAQn0kcAgDXNMk1gPwxRFO9z8p5sxvyWe5MY+MW/pFJBgs8abfG6iwY326RrmiG1PxLFFtfhfUjz0fG6Eb0nn4xqHs19i8VocwjKo39m4ppjlSCL0OseJ5t98jfmgh15KlT5dDR5K4gjjVzv+PpU4QA+hgW5LIc2XY/5iXiQQ6oAvOu+PMczdv2VHCpHaSy41TXHXJ27HOEbi7oKYycZLOJFUoWvGox/sLonLVzI3km2M5vGTyEC3zRnRFXNyThmc4RPREWjMEzlX/BYthFZgPug9dGBzW3xmKoY5dNjrOW8sj0PVPzoNZmyH7aJz0LP35dyhV+jO55CABCQgAQlIQAISaBOgmP61GJUeT1e3B2OA4ecx+YHEVXMSp2Oo+k7uTX8597cpEO+1I/RUOVMIwSo092/Znrj4QJkPgV2NFxNCljiS1VjoNnZnjD2b0zW5xrODNswRs6NBKCyh2xQr96AnDiaO57VTUXk7FqagfnW0w7fSHY3cAAa6biapQRvmiI+Ju7kvTzzP8xK/J1ZemPM7P3pifmJ4+PY64DATwxyNCF5N0Qu65+WcazqVMZgCsTxfzBtTJOcaE9emnGuWRp3KPLOZSQd5JRoloAvRPi/nnNbcVP0w8yEnsz7dtcnRoKUw0NXzyetoU7bxeHJcdG0sqznl9ygodMny5Hgo4vlRDFYXZjuYFMcbvRjmmp9FX2UX5TyiGdFk/AxRtBi6jHPP3wg5rjrKs7yPnn0bly9oXZ0uaV/NHDEG0q0OQ2DZTD5QryOu+1+nOItCrWqYq+9he1xbGNtoevHDTetKPo9Vq+rIdFrvJT/0yu5Dhdd/Sqe6o4UT72lui7/tXKKtuee0zYlsm8F+jpfjYaWqk6UJBOeHa5vzgLGV+db38sjPvRrm+CTXONr8zzmnL0RvYqRkkxjzGCUvFlPhTzavL/8O
sYKTQwISmBkBDXMz4+enJSABCUhgCgQI6d4siYX9xbjEsj1UhxArE3LyOo8ksGgZfm9EHcHkZXRTyO/7PdgvpiwqyH4bcY7gxDTXHAS0BOgrUyVDS+gfJ9hGZPIzo2mYowqNhNeuJGEIqkfi6BLQ0qJ6w7IFra9H2Pw4pjsScwiWXgZcEBTvJ3H0ws79pfqOCiiqcgjGGe3v5WlJBtHR4NYIYgQ8Qogqn869UdnFOaBdN90gCPY5XrbF/Eks3RMRRsv7qyISSyeI9i5O+z6ZYQ4WJLPOy7mFJwKizr2z/KYeS31k3nyBi88imuhqwTKs12Rel+f64IZJvUlx2uTyi92pZPp3jvXRHOsf393ZCroxx8q5+GY6ZiCCEWhUW7FPhwQkIAEJSEACEpDA4AhgmCM59bPXt7VeSqxGNzjiRm68E5e2O5vNL0sf3ZM4mhitFq70OitiSgxOeOXyUAb7ILbkBj3PpzKahjmWdPlPbrhTiNKM/4n1Saz8KJXtVMhfunRh2QXxL52UWfqHJM1b6WCxLTF+cxIcNxXtdNvGYPatxKjEvxMVr0xl/p3vpcvCe0k6kLDE7Ebc3O66TQxOJT2cskxO9Mz16VBBsoXuBpybGn9zXFTYP/r+nqKr6I6NJmomFi6NFro+hVA/uGxt0XdNw11zTqVYKEk1Yvdno9Oe3rq3dAis3QFHTmHzI6c955Sy7zkY/aKD1iVhODdMSWBsSYKMrgt1bnyYbfZqmOMaYo50+HgiRUd/Cze6HHA9sR32TUfyK6Mp0H0kVs9PJ2yHBCQgAQlIQAISkACFDSdLnEinp7+xFGru1baLWkboJNgipqJT1UXpcPzDxFM3x7hEPNVZEN4vnvsSG7J86AMpsn4299wpuiYmP2XZae+JjssYer6b+JycBaYlYspqmKM5wF8SD6MNMNMwiA1rjMh97e8nFiY/wGopxNO9DrZBQT1Lbv7unV3FGEWntFJYMrKDssd8Yz/oiVIYnoYEX1y9tBSb87vmwDD3XDolk9N4MkYr5lq1UY3/MY19NR3+MGtNR48wp09yzjFvFSTZCftJmF40H8+nMqphDj31v1/7qORyanENO2B/mAUvif767xiKmDu5phqrU+hC9y46uqHFaObAHNCfzI/j5wuj4R0pFqL45cLkgqrumcpcJ3sv19iJ7LN2J6Tj4gc5J8E1OnjKdU9jA4xgd3HtRGc2TVJcf3Qtp3MaBUx0b4Q5n+VY6Mp947qlpTECGoXGCBxzt9GLYY5tYoxDl7Nv+KO3G9Mu268/81ifs9/ylY1Q0LQyptgv5Pq8Pn9PLM9LE4NOYyw8OO80fuBafTX6m2PsbCbBOYQV1z3mwI3JH1F01hyYNzFNwgpm3IOgM3n7GNrzrHPlc01Oo8/zhOuBvycKweDJfQ0KyHZlXjXnVbfDee7VMMdn0eJ/Sid6Ouq9Ge1KF0L2Xf5NyhPyfjemgI08Jcv1TufvssnE5xKQQP7W8w9K/ZuVhwQkIAEJSGBgBPiPDUHdPyNmHk6w90ICUkxWncF0u+qlbU5DEGOMWtOoBOn3BAm4McpRgfNUkjE8L0sijYbx7cAYYUFg+32SHhEWF2deBKrVMMdSnyTMSMDsSpKpKaL4Ly0CYEOMXVQB/STbmIphjmOGH1VjtKj/U/b1yHt7SnUJFXjdGBKwIzKowCPRdnmEFAKeOddB8I0JDyFP+3sSR4iD+h72+eWY0m69sN2pjiVam8dVt8PjRIY55BIiiuB9SdpYw2JnRATJwrZhMntiZ9kx++bp6Bj5HXPnmPgsHQe5UYLAuyBmQH433rzYDttj6V2WAKbd9jMRRPyOc8/cqBBDdH4vNyzuzFJRF41sk886JCABCUhAAhKQgAQGR2B7YsJXYpj7v69tLV3OqIYnYk3oV7TDshjkSEzRHYHE0nQMc3X2Y2LM/JK4czqjGuYoxCiGuVSmc8O+xqNsl1h/c7qKYZi7rmGY42Y8FfFPxGT1ULTDrhz/gcTE5YBHJsM8WfrllsTxFMDcllicnweVIKwmPjpO/zKGuXeL2e1YptQglEmRaLo01f/oC3QaJr5qXkTnsZ1novXQFs/mkQ4YTVMayRDi959sWl/0FB1Dug2OvyZSWfIUwxwd8ChMIpkKf/TP6PTyAT7DNx6Z9XnJwM1PjL88189V2ecNMfrtOnKizAktsKdTr+UzvRrm2P/RzOOxdIH4Q5KV6Ec0Gsda9p8JsK3roll+kiQGxV8sG+uQgAQkIAEJSEACEuD+9sfp8nS0GHswPWFYSXh1KrYLJGI9TCFXJI77Ye7Xck+eeKoZW/aTJff3iefo6ITpDUMVxd5jRua4NLElBhkKQLjffn7uIZMzIHYlDm4b5nYXbUBs3Iimy/GhF/jsNy5ZPWXDHHNhP/vDD5MheQzMbsS1FMGP2Vnee87Jc1qXxZR3bdjdkwLxzSnGwUjWnBMmwdIxObqEuY/G1/k8rJfEEHjT+qXFqDVdwxzzJkbuHM15dL420c/E4tsOHS8FSP/vqxjmjpRu4nV7XDurS7H9wtb/c8X6zL1tmOPYuM7+Gh0Gv3+EH8YpjI31s0yUghsMW5yje5PHuSw5lbqs50Tzms5rcOELUxRNDR6KtsAMRo6maT/jXKAF+TugeAkTIGawqtfQYpxLio2KnoNJdOfoceUJOux2VtZJrufKmD757Ojrjcn3Ypgjf0cXw/UxsVI0tTN/O3Qc55qHP4Ntt5+N/MAv8wuOhb8Dipkwe7GCELmeq6KdafgA+2x+zGA7bBZGT+a8PZWlSlk6+Wg6o/P7ui+2PS+fZ/lZCpfIK9KNrzm4fiiiwnj6r2yPawF26HlegyXbHIXDc8bI7zCSYkhdHH2+Ln//FMddmxwcxVQv59y9Fy3dZt/WhnyUbfZqmOPfx+0x3v0hDR/KMr2ZZ1liN8cGW9hxnwFN/r3L1pRzMKj7BMzdIYGzhYCGubPlTHucEpCABGaZAAEnATfVYySW3kkV/o4Ef43QuQTMVGYRLFP1clfMSyRSaovpQR3CRxFHr6cNPEuT/ivBLdVaiOIaD7NfJATC8s6LV5TkBx0jCMIRqggD2iQ/ElH9doL1Pan6aAb2bGdxjuuiHNedqUz6bpbmIZDttcNcPW7MZTAkAH8m1W50s6AlM4KkipEy18yL4J0qPAJoxNDVEUKY1Zr7LMmxzPWJVPP9T1rOb00AXuaezzJnxMEV+dwXU0n1zSQpOWZMax2ahV1OaJiDJElDuvJh3CNpRbvzjzL3bWFP4vAEya+8rx4Hoq0toNoJR5JdJOWo6Loyx3RFzgWJKJKHiJRuc2JeHAfMEFG/fXtHzu+B1hsRobzAa8wN0XtJWH07Lf6/EsG5MvviOB0SkIAEJCABCUhAAoMlQBU2yzE9kKVVMB5R4U9gR2xHiorEGMkyussR0y7PjfnZjtPQNcTRdBYjqfZq5k8xSI3/mTuxL0YpluRkOZ+LE2sy0BmYv57etrd0dDt0nGViTyXjiE9Juq1J3Hv7RStKR4MrWX4qiRti40EMbuCjJ14OfzrEvZrHd/cfbRvTSsTc1gX
cnCf59MWYz9AzF+W8kChhMG+4kOChQAouLHVDbM9rzJzkBwVEfJakCLpgvCMK3sJlexIXbyWBwTXCEqgkxOiAdzStGEpCY2R+bAeVAiN01qokA9EKl0Q7XL5sUelIjfmOrnWct9oRPB8rgzn2apgjwUKBFN0b0LVFzySxwRzYDtcBeufmke6AFyaRxPXgkIAEJCABCUhAAhJolTiMzl5/TEE4y59+kPvRjBoXEs+xSsmm3P/9fMwot+VeLXE18VR9T785EldSlE6cTixLwTWF3qOdy0Z2SEEGRSB3J2dxU3IX5AqIcckPkFP4x/Z9ZWUYuk1xH785X+5101Xt2xvWlBVOSlFFCrinMjLNslrO63sOF3bsj9Vy0CLEoe1otL1F3rsiZiHyLHQmK6u0JP4mpq/zgj1Luz6SblYc9+gL2QTvIydDXoBVdzAhUbQ+24NzBd/nco+fBgAf5hgwTtXBcaMFyIv8aMTYSCE/qiRhfDlWipfQKpzjJjO4cIxoMVbcuS36E8PVwnAY5EAT06GbgqwXcz7oMsdx1sEZy+komuba6Kg7Mi+6zGE6Y868FY3E9fvL5HheTUEajHiNL9QmKyZdw4pOMQLelO5kLJ/aTV/2ZJjLZzGO0uUM7YPGfS/XIbm+Y4HMMqzouXoE7If3cf3TtXzZvLmlCQJ/A+gm8jJ0lpuXN5HnGW+wdDI5Hoy23DugQQJdyDGo0U2+GvlYQehHMcytz3kkp9QczIlu4eTzmDOs+HtCc6I9D6RhRWmmEajt7GDmns9Uncm9CJp7cPyX5hxwDBdk7o9ER7Mc7lv7DpX5cM7q8U/FMIdxj/sLLLH79zT44P5I6cAZLMyDxg9fzL89GObuTtf3VTm+buexecw+l4AEJiegYW5yRr5DAhKQgAT6QICkEsEzlS6Pphp/Tyr8MUo1Q2CEGEEsAo7uaCzLShDK7wc5EAL7Eww/meqUf2XJU6qLCJoZBLaIDr5o/U41CJUvn0uFFvMi+GcZo2cjUBHTdGzjOJErddp8FmG1PlUnt6Qz2l0xzVGtNNXjymZK0gmD31sxHFK5RjDP3Am8KyX2x0DgrVowr3SmgOXaBPPNZA1z57wgMn4TMxnJMQQZAT2DbdLe+4rcoPjx5nWta3OTYlkMat10y0Qd5tjO/Nw8oIrtqxHXdAhB4FC989reg2lXfaKY2rgJceJkO2F43jnnFpHEfDkOWluz9CoVUBjnMN+RDJtMEMAC0flibsDcn7b6JNu4icARggnxyY2D69L6+66IDG4CLOSa63aQQHFIQAISkIAEJCABCfSNAMUaxJEsOcLNbzoqM9o3mE+WOJB4lBvC3NjHQNeMZ/s2kSlsiNiWGJoiFrqyEY+ToGjG/3RjIylxZ27Wb0oi7fzcRGdwvCz9gnGL+JTk2qhdLsEp8SlFLnRRJkHzucThJGmmqhvKzqbwjWMi4fBykiwv5gtzGskOYmU0Da9zThaGP9rg7hwXx0dMXgdzJ+lGvP2bGCDRfbWwh9j73MTXmNJY2ogljm7Ojf6JYvnssvBBa7Fd9M8L0Wp06COh2e5i0KbHdjAacm0snHNOa2N0Q9EOmSvXD0yfikmRzhmPJ5mx88ipjoDMn7n3apjjHHKt/iGGOSr/0ZKcx7aCaidq0Dx0fqADBAVgEx0n+3dIQAISkIAEJCCBs4UAxQbEUnT6Iq4bG/+3Yym6GF+d3EC5Bx9TCvFw7aY1CE7EgsT3dJnDiEPMuC33j7nHX2K8kUCPmHxR7nFfH9MS98nJYWAqI2ZmqU/i+2e3HSjaoNuSrMSExMKsnEIsPR0DGjFyu0tfllONrsDswz12zD0cB1PlkcFz4nW6zNEFi/zA3MyhxqaYc17L58lpMPfmsWLOIZ+BoYkuWizNujDHPtsDXbI7uQSKfR5OPI7J6VgMWmXkwKFApzK6n339kpXlnj8GTM4RuQfyUhibyK9QCJNLbZQXR4ch8pIlC0sHQc5zL/mHmTKhgGpPrjW6/XEeuAa5HkdHOa7olegaCpDomodhjr8TziXv5Ni25O8KHYbepGFA0ad5DWbk3M7PNcdSpVyDrATEsXWOXg1zNDP4bjqcrcrfJlrohWhI9Nr+6GJ0GsdUDwFtjEGPL95Pjo3rauOyBS1WNCIXU81/nfNp/oxxjGsfY+BLuw+URwqq+DeFObD9ddn2ncnx0JmS7vTjNeKgsIt5HkwBG4VVaNjXooG5nujoxvzJU6GBuX7aOrO9ShE6E/40qqCgj/HAWztKt7ot2Q6rOtXzwmvw76XDHOcRMzFL8/I3+UKuB64DPp+GkUXvsozz10ZWg+JvkvN4+llkrw4JSGAqBDTMTYWW75WABCQggWkTQJQQ1NJVDBFHQEogOxrR8TTBJwa5dTFWUclDtcZE3cOmPZmOD1JVgrD6ICYuhCKVJMyNIJWRqZfntCEnkULSZXXEE/MluEYA0zGNiibMayzRk5dOjXyeChdEcKk+SdUMQfOY95x694TPmBNB+95Uz7C/PcdSuROWda6jHx7Z54LMGQGOWY4Knmayjc98kuNEDPw97ayfSBUTlX1sjNf4ojMeXd3oBEEQfmnMZc0udXV/vRjm1uS8suTpD1LhQ6UOpkS6O5SEVwTqvrBrtxynWubc0rECwbRw7rmpPJpTOr+tjgGQGxOIlMKwTmCcR647Ksb+GUMjSyZhaGQfoyPniZsbdDO8MWa5zTFCzolZbzrnZnSbPpGABCQgAQlIQAIS6IkAmoBEFDfX95a4dsR4lBiNGHxubnpzk5sb6Rfk5jc3wZvxbE876fObiJG5aU2RDUYuOhOQWGjGj8yR7msbEkeTpKrJMI6Xz3EjfkdMW8ih5uCY0UPc+GYJGT5LYqopLZrv79dzpkHyASMZy+TujimMzgDMp+x85BEdwJzQBBTS0FGjOTCToVHo7Pwkiag85yY/74IPS+xek2QdZkCWOJqsOAo+xPPoBTjTXXtv9kFyC+2wP5oI1UISBr2A9kFH0DGaZBedpOHH+fh75kM3iZkY5sBAFw+WwCKRQbKN+XE9MEg6LUmnkG9durp08vhcugNy7gd9/srO/SYBCUhAAhKQgAQ+AwSIOTEHvRejC/Edq46UQcCUp9zvJebkPvK6hcTSc4Zi1CKeI8ak8PqDxHsU+hO7E8PWwVPiPYxL5AfIX/AzoeDOo+04mlif7ZzKLNRPtztE0U2LDtrErt0MS6fe3f0ZtMhbEAdvO3y03FtHT40W4TQ/ljcTG68ZyWesS36gmZPgHvmuxNZoGvRJ57FiFOJe/Np8ni5a3XICzd0N4znHj/biPGG8JL9APD468pTCfWLwjUsXljxTM4dDJ230CtdhWY614/yiHTg35HA4z73kH0b3Pc0nzJ98EnkpTJt0aqv6om6SQyS3g0GrFi5xPur0a36KFZT+ntWEMFyV5E42AJ2iMfNZcjx3ppM512HVqHUfPPZqmKML+o82rSu6cGn0z9ZoSBpk0O2Prmh7ya+N5MgoXkLTL0qOBxMmRXDk/TDxMQeuMzhPNtCGdG4sf6M5h2/k/NMZ8ZloMv5WyWNdmT
wPnenppFdzSN22CxMY87fE3wA5Kr7QmIdjoiPHxzUCY/JTaHu0L39PaEw0MeY/XsesSKOGx5Jbm65hjvOLbqZ47aF0Mf93zHKY5zh7/G2Tg0RLYza8N80o6PqIJmc+DglIYOYENMzNnKFbkIAEJCCBHgjUIJQAksCbgJTfjR0Ex+0AHkMUbZiHMco8Mp8jCeKPJujGQHeaKMlESLQwr/lJyCBoieP5LAErx8UXGr/zsxwD7+XziJOFaZ3Mz9MdiANEEBwRCd32x7bZB+k1RCIdF5oiqu47mykVVa+nkuahJLaeQmBku1VoYljkxsTtqTwqnT3SCYLWz52nplfD3D1JjP1k0/oSzMMDoyJGSkx/iPuD2TdQF6QzBEk45j4/iVFap3M99GqU4/jaYvOTstQuIvHpHBvCp5ohOQaSalT1fS9ikWWhEP+DrFis3H2UgAQkIAEJSEACEiBuHukilkTH8QSmxNXNQVyWULDE0MTRw0hYNPc/3nOmSRzLjXliy854vD3v3FxPLItuIO5l8D6SIcS+o50QOnbCMfL+9mc55o43DOhHYmeKWphXrWTv3BXHxfFwYx5tUY+rvg8e+5JwoGMgnTm44U/XEFI5HAZJBhKEdGAjWUO3EMyBkw3mBme2DzvmR5KQhAbaASNlSWJEp7Q1A/OkyKa9XzQayaPHZ2CY45yj91gi5+F0lvtXkhhvpCMHE8hL5YsEFt0B6bbwpfXLyzJA6BeHBCQgAQlIQAISkECbQIk5E8thKuN5Z/zPu4iH27mBdiw9rHiYWJ04k3vj3HvvjPGZW6ZW7rPXHAE/M47lc8SpxIt5mlG+8WTMwDBUi0ZmclzExm1NwZy72uXKfomGiYk758uLNf4nH8K2OsfosSauZs71WDvfN+yf4YtmwSzH885rqKmnyIkwb84G7+O6a+dw2jH8yOkbPQT0DUVbHC+fHdaox9Q+pyOFSx0757jQYGgxNBk/18HxUbzEEqOPpaP2w2kcgHmN65nBcfHZ29MpnA51rLKDaa25Dd7HtY+R9VcxgP0mXdP2Z5vwOrWnNIYIHzrE/dfm9XlcVJZTPXKirSPJq1HsRJMHrlE+txTDXDQf+hZzHPOAbzc9yRwmGhwN88FcSFfHx2NS+2u+jud3rG50a3Qm3czRYk0dPtE2YcT1RI6KbfOF6Q/TLPOtJkr0JdusOpPrg78hjHW/Sme/x9K9cLqGObjB/Z/p0v6rLTuKWQ7zZHZXBseNiZOudhRnXZeujyuiPWHokIAEZk5Aw9zMGboFCUhAAhLokQCBXWLIrmKzuQniPJIxww73EpdGPCHUm7MZ+7zMLTPL9MaMXj7LZziqfsSxTBHRXqY6wXyB2O7oMD5Pkk10k3gwhjkEBt3yjkQQVMGEkCcY/0rE1H2XrintrDvNjFM1zFFJhLirxwE/BGEKeMrISyXZhZhjHm12U7suEMBULGIEJGH3Rjrp1e5ysONGAZWAdLigHTmVi8zLIQEJSEACEpCABCQwPAJFH0wQg9cYusaDw5vZxHua7rx71Q0UvnDMwxpVV5R+GBPoC+bE3Eb+P2Z6JAxIMLAcK4U4f45xbke6DaBaUCPc0CdZ8sUsb0QhzdVJtNAhYbLROTcYkhgkqcFAO9SEyCntMMIvc0r+ZsaGORI+O9M1gaWvHnhzW+vtdEWkGIdTxCzQF5dmeZ5rs9TV13NsLF2Fjuo0FTJfhwQkIAEJSEACEjhbCRA3EUdP9x78oLkxt17j4Was3j4uTHYTz7BfeY/2/ibnyGzaOur0nESZar5NdLzjfXbioxz8q5NpqqpZmueIWdV8Cue526jvL5qi2xsG+LtJr722BCtarM6zOR3MbnQKZLnj38bstjXPKWaq+oj3fi76Cy327Ziu6FCGca05pmqYuybbY3WiwjVMwVoNaPwOrYTBrBrl6rzH05PNuXR7zvbJIb2VFaz+8v7u0l2Obmw0XLgszRDuS1EWS+luypKx7Lvur9u26u/YJmZKHrmu2kbSdpEW7NCw1aDI9kquLVvnORqxH4Y5NDRLVJPDeiBmRTQ0ua3y70XmxTyuSve8G9P44Y7kslgSFq7MwSEBCcycgIa5mTN0CxKQgAQkIIHPNAFEBhVZj8YsR9eFF3cebO1uJH8QBKtjLLtx7bLWdyI6NiydX7pBNA96qoY5usdRnVMHogRBUmvZiPVnegOBSqM3k8j6QzpA0J6bdt0k1aqOYLmkzRFPGOYwAk7UprvO00cJSEACEpCABCQgAQlIoDsBkgxUx7+5/3AxzT2YSvt3s9wWWoOkCPH9ebmxvyk3+G9M5+qvnr8iS7QuHk1AdN9q99+SgEE/1DGeduAtJDJm2mGO7nYvp4vB0x/tKx30qPgnicFg3yRSvrA6x5Qio5uSyNi4bEESQyYxCiC/SUACEpCABCQgAQlIQAIDJVDMW9FdFC/9OWayF3ceyNLHR4vBq+w4moWlhK9IY4T7Ypi7ZtWSkWVnT01ryoa5aLmL0vmsOUqOZ8SAxu/RSmjBfpi7MLO9n+VYyV898t7u1uvp/r314JF0uZvf+nwKluj0vXnFwtbq+awi1JxVb8/Rjk3zHJtgO+PNvx+GOY5p79GPW8+nMAvD3KM5d3Tp4/fsn9wchVhfOn9ZWQUK/czSrHB1SEAC/SGgYa4/HN2KBCQgAQlI4DNLgGQTATjLCj29bX+qc3a13o+YItnFoIKFFtNUsdAt4fo1S0qSq3nAMzXMNbfVr+csl/RMjHJ0zaNFN5U67UM6mUTdOUUg3rx+WRJ1y1u3XbCy/E6d0S/6bkcCEpCABCQgAQlI4GwkEFnR2p0leF7Kcqy/fH1b65XE5HQ6oNMasTYKg0TNxuUL0tlgTevLufG/ZM6ckggYBC/2N1PDHNvYn6QFHfNIYlCMQycBjpVeBFT3syQsS83eG720MZ3mWG52OkmaQTBwmxKQgAQkIAEJSEACEpDAmU0AzUI+5/WssvPP7XT7bpvmmke9IA0MWNqTHM/NKfLBfNXsiN0Pw1xzf/18Tl7nSJaZfWakm/kTyfnQ8ZtjvjrHcUvyPPduWN26YNG8srJQP/c93rb6YZiDOY0fHonWfDpd2l+I0ZEufTVPtShmOZabvfviVTm+VcXkuMRVksY7Jf5eAtMioGFuWtj8kAQkIAEJSODMIYCYonLm3ZjkMJb94d1drVd2H2rtOHSsHGSpAkp3hEvSpvvm9UtjLluRrglLkwBqJ71406fJMEfiCgPgMyPC8PkYAd9NS+vSGrwc0cnW8vlzW5clkXX3xStb16VN9xXpcoEx0CEBCUhAAhKQgAQkIAEJTJ8A2oKb/m+ly9wf3tlVljBFW6A3GDxiLluzaG5JaGAyoyvB4iQCBjHY60wNc3SXw/T34Ns702Fub2tLklB06WbbGOaW53g2rVjc+lq6y7FEzopojcUmMQZxOt2mBCQgAQlIQAISkIAEJDAOASQXS7Gixej2/RSFPsfa3cpY64cmAitT2EMTgVvTROD2aLG5WZa1ZkU+zYY5TGUsW/pE9NgLyfe8lZWFshJrOR6OBQMgxrllWVWoa
QIcB1Vfft0Pw9zuLJv71r7Drd+9vSPa+UDrg3RozyJJpTMg6SqKzWhkcXt05q05Xxjo5tleri/nz41IoBLQMFdJ+CgBCUhAAhI4ywnsO3aimOZo2f1sOs39Z9eB0pGNwBwz2flJZH0+wTkVSCSDPq2GOaqNMMz97aM9rYeS1PpPEnQfpVU3CS3EH1+Xxix3bTrlfSOVOSwJtSpC0SEBCUhAAhKQgAQkIAEJzJwA8TgJjX8kQfNUEhpPplL+SJYHolIew9yimMmWz58TXbE6umJF6/IsX7osprNBDDTATA1zdJP7MMVE92/ZlmPa3/og2uL4iGGOXMWF0UlfyfKyLJNz49qlpaPBHItxBnE63aYEJCABCUhAAhKQgAQkMAEBtMvuo8dbv35rR1l5B+2CEY6cCDmeZdFh12b50tuS3/lmOrLN+4wY5lh+9fl0X2M1odf2HG4djr48P93krli5KCsIrWhdm7zVmpjL5uGiG9Loh2GOwqzXczwP5Hz9e+f+1q7Dx1ufcK7yNT/n5rLkrr4Ws9xNa9MRcHU6AuZMKjWHdILdzVlDQMPcWXOqPVAJSEACEpDAxARIYO1OG+vnIjz+/uGe1l/StptuCiSZCM4vXjq/9YXVS1t3XpRk0PoY5hqb+1R1mMu8qKZ6auve1h/TLe/FLAf1XipzEDAIDUQgVVS35BhYAmp9hBTH55CABCQgAQlIQAISkIAEZk4A/UCihhicJUyJybfFcEaBDkU3mONIZlCI85XE45csWdAa1LIy/TDMoYmY/2+SxGCZnHdyXIdzfBTp0EnucysWte7buCbFRUuyHOuCkohqaqWZE3ULEpCABCQgAQlIQAISkIAEJidADuRo9AtNEZ74cG/rXzv2FwMdS5fOZ3nPNA64Lo0E6Mp2V1bfmXvuZ6PD3Etp7oAW45hYKYnlZa/PcXwlx3FF9NgFi+aXHM8wm6/1wzDHsrJvplveb9P4gXNF44cTn3zSOjcH0j5XS1vfida8PA0g1sUgqM6c/G/Ad0hgqgQ0zE2VmO+XgAQkIAEJnKEEMJkdSmUOCSA6Qfz+nZ1Jan1cjvbCJfNLJ7arkwS6ZuXi1uWpbGkG5xjm/h2j3W/S6pvH0j0in+Q9LIU6PwJm7cK5rXuSFPvp5vWtZXPntBbmd4MaJMZYKumFzAUx9UaeU63DoG31XRetLKa/TSsWtpZmLsMUUmUSfpOABCQgAQlIQAISkMAZTAA9gEHu+dz0b3cBONTakWp5DGYXRVtcvnxRSXBcmaVMVy2YM7AClmqYI7HyeIqCHkviaGeSEs1lengPyYfN0QY/vGxtCoRWjjkz9Vie3b6v9eLOg61X0t1gawx0+3N8a6Jx6NBw36VrilmO5VibOmnMhvxBAhKQgAQkIAEJSEACEpDAAAmgbT6JFvt3mgg8u31/WYUHExYmugsXL8jKOwta16Qb29XJ8bDUZ7MzdlmSNUuE/s+b20uHuv15zuea+mZOEiksfUqOh9WI0HbDGO8m//RqdBimOfI8LC3LcXwxHb4xls1Gjqca5uD1aHTmlszvcLr50cmP85C2DmUFJwyKdPO7KcvGstpRcxxIIdb26OR/5ly9mFzWa3sOtvaEO9veHCPgjeuWtu7JKknk1shrOSQggf4T0DDXf6ZuUQISkIAEJPCZJUCXhAPHT7Se23Gg9btUtdBxjhqjGxKYI0A2JbFFR4hFHWa3t/Ydjgg70HpgC4a5g6PLLRXDXGjMTyvstUlCfWPDqtb/GoJhjhOA+W9vklivZEnWlyMQ6TR3Ise3LEm6IlAippbmWBB5DglIQAISkIAEJCABCUigfwRqoubN6IR/5cb/P5LY+ODQ0dYFWb4UXUGyYE0SG8sTj9PVYFAhOfMg2VDMclm+57H397R2JiHRXKmH92CYu2LlwtaPLl9XimuaJFhGFh2xN0sbvb3/aEyAB4pp7r10NtiQLtzXxTBHEoNEjdqiSc7nEpCABCQgAQlIQAISkMCwCaBv0Dyv7z3UeuT9XS3MZgePf9K6IfmQL6xZ2tqUbmUU/lDM1MyMVMPcL7Zsb90fExgFQkdHlnNlm7wZ7YZh7r8xzGWJ0IuHZJjDiLb32PHWC9Fi5HzWp6Mc+74w+nJuxCQmtWGPapj7+ZZtrb8UwxxLxcYwl4nAq/11MoViS1vfKoa5pcUE15wn+bijH58sRV1tQ+Ce1tv7jiZH93FWSFoW3by0fJ583GwcY3OuPpfAmUpAw9yZemY9LglIQAISkMA0CNANjg4K76bLHMa3Y6kgQgSRCEKE0P1hXn7uTAR9kCql11JB8+cs48rj8Wwn/y8jz8oyqKuSQKLV97cjDhBjg14GFcFyLO2r6WSxNVVH72eOxyM+EFAk6TZk6SeWZx1Ucm4a+P2IBCQgAQlIQAISkIAEzhgC6IFdMZl9lG5sbyRZszeV8hjTSGpclC86Trfj8XPGJGr6CQBJwvJDz6bT3TPb9rf+sW1fa8+RE0UDkFIZkSyt1UkYXbp0QVkm9paY+ToHOgltsefox+V4Poy2oFMdhj+O58qVi0rFv9qik5w/S0ACEpCABCQgAQlIQALDJkAzAfIiL+8+WAxmaLNLkg9hJaFV81n957ySJ2nOi25yB7Li0B/e2xWj3e7ynPxQHeinOak8wnD3rQ1rSofu9dF3wxhouqPRY3T6PpI5LU1+ia5yS+adV4xkw7fLtXXmoRjk4PV0dOa7+w+3jpw4OWqYgwu5sSvTze+2C1a0c1LRnM1R9SrHtCP68r3k5bYePFYMc6zytCHvv2DxvJKjm41jbM7V5xI4UwlomDtTz6zHJQEJSEACEpgmAToo7KMV9KF0l0sUThJraYQHCS3Mc90Cc5JFmOaeT+tojGnRBRnIgfbAYEdnN6qPbl67rLVgZFsjLw/0AaGH4NifY8IwR7KLCiqWhZ2FwqOBHqsbl4AEJCABCUhAAhKQwKeJAMU4dCrYceRYWc5nZZYsXRJdgLY4J8F4N23Rz/mjR4j/Mey9tudwWcaHav1OYxtdtNcvnFeW9Plclr7pNtgWRTnoC7TFwXTmRh+xNM7KJJ06i4q6bcPfSUACEpCABCQgAQlIQAISGDSByJbosOR4osPQMHOiW1htZ/GIUa5TDzEftNvhfIZiI7pq0y3tRExqp8Y5WcK1VbqGk+PBfLciOmhYAz2GFiN/dV605Hk5CB5na8CY+cDrlSylui1mPhjWGTFfvi4Jp2tXLSkFWmujObuNHFIp0IL5nhSd8UgDCs7ZAps+dEPm7yTQNwIa5vqG0g1JQAISkIAEzhwCJbGVYJ/20YgOkj+0fO4mpDhqkmC1aulgRBVi4ZRdrlU+Oy/VRyTI1sasNqdsq0qHwXJjLiTJEIbxy5V50d2OTnMOCUhAAhKQgAQkIAEJSGBwBGosTiKBWHx+NAHaYpiJjey2dLdjCR86xB0fk/RpHztFQixzs2bBvAmTPmyragv0BYNEzXwKi5QXhYffJCABCUhAAhKQgAQkIIHZJ8Byn+RtUC01x8PjeGmRonPyGTrT0SCBHBG/q4NyJzTP4uimdTF+LRrC
KkJ13/WxzqeUXmUusynBQANjOsMVkxuad0Qj1vnySMFYMb+lgQOFY+ONqp3pbI7WJIc1bO083tz8vQTOZAIa5s7ks+uxSUACEpCABKZJABmE+CgiKM8nS/4UMZVioxrMn7bbKBekwNwE+fOjyIbRTaJzDgiYtrzLceWAZlNMdc7NnyUgAQlIQAISkIAEJHCmEiAGr7E4yZnZiMNJ9mCUo+N0WxOMpc28SB5hnOulsKZ9THVL4yedxu7FnyQgAQlIQAISkIAEJCABCQyHQNVh7K1osEm0WNU4x6KdWIoVA1e3gYmLHA+FQzRZOJsHOpelYtGbmOe6iU04zctXr+Y3uKNaYXt20z2bryyPfZgENMwNk7b7koAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIIFZI6BhbtbQu2MJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISGCYBDXPDpO2+JCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSGDWCGiYmzX07lgCEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEhklAw9wwabsvCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpg1AhrmZg29O5aABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCCBYRLQMDdM2u5LAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABGaNgIa5WUPvjiUgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhgmAQ0zA2TtvuSgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQggVkjoGFu1tC7YwlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhIYJgENc8Ok7b4kIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIYNYIaJibNfTuWAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgASGSUDD3DBpuy8JSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISmDUCGuZmDb07loAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIIFhEtAwN0za7ksCEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEZo2AhrlZQ++OJSABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSGCYBDTMDZO2+5KABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCCBWSOgYW7W0LtjCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEhgmAQ1zw6TtviQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhg1ghomJs19O5YAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABIZJQMPcMGm7LwlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKYNQIa5mYNvTuWgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQggWES0DA3TNruSwISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgARmjYCGuVlD744lIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIYJgENMwNk7b7koAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIIFZI6BhbtbQu2MJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISGCYBDXPDpO2+JCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSGDWCGiYmzX07lgCEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEhklAw9wwabsvCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpg1AhrmZg29O5aABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCCBYRLQMDdM2u5LAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABGaNgIa5WUPvjiUgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhgmAQ0zA2TtvuSgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQggVkjoGFu1tC7YwlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhIYJgENc8Ok7b4kIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEtNU+eIAAA66SURBVJCABCQgAQlIYNYIaJibNfTuWAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgASGSUDD3DBpuy8JSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISmDUCGuZmDb07loAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIIFhEtAwN0za7ksCEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEZo2AhrlZQ++OJSABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSGCYBDTMDZO2+5KABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAI
SkIAEJCCBWSOgYW7W0LtjCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEhgmAQ1zw6TtviQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhg1ghomJs19O5YAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABIZJQMPcMGm7LwlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKYNQIa5mYNvTuWgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQggWES0DA3TNruSwISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgARmjYCGuVlD744lIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIYJgENMwNk7b7koAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIIFZI3BWG+ZOnjzZql+zdgbcsQQkIIFpEjjnnHPKJ/l3rHNM9Frne/1ZAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIIEzgwB+gfp1ZhxR/4/irDXMffLJJy2+Dh482Dp69Gj/ybpFCUhAAgMm0DTFVfNv/Y9e87UBT8PNS0ACEpCABCQgAQlIQAKfAgK1kKZqgU/BlJyCBCQgAQlIQAISkIAEJDAEAmqBIUB2FxKQgAQkIIHPGIH58+e3Fi9e3Dr33HPL12ds+kOZ7llrmPv4449bJ06caO3YsaO1d+/eYp4joPTG8lCuO3ciAQn0mUAVxGzWf8f6DNfNSUACEpCABCQgAQlI4FNOoKkHmKqa4FN+wpyeBCQgAQlIQAISkIAE+kig6gF1QB+huikJSEACEpDAZ5zA8uXLW2vXrm3NnTu3dd55533Gj2Yw0z9rDXOY5Y4fP9565513Wtu2bWsdPny4hYnOYHIwF5pblYAEBkeAf7dwhtdB98wqkOvvfJSABCQgAQlIQAISkIAEzlwCaAJufKED0AMMNcGZe749MglIQAISkIAEJCABCVQCzfyAuYFKxUcJSEACEpDA2Uug3idcv359a+PGja0FCxYU09zZS2T8I9cwF8Pc1q1bW4cOHdIwN/514isSkMCnmED9j16dIuZfk2OVho8SkIAEJCABCUhAAhI48wlUTVANc+qBM/+ce4QSkIAEJCABCUhAAhKoBCieQROYG6hEfJSABCQgAQmcvQTqfUIMc5dffrmGuQkuhbPWMEfQSIe5999/vyzLWjvMTcDKlyQgAQl8KgnQXa62Ua0JstpV4lM5YSclAQlIQAISkIAEJCABCfSVQNUE6ICqBTTN9RWxG5OABCQgAQlIQAISkMCnkgBJ8boCjR3mPpWnyElJQAISkIAEhkqgxgbr1q1rXXrppa358+e35syZM9Q5fFZ2dtYa5ggaMc3t27evdfDgwWKe43dcPA4JSEACnzUCVRAz75og+6wdg/OVgAQkIAEJSEACEpCABKZPAE2ASU6j3PQZ+kkJSEACEpCABCQgAQl8FgnU3KZa4LN49pyzBCQgAQlIoP8EiA2WLFnSWrlyZTHLNb0E/d/bZ3eLZ61hrt5EpsvciRMnisHEQPKzeyE7cwmc7QSqIIaD/5ad7VeDxy8BCUhAAhKQgAQkcDYSqJpAPXA2nn2PWQISkIAEJCABCUjgbCagFjibz77HLgEJSEACEjidALEBXeXoLsfzGiuc/s6z+zdnrWGunnbbE1cSPkpAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACn2UC1SinWW78s3jWG+bGR+MrEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJHAmEdAwdyadTY9FAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABMYloGFuXDS+IAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAJnEgENc2fS2fRYJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSGBcAhrmxkXjCxKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCRwJhHQMHcmnU2PRQISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgATGJaBhblw0viABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACZxIBDXNn0tn0WCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhgXAIa5sZF4wsSkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkcCYR0DB3Jp1Nj0UCEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAExiWgYW5cNL4gAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAmcSAQ1zZ9LZ9FgkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIYFwCGubGReMLEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSOD/Z9cOiQAAABCI9W9NAgr8TaMYFgIECBAgQIAAAQIECBAgUBJwmCutqQsBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIXAGHuUsjIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIGSgMNcaU1dCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQOAKOMxdGgEBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIlAQc5kpr6kKAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECV8Bh7tIICBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKAk4DBXWlMXAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIELgCDnOXRkCAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECJQGHudKauhAgQI
AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDAFXCYuzQCAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECgJOMyV1tSFAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBK6Aw9ylERAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAScBhrrSmLgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBwBRzmLo2AAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBEoCDnOlNXUhQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgSvgMHdpBAQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBQEnCYK62pCwECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAhcAYe5SyMgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgZKAw1xpTV0IECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBA4Ao4zF0aAQECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAiUBBzmSmvqQoAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQJXwGHu0ggIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAoCTgMFdaUxcCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQuAIOc5dGQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIlgQEAAP//hpAu6wAAQABJREFU7N2JnyRF0QbgBna5l/telOFSDkFA5Ebx+Lt1EfxQAQXl8l5E5EZAOZTDr54cYima6p7umZ7unpk391fbPdXVmZFvZkW8ERmVfdL/ujJKCQJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgsM8ROCkJc/t8hNO9IBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEGgIJGEuEyEIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEDgQCSZg7EMOcTgaBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEuYyB4JAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkHgQCCQhLkDMczpZBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgkYS5zIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEAQOBAJJmDsQw5xOBoEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAS5jIHgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQeBAIJCEuQMxzOlkEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCCRhLnMgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBA4EAkmYOxDDnE4GgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBLmMgeCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJB4EAgkIS5AzHM6WQQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIJGEucyAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEDgQCSZg7EMOcTgaBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEuYyB4JAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkHgQCCQhLkDMczpZBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgkYS5zIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEAQOBAJJmDsQw5xOBoEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAS5jIHgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQeBAIJCEuQMxzOlkEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCCRhLnMgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBA4EAkmYOxDDnE4GgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBLmMgeCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJB4EAgkIS5AzHM6WQQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIJGEucyAIBIEgEASCQBAIAkEgCASBIBAEgkAQCAJBIAgEgSAQBIJAEAgCQSAIBIEgEASCQBAIAkEgCASBIBAEDgQCSZg7EMOcTgaBIBAEgsBBQ+B///vfqI5+30866aRRHf3zeR8EgkAQCAJBIAgEgSAQB
IJAEAgCQSAIBIGDhEA/buK9eEmVxE4KibwGgSAQBILAXkRg3MbpQ9m52Li9OKKROQgEgSAQBHYDgSTM7QaqqXNpCCB800qRv2nX5LMgEASCwLoi0Ndx8+gz3/vss89OHP3+nXzyyaNTTjklSXN9UNbofX/M+2LNM/797/XfT6rbNYuov99W3geBIBAEgkAQCAJBYFUITOM8ZArvWdXIpN1FITBtju90fu9m3Yvq/yrrmYTPTnFfZZ8OctvG89NPP22xk/7YGk+xE4f3Gd+DPEvS9yAQBILA6hDo2yZSzGuPysZZJ6hSdi3rA4XI+r2Oj3tJOO/41/f6r5Pqds0i6u+3lfdBIAgEgb2CQBLm9spILUhOxrCSKD755JNWaxlIxlAg4NChQycCAgtqdmHVkJ3c//nPf9rrxx9/3F6d7/cD2dOPw4cPj0477bR2FBHcrjANu08/G3362actmFLtbbe+eb6nP5Xk4jUlCOxVBNp91N2vQ87aOvSJ3qj7bVUOAn1Gx33wwQej999/v+k2Mp111lmjM84444Q+m4bXf//739G///3v0bvvvjv617/+dUI/0h/nnHPO6IILLhidfvrpTUdOqyefLQcB9wN79uGHH44++uijr9gY48+WGTNzYB575p4znxzqZkO1V/P75JNOHp166qmj0884PXNiOcOdVoJAEAgCQSAIBIEFI4Dv4Dg4ME5VcQKcp++341QVJ2j8p+NW+HHxogWLleqCwEIRMJfxebye3zA+v83l8hn4Deb6PMV9o351u5f694661Vf+qPvnIN03fX8N/vRNH5/y1+AD+3n8tXnGKNcuFgFjaK6/9dbbXfzkX23+i8cYP3P8yJEjLXbivjLGKUEgCASBILAYBOhfB3tK7zrWpbABxXtW6SfAx7qAg62CES525plntjUC78k3qRTGb7/9dlsfsM5Q8WDfxVcuuuiiVp811JTVI2CMi4t77d8XNS+Nm2NebmI+mEd4Pi6L9ztXBc8xD4rrmyPaTAkCQSAIHBQEkjB3UEb6834iRYxtGd4iTj4uInj22Wc3w7hKQljDghSQGXmug0FHFKsf48GqCmxUoFB/BDkqOM7Yez+NUFb7/ddGWD7cDB5++NGHXyIs/esW/d4YFRFCWBCXkJVFo5z6loWA+ezeLWfPfb1ORVIax1NwdBUBUXqG88KZfeutt0Zvvvlm0zXue06sRDfHVk4xJ/jVV18dvfzyy6NXXnnlhAOkT1dcccXo2muvbYlzdErKahBwL9TCS90TleDIafWZQt8b/3PPPXd0/vnnt3mw1fiXbVeP+SRp8r333muvZTOr7lNOPmXETp5/wfltbrGXKUEgCASBIBAEgkAQWGcEikdVjABvwnk8MNKPE+BC/YUGnKriBHg/3oNXOV9xglX4AOuMdWRbLQLmujlcvL44Pb9h3Gcwd+vhKH6DOb5Vce+UT8JHV7+6vS+fQh3uD/fOhRde2HwS7ez3e6Wwp2fqYTbY0DN8qtItFYOkT+Dj2Mpf22pc8vlyEDDGxvPPf/7L6LXXXm1z37gaU/fPpZde2mIn3pv/KUEgCASBILAYBIrb4BtsKh6vrMOalzVDOp/utz7Api+7lG/zxhtvjF5//fVmq5wjj/WBSy65pMVy/T0Js+Ixf/nLX0Z///vf21oD7uh6/RNnvv7661t9/p5Uz7L7ftDacy/U/eBewDWLjxtz46jU2v15553XuKbxs2a8VSl/2djjPO+88057lTin7qr/8OFTuzWxM0YXX3zxSBvWx+ZdP99KlnweBIJAEFhnBJIwt86jswuyMYRIliQMyRhfNoqbweMrr7yyBQVWlTDS77Zgdy30V/BboMp7rxUMZ/hbUGPUbZN/ckf6Tu12lTv9tJb4J5DnQCAQ3QoczpsoglQgqbDzWoRlN8mk8dEvCTJFhvUhZKU/S/J+LyFgTruH/vGPfzTyTyetupRj4F6+/PLLR5dddllzGunAZZfmGL3z7uhPf/7T6KWXXmpJb+UQk+vrX//66Lrrrmu6bFrAlvPDIX7hhRdGf/zjHzf14+eJV9/4xjdGd9xxR3OA6MaU1SAgGMSOSY50/POf/2yvnGKf0f2KeWkuCoYYf+PHfk1boDJnzCV1ut+qfk43u1kBEvWrx6LO0aNH22KAebabdk2bKUEgCASBIBAEgkAQ2AkCuEw/uQfv4VeIHeBX3hfnwYsU/EZinLgALiW5peIEFgQsDDjH3w4X2sno5LuLRECsy1yuB6qK1/P3+Az9+Y3XW+QS0/OAlBjSVnPZ/eK+ER90qL8W0sof0R+Lxe6TjY2N5pNow/20n4v+wwceFQuED7yMScURvNIp8L766qsb9vTMfsdnP4y9MeYzP/HEky1+8vrrr7V7yr3EPriPvvOd77Q4svmfEgSCQBAIAotBAL/B5a0PsK1ioTjLKte8yq6z3/wCD5yzBavQ/7DBy8T2jx8/3vCBmbUA8Vt87Gtf+1qzT5MwY+PwxN/85jej559/vj1Uj7/AWZ8khd99992NN3qQeivOuJiRTy3jCPTXB4qL4yb4pzGveWmczU3z8qqrrmrjb55uVSr5rtYdrBOUz9y4/uebzB0+9XDnC5/TrTtc3+YEn2IVyaJb9SefB4EgEAR2C4EkzO0WsmtYLwPI0P7hD39oTxXYdahfGFxBnW9+85styCOxbFpCRv+7i3wv2F0Bb4v7FRCshX7ETgDcNQhFBQn1D7Fz6IuD/AigoLe+CYBLOtA3Wfg+c14wZCtSqN0XX3yxkVSvgvRFWOq79fei8FAfYisYiQhdc801jbCTNyUI7EUE3KcSwThqgs5I+ypL/551H9N/EtI4jfMm1S6iH/SdYMFvf/vb0d/+9rem/2DGQZE4SwfccsstTb5pTpF6JMv97ne/Gz3zzDNNV5Vu/Na3vjW67777WmLgtDoW0Z/U8VUEjKejkq/tAsghZp85rO+//0HnEG/uhlK2RcKcRDbzU8CeTWPjxov5XEEn9bPz6mc/1S3ZnO1yTdXN4eYEszHf/va32xyrz8brz99BIAgEgSAQBIJAEFgVAvhLJcrhNbgTzuvgq1ecwGvFCfo+O7lxarxKnADXx6m89yrZpXbP8rfP+d2TFqFWhcNO2i3fx2u9179wv52gujvfNT7mL3+5eL0dxMtnwOvdE+JFNX7G0gNgkrbwenGk+mxcSt91n7z22mvtIS1180n4DRZpxdz688S9477w8A6fxOsq4oXj/diNv/UbPvQM7PnncPLeOUl0sHMdfPl29AWf6uabbx7deuutbdeXSfgUrl4VddQ41etu9Ct1fhUBY2feP/rooy1+wn829nS/uPENN9wwuv/++5ttkEyQEgSCQBAIAotBwLoeTvP73/++rVOysWzgKnk3m0AG9vvyyy4fffOGbzbbvuzYecWM//SnP7WH4I93CXO4B86Hj+F6Hqi2PoDr8W2GcFMPm/aLX/xi9NRTT7V1TfxOH9k0iXff+973Gm+UGBgOspi5PWstxgfXl8hWfNwr7l8xfGNefNEYWwsQv7czII5inXtSMfbuM7sL4rLuMX4z
rm8++aw4qToOHfLrNuc0H0L9EjKH1h4mtZfzQSAIBIG9jkAS5vb6CM4hPyPJ+f/lL3/ZbTf/57aIjgg5GEcGUPKYwJogD+K1ioAAkiBgYSc8CTUOxhxZEJTSD2ShDuSib9z1B4FwCHIgkhUYr4Q5ZMKBYAqKI8JbJaEhEs8++2xL9JHsg1Rou08mi8DMMSxTL1W/fiIpN954YyPC5NaflCCwFxEwpznD9BDC7l5fdan71r383e9+t+lAzsey9R85JMlJan7iiSeaM2PBz3my0c8b3RNkd955Z3vSmWPb1z99HOH63HPPNYdY8l3pKk60AD6HmP4TBE5ZLgLlsNr5T/BDAjYbZ2HKZw7jVcX4V0BEMOTBBx9sTzgay/HiWvWwn+oXVHGfVZJ51V3zwffNIYs7FtbuvffeZmsmzavx9vJ3EAgCQSAIBIEgEASWhQA+IybgYYBK7sF58ShxAgsOFSPwiu8Upyo+jePw++vAscRBPEgnYQ4n4m97ct+r8/vJ9y5MvMJE0T+xk/C/Zc3k2doxhy2WieHxGfiJ3rsPar4bwxpHtRpDPp4HwPB6D1tNGlfxLQtmHrJSv3vK/cX/5DNov1+3e8b9cNNNN7W4FJ9yFQ+YzYbezq7Sdzjzo/hT8LHIaGG/9EzpFi15z1enQ26//fa2W4sHdIfwqTHzHe0opZdyHzY4lvqfcTC2x44da7FeY25czHcPLJrvYidsw7LjQ0sFIo0FgSAQBJaMADsrdin+zc5K6FHYwlUVNppNPuP0M1qM9Dt3fqclDbEByypkYIfwj1/96lcn1nCdK14mlo/vPfDAA+0hBvaJ3RovbBxOJyn8ySefbHXifzD2Hcl2P/rRj9oaQxLmxtHb/b+NDS5uYwl8nI9rHdyac5/rlyTGzYHf4ye33XZbS3qsz8dfjTXual3IGgG+44Eb9df6QM0p31W3eWBtzGYL/ImhtYfxdvJ3EAgCQWC/IJCEuf0yklv0g/FjEC3MI0m1QI8EVgCNURQs9bQoo8swChA4v5uFbAgcIy4gKECBJCMIjDq5kWiBuyKHZcxL9nH56nPn6xrEUWIcQsj4ezpEwolguJ17JNP5vK4fr1MSgp2aJM05yEPuaqO+V6/j39/O3+rXZ0/vGhNBSTtf7aeg/XZwyXf2LgLmtITTxx57rAX9t5Mw17+/h5CY9x6s+nzPVuSC3BtdYhqdsMxCDsly9Ivt0unA0jHkoIvpLA4RfTBtAaQS5p5++um2y5x69M+CID0i6SoJc8sc3VELbBhjzjAn1e5/gkLGio2z+FJ22OIKe+TwHeMmQOMnYSS1291h3A6wFRxqNvSvf/1rc4YFnySh+0wpO2ixS50VUGHrBUoky5v7KUEgCASBIBAEgkAQWAcEcFh+t5gAbiypp56Qd652wsJ1XFu8vmSfxS/AhyppDv+X6CJGgCuLE4gb4E542iz1Vdvr9gofHLQe1CCffusjLgiHvdy/dcN7J/KYz/wDcTs/xeWBV2Mnoa3P6yVpOSxmFa+X6Pm1r3298fqjR6/4ihjmAb/DvcRn4I9YqBOL40uo37xQJ5+k6jb//c0H5ZPs10U0OgTO9I24hTGQqCheaYHRPcIPK+zLX/NqAVs8lU/FXxtaZIQx/0ysUzvq811xPnFK73MffmXa7tqJ0osPP/xwG2+L1RaR3U+VMCd2koS5XRuCVBwEgsABRYBdFbOUFCYWTv8q+MasZZz3D31vHpuqPtcX37nrrrvahiK41bIKGXAF/OyRRx5payc4SRWf80sk6X//+99v6wP4x3iM2PVsHJv285//vCXM4TRwh7H1UWsMP/zhD5MwV+Au8RUXx71x8eL6uCHfFhc3Rvh48U28pA7jho/bYMU8GC/G3RzCX493iZc2rvDgjbHXrjlkvuCp/fWBmhfWnK695trR0SuPDs6r8fbydxAIAkFgvyCQhLn9MpJb9IOhRK5kk//sZz87EfBhCMeJo53lJGjdc889LUjMOO9WYaARN8FASQOC3wy5BX8kgSFn4MnvWrI6GPXD3Taxhw4famRBPxyucS1i8eknn44++XTzZ1u0ofguciEIhRAIDksSqJ9gRDB9rq7xgrBIcKifOKwEPnW6vsnUYQUvciyi6ItDQNI2u5IYBWqGSPAi2ksdQWC3ETCfPTVjO3D3+bwJc1+6x7v7vH+vlR6oe9i9OUupOlzPGZaQtqod5p5/7vnR7575XdsZbggbC3cSpupJHzIP9dN3BfiHEubsUlY7zC17W/lZxmO/XmPus0XsnEUvtoS9Y0vMQfPWwoqFWuPi1d8Knc8+sVnmJhvm+n5Rj0UXOwtyhjndnG/OsO/7TtWtLk532XeLM2wLe+g1JQgEgSAQBIJAEAgCq0SgOL8EFcklknsk9VhMs7iG84gT8PtxLIUvgB/hPQ7vHeXbq7PFCbrv4GQ4klfn8WnXWTjAkfAhvHuje5DAogQO5rOqa5XYbKft6juOiH+Wn4Fr8iv0Ey/cq/3bDibr/J2a93w5i8i1wGm+mtsWciu5E4+vxS594kdccsmlXdLclYO7iZv37h8LsXac4JNLxlO38TcPJI2qx6u6635yD9QDp/vxQc66T+gZvhR8vIeXz+ADj/KpvFpwVmDjb/rCT1jxveA2XsQV1Q13fqE6jaHFSTrH+yH/frye/L0YBNgP81+cXPzEeLsX2A4Jc37pIwlzi8E6tQSBIBAE+ghYC8TpH3/88cZJ6F9lVi7KLtPhFWv1dxV2tPwAr7MWdfgunnV19yscdtpi05cZJyUDHwf/O9btfsr/wdsLF5/zVcR1f/CDH7Qkfb+eNcQ5ChsJc7/+9a9P8El1VcKcOqw7hn/MOksWc514vZ9fxTWtEeCFtQaOi/PR8HCHsXHOGBs7SXL1YBfuOV7Ug7vKA1C/OYTrmA/qwFHV6cD3zXfn3Sveyw247NLugaoLd38jnXHZ83cQCAJBYJUIJGFulegvqW1EisMvMCoA4KcQEa3KVkcEXVMHkmVBXlCgkkZ2K2BDLk+Gy3iX6S5whCxLlmPcfc6YIwMMN3LAqLcA4dmbwanTz9hc9Pd5EUGk+8MPPmxZ+f9+/98nMuirPv1xPfKhvxLmkEPZ+f0kgv4QVcKcXeYckhO0py7BMURTQAVhheUiinqMk6Akgm48EJl5yP4i5EgdQWBRCJjTFXzmGLjXZymlg9wPgvwcC0fpLXXQD+5FzoT7uL4zS/11jQUjyakcD8HwZRdPFUkotCW9hUF6UCk9w2n5zne+05Ka6YRJfUzC3LJHbuv22B92xEKlHQTZPOPEjrBrbIgxpe/peX+bg8aYDWT3nOPQsgHjY68uAaannnqqOdv+dq+4zj0hwKN+jrX63SvsoOJ+UbdDOylBIAgEgSAQBIJAEFglAji/pKFKlOM/8B3sjsbXx3EqnoEn9eME+FNxGhyn+I7r+fC+X7vTeVUXnuZzvAnP8j2cS/KLWIEdo/yNs+3Fom/8CrEgsQxYwg0nvP/++9tPbOKGiTOsx+hKEpVM5SEzi2j+5jMYH/EmO5140MV7C2Xmq3tAweXFuZwfmq/
8EQl49XCVBTX3hLnvvhHTKp+k6nYPVUzOubq/nNtPhR5wn1TMj29O5zhf/hrcJdBeeMGFo7OPbPprMICRa+BDV8BmCB9jyV+zEE6v+R4/rX5CV3xy3M/bTxivW1/cV0mYW7dRiTxBIAgcBARwcjbRr6yIj1oPVGa1gdYD+Ao227A+gOs6p+BLuJBj3ti+9k8/7fTmA9x0803N5uNbyyr6gHfgCP/3f//Xdh/jD7FX1T8xXlzE2q01jIoTj8voO+pKwtw4Mqv92zhKkMPxPRxTD2eQCi/EMS+7/LKWuGas8W7nzWvc0rx2ztzmv/WLuvm3kuTcWzin+8O9ovD9zB0H/llraFU3f8I5bfAvZr0f+zLkfRAIAkFgryKQhLm9OnJzyF2BYUbSIfjDUCoCOgxuBYmRKAZRsojAqa1dBeOGAj1ziPCVSxlvh5+mQ4hlvCPHCAKjzogjddolXxGBFpw7qyMEZ505OufIOS0YJSB46mmbC//6qg8tWe6D91s/1efQ53pfyXjqRgLsqHfjDTeOEOFW3xjZ0IF+whwcBRXJiDiQD8mQ0Ca4qOjfIop6yChwKXhGvkWPxyLkTB1BYBYEzGf3vCB9bTU9y/dc416jq9x7iL9FBPd83YeIPB3hPnS/bOc+kZBmYYwzPLTAMKus273O4tXfXvzb6DdP/aY5T3QkzOgqDrAn3OwwJ1A/LZiehLntjsDufU8wyE6vnu7ykwO1i6p5yhluC7LXX9dsCMfXfOb4mvcOc8DBefV3v5gjbKhkPPX//aW/jz78aHPhSx3uias3rh5dfc3VbW6zWZxhh1J1e61z/frzPggEgSAQBIJAEAgCy0IAr+F7S1SxC9bx7sE/PIc/b1EM91dwFgsFeA2eXAsHXh3OVyKROvkN/Ah1nIgP/GszVuAhO/Xja+IJ+BlfwIMMdhiyw4QFBr74Xix8KH2zi5IdJvgceB+fyU8x3XHHHa2/4YGrHV3zVBEXs4gmYY7fK35lTprv/EEHfm/em5P8g/J9y1/wWueqV+4di9N2urYrtQe11K1d892ucR4i3eh2HDT36/5RD//DK99iv/oM7hF6gK/mASe+G50BHw8dwUSMlN9WC4y1UAkbuBT+4/6aMVCP5CzjCn8Jeb4P94ceeqjFJbUzPm41fnldPALuiSTMLR7X1BgEgkAQ2AoB3JTNrd2vxL+VIfs5VBf97Ttsde3O5Trn8RfrmQ7rdbPW6ftdBLbxKrtrWePD//kVyyz6YHMRa7h8IX5QrZXi6vpFtttvv71xtklrhepJwtwyR27rtnBB41Lr9Lg4HoKP44TWtXBxMfyrNq5qcw9Hxw0d5nJxTXNhnDPyd/nQVT++aQ74nvvCepJ1cJwW5+z7EdUGOdQdv3Dr8cwVQSAI7C8EkjC3v8ZzsDcIlYCwLY4FxpAsgSClkkIEjZ1jnBlDiRh+mtDPAgjGMcSLLIgBAy5pBvFjxD0tgSgjDg4FIUBKEUGZ75d2Py1xzrnnNAMvAC5g6Joy5EUE9bmC4fom4C5JR2BYOwg1TBRBRslyN3/r5vZkdQUF24e9/4YS5vQBmUCeBSztTnXTjTeNuh70vrnztwKg+lj9nIfo77z11BAEFouAe9I96D7lIM9Sas77jqfgJQUh/f52H/rcvcwRtgMbx3HcaZilnVpsowdW4RjA5u233h49/8Lzo+Pd4iAHmV6j5yQy08f19Ni0BbskzM0y2su7hk1jc4ypxRe7e5i3xpZ93egWpW695dbRdddf15xX58zfmvcl6fjfzpfNdD9YAP3jH/44ev2N19t5NoOttAjKnksI9fesdVe7eQ0CQSAIBIEgEASCwDIQKF6DA4sVVPxCkg/uVJ/zj8UCBPolm4gViG3gx847vMeF8Crfw7sqTnAiVvD+B6M333qzxQrECSwwiEloSxsWE+wud9999zWfn4+wF4s4Dz/jJz/5SeOiFmb0T9znRz/6URLm1mRQzVNzzwOlxe3FsfgA/EFxpzvvvLP5gxa8nJuH1/O9xcTsWGKHM4vU2uT3SgCzmyK/Qd3uLWXI/xg6tyYQ7kgMcQZ4w4fPJkZai4x+kULybP10Kl0Ah3Esxv/uC0QHqf/RRx9tO/zxDcvP95NoFjCTMNdHbPffG5MkzO0+zmkhCASBIDCOAL5TSXN4OY4+T/F93F3yOX/Buh0b7Ly1RAnu9WtS02zzeJuuxa3YeclLXnHmZRecRP/wNQ9RlH/CtxHbbUlV3RoB3oDHDfWRjUvC3LJHbnp7Ne+tC9j9zTq99QJjZb7xa3Hx66+7fnTF0SuaHzs0tkPntMzn40f/4rFfjH7/h9+f4Pr4Jr/PmhJfwrzB/dUzXtf439N7lE+DQBAIAvsHgSTM7Z+xnNgTu9gIyjDEstYFm5El5E/yhaCb5Arn62kOBlogSAKYADFyuMikOSRPMNpuOBLmBOoQQUSZUUZEJa4w5IiCJBiHJ6AFv8niGq9IYRHDCoQjH/roUKdDewIhsPCqz67XjiQ3O+8gnFXnOKDTEubIBUtPdtxyyy3jX93x38aqf+y4wlQQBFaIgPsSgXefcgjmKYLWdJWnviUH+Vs99AZdJqnsgQceaE6xe2Zekl+LavTAvN+dpx+TroWNxSxOsafkHPQUfceZqeThknNSPUmYm4TMas6b5/WEl0COwznzTCCHw3r3XXe3Ldct9s4zd9Vj3ljUcV9wttl985d9M2/uueeetsBz/nnnjw6f+tUd6laDSloNAkEgCASBIBAEgsCXEbBgJi4gRuCQUILXWkSrxB7cif9du0aIE+A7Yhi4VcUJyq/Hq3zX0Y8T4E+ffPzJ6J/v/LPxtOLeb77x5ujjTz5u9XgIRxKRRJmKQ3xZ4r3xV+H605/+tC3O4Ir8CfEWO8x54MhCirhKyuoQMD/5yXwFD4iJl/F98Xo+whWXXzG659572pzk+1YcbFaJLcjZhUXClgVmc17d4n12TTPPzQV1e8jmoBV4wOWJJ55oCYv9+KRd3sX7PChrLObFHpbq4+fb6dHYGgvYi2E81O0w981vfrPpMjorZTkI8KWTMLccrNNKEAgCQaCPAF5OB+M9OLn38xTfsZ5ordMmIdYacRrnJQJZz7S2idsoPpu1uJadx5W9rsIuw8VapF/XefWVV0fvd7+k9dmnnzX/pK0NdD/ZiY/wTybJV/Hi/CTrrCO/+9fxafHxxx57rPlk1qmtbZlz1sDxTAlttZP0pLEdktQ9ZU3JQ2e4ZvnRruU/S7J0X+C0eD4OmhIEgkAQCAJfIJCEuS+w2HfvKihcT1sgj4I/CBcjfPjQ4dFtt9/WjLHMcwTMgUwxmILDjKifH7HovggjWkHqkun55zd3UZKoh9AqyOiRs4+Mjl55tD3ZKolNQBzZZdwFwOct2kVGBN8lLQiEebJWUaenTpASgcFJBHpawpxAs6dWBBdvu+22ecXL9UEgCMyIQNuBrbuHOXt+ysTiTyXMcRQ9DW+XBE9nbyeIPaMYu3qZ/ugXncXhp7/0hR7m0NgtY5KeKsGSMFdIrMerMWVz2GF2zwKwYmHSIu+tt97aEj0t9E
7bOXCoN2wnh1vddkPQjrljzqibLZdEys6xr1vNnaE2ci4IBIEgEASCQBAIAruJQMUucFiLX/iSXbb47hYRFNwGd5LYg9+IE0iU48PjUOIV2+E5/AucWxKZZDmxCoktpxw6pbWjPTvYreqBmkXgjhtKvJIwJxFLf+ElzpKEuUUgvJg6zDt+oIQtD8O4F4yVxTLJcmJO9953b1tEExebd767v/7617+2hDkP2YiPua8qJubhT0lhBzV50k/h1sNN/DWxUfe9xWgPIN19991N5/h7O4UuE3M9duxY02/8NvdhEua2g+ZivmOMkzC3GCxTSxAIAkFgmQiIheI0frbUGgFOgxc5zzeQFOTAbZyflzMtsy9DbZVvhAfq23//898TvOTsI2e3NYJ6OGjo+84lYW4SMqs7zyfj34rfP/30023zGnMW17cZi4cnrC/jhsZ3nmLO8GftSMjnw2v5uYo1M/eDzXEkke5lv3YeTHJtEAgCQWAeBJIwNw9ae+zaIkUCPYI+gs4S4xhPi+Znn3V2C7YJNgvaMKaeyrCwL2iGXLZdb7qgkABxbdO6ExgEACWeCX7bdpbhFpxwnlzI6yUXX9K2nLW4L/nFgr8g0k6f6kA+tCOxANms3fQEAwXaJaJoY1JJwtwkZHI+CCwPAURfoJ8zbNfMoYQ5iz4S5vYq+acL6WEHvVW6UX8c9PNWJQlzWyG03M/ZHvaXQ2w7fQtUiuQ4do7Teu+995746bB5pCu7anc59Rt794V54mfEPEFmgccC2zxPps0jQ64NAkEgCASBIBAEgsBOEMB78RccSezCAhju5GE/nymS4jyoZreIje7n7MUoJK7w52flyEMyFucWJ+Dz18N8eJPFBe1O271hqM51O6dPHhp8+OGHG76wxkP1Lwlz6zNaNQf5uU8++WT7dQT+rziZRbQbb7hxdOd372xJo/Pyej6lZFBxPz6DWJz57t6pnVgsollYdm6vLSwvYhTtyCFZUVyULqIb6Bf48NUkzHnv3HYKvNUrYc44WDBNwtx2kFzcd5IwtzgsU1MQCAJBYJkIiJfzF/gNeM1QwpyHk/EanGkv8hrcTT8d7NXof5s75eFphw5vcrVp/aq14ewwt8yZOb0tPpkHJuwwJ9lTAh2+aUytZdm8xry1i+C0sR1qxXhbE7DuIGFOO3wL89/a91133dWS5bI+MIRezgWBIBAEOhvbGd7O1KbsRwSQKYFQxNFCuoBY/ZyDnYoY3vvuu689nWr71+eee64FhwSlTQtBIAvtstoF52S274RgqlOwT+D72WefbT+niCRUprvFfQlrEuUcjLdkOYlsPpuXJAyNKRkc+th/Ul2QaqugYBLmhhDNuSCwXATch8g/Z3jVCXN0SZVF6KeqaxGvSZhbBIqLq0NSGxv8yCOPtIRxSermD/tmwdfOpBZgasF3npbZMw62+8F9wc6zb+ymhHgOt51i2fGUIBAEgkAQCAJBIAisIwKC+RK6BPh//etft0QhT8grYhB8dfEIP09jIcEDBxJXnPf5IopFhnrAri1KdZWKE9jJC6/aywU/xD9xUbEY/LES5uzOfccddzQeutf7uZfHiOw4PF5vEU3CXD0Iw9cUIzP3jZV7Yd7C9+CPeHjVTu3icmJx5reHZS3OSZiTkLrO82A3fXAL77CBkZ9LpQfoAIml999/f/PXtnrQdtq4WMw/3v3MtEVMr/7m/xnPH/zgB81vs6C5KJ02TZa98NlujnX13xivaoe5ZfSv+nkQXgvPdYvNHQTs08cgsAoErHv6JS1JR5MS5iTLVcLcbtrWddU/bByckjC3ihk63Gb5ZPimtXjrXOYPn9bOb+Yrvo8PzluMtzX+3//+9+3hDO/5tni9NXZ5ADbH4U/HVs6Lbq4PAkHgICCQhLl9PMqCzoLMAm21NXEliXka2+K5RXQL6hLXJNYJoDLUlX0ucCNgxlhLYNtJQJrR9gQlMiBI6xVpk0WPGHhqW9Db77QL1Eno260nubVXZNYUQBK2IgpJmNvHN0u6tmcQWGXCXF9nAKz/9yw6ZJkgLythro/BeP+20qnj1y/z72lyk2PRslfC3M9+9rP2c6yVMOfnj9hiO8x50mu7CXN2Ta2EOXa/Eub8VJnt3NlViXkpQSAIBIEgEASCQBBYNwTwMhxfkoo4gZ8M9bekLqXtjt9xJgF+Cwg4jZ8SFfxfNGcjS58nrhvH3+7YSQiRACTe45cHxGHEWiQCSZjzkGTt6L/dNiZ9r4/n0DWLHsOhNrZ7btmyV8Kchd9KmHMORh6CEZfzoI143rxFX8wBSan8BglzHrCVMGce8EfcXxbr1i1hrj8O/fc1d+p1XkzGr7fwXglzkgvFMCXM0TcS5uza7b6B2XaKRVJt2GHOTnP0HP/PeK4iYa6PZb8/i8KzX+c870uuevXdkqle56lv2rXGeNkJc9Wveu33b/z9NNl3+lm//X5di8a4X/ei3/f74P247ON/L7r91BcEgsDqEFh1wtxe0D9s3DIS5vpYDM2IddbFy5bd2jiOiYtbG/fwChmsuVuD9wCLV7uqz1uMt13lKmHOw2iVMGedHZe1RmCtYJ3HZN5+5/ogEASCwKIQSMLcopBcw3osoL/00kvtCW0/KyAJrrZ4tUAv2CboLDjDOAucCswxrBLoFIEzT28LoEpi225wiOHXhiQBT4z7mViyMeSKgJzMeQZbANDTs57c3G4gqlW64P8EsyQVenJFIF9wEZ6eUEFiJBTCifwpQSAI7A4C7sNl7TBHP5VzycGkQ//7n/+2xTt/f/rZ5s9DcWoEuh0C6habtptcTFfSK9pqbXyeUMyRoSfpRG34e5pzsxsJc2SDB/k4XA5y1nsjXnLqPxwccCE7XTlN5t2ZMZu19sfS4mvJXRgPyU5uePdln0V+OBVW6vVeO+yfRUo/v2OBynl2jj3mEEtqK6w2pf7if+22f91r96Z91/cVfXFf2EnWbhTsdyXM1Q5zbBP7Oqm0+ru6VzlGk2TL+SAQBIJAEAgCQWB/I4AnWTwQJxCTwJXwS1wHR7H7lZ+Zt3ggmUfyit3RZuFlu41c+QrFLas9suF1lYS2HVnVjdPBAv8uPPC18gmGuJvrGk/sqGL3rr23w4Cfm8QVJUypW2wHthJ1br/99iav+sZLye6zoc/Hr9e2+stn0Af8u3i3z4pfe4VT8e7yoarN8bp3+2+yOcjqMK5k748BDMjtKF/HePRln0X+Gqf+q/bsMCdpy/3gvqiEOTEyMTl+w6WXXjoRCm3X0a/bFyTMicXxG/w8ayXM1Q5z7jELafo2XqpPXmFQf49ft9O/+/PHGMC/+eAfb46Dc64hA9yNAX1gDhkDss8yT43zOD52mIMNHdRPmBMXtSuHB5y0pd3xUpjXa33eb0NiloQ5D1Gp39iS33h+//vfb3FQcdEhbOtcYV9/VzvTXkuGmtcNU7h2Bxzcq4q6YSjeUPdkzXWfz9Om69VbutH7ql+f635xnUJG15DRvISNw98KzMnkgbOSbV55WkVj/+n/bibMqb8wpkdg3tcrPlPgARdH4V8Y7bSfNf6Fb40/O
CEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYjA1SRQwNzVbNdqFYEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIbAgXMbYD0MQIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQASuJoEC5q5mu1arCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEdgQKGBuA6SPEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEInA1CRQwdzXbtVpFIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIwIZAAXMbIH2MQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgatJoIC5q9mu1SoCEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAENgQKmNsA6WMEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIXE0CBcxdzXatVhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCKwIVDA3AZIHyMQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCETgahIoYO5qtmu1ikAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIENgQLmNkD6GIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAJXk0ABc1ezXatVBCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCGwIFDC3AdLHCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEbiaBAqYu5rtWq0iEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEYEOggLkNkD5GIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIwNUkUMDc1WzXahWBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACGwIFzG2A9DECEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEriaBAuauZrtWqwhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhHYEChgbgOkjxGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCJwNQkUMHc127VaRSACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiMCGQAFzGyB9jEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIGrSaCAuavZrtUqAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABDYECpjbAOljBCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCFxNAgXMXc12rVYRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQisCFQwNwGSB8jEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhE4GoSKGDuarZrtYpABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBDYEC5jZA+hiBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACV5NAAXNXs12rVQQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhsCBQwtwHSxwhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhG4mgQKmLua7VqtIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRGBDoIC5DZA+RiACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiMDVJFDA3NVs12oVgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhsCBcxtgPQxAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABK4mgQLmrma7VqsIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIR2BAoYG4DpI8RiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQicDUJFDB3Ndu1WkUgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYjAhkABcxsgfYxABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBq0mggLmr2a7VKgIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQ2BAqY2wDpYwQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhcTQIFzF3Ndq1WEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIrAhUMDcBkgfIxCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIROBqEihg7mq2a7WKQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQ2BAuY2QPoYgQhEIAIRiEAEIhCBCEQgAhGIQAQiEIEIRCACEYhABCIQgQhEIAIRiEAEIhCBCEQgAleTQAFzV7Ndq1UEIhCBCEQgAhGIQAQicAKB3/72t0cOaV6f9KQnHTmkeV0+9E8EIhCBCETgLiJwSMepwlrX3UVVStQIRC
ACEYhABCIQgQhE4BgCswYYH9ecOvZ/fq4h0msEIhCBCETgBoEC5uoJEYhABCIQgQhEIAIRiMA9R4Dz8Ne//vXRr371qyeC5UDgPHzKU56yHE9+8pPvOS5VOAIRiEAE7n4Cs1FGx/3mN7/5PT1Ht9FzT33KU3dK7+6vazWIQAQiEIEIRCACEYhABG7cCMr25+tyrJM1wFOf+tQjrwXNrcn0PgIRiEAE7nUCBczdxT1gHKAMn+3dAmep1vrOgjGUtq9nye88555Ul3HojlznKeOirxnmXhmh62PqM+coeziry9RnbZyet27r8tflXXR9t/lt67P9vc8RuNUEpr+vx8CMQ9/NMXJs++yMxfn+vGNw8u81AreKgL5M1+vf+zY9lTv9+WlPe9qyAXra/jzjZBwpY1P4fp2e/OTdpupTn3Ik/xkz69+v6vvT8pngKo4n70t3PgFt+8tf/vLopz/96dEPf/jDJ+w4kj/96U8/etaznnX0nOc8Z3m982uThBGIQATuHgKjW8duv5MkHxtn7KrbJRs2bLJf/OIXi37CjEzsjLE1TrL15PHzn/980XE/+9nPFhtSfeRDz9FxDzzwwD1l192u9jypXO07bT42v8/rpL213dj63p82yUsZ1hFzk4DP6yQ/NuzTnvq0pZwnPblIyjWf3kcgAhGIQAQicPMExuYZ2+Tmc7y4HNhCY2+dZGdfXKm/n9PaXuOvksjC/mejPWXnlz1JNrYk25+v68c//vHvFcDP9eCDDy5rATZl6fYSmPGgzebY2ujae7nR6XfrwJPaf2okH8fk63W+m3O86vfrNeb6t95HIAIRuJcIFDB3F7f2ONQ4QSm886ZRuhTv1jD022mV8HnLd526cNyNQ9jndbkcuuMYXH9/M2We99oxWrxOG5CdEesYJ6Tfxggh8zggGSDq4xhjxO9znFWuYacfTJm3mpF6TX2e8YxnnCk446z16/wI7CMwY2vGoDnQ+DOHeG8crhcCMwZnzM04XBab5r6n3Ahk3VdW30XgdhLQ16d/m+c5PUbXrOWiI+mV++677+iZz3zmE/p8fc72/Yyj0b/yVobPxtYk8738J4Bo7IX5/Sq/Yu8wt+Dj8Hn4jL6lC3F/9rOfvbTDfH+V2dztddOO2vPb3/720X/8x38s48p32k47Pve5zz16yUtecvT85z//bq9q8kcgAhG4owiwP9gaY7vfSTpz1rizVr9d4MbusMnFNsOMTGwNgW7sDrIel/D90Y9+tOg4r48//vhyunWQPF784hcfveIVr2gtfxzES/htbH02iLZ+7LHHd+Pjhl9J8X6XtBt7XNvpB+zx04ydGW/G3GOPPXbDlv3VDVt2t4225C0f+S/9677n3PC97TZkSxGIQAQiEIEIROAiCbB32KiO8b9cZP43k9fYWl5PsrNvppxD17LZ+Bof39mCP33sp0vAm+/Iwtd737PvW/yyi2zH3NdgHeGmUL6ub3zjG08Ux9573vOet9j/bprh4y3dPgLadsaDNYBj9rX8Jmkz7c1G1158lT6fZg0we2bj65f/2t8/eVhfyH/2E24fkUqOQAQicHsJFDB3e/nfVOk3nGmPHX3rW99a7hYYRXrWTClZDlcOWAqSUejw3Xw/m+OLQXbWAk44n/JWlx/84AdH3/3udxfDgPKWKG4HY87G5f3337/IeUKWt+RnfMk6zv3ZOPfK+cyZzQHpve/GwJl6YIvnBBwwTBk5DkbJwvvpO8f3LnBn6n1SRcikLEbwd77znYUjQ2vKPOn68/yuTMcEZthM1i76SCkCt5LAjMGZM8wbMw6Nu5/85CfLMd97NR5cZ0yZ14w1Y8441G+fs9sQeMYzn3H07GftxuJ9z17OmfnONaUI3C4C09/pFX2bjvz+979/9D//8z9PbHaRzXmShe1DDz109KpXvWrZANXHj9PZsyiXv01UeuS///u/l1fjafSwvI2J+59z/9FLX/bSo9e85jVLWXTAVU7DZ+7IxAd7jMwtw8c8gTMb5UUvetHRH//xHy8BVr5rDrmze8gEEnzlK185+sxnPrPoE99pu3EivuENb1ja9M6uSdJFIAIRuLsImGutnelV63/68k7RmdbkApJe8IIXLHbVZZO1znF873vfO/rmN7959PWvf32x/9gd/AeCuK9du7bYHOQ8ZOuxD9krNso+97nPLXmxZSR1VL/Xve51R29729sWH9ChfC67/vdSeWPrs7vH3mTra3uffe+csfWtY61f2eKCHb0/rt30I2PNetl4Y+/zGSljbP3Jm61vjcyWvfbH144eeu5Dyzi4l9qjukYgAhGIQAQicIsJ7NyXP/7JjTUAu/Sxnz622/y7sf93i0s+VfZ8qnx7nsDGXr7sZC+RDfif//mfR9/4r28cfeOb31jWBfYUX/aylx29/GUvP3rFH71i2dOwx3Eo8fNaR/zzP//z0Re/+MUn9kWst/gs3/rWty62pPqWLp8A+9vaTnuz0Y0F9rnD51/8fPcwiN/cuJlXm2l/a7eXv/zlyzrQmuC4tfOsAfQD+fFjW3fbV7Au2PqzrSnGn/3Sl7702Lwvn1YlRiACEbg8AgXMXR7rCy/JBjpl9+Uvf3lxfE1gyFkL4hxbgrV2hiAF7KB4OWApTI5Zn53jN06545TyWcvnxGMYeLrHv/7rv/6B4laWO5//6I/+aDEOyXWZaXFS/mZnyPz6V4tsjA3GK5k5HX3WFgwQ3/vMATkBc65Xh7kjXBADpg518coQf/CBBxfez3zWM5dzcT7OAYqBNleuu0X+5V/+ZZFDuRfZPlvW6sPwEpzBYf+mN71pMbIZ6rey3K0cfb53CCxjcLWYsPlj/Dn0f+PP2JtxyPg3Br3OvKhvmr+MP5sB+u+MQZ+f+9Bzj57/gucv3zvHeJ3AuatCejiqz8xL3mPT2EXizknah27U121s2ey0oWwDbTbR9G/JuZL+bIH7rne9awmao1f04W2a842R2TibvL2uN9HmWvM7R8prX/vao/e85z2LA8m4uaqJjsPH/IK/g4MBe4y2AXPGj+Bxjicbz+wV882+cYX/tIHXOeckfX9VWd/OerGX2NECCT72sY8tesS40xY2om1Iv/Od7zwSNFeKQAQiEIGLI8CGcdOfYLCvfuJ1EDgAAEAASURBVPWrS8ajDy+ulPPlNBsGr371q5dNifPlcv6rbGCwM/hFBHRb47M/fM+2o5/e/OY3L7aejTP2xr7ExmDL8LF84hOfOLp+/fqSj3OtdVz79re//eiDH/zgE+ueffn03a0hoH2mrce+nFd2p3Wt9nOeQ9JuAvrf/e53LzYKX8yhzVLX6EdsWflO3tYUbFo+K+VP3tYM+v61XTCmTVT9oyfs3pq2L9cIRCACEYjAvUrgt7v9te989zuLXfrNb3zz6H++/z9P+MRuNxNrEfuP/+t//a/bsv+o/uOjFeT2ta99bbHj+SftUfAz8lG95S1vWexB+xqHEvvv69e/fvSZz37m6O/+7u8We0/9HPxb73//+5d6CsIqXS4B7elg6wtgG38/+58/3l7X7ClPm2l/ffP1r3/94qM87
qYpeQu4++GPfrjY//K17ubLVp71hTWAJH/+T+sL/d7a0A1VU+7lkqm0CEQgArefQAFzt78Nzi0BJfpf//VfR//3//7fxdAchxelJq2da4cKmXMpR4fPnG6ccYymF77whcvhvTssKGSBc4ccc4fKOe57zjp3TnzpS186+od/+IcnjALXkIdcr3zlKxeFzSgki+8uIw1DzkZyjrPR6xg0HP5jyAjOcYzxM9dPPcjNGTkHlphyfDtE8T//ec8/evChB5cgRZyPq6s2JwtnOgNYnyCP8hzSOEEvitfUTfAER+qHPvShpX3UZcq8qLLKJwIIGFOCGCZQVT93p5SDw98iw+/r8Tfj8KQxaJPpmc945tGLXvyiZXHwohe+aAmc078FBFmUXIU0HIaTcTzz0EnzzFWo/91UB22lfTg49O9/+qd/WuZ4gT3GAH3jd+eNfvCes8Qil+ODA8T7fbratcYHHUb32og1luQvb3rFOdLkr68YDxbnH/7whxe7gO66ikndjRPOhNnIt9lsnqFf/Ya3NDrPZ/YSB8PDDz/8hE4cfmtOM0+NzeYcfLXVWnevr+n9rSGgvxtjAub+8i//8g8C5gSI/umf/mkBc7cGf7lGIAL3MAE61SbQF77whWX9T//t05m3AxH76douaEjANLvnspNNDJsZ/CKf//znF3vEdxJbgb1HP7lpzaYGv83YI2tZ2Sau+/d///ejj3/840f/9m//tvgNnOMaTyh4xzvesazl50ah9fW9v3UEtA170ziwrmWLGw9sT/Y/3xN70XnTtl61v6BJtr72d7PGPlvfdexMNo4bK/mK+A1tmLF9xpZd52/8uUmW302/MAbkX4pABCIQgQhEIAIXQYDd4eBnY+OyT9gpbJCxdy6inJvJ441vfONyY4qgIfugl5WGDXuNTfj3f//3i+02awCM+GTdpMtHdZKdxp68vrtZxnrik5/85BM2Jc78xW6Y4b/kxyxdLgF2OFufP95YcJOUNcB6b2ts9BkX7H17xuz/D3zgA8sN8/zI2+Q6tr71hbyt//QneW9vlpG3wzX2wATk8Wfbe5f3lL0to88RiEAErjKBAubu4tblALOJ+3/+z/9Z7symcKVRaGuDc77bVpdSlLzO4VxBJPN0OUrTU2smqGsC5yjrfcp5W8ZxnzkCOYQFBDz66KNH//iP//iEc9B1IzeHrru83/ve9y4K/FYHZ2Exm+Y2yBkaAtMYM4wYn+fpVrg7l6zbQx3WjOfz5I8fBzXGuLqLV105J70XkMBxqS0d28QRyvHpKYN//dd/vci4DZgbmbbXnvcz2bUbed3d8tGPfnS5w+VWt8l55e26u5eAcaWPCxKau6yMP33eeDQOGf0WGvvGoJrPHOK9vrs9fG++83SuCRJ+8Yt2wau7Pz3ps7Hpd/PdOi/X3U3JmDU3mG8tkjCz4WaOUXfz0N1cv7upLU6SVZ/XRu4As7DlROLo4CjRjnSBPqnNHKMbbH6alz1hzl2HNtS2m2j6v3w4TyzKPdXFAtpY8r286CX5O2Ze9738/blXeph+Ou5uxpPqeKf+jo+xIXgQc7qVw8q8MzYWpsN+5gXX2WAXRG6DneNp3TZTX/OUtsXfvCZP57G3tB32055zTa+3jsAEzH32s5/9g4A5dlgBc7eOfTlHIAL3LgE6c+wQ86+bvnar6KMnPfnGzV5nISMvaV69H3t2Xn13lmTjiL3jibqe5HbZCRtrnb/9279dArrZC2xD9XGwz2yW2cywYcKW32c7YMK2s1lyKGCOzeLmN7bIzfp1LpvT3Vye9tQ2bH12OJt8bs5gG047j70/9qY20t6eMMdG0Ve3tj4ubFn9hp1vLeGwdta39BXXTN5eJd+z+/l39C2vbaIuaPonAhGIQAQiEIELIMA2dbBLBHL5c6H22Z78pN1+1xmXAWvbf96P7T+vZxHZWsT/f/Inf7I8aVfg3GU+aVcd+AvZbvNAETc5T1InduD4HMn3yle+cn7+g1c2HxvzM5/5zLKmkL88HBMwZz2RrfcH6G7pF9rY/hYf81f+5StHX/3aVxefs/ayPtDG7HRrMzb6eg1gb14gp4BJPuTt+k8b2zNg81tX2Gt345T1xq9/tXu4y29/88QagM2vLAeZ7B/wgc5f2JD3ecbRLYVX5hGIQAQugUABc5cA+VYVMQFznorBoOJcGwNolB4FOwbRPjkoxfVBOc/nycv1guUYZYLWOM8oUXc2ULA3kzjzOITdOWFjmtGsvLXSJwfFrXx3QEwgwPqcm5Fh37UY4CkYx8Y5Q4OzmXy422Rdc533w93r2rhQh+HKeJnD95LXcVxeu3ZtMXoZvowhm+ja0e/bpL3WAXNk43wdhmQYmbbXnvczWckvkEhw30c+8pFlU2ECK86bb9dFYEvAONOf9XF3nVnsWUwz9v2mL+rrk9b9Xb+fceDV+HO+V+NGH54x6Xq/Ocw1FozmOmNR4IuFyASursubcu/0V/XCS7CceWw2TNTTXO4uokObbXd63a6afNqKXtRGnEecG/q7ttOnZ3NLsJp+6XX0vIA2wY+ehkJf0tGuWSd9nk4TBCZvZVisGxPGj7zM5fqDYxbh8jEG5OuONuXcrP5fy3WnvDcvcFTYuPQnEATxm4N8P3xwxsWBAd28njtsYLKZxrExdXOOfLTtPOVD3jjTpXT+IV0/efR6sQTMi+ymAuYulmu5RSACETiOAH1ojU3XesLnpz71qRs2u82y06Sd6S8Px9j368vY6us1wPLbjSX3+rSD790w88pXvXIJSmLzXHZi87ET/uZv/maxQ8Y/o07qzL5gvwtqeuSRR5a1y9beI7Nz2RkFzF12Cx5fnnZh67M/bGSxx22IuhlTG2tf9v7c2MTenHWodva9jU7rN/b4vraXl/WzzWhleM+3Nbas/MbW96pc+bBr2bACRo0D6+JSBCIQgQhEIAIRuAgCbCDHBHLxR7J5BcyxRU5MqzXA5OV1neTD3vG65Pn7P69P/f33u7yd72YZf5p+/mrH75906z6pB3+hvVFP4LZGYiuyCyeRzz7hyCho7lAqYO4Qmdv3/bSxPv/P//TPR49+8dFlLMz+Flucjc6Xzz5n8/M/a3f9QACnPXlBc7M2WNdG/7HG8ORq/chTHK0rrSVnfTFrgPHpW3OQ6/7n3L/shb3mta9ZylCmoxSBCETgXiNQwNxd3OIMJ4Fc24A5hiGlKtDK5jZleEjJUaQUMwfa4489fvSTn954RCvnqu84oaVRqBxnNnY50bwy1Cj0Q/mfhJfiVof5s7LqJC91kChth/I9vcWTcxit7oKg1M9b7nFyqbMnMc2m9vXdU2Y4GUX/CyzAzDlkVHdyOMg4Rg2DZr2R7nzX4epuX0+YcWDsYNSopzw5Raftrl27tgTu+MxY2iZGD9kYQp4wh58ycGEMkcm1jKphus3jrJ/JqT7k0R/cfcNpf6va46zydf7dT0D/Mi70ZwFy5ggLCmPSGDQ+nSPp53MYf/c9+76jZ9/37GVRIfhEvzRO5Weuc60xaKPOe+PSODIGvbrG+BUQav4UxOLwXl4XNY4us5WM
WYtlDG3KCD4096qXAGQbbuYJnEq3l4A+aJPLnG6DiwNJf9V/Zz6ne8292kyf12fN+V5HV7IBtnPyjCuB3xbONqr9KVZ5a3s6yyPezef0rQW0eV6fl7/86CdlK+sq9hfziwBFAVT4GDPmBvXHQpCpIFrsJ2BuOJg3xvbSLtu5An9zkBscOMDMbdoWT3aNuwTlgW3pcghoD3qmgLnL4V0pEYhABIaAdTD7np3z6Bce3e1Q7f7f6drTJjqVHc++NZeznyS6l16mT+ntWQewhU+bnvvQc49e/oqXL5tRfB6XnaxR+B0EEgrcZ5uoo6R+bL1rOx+BzTJP/R1bbSunOhcwt6Vy+z+zK9e2Ppt/fG/6KxuTLc4+5MPRvmx0be9gJ/qNHer9etyMn0aQpHwd7M3xD8nfOmLyZ+uzXeUhb2tq5ZEhm/T295UkiEAEIhCBCFw1Amx4fjb+zq9f//qyp7XYMqdYBuxCeJanZPFh8qU52FXylKwB2DrsG3YM28Y1u53FEzE6z9Ou3TzPT85ffpk3DowNJ5jQ+shND3yT6zUOu88NE29/+9uXG6XtjR5KBcwdInP7vteW1nna1zqPP94N7fovW59tb++Jna7vsdH147HTfda32en6+noNoFbylp99H2sA/cd6Wd/i43/B819w9OKXvHh5L//Zt5bPM57+jGUvbXzd27xvH7VKjkAEInC5BAqYu1zeF1qaTb5DAXOca/OnOihcRuI2UZgU588evxE0wpj6wQ9/sARScMxOYApDdIxPytRdp4xHdzJ45bTlXDtrUr7ADX9uTsAcQ0HwirxmA1q5681qm8qcw4K0GA1z3lnLPnT+MOHEFFRgY1tkvs8cjRKWDoaqunMqMlq8MjgcmJOPoS6pg40Bxou8hq/PDHx5c4Q7jwyum0ftvuMd71iMJUbLNjG2DgXMkY8sHOqc/Vjt6wfbPE/6TD7tMnXndGWw7TPWTsqr3yOwJaB/mXOMk3H2e4S0O+/NV/qefqy/mSsEpiyBcr9bODz4wINHDzx4I9BnxqDzjC/XG2/y9icoZ4E9AazGofzJoAxj0NiZxagxddFzzrb+t+Kz+tAXAoD8SSY81d8cbi71J6cEAp1nHr8V8t7Leeqj+rqnilhEcyRpPzrBfC5Yiw7UNy14R/9azOqbc+xb3NIX+rrNVwHWdAe9r6/TYfLXJziI2BCjx0ZvTN534xg4bZ9ih9D7n/zkJ5cNRrwW58HvbB/zAf70MZ2vXfBZ85/P2zLpd3OMYDn82W/mIM4Qj71/ZPeUGHmys0qXQ4BeKGDuclhXSgQiEIE1AWt+QWGCeWyW7faoTpXoW3bRr375q8Vn8V/f+K8nboKRAR3MXhdQxGfBvqFXXeM4TaKLXc+XQkdfdmIL8hWw1x599NHFR8JnIKkL+8zThPlFPAGPLbjP7lNfdkdPmLvsFjy+PLaHDSx/cpetzx6UrMP0V0+O0LZuYtEPrXPX9qY+7lyv28TWtI5ma/pTx/K2cSaN78aNUvPnXPWl6T/yc7DzWxNuyfY5AhGIQAQiEIGLIMA+nf3M5Qm7P/jhmdYB/JpsZbaUg90ze2nWAGNL8ZeyZ2btcJLsznOwvyZoiQ12WWnWKphc3z24w02dfPd8thL7zN6bQD7+Q4Fz/PiHUgFzh8jcvu/1W+tfTw+0N8PfbF2gbT20Qd+zxtPG44+39pu+Ofb/vjWAWslbn+HPttduHahfyV+eDj5t6wvr3dnjkr9zZi1wKP/bR66SIxCBCFwegQLmLo/1hZc0Bub6CXOCPSi5UbLvf//7F6W7T9lRmgxNxuUYnJQ1By0l666GxXjdBZeM8UmJMj5tpntEsYOBdlYjUtlkdUeJP3s2jxr2nfw57iSGA5mcT5EzCG1WC/CwuX+RG8sj0zjvPYGHoeGpVvj4fYwIsjC+1Z0hjQe5cXCQfxyb6uFadZmnzKmTJ/oJWMD4W9/+1hLEY/Mca9cKiJinzjBmGMbbRK5DAXOChcinjRjTY/xs8zjPZ/WRHzknKGlfHztP3l1zbxMwH5mDBKwy8M0RFnoTzIaOvqffWUCY64wVfd0jpOfpcuYG/XP6vXFlvMwYNNZmvlvG4O8W2353nv4sX0HBAuY88noWJ3dbC5lXzWt4fuITn1iCsGy8WYjZNHn3u9+9LJjaHLn9LTtPXf3Yxz626GB933yrv1vc0gmCHOkDfVz/ppckr3Psq4m+PXebWZzrA/TS9PVru+Bq+kLAHB2mP8h/8p7XKW9fGXfzdzjbuPzSl760bFIbM+Yj+h7v2Zz2qj2G/fAY9vN5y8K8Yt6xAW4cmne0iQ19gfH+vDk74iLtmq0Mff59Avp/AXO/z6RPEYhABC6DALucbc8W+fGPdrbO7r9D+nMrD7uW/nRTAR8CfT2bSWwXa3GbAWymWaNv8zjus/UDO4juP6uP47h8T/sbNvSTQDfrITfv0VXsCP4P6xOBcuw2PoJD9ju7poC501K/vPP0eYFsbH3tyzbX99mWAjT1W39VYfofO338LDNGvM77teT6jXW0J0u4IdW6YjbL3Oxh3cyPNrbsrJUnj8l3X95zTq8RiEAEIhCBCETgZgjMGoBPnk0vndb2cD67n9/OIS9+O3Yvf5q9Ontg7GS+vLGhTiuvPOY4ZGOfNq/znMdnyO73hDD7stY71j78hB5YwV/LVhRgNXun+8rBaP70rZs08Bk7z/Uf/OAHl5uD7KWULoeANcD13wVDahNrPkk/dcMMX7/21c763nYNMO23T1rtK2977Paz3ZRm7Sgffcfawn66fXXBcr6f/Gbszed9+fddBCIQgXuFQAFzd3FLHxcwR7lShB/+8IefeFTrvqoyuuagSBmZjE1PH2JYUbbzFBSR8BQwpcoom6cTuUOVM3oU7L5ytt8pi0Pv7//+7xdFzlk4gQE2jxl+DAPfcYI732eGLwffww8/vGwycxpfVCKPunu6jMASG+eC5dRb3ZTPyOC8VF8OTU5H8k7wHqejYwybNZMJ2BnOWHOQqp+n2rgzRvAbOeTBiBGkI6BFmfsMYXkdCphj4JNNMMz73ve+ZXN/gh9uhpk+II0hhcu6njeTd9feuwT0K4cxYTxY+JqDxsg3T+lrFhI2FPRt428C5swZvp8xqK9P39Q/5S0P49DYcxhrFujz55fnyWsWqK4XDDtP0jQeJ7+7rZXUG1Nzms0Tc425zlxKT/hTkMdtuN1t9b2b5dU2ngQooErf10f1RTrmbW9727JBSi/QB/rjWZJ+rR94tL8nnBkD+oYxw5kkf33ibu7rZ+GxPnfmBxuMAtrYADYZ8aFLbU4bJ9jjQ8efNeEtKOCzn/vscjehuY4Ol7eAOfaasgqYOyvZ859vfBUwd35+XRmBCETgvAToXTb5rI99Pm1yLjvWOsETeT2xns72PZvGJoCAMptldKwbbM6S2FdsL7re62Un9XDwx/ATWJ+oH1bqwh/iicP8EezBQ+vw4dQT5i67BY8vj93B9yVgbvxs+hx/z/h+2OTa9qz9z7rWDRlsfRtx/FjsT/lb17oRzIaZzTh5H+o7x9egXyMQgQh
EIAIRiMD5CcwaYF7PkpOAOXYxW+fTn/704tP3HbuXLcX2/9CHPrQElfGtndXWYf/PGuCs156lHofO5SO0zpl9Qj5ifkl7IfYhZy9S3Y7zCRcwd4jw7fleG/prR/a6BLTxPUv6mrUrX7y9W/b6vr8wdpzU+r78BVnaZ7evba/Yd/bJ7JcJkHzrn7z16HnPf97Sl47Lr98iEIEI3MsECpi7i1v/uIA5CnYC5jhVT+Nso2AlrzYRKVcb9zaPGWg/+P7OEb37bxJHLaeeu1RFwjPUTmtMct6JrOcolL+6jIErAM+GNJk5iSl6v1H0NpM9ZcfGtVfBMheR1JkxKXBm/ta7sidYTr044BkZ6qreghIF7XBmMnDWdV+/38q35swQ5ti0cc6ZbZOeYawsxpIgBpy3+U+eJwXMMaTf+973Hn3gAx9YeB5nTE+evUbgdhAwvi2WjXeBXe6K4fA3HiR91zELYHdEGYfmN/OCMbMed+v32/qsx6ByZ76zIWUcmgcsRv0pJoEsxr0y7takjupm8WRj0dxu7sCvgLk7q1Vt/monj1DX/7UdHeOuP0+M1e/N63TCWdLoOPnTcfKXjBOOlrEX5K2/32sJZ2PC+OB4E6zIRsHNhjQ9zMHA7uDMOI8u/cXPf7FsfuP/sY9/bLE5lFvA3O3rbQXM3T72lRyBCEQAgbHJz0LDNdYH/BR8CXS24DLfs9/paU/MddMYu8ba4TzpuLXEefI76zXWRYKd1JW+Uj/rHXXc53/Y5j+cCpjbkrl9n7WJDVBPDXRzjPfamN+LvcmvJtjTExJP47/b1sRGHFvfn/Fib8p7GRdPe/rRq1/z6idujuErLEUgAhGIQAQiEIHbSYCNctZkn47d70/PuznAjcGzZ8iXyfb/sz/7s8XHyWY+j+/udq8B+AnZcOplHYATu1Dwkzpt9z/2MSxgbh+V2/cdf/PcrPvoo48ugXP6pjWdNYCbZR555JFlHauNz5LGn+3J6x6SYH0xa2Nj4qUveenRRx75yPJAlvOOibPI07kRiEAE7mYCBczdxa132oA5keln3VznoGWUefQvh54AFs5WBpvfGGs28G0iu3ODU4+SP60hSnFT4Dan5xHK8pX8+UNBc/JTPmcfA1jZ6sHB56lPnrzmPIbszRizDAtO6HVdBZeoP5nkPRH57sq1Ya7enO/+TAuj9bT13nY3ZQ9rDk6BOmSRH77azp3kh/I/TcAcx6uAOewO5bOVq88RuGwCE0TLwS9YzngUsGJ86LcWhILjBKqabwSuCpgV7OO38/Ztc5lxKHDVn7ARvDpPmLOwMN8o46xz6GXzO64884QnGeDKqUB3qHMBc8dRuz2/+RPEFs/0nqBtycavvm4e12YW02ft7/q5uzDpc2NMUBjdRn/RYxbnnEp0zt0cHHreVjNGOKM8eQ97NwmYEySB8eac+ZMFHAznsTnMcdoUe38Sl40hHwFz7J6eMHfe1jv/dQXMnZ9dV0YgAhG4XQTYsHToaQPmLvKJ9JdZZ7abg42izt6z/9hujpNsEefjVMDcZbba8WVpE34m9r4NrXn6g7Us3w+/mpsmrT/Pausr2TpW3p5c8YUvfGHpO/rJs575rKPXv+H1y02nbghTVikCEYhABCIQgQjcbQTYthMw50Zg+waHAuZOegrbnVx3tr/9EMek2dc7jY1YwNxQuzNe9VF2vyfAuUneOlY72m9m949P2F64dd5Zkj4if75sPm3laH/rDn9NyB72hz/04aM3vumNS5knrSHPUnbnRiACEbhqBAqYu4tb9DQBc5xugkzOG+xhg13gCoUu2EIgiQ1GhptALnduPLKLgOfYE0B2GqVOYctTEB5lLpBDfhQ2Y8FTdPzddkEC7pCl7BnDs3mtPhx97hp39/jNBMtofoF4gtXIYyPbn3XhbCTPHAwMT73ztKlru6e+CVg4TV3P0r0w4BAfY1i9TnKGO58hxNjCSZ+YTXhBDzb61wFzFy3zWerXuRE4RMCcYAwKUrHgFTBkrtG/zQnGgv4sWFWQrKdsCewR6HNRiQyO9Rg0Xs56Z89FyXNR+UydzG+4msvN676/rIA5ZR1Kd/pC7TjZ1emi5be5ZZPLQQ8pX+Am/SNgy59QsvF7GgfJmjn9Qo8KUPdUV0FzM7bocrrtf//v/73oXU6lOyldRhuwa+hOT/uYYEUBbhL2guXxn6ffnocPG8YcZwwKmGN7mGMmYO4jH/nIMqddBv/LYHoeRnPNPvkueqwp63YEzO2r29T7VtRx8l6/HieD8y5LjrVMF/n+UP3u9npdJKPyisDdTIBNQ6dOwBy/AhvH2Ge38yHwJfAVuOHgVgTMHZpncL1T5hoyXlbA3N3A41Cfv0zZlcW/xR5346ibJSX9ll/tox/96LLW5Xs6q60vH/lZSzgEzs1a2pjw5Dp/ecCmnHV0KQIRiEAEIhCBCNxtBNi2fKWf+tSnlv2DdcAcm3/9hLmzPNjjtBwu0248rUz7zrusgLm7hcc+Rpcpu37LH+whBvZn3EBjzWivS0CbgDk+5/PcoM3etza2pz17w/bUJPvC9tLkbe++FIEIRCACxxMoYO54Pnf0r5cRMCdCnfHpMcc2kZVJCTMqGJ4Cx2zycsBx7J0UmOc6AWGecvPpT396CZZjMHB8MwoEwAjyYyhw7F2/fn3ZwJ4gNtczJpT18MMPL3fJ+uza8yb1U84EEjAqGTIMF4E6Nq8FlgjSEayj7PMYMCfJp25zOJeT9CSHO6OogLmTyPb7nUxg5gSbXeYFwTz+LKs5wW/GgEWvDS/zgic9CTAxBi86AHTGn1dJ2SdtVsy58zqsXXvS+J1zD71OnvO6Pu+0+btWYI6AZ0FYHtHtSXqu3wbMXTRP8ip/2tL7qcvIP/PczbJas7mI91u5py5eR3avI/9FlCmP4wLmPIFMwNxxTx09JIf6cCqNntsGzL3rXe9aAuZuVp8eKv88309f0X/WfUhe0wbD/6RxelL5AqcEyAmY42RY/0l2QXJ0PweDJ1yetywOi//4j/94wolBf7Mx5k+yTsDczdgzJ9XT79u+PZyH6UX168l3Xif/42R0rmPd5nOd+WneH5fHWX67zIC5dd3mvVdpmOtb5+1fp6338N0nw/Alg/e3Kw2XeSXHyHaSTNN3Zs6Ya4ftafM5qZx+j0AEbh8B4/t2BczN3DlzzHxGw/xy0XPNOv8hftp5zLW3OmBu5BseZPTdyLh9nTrcCa+HZCfbWu6L1InKFDDnZkdPmJuAOf6mdcDceZ4mTe59AXPsJ/b9m9/85iVgzpq6gDm0ShGIQAQiEIEI3G0EbmfAnP3MsR/nFT9246wBvF5EWue/zm9s1PV3+97f6oC5kW/WAD5L5JvXsaHnu+WHO+Cfkd3ryD8yz+tFy87f7C+teUiEva91wJyHwkzAHB/xWXnxLRsXbs7eBsxZX9hH83CaAubugM6XCBGIwB1PoIC5O76JDgt4GQFzDAeKl0IX4CaoRYCZ7ylxgSw28f15VM433x2XRokLCpg/Q+HJUowUAXKcgwLmPDnOY2n/8z//c1H4X/nKV5b3Uy6nny
enuXPcn5Tw+TyJseuO9H/83D8efenLX1qi/MmoHAbKyEQeTkaR+Rf5VKvzyLy+hqwFzK2J9P5uI2AMMuw9JWKeOPntb3/7iQUnJ78/v+oJTxYQ7oo3N5x1AXEruJi3jME5fJYsrMgt8GUWWWctf/IW7DaLcnmo9+S9L0DZdevDXCYoRLCcJ8wJnDOHy0cQkD9vLVjquI0Z56rHaeqibPIq13vyOzCaesjPQX7HcJoyTlPOWXmedD5ZyTzHtCnZ5zvnSOQb2ek87XEe2decRj7to50cv/eEuZe/4ugDH/zA0ate+aqj+55z31LeXLN+xZU80rQBuR103fyZJoGpZCb/PGHuL/7iL5axdZwed438lXMr0rD2io9j+tB8VhcyOPQd8moPn6cdjpNv2nH4q4cy3CDALjEP2XDk0JDPPGHufe973zL/HMrb9w4yeB3u88ph5c+iCVZkU/me7GwYcxvbx9x2iP/kf1b+yllz1bfNCWvGGEzbDldMh+fUyXmnSVPmtJ1r5DHttK7D8NEeIxv5Zgw613U2k6e9R67TyHLcObcyYE69po/N++GhbvObuqijY+o49VtzOq4ex/02bTF9QNnkGBnIIa3l2I6p6XvHlXOac467fn4bViOnz/Im0/QFnydNvYanNnWoo99cr/84pv9MP1TndV6TZ68RiMCdTcDYvqyAuX1zqPlpfThn5hpzlblm5p2Zz72eNa3LUGfJnCUvZZgTj8uXTBcdMCfPo93/v/7NDRuNXKPbZh52DjnJN/P2zOE3w+Os/Lbnk8sxcpIdYzrD+/l+GI/sa9Yj/2l1hzyVOe3nPVvQkyXckLoOmONncvOENa+AtkNtO/J5le/US335B60lHMpQP/XgK3ODK9+ZjbPjnrq4ruNp67ll3ecIRCACEYhABCJwKwhcVsDc2G9jy3llM7KtvJ+DrcTWmjUAu3FsKd+f1ZaactnXUwaO8pHv+DVOyvdWBMyxO+cYn9KaB5nmICcma9/LcLkV/eKkPLEc2b0n9xw+S8OYnGvZtePIfhJ3+YxtLl/vJf5me12eMGeP283U8mKje8Lc2972tiM+Z7wOlTHyeZ26yFs95k+y8mnbS1g/YU7AnKdM2wM6lOQ5+atrKQIRiMC9SqCAubu45S8jYA4eyl2Uuqeu+POp8+f8KFAb7R/4wAeWP3tCwTMMj0s2oDkGPeXIn6EQuOE7ZXhym6h6T47j0GMkCAYTeW+DmdNvjBhl+xNyf/qnf3p07dq1xalIsZ81MXZF+DMolONJdtIYCZ78wrCwmS0Sf5y9Zy3nVp3PKCpg7lbRLd/LIGAMmssECJkTBPUw7I1B400AgYWDoNVX7h4jzcF/nrF+K+pigWLDbg6fJXOXJ3CaH2eT5Szlmw/NdRwBFrmzUJ68MRHMuw0Udp1jFn3kkQ/G5lxPFxOYSF4MPa3MvGZ+82c/D6VZKM7C/xB/ZSvTQk2Z5FYH7Tnf+d38bcEpP5zUxfvhprzLXqDhRF5yO8hLdqyGp3PIRU5tQHYB1N7Pd+qBzyFGwxirYSJfnyV6yFMnHPPnxfR5gaLve/h9y0L6Wc/eHzC65qp89ZC3Q/JkQfnrB8abc7DG3/h6ZPfn1dVF22yTc6fu6u39SXXc5nGaz8N/2gKjGV9+m7Ewc4MxoA2mHfD32746TPn6oIPtIT9JG3vPeSGY0Jw0AXPYswNsMtrMPJT0Acf0gWHv9Te//s3C/9+u/9sSvCooDz+8Bav603GcIyfxn3qfhb++pS7Db/r1mjVZyC5/8usTZPF+XpV52oQvpuavH//ox8tlT3nqU5Z2EhSoDynTQT6yaGtzhYNtSE7fT50feOCBZU69/zn3LzI99WlPPa04B8+Tv7ZmY/7lX/7lUrZ+oK42lDmU2JmeMHyWpE7afeo179VNvWbsY6SvqiMmdAb2U+c1p7OUvz6XLFOeuk37T5/w27b9jStla3uyOJxzKPkNs7P0y0N56TtkIqf+47N89RuHsT7lqBuGzp9j+pDr/eZ6/di12Lre++nX+B9Xt0Ny9n0EInD7CBjXbINb/SdZzTHbOdTcab5R/syj5njnmWvMMebymWfMpb4/z1wz87V5zXwmmf/oazpRGd4fmsPIJA8BWv4UvCebTYCWaz01953vfOcSsD8yntSqv/3NDf32s5/f4EDPjZ1Gx8y8q75jE2FAr4xO8Xqc3CfJcN7ftRP5yOxY68T57Bw86T1MyLqWfb5zziHuIx/+U5Z89VvJDag2yvwpMRtaknz5nTzN2HpX++7L33fYOt/7qdP0D2sHtr4bZJThe32G/rPm0958bvroNsnPoW3UWzmuLUUgAhGIQAQiEIE7hQDblv10K/8kKxuO3caOY/eP/bj154zviF3Gth5/js8O9tT4Lk7LT7nynf1S5UtPftKNIC5rDXYi/9px6aID5jAh19i22sExayO/sSNnraL+ZLUOGN+S786zJjqunqf5jew4jr3v/chOfrJLZCefdcDIrg3n8P1pbGNt+Ktf/urIeokv+De/vdGm1mH21v1VJesBvPQbD4nwkBa+R3b4vjQ2ut/JMX3TudYDPntQAp+28WF9JllfXNvtm1sDWGMcSuotb20k/1IEIhCBe5VAAXN3cctfVsAcRDYTbfByvM0mPmXN+WaT12YvxUuxHkoMFE+T49z2hDmb0owUhgTFzEDwtKPJS/4MvPkb786n8Bmq8rKRyaBwvo1NCt01Z0mC/8aJ7NXnMUIYROrEqLBZ7gl6fjtrGWeR56znYlHA3Fmpdf6dQsA4/ulPfnp0/evXF8PeUyzNCfq1ZH7h1LdoENDz3Ieee/SMZz7jThF/WVQJuJ1jFogWPOSeDQ+Lw7MkcyIOxrYg5Vkoy0Pegtuu7RY8NrrW85HyLfbMsxby8vDZvPm1r35tuYPpO9/9zvK96wT/yENeNoP2JW1kMe5JB56EZdG4LtM1znEoT1n+nKUFGrnJ4Tu/kc9Cbhah9MVsoinfYTH3vOc9b1mQnnYxuk/u03w3cpOLrHQbHYAfmS2gveqPDu2i7uQiOx2BB7n1VYFPZFcn5xxyBMjHIUDbglkZuMjbdxbOnq5KJkkZz3vu845e+7rXHr3wBS88evozbmySreuoLuTRP7SrsrXDuu8oR191J9vczeY88gpWp08PMddmflM/T1xTf9fedNrFCXIe/PpXuwX+L28EL+k7gvumDcYRM84NrMiy5q8NyObQV3222N/2VfLOOMGZHaUt5KmN2ThsAbaH8yR56pee+HFcYKkyjRXjRNn60tRB/1cn/OV/ffdn4Kcvcaqxf/yJ5Ok3S8Grf5yrbfWDl77spSfy1x/USx3w089wVS9j0uG3YSp/7bnmqk+TTX307enXp3EQyVvf82cGzGFkedpTn3b00HMfWmwp9cVIwoZs7rbUHriRz9xFPucpG9sXv+jFC6vnv+D5xz4dZYXu2Lfyv+iAOf1IndRfvRzTh/UH9Zq6zXw48wne42TFXL8z/vX1fX35uMrpA+pHFnOB+U0fxJZs85tXCWftT5b1vCZg99B4Usb0Hdc45HOaPnJId
n1HH5i5ivwSOdj65qrhQXZ10ndcM/174fz4zvH5qxtPmVMnfUg9pl9ji7HPFzafHapU30cgAhdKgE4xzm9VwNzoUHOM+ZuemDl05nHfm58c5HGNuYSuNo+bbxx0KDuCDjvLXCM/6wBzIdvQ3C2ZY+XLJmHvKe/QnCsPco6v42YC5obJ44/tgpl/8uOj7333e0ff/8H3n5h3lWP+ptccY1PQKzP/YuCwRjKnD49D8l9Upxl7CEN6Qnuy9cns0KbaeuQnD87kc9BtdIVjbH3cx2Y7pJ+Vy4+lPOWyDySffa//0seS8rARoD+6f1++uNKB4/uSLz3o1fnqOGsJfWfsjGc+45lHL3v5y55Ym5J/m1xPDrYuGfQzbVeKQAQiEIEIRCACdwoBthsfy60KmGOvsQvZaHwM7Db2lXK9ji9nbEd2I5uQbTW+BjYdv+TYU+O/OA1D9qhy2O/WAmxWdjgb8IH7H1hsQDe/2xs5zoZmG7L97eV6qrE82HoO9qabNPhWrFVOSmRSX+uh8XHhQ05roWEhH3I6xoYe3xL7HxPrJDzYnGS5VWnWLiO7diT7+MTY/7OWc47z8ST7tKc2ZQuPr5mNrD5+d+4++eWjj2i3b37jm0ePPX7D388mV7a9mW99+1uLLOqOhfUFH6l2Vf6+5Hvs9C3rKPXhB7PemPXX9Z2fmU97/H3ycb42lrf+eCipp3Odo76lCEQgAvcqgQLm7uKWpxxtensqhuh0Cp6ipEQ9EcXmtz/xRdExRG4meTqRoDl3rDIwxtDiSPNEOMEsgsoYRIcSo5MyZ6x5es5sojIyGBwcv/404LVd8MY8xYXRxYjxBDhPuVNnRqp6CmATkOKxsq6Rx3HG4lYudeBIVCd3XXNcKo/Bw/hhCA1Dxou63mkJ0wLm7rRWSZ7TEnCnjQXX57/w+SWIViCt8ecwPi2ozCvvefd7lqdOuoNq34LktOVd9HkWV+7g9/RLrxZF5iCLjGu7Ocl8Zi6zaD5Lstgx71nomPcEu8yGirzN756waX4aXvK3WLWgFwjkGp9d5zB3/uiHPzp6/GePL4sp589ifjatfLdO2sBhrrV4M0fTJ9t5lrz0j7KVa5Fms0bbqgcufjdfmbvJLA/zLJ1hEWrhRw5zurIm6Mnvt6rNyW3Ox8k8KkiNHhh2dI02nsXnyE6fOshmUYkjeQWSkV091Ee91HMr//DS3+lunPBxLqfGbH6RTcJpFumzOF+3k/dko6PI4QkSZLMRJ1DGolySn77gs2P6jrqQ2SKa/bCV17Vkkz+HiicSek+um03k/uUvfrn0ExuwnELaQX/FYdqA7Os+RE515FzAhDz66bSBuvh+X33kKW9B+BxI+qU2IQv7hmNj2h0L7OWv73t/KLETyDC2EPts8jMOlGmMyN8hb1zVwbiWP3l9t07GoO+1Ef5vectbjuXv/OnbyiSDwLUJENTfjUn1nrorT7n6gnYdmfQnZerXYwf5bV8fWcssf+WyHc1h2k6+2sWNEeYv7UdW52Glvxp/Prt+2nzaGiNPMnaTxLXd/Gqz+GaTMvS1i3jCnLroQ9paOxt7mDuMu5kL9S3lrudDbBz67Mwngpn1JczYo/v6xqH6k0P+2t/8YlxN+/vOGFjG0+6uV69HT/r/js1pf3Loc+PY9P2+vuk7vzmPc80Y0Y/Om/ChP9jmxqj+iq3+N0/8w0kyj7HjzaPm8Ok7P//ZLojlF7u5exeIKyB3PV+Pk5Gu0Z/oaN+R+6R+fd46dV0EInCxBGauvRUBc+Yb86e5aHQou8RcY143l888PnqKPJK5ho5cb5iNDmW7n3auIYODr8Rc6MnA5jdzFN1JN3vqMLv8gQd3T5jY2Qj7kjzM9xcRMIeJ+rKR+XPk6fWJeXdjL49NMfOveZvcdIW5l71Et2FFh9yqpG3IrS3p+7H3vdIvY2eqn8P5OGM6smtTbUd+dqbDe98dsvXVR37ajq2pPP1G0ofYBfQz2SS8lDO2lnbel7Cin60Blc1u0jflLw86nW5UX2XoA7uV9fIUEk/odeOCcrbMned6upD9YS2hrdSxFIEIRCACEYhABO4UAmw39udFB8yxhdiB7EO2FJuXjWUdwH83frSx/8d2ZDeu1wBsXj4ctu4EKbF52VmOk9Lal2YtwNYjF9tNvm60tQ/KHjy0BlAGW/BmA+bWTDDgt+NXwgUjNi3bc/Et7exeLNRx7Gg2Jx7sST5F6yH+FzzI73zHRSdyj090fOAju3bFZnxiI/vYwmR3kN3B3uZ7ZB9rU7Y636T20O7bJB9scPKXXcbfrw2tBX7w/R/c8A/uAumm7yhjfMKHeCiLHA4cr+/2XexD6Yfjz+aHtN5RJ99J0wb8zfzah5Ly5W0NYP1aikAEInCvEihg7i5ueUqQwr/VAXOU/fpPsjKSxpBg4Lz//e9fNjAp1EPOPeczLm3afvKTn1yMNk5W37uG4ub4Y/StnXMUPOX/uc/uAvY+99llU4wB4DrKnKGlfAYjo2WfsbKviV3vsMnGkemRtYwm3zFOOAsZIAIB/clZBt7Wsbgv38v+DhsOUgGIf/3Xf70YRow+dWAI4fOe97xnqQM2xxnTly175UXAQlP/Nf4Y+hZz+q4FllcLKoEVb3rjmxaD/UlPvviF1M20goWsoCeLIK/GHtltolgYm5ssqMxVZ0nGtUUdJsb1OujJwhIXcyU2ODkk59EJrhNwQB4LstnYWy+anG9OmGPfwt18aA4WWGCO9adxBUSs5xHnKIfDwoJtFvQ++34WaWs5lT1p5lxymHfNWRai+HmlD/bJNtef51WZDjrJ4hUzB/00i35tYEE78g3juXbKJRvdQHY6CB8baS9/2csXHWLhu31UPibaxF1+HplOFxoLuCrXoa2UJSlbOXSlc7Y8nEdW/Uz5AjUtivXJCZKRj3OUPQ4B30nyw981U88bv9z4V/7K5eQQ7PThD394CaQ5pO/X1x73fuGwC2zxhBIOF22g35oTtA0m0wbDwuu8nz5FNofgHm2g71y7dm0ZJ3S3uq2TvPVPgfj0v7aYfjrOL+VOGj7aWTn7EpmMdeVi5BpBWNqWzYS5vCd/n4e1PMko/6nTugyy+F3d3viGNx49/L6Hjx58YPdklmfs30zVf6aO+rRNbUz1dQ4tv0tk3Ja35j39eu3sYxcZk35z/aFk7LOpODFxVm/1NH8JuDQ3+kwewQDmfrKS27w67TyvyjKWzEX6t0AyvG82kesiAubIKS/10ZeNO/MhBuMgw33qM6/k9376gnpq65lPzIHu/lVfNt1pxpz89GlzGabsXDKRBd/p68patyEZtP8c2secwM43jnweOYe7ssirbegIspqHXHfepP3NBV/84hePPHWWLiSz8c15xybX/5yn38y84bzf6zu/2c2Lu2A5MkrT18mL41bX6E94bOt43np0XQQicOsImKfMtxcdMGe+oJ9tonzve/ws/75skrGv6fKZj0ZXqqFrZp7x2Rwyc7m5hu42l5sj2Yc2Ik6aa+SnjAmWE3xOBteZX+X5yEceOXrDG9+wrDnMz/uSfOjjmw2YMwfTH/QI
neLwHhP6hqwzx27lGDZ+pyvosrGVBUGbi9kGtyIpWz9h89GHdDM7k17WxvTytOVWft9P8pu2pJv5rNhF/F7aVXv6nl7aJn2JHqPP6CptIeGpbL9POcrQvsrRnt7vS37Hi3+HXa5eowtdo87yl/fYe/KRP/039ts2f3L4js5nc/gLC2w+a9pSBCIQgQhEIAIRuFMIsKfYdhcZMMd+4s+RN3tRkBybkf3NbuRnWPtzsBgbcriMLceeYu+yGd0kwq5yk4vvTuMnIYNy2ZBuziePstiA7GZ7p3yy/C6H1gBkuoiAOXVm64+96ZVvka+JrcnmVG/HJO/xdEhkZIOyKa0B7Gdc2/lN+fZ8f1wdJs+zvpINR74+/EZu/cZ6bmzwkXte13IrU1s6sCb/+JDIzh5nN/t9rneNPJRjb4TPHy/9R9KOY6Ov2cmDje5Y57VctPtHntofO2XrU/aB/XlX/XZsfnn7vK6HvDGWN97b5FxJf9Uu1gD8eqUIRCAC9yqBAubu4pa/jIA5ypwSF9DCWGM0MrokCpfR4DG+nnjCCUwB70vysXHMwSpgjuEpH4aAjThK31NDOP98ZghIo7gXh/EXv7QEzTE25EfR20ATOPKmN71pMUQZn/uMi61MUy8OTMEuHNGcvq51MHo4cQXM+XOQh4yWbb6X/ZlRhMdxAXP42GDEi6FUisCdQsAmhsXLJz7xiWXTS182/mZBZQzquzYlbE7cackCTMCTp+B4VR9jzEKK7J7wabPBPHWWZFybHwXzePrlBN1gI2+LbgEnFjLK870kKMKmiUApc7WFrQWTOVye5r2ZU53vWgfek4fvJznfYTEmUEGAno0h50vyki9dZG63cenV4nkCF8yd5mWbZOt51LUWcmT0SkZlzUYanYLhtd2Czdy1T76R86yvs4jECycL2QnUGk7TB5VNH3n1HTnJO4vcOZ8M6mjzSptf++Nry+ao4KLn3P/7d85NHn/zN3+z9B1l46AthsO2rdRf+dNm6zpj6Xz9jAOCHiWLfmCDTv+R5rwpY53H1Nd3W9au8zt9T9d+9KMfXZwDo6fX+Zzlvc1Xzqfru8Aim7mcGALiZ0OaHLgPe59HNnVwTDuov/6l/7AnBM0JXMOf3Os05Zp32ADGrXaUJl91njTlkmPez29e51ztznmBEXnYTfoY22LYk3fbttOmGE/91vk7X9nqIm9zorbeOtuUQX5zBwfbOLWwVWd9zDnrMSmPyce15jTHOASVzSYz74xjxnyAqQ3iffKSXR7mc/Yezsp2rvFhHmE3KsOGuzYwDs0jypOmr3tPLnIrT9meakwn6Os3m4xl5d7ME+aGO6eYsTbBf8a1vqwMddfO2/7sWof+59A/1Nf5w/2tf/LWpd1f8tKXLLYpNoeSvOShz2l/Tyl1+Kyf42vcytuB6fRr12oT5znITaaRmfzT3tNnfZafdmUvC4bUN+V73qTvkN38ZQzpz+qkDOPa/IaRuQJrzHFWN/JM3/FenXzv+uHrO4mdL+hEv55gP3IbH6UIRODOJmBcm6cuOmDOPGFuobuvX2eXfH15P3O5cs0xDnOj15kXzS3mJnmYP53rO3Otucb8Za5xsNGOs6Fm7nIDDD+Fp/PTqeZeOps98Od//ueLbrC5QY59ST7m1JsJmFMPeZhrx9a/vrPb2BXqqv5kMn+q0zCZOmDxi5/v7P3dUz99pw7mc0HKdAa9zn4ajvvqcZ7vtIW5X1uyhfWVeYo0m4TsZNGO5n2yO3ynzuQe3eHcaU/1JD+/F1uE/NqD7nPtuh6ud9MpfYaZPjtc5EdGnye5Fr9hON+vX8nIJnvf+963lDl1k/+UPXl7XSfyyXsrp3Oc63u60abuu9/97qVd9K9SBCIQgQhEIAIRuFMIsEsvMmCOLcYm45tkN/IxsJ3Z3nwRbEKJncRmHFtteMz17L6xHZ0zawB2lSA39i4bTj7HJfYifx5fmr0+vjJ2GhuQ/cxXzifL9lTOoUR2dTnvn2RVL2sgsgjcc1gP8PezjZVNpu0agDx+x4LNPUyw40dkQ/NrWgfMPsGhOpz1ezI7+L/GJ8q3pD3VRd/R1tOW5CeXgx2N87SjV+dOXa13BMlpA+s6fmfvtz4k5etL9hncPKxPaVPJb/JUjveTTloDOJess/awV6Jv2KdXJ7JK8nZs817nP2WuX8ljPcM3Zg9c+5QiEIEI3KsECpi7i1v+MgLmKF0baDb2OfwYHOPso6wZe+5soEwFUnA67ksMjH/9yr8efenLX1qC08jO6GRgMfJsbAuY8wQj343Db/JiYDA2yMHYcS0DgGHCWJzDZvlxBuPkx4BguNkkFejC8GM8Tdlk8hQTRq3I+tPkOXlf5iuujPgC5i6TemVdFAELLQ7+v/qrv1oWMD6PIW9xafw98sgjS7CczYk7LZkbzR9zWKiQ37xoASMoxKJ4G7BzUj2Ma/ORgDnBJrModZ28BcwJGtkGzDnPxonrBGvMAs98Z2OLvLMwM3/OBpdF6r652znOtxC0IaQ9LKJmPvSbfC3C3d1nbjYfuc458tdu5lOHNl07B9TRIt6rfOgWeZrHleWgW+Szvu4kfsf9TjZlWUDbwBKMjdUEZczGGRlwIbNjGGFKXm09B670Cdm1v2s9dWL+VDl+eEwdnKdNBFraDKXf5CVNnnSccyR5kku+WFjMS+oyyXu/ax+OB+9tDgpInUf461dknUO+klf1s0m2lnPy9kr22UTzZ8D06ac9/fyBJeTVX6/vxr+74sipTbCRxvFCLnV2kHNk1g6YT1sM/5GTE4Y9MY6MuU7e6u86jiPtP04wvPVBbeD98MWePPqB12nH+V2e3rvbUFtziLmezWQ8KEv5M661rfeukZf82TLaDP+RdfL36nublvJmmxhXZFknZeJgDrCpzS5QPq6ScvB0rfYzlxiXPiuTXByPnDsCBnBxaBPlO988oH/p3/qa60betSwYshcFPK0D5tSRvWb+4vgzZ+inyiG/Oq0dVupOLjKQ3bwqYBBrst9skvfNBswNd4Fp5l62Ku7aQsKO7I5xJo5DTv1cTw7MzE3r/ufa1732dUdvfNMbl7lQkOCMhX11x0l/0wf0b+Nrxr92Ur52FIAugAPD6dOu1e7aHxOv+o6+6rdpa6+uIYc+ZVxwHGpXdrx5wm/nTcanfiFIZALmyIAfh6RgE3LpX+Q0Fwzbkcm4Iie2fpfn44/t+D720+Xz1Ec9cKBn9ElsyO/aUgQicOcSMIbp64sOmJMnm5ZfwBxkrvGduXp0tXnUMfPNzBdkmrmcTqMDzD+S8+lwc41NCHMw3XsoKU9+Iwd/hTlPWfIyb/3Zn/3Zkp95i67Yl+RDp9xMwJw6CG6n49g1bDe6QqLL6AC6xDHzPzlHv/30Jz89+vFPfrxsro2NRV482GmYmNeH4756nOc7/LXD2Nr0Iv2ojZQ/so/+0B7qop3pnNHHi/7YMcRx+gId49zRSWwTG0zyXLcFHcQWw04byHNsBvLJz+dJrh2G8tqX6C2srQG96hf6LH2PIdnJSu6xQ+TjN31Wfafv+l47zat6sdNsxtHpbC3
6vRSBCEQgAhGIQATuFALsnIsMmGNzs5smuIzNyN5lM7KT2GfspznYYr4b23W9BmDrOXwnsef4zvjx2L3sRbbmXLuPqfqx7wTM8RevA+aRg/znAABAAElEQVTsu/K5fOQjH1nswLXduc2LHTx18qQzdVGuwz6n/Qo2376HE4wdz1fNx0UOebEt1U2541ecJ63h8uQn7Z52vPvPzTKPPf7YEz5FslhTuM46Zv6alzysbS4qkY0tzC62TiQ335y1izqxdcmpLdm87Pk5/OZa9rljvQZQb79pOzJjZh979kjkNW2qHH2Aj5uvW1+yBiEbBn7Tt7yf9ph1prwnny0T56xv2OF3tF6VN/msKfSd6X/KIwvm+qE1BtaTv9/WSTvKX9vor6UIRCAC9yqBAubu4pa3UcSQu1V/kpXyZNQw1EStCwoR0EIR+41BYPOOocbYOuSwpaQZA5y9j37h0aN/+cqNDWrKnMNOHjaAPcHI5vYo73XTcFgLahAwx+hhHMuX0rdBzVDxJDUynMbYYpgwJGzGcaRyYvrMkBgDzqasfOXPcLoTE4ON8XdcwJyncaiLel1EPcaguxN5JNPdQ8AcYg5j5JvDLCYsSHxvXNtIWS8ELWjutETeCZbzag4xPsxDtyJgTt4WMQLmPFnAk5qM6ZkzLZQsBAVImDPNseZecgmg8T2ZZ3NGwIsFtzluvcBbc9YeNmQsap3rPGX63jxq8WcRajFvQ01Zs4i0kSRg0KtFqPl+5g/X0yUWovSKa8cpYQ63gWaRZgGvP0wd17Kd9b0yzZkTgG3eNPfTc/SJegkk0vfI7dWCldzL4n/3O3bk/vnPfr4EX5DbHEx2dfGb+mOmnegQh8/Th8nh0B70q7bSTmQwDug6eWIpqT85ru2etjebrHjIY5L35MTZItd7elLe+Kq3/qHuynXIQ53Jq43wvu/ZN4LyODnWybkYvPAFuz9Dde2PFz1Lp5w3kRd7AWsTWIadtlcHY0j/dOhz03emvNHhUxf89e1xYuivxqA5xKbfWv85B2+ctZ/rsHZc3zlWfK8tnSdxLJCHc8srDtKav8/OMz7JrC05tvQt9XKudjDfmfe89x32rtPfBKPJ23fSOn/tpP/I23jFY8tfWdqcQ4udxD7kLFEPTMmmnY1jdpf8HGMz6dvOn3lEX8FDH8KIbDZP2WkzPvfJQXYs9W02lj8rjYGkLByNCfIpQ2CW/qWdjT99nbzqPDJN+cbUu975rqOHnvvQE+Npyfic/+gH2uNmnjA3fZGzSj7qpX3Jrx7qhLc+aR7AQNvNGNbOMw/qM/q068nmN9eaB22OX9vNAfrgtu1V37lkUb7NeU9ONo9oz5lDXvbSlx294o9esfQh/U77r2XRbtP+5qKvffVrRz/44Q+WfqGMmS/MQ+RSN3VyzFy/7sOuOWvS1uuAOfKol3z1DRz1JwfZyaBsMqgTGR2Scaj/68P/vfvzit/45v//U3wz5+vD5r7hq48Pk7PK3vkRiMDlEDB+zZsXGTBnDjXf0KEOTz81H/nevE2HmvvMQeZhc8foa7V2nnmJHW4epgPpF3rMfC8Pep5NIkCMPjZX+W2b5KWO24A55yuXDJcRMEcOnJcn/e+Y0HNsDfrJnEwO+sm8yT4h29jsUwdMBM1977+/t9hc7C4Jz9k4NIfv47DlcprPU65y6FS6cJ5+PXYdvcHGZw95JYv2IT/G2kwdyU4vsq3YmfQiHn6jJ+gkebBp2EV4rNdT2pAc+gFbR38ig+/oerpOflN31+oj9OtDDz60fL+1x5X7rGc+a7H1n/XsZy3Xk0/+ytM+5JS/cvCQ9FVtZNN21hJ+m9+dQw7th4n+OXaC30oRiEAEIhCBCETgTiAwttlF/UlWdh/7ib0rsIzNx55nE7K57n/g/sXWZT+xIcefMyzYUmxGPgc2GPtugqLYVuxMtp29OXunPrO3DqW1L20dMOca9pm1xK0OmMPEuogNzV87axt1ZS/zvfDNObznV3rqU24EArJdXY8JO3fsXnzYqljgYD8Dy+NYHGJ06Hu2trZk/1tHkXvtT2Jrs4fJYP3CX0d26xptRb5ZA2jDWQNY142tzaZ23bWdb+61r3nt0evf8PplPTB5kE0e5OBb5tsjlwMDfnBMfK9M+Y1PmF8KD99vkzUA1tah6kCe8YPLW7+x9rm+8+F6z4emvcjqOmsM65ZZf67XAMrCQd/Ux+RfikAEInCvEihg7i5uec43yv9WBMwxEhg4Ntts8AoqswFH6XP0UayUtI15hhqlTglTvNvkfIaGR9EKKmFAUuYSY1MeArpsolLK+wwDhhZDhUHMic3AIN84KzmePemOYpfnSUl+ZLKRy5BiZKgb+RknjAiPOJYv5+e+ep1UxmX8jq022hcwpz3Uw53snpyB1T62Z5HT9fK5iLzOUm7nXj0C5hDOfIvSj33sY8v7mVsswGyeuLPFXU8WNRe5iLoomuaxOylgDtNZ4JkfxTy5q8tCSrCxecKizcJJsmCaP7Vqc8S43pdmzFvIzeJKOepPL9hAs4i2GJZm7rGQpxtsps212/zNuxaKFpIWdvLU/rOZSM/c7Lw1ZepfFo6cDu704siwUJUsbi3WbSSR11OdzJ8PPvSHf2IJZwzlJQ932unH8rIA9jtmOLjjTCCGICeM13WZ9iKX9vKKpU1a+U1eFq6cMxwseNKT8nf9Osl7dNi05ehyOo986k6H6gvOdZ562+ijh5//vOcvi/XtBp1yZv7HavrBuvyzvCc75wt5BBmN80F7q6vxj5mD80CfWCes9Df2iT4oH04t38kbew4Qf1LKny92vT44adjj42AP6IdsDE8eJA9mEt7kEJTPIYQXFlv+w3PK0Ucmf/Ka7zjgyMzmcD252BjGiiBYcvtumzc55C9vc6H36+R8+bON9G190liXXIMpW4tjSv9eb06v+6TzyY2F9mH7cbpwEKmLvqgf4zpPN8Fjm4yNdcCcce56fQdPY4u8+rj6cMzMhjse7Dh9Ezftysmlzi964YuWQEibxMN5W/ZZPpPrIgLm9B+6wFPd1Ms8hpW6qte1nTMNf5+n/6zldL0+Z+xz7JkPtZ95YfqI8W9TXl9U92274Yu7NjOPyEe+vn/Ofc858idd9bN5Eg6e2zzIhLnrtP9n/uEzN5yj37vR/tqEPPKgO7Sj9poxuk+udT1P8x4786o6CLgcByd5HSMDu4DNb0ySB1vBC9s6qb+5z9oDE2MEX/X0m7EjH/URvGGOnTnuNPJ2TgQicPkEjF36/qID5ug+NgAbyXxhDqaj2Cb0Jl8FO9G8bp4wH00yl9ApdJ/r2Sb0PVuCXjU3uU4e5nPz1j59Lj95qSMZzIXsJPmaA0cnXEbAnPrTwX/3yb87+uKXvnh0fWenq4t6m/fpNn4GesnGkzpu52D10Vb8FeZhNoVk44aNzD5h+1xUIrN2wJ5OZfMKzseUjtJu2pLuMN97T3+o01p258uLXnU9nSg/+kQ/0T7aQ35sq9Efk9fUZ9rS+fQOfmw0bcte499Srrz0M7agNQi5fLe1x3eEl/M95XnsIHmT1YGzPs
wuxsB38iendd88/Vp7kc2xTs7FYvomGUoRiEAEIhCBCETgTiHANpv9QTdu8xewt9lD7Es+CnYyO5Pv5SRbhn3GL8V2kh//0GM/fWzx57DNXvySG/b7tZ3dy2/Fz7PNkw+DTOxO/hh2O38OmdhUruFPZIexm4+zfde+NDKxnScffovLCJhjS/NZsn3tJVgPqCMbEQO+LTcnY8wvw87cJjKze+dGXDa0z9qHT9KaCMe1/b3N4yyf2bTk5I+b/Q99gxzK4WO07mAPK9uhf5B9K4M+4Vqys6e/+q9fXfxIP//FjSfNOX9ZH+5uSLXf+trXvfYJH+bIPHY2W/xXv7yRnxs47Z1YC5BTPtZ21g/alU9eX8F5m5yrHuz/7RpA2+jD+h6ftvf6ERnIqd9YY1izuFZe2zWAvB366+S/laHPEYhABO4FAgXM3cWtfJqAOUFkxwVC7Ks+Y4JxxBnIMcrpJjCPseS3UarzWOHZRKZUKd1tsvHGcPz4xz++GFs2BRkfzqW0Pa3EhjbH4CGjkYHB2Pzyl768OI4//4XPLwaxPJTL2KL8GT7y3CfHWi6Gg/zIxKDAkuHmOsaJPP78z/98caQyBk/Kb533Zb7H8VDAnHrMRvzb3/b2oyc/ZWf87B6PfN6EwVOeeuNR1DYlt47l8+bbdfcmAfOIhZ8NA+NQP/adOcaixZi2+WB+MS/ciQa7RcmdFDCnJw3DZZ5eBcwJSrTYNa+b6yQbRQKlPOHTXLFvUeY8Y99h8TRzoTwsINXfZp7NQU4K7fTiF7346DWvfc2yIWhz0Kaaa/cl7S0vc7L8zPXOtZjV7vsW3vvyOc13NuzoMTJ7aiodoFxpFpECUugRPCZQc+q8LoPcZKXf5GkjzeaohS/96Xc8OUME/3GM6NNbPamd1m1G3zq0FV0p4WBhT0/Ky3t5L228EmrkXLeTn0fWceDIHwPnaS/jzeL8kUceWeyF+59z/x9s0MlH/o5t/n47TyKPzVP9x3s8OV7YFjbyOLtm7G/75tRJGwqAEfjuldPBbzi7Xt/GjR6X1zoNe6/6rrbkWLExzSYwvv2G97Wdc4zzQhvihcM+/sNHOeSYMtRNkI4/T6qvzObtOEfMdR/60IcWGX23L295Tv5eJw0Lc6l21Q/1SfaBvIw//drmPPnHyTfXr/MaeY1JTj5MOf0EJRiffld/zrHZdDV2tsm4cv08YW49JlyvLZShndh++HJYGXNslxknynOtukiuZX8sd68++f8z2JZ/2s/y1tba3M0n+gC59HGOPMwEA3AsHUr4u0aQl7al19Rf/1M3c4D32oH8+8aP+pGFrS2PuZFDH9Q+rsXbONWWk89aJtdzjGFu/iCHfq08T5Z79WtevYwH9u3cWLJue3lN+5NHH9Lu8vJKFkmfmvZXN/1JeynHcbNJOduAOXUjq3nAnDWBzfqM+uiDmJBjm9QJhx//6MdH3/r2txZdxe7X1trJ73iYcwSxcPqq00Xqnq1MfY5ABG6OgHn3ogPmSGT+mY0RNy4ox5xnzjHX0AvHzeXON7fYEGIXm2u8H/1JF7Ap+CzMNeaeQ/OWvG53wNzPHv/Z0Xe++53FdiELvU4u8+ME4QugNwfjtNUp08psIGy1mUNSb3Ot6y5yjacc9vMEGtJl1j5kYz/wi9Gl1kDmfe15SHb6gex0BfuSfUUfXv/dDT5+o5f0DfponlTKjlnrw9GtXvGzVtA/ZlMWD+eTzY1ibI5XvPwVy3f7AuaOduaP89e8J2/9jS3IdtNmynOeNlPnt7/97YvvzfpMct06Odcx8q/LWJ/X+whEIAIRiEAEInA7CLDL+A8v6glzbCX+HP4Tvg+2FHuenc5e4kNj67Ij2VNsv619xCbks+A/Y38JdOPrHD8GW5ftzwZ1sIG3eQxL9Rtf2u0KmCMDu3fs6fHVs3GtAfgv7eV6r25jN04d5hVbebmeje4zXyC/Hxv8EIO5/rSv7FltwI9kDcef5j2/lvay7uDnZwuvb7b02z7Z5adPWLcIwtMv2NbWFfqG360fHnzgwaO3v+Pty77KtZ1Pk09qXSfnLcdvdk/tfvyxZZ2p32KrvznXNdYk/H0eSIMxubbJuetjfpe/foYxvyaftvFBdr9ZX/BLig/gS9Re8vHbOk3e2zXG+pzeRyACEbgXCBQwdxe38qGAOcqNY1cghA1Yht0+ZTtVHyXp1WFjiaK1uWvjjlHAQBgHp7zG6UbZMvoYkYeMDHIyFD3NjeEof+XI57QBG85nXP3713/31IlP3XjqBOOHUufAZlyQhfFzkoIfo0cQCaeiTUbGkLwYbvj9xV/8xWJQMYp9fycm9T8UMKeNGPiM+1e98lU3HzC325xmEAokcVe2/Pe1+Z3IKZnuPALGtA0HGwaePmlRKllAWTAYx8a0YIVDC4bbXStzkvljDgtBc4U5l/w2PSxMjMOzJOPaHGzuFWCz3uyxsDb+bLZt/yTrvjLMdTaOBIMIlFoHzJHRnGmza/tEhH15rb8zj9MLno5g8UhXkNu8YL5581vevNxpZXPwLPOEfiFd9JyrX5GXDsLBJqYF9egiC306k07D11x3GrldL2iLvuRMcOCtL/hNX7CRJmjLE4w4BY7bGJxgNn3KIlfiULCA1p/kMQFzy4+n/Ics8tOnOA8c6kcW481Ycxem+Z2Ml5Es6ulejgwMtQdW7IlxRp0kh3Y1PnDTDzlH5KP/aENjRKAbG+G4cUgWbWa8eUoYB5XxjRv213bOD/aU160T5CQZ/U4mdhCbg3NkHTCnPW1gco6wP4yhsyS2C9k5RwRL2dTFU/uaL9TdJu48Mfe0+Y+dpK/gK1/fYcs5ZqyQW5ttbUzyjJOP7YevtnItuRzjgJQP55XgK44sv11WItfNBsyRVT8xv9BjbDLzo/rpO/qzcbZltK+O8hA0h5kNdeND3xmdyLkqCHTfPDJBGhxkxjlZXKu9zfXmN/1AnziNLHSFugguNh/5PHO8gDljSz/ggLvItNj6uznBXDpPmNNO+o66GCPqgoU+Y44dp99xcpDdnwXE5lOf/tQyBs3V2k6/Y2c8/N6Hb/Tpl710Kee4/PotAhG4fQToE/pI8BK9Sr/OeDZPmCPNU+xbOooteppkzjSX0l82McwP8jOPs4/M6yfpKHmYs6xv6GVzufzIbC6XBxuefPL13TYp1/ls1tmokoeyZy3OZjMXHufnkQ99bPPOzUlkmfWWOY+/g15g36jnVjeMfTu2Cz0guZY9qg6u396QsK3P9jO5JPP6RSbMyMiuZOfjT6eY/9WNPSSwzSYfu4Pc2zofkke+7KCxN2cjyvlsVracJ0wIdtNX6JV9Sd0nYG69/tW2rpvNLOuSk/ravvy1L52t39hk1R9Hf7IFtBf/m75XikAEIhCBCEQgAncbAbYtO+yiAuam/nwx/Lpe2Y/sO7YZnz47+TR2Gd+SNQTZ+Cb59ccWY3vyfXkowHH+97Uv7XYFzFkPsaXZk/yseEjWMexI/m3riLP4gm6V/U8utj4Z8WL/eyrces1Dbr5Layf+fj6l06xDrC34W7UlDny5fGRr/xQ/q30V6
wvlHPL3a1drCTfMWOPx+5HBWpDfkHzWd+fZ/1J/a2P7M3yJ+uEE9unD2oq/mZzWPqepO66lCEQgAvcigQLm7uJWpwAp2O2fZGXEMegYYjaKjwtYYLA4GAFz2KCzAc0gkL+NXcrX76PMbbhxkjqUxZDcpslXHvKiuAXI+J6MjAgbfzbqOfDkcZzSZqQweOaOXI7j9aawwDD52cQ7aXOS0crA5gDm+JSPOiqfHJyUnjDHoD2tIbWt/2V8JvOhgDlGEKNefRhtN5vkJy8bpIKYcDlkCN5sWV1/9QmYByYg1wa/+cy8YDFpA8NGjLnBmLY5dNoNjcskZ0E2wXJeLYDMIXdawJy57q/+6q+Wuc6Gj7lUmoA549mC/Szjee2kMCfP3VvaymYR3TCBHcfN65fVXupMlwnAoI/IO7pIf7NwtoFpY5V+O40zZGQ3D+Nhcc6pYHGuP8vfHGzjVrCb/myDCqNDafoTvaTdJPLdbMAc/S0/wSJk3Bcw58+QW0yb5y8j4TMby+Tz2Qbj6O/T9hs2CzvDo+3ZGdpafuYM/dAGJvtA3Q4lDg8OBs6FWxEwp4/Y0LdhfShgziYpvXpc/9gnv76nv3HKrQOM1J8zyHjkvPEe29NyxVD7cJLpj5w6yvHdbDpzlF3bBRGSeZ0vmdho5JmAOdc5x2F8sSXM82Sz0bzNY19dL/o77a5OnGrnfcLcyKR+2tkx/Q/v2TRf85lrtq/kwY1OHOeofOXBySVIDHN2/bafcOya41yrrYwLicMNY2NBXzhtQOyMCQ4948q6gM7Tr7QbZ9vMadt63MxnZZif9bt1f8aPLavv0VkciuZGbE7D1vyiXa7v1iHydpg3fC/Jg86SL92I8WnyvZm6dm0EInA+AsbyrQiYm3nCPD62snnAXD62yUkSy8NhrmFz0Z9jI5uvzFvmMHPytZ3+pPe3aeS43QFzE8TNX8K25T+hv8lMHznUY1/Q37ZOt/ozZtqNX8SGljmeHpXITGfa0MKeTciu1ranneflTS9qU7a6NSz7QV/UrnQrHxTdaCOR7t2XyClgTj709WzmkZGdWsDcPmp9F4EIRCACEYhABG4QWPui+SrYp25YZJNZw/MXCDxik7FR2VinSfwu7H+v8uL3mDXAafPgy+CH4QOzBuDbYT9K/DlsUb5hMvIV70trX9r4hMbe5K9mfwt+UlcyHkr2Odmc/Jt/+7d/u6xPxvblFyIHm5gvbpsw5QMaPyC+rhUgxxfEXmVLn9a3tM3/oj/j7uYpfj0+JGsYa0VpAsb8NQH1PuteJttdO1rPzZ9T1cb6if6hLbUrPx2W1nr7knadPYlDAXN88tYQ8j1Lsk45TcCcfQ95n3b9cxYZOjcCEYjAVSFQwNxd3JKcdDZ79gXMCX7ghLWZxYA5ZNwxuih5BtwcjAx5u7PCe0qXgSAPzkZ5M6oYGhyC8t9n6MmXUSU4wQa9V05Milk+jJQPvP8DR299240ggpOcrfITIc9hy3Bj+InOlx+DRJAKJ6i7yRmOx+W3Dpjj9PSZgSEvG3KeWsHAdhcyOe9UY2IcwwJW5i4CRhh5tZdXTtyzGlv7hgVDXCAJA4txLqhC3qUInIeAOcVmg00Nizdzjj5rnBvPAn7NXzYfzBfHLQTPU/5FXGNRNgFOXmfs3SsBc/SDIB2bPrMZSBeYN80T5mMszBO3cw7V1zgLru82L82TZBU85nt9y6JWX/OEDfrsON2xr9/Ihy71BI/Rd3QzZ4t6YyIQw6aihfRxToXpTxwTVz1gbh/L83zHRjF/cIyYS4xDulHSD9kEbJXZHN1XBvtHPvrH3RQwp+9x1ozzhuz/j707cZesqNL9nxbzXEyCczkg2uL8XMThNoX99H/drdj+EAfQfkSk772ihQPaIjgAIqO//OxTb7nJzjyV0zlknvMG7NqZefaO4RsRK1asWDv2uN2l/Oem+uBhDoPzmOQ3/YZBh97F6Y9ep197WMJDGZyL6E1jGa0O5jnMiTP6XxaW5e2d0rPUu7azDYe58NrkTI6Qq1hnsT/GUfXH6Ms4yVA5uxivHJ5mZjTmlMk52phqPNUHyDiyeZEBbzbfxmJtSZ+yy5w5gT4iTs6O2padahhttxkWOcxJFwMPskSeanOrji3aJVmtXNo1WaGs+pIyMUBjtaoT+TYZNK4SKIHDCZCL5NG2d5g7PNXV/spGwWZBT5ZP4w2ZZQzkxEzWsKXMe0gheuUuOMwZVzjMmbOxl5C5xpE8dMPJ2Bi+jjxejejhV2OmTdC/6YPya9ySX8zpLWS7hyiMofK8ShC/g75F1/fgCTbGrKRhAfG+T903vJpJvRq3ZoM46jA3S6XfS6AESqAESqAESmA5AmxNbKV07G06zC2X+uFXsSvQDdkVrYPSFeWX/sceyS7JUc0mHYvsMmNb2jvtMMcmZT4SfZfdjy2IPp0d5ui7q9pkDqe42l+xZe+no7PrsaWxWaoL+bJ+bd7FlrbqQ8TJiTmGNsculjoxtxO/9edzU5smJuKf54AoHvV6VA5z7HbyyB6cteF5O8yxd9VhLrXacwmUQAnMJ1CHuflc9uJXi2OzDnMUBQM2I53FHo5fFtXmGewUksHZwMoAanB1WGTznUJEwXCvw6DKYYoiwOBI6TjsdQ6UB0oLJZHhkrOceMWV/D34zw8OTgTXXT//He3jilA2+WH85GTDsYABWnkZaTklMNp6aoDySWlZFOSDsjNvh7mT4jCHC2ZCzot4LPM7xjE2/+u//uvAtw5zy5DrNfMIaJOLHObILJMZckafttCh/e1aICPj4HTaHOaU3VNbnrDiMGdRkOMBmWCCyBBgq3ljxHghjVw67mCcs3Bpgcvk0U5JJtACIwUnEs47ngjbZMHPmMKhyI4R0jJBNWaJk6OcSTrHFePoIg5pT3WYW76V0DWM6Rxg1G/0FzIGd049HHA59izShcRB/3H/PjnMadt0K33Q4i3dSBnpQ9q29sbBil4z62C1LGG65i+f+eXkP7598Do37Vrc+rldGS34e6py7Jg/NvJhiq++oE60f8ZChkJyXlzje5fN1zauk69dcpjDhw6uHunODH7qGB86rXZ8/vz5Qb+fdbzlCBZHAbox/VtbIH/JNuOpOcGyDsHSVWeM0AyDjL3R4clM9a59cZzcZjC2MHYyzpo7SNM8RVm0Mww4m0h/naD9ckr8xje+MTDW78WvvOem85tP3PuJyf1fun+QF3SPhhIogd0joL/qu7vsMEcnZLPwqncLK2Qb3Y88N/bFOXeevcJYoIzvtMOc8UgZ2Euw9l3e6BhsLRkHjOvGFuXLcdytBi/zEg7j8mtByjgoGC/Zrej6FrPMVdadV0qDrk+3kZY0pK1e3/++9w+O11/7318bbCbzdBv86jB33K2j6ZVACZRACZRACZwUAmxN9OxddJjDmG2BDYV9Ljs00xU9/MeOwg7GsWreHMD9Y1tanLPcT389rh3m2Ezkn13aBh3mMQJ93xyAbcmDjD6P9epFdu7h5iP6Bxs7NrOJsqHJs3ywH2Xe5UFfeV33IWJZN/dkK86Drb7T69mM2MTYnT08
o47nhTrMzaPS30qgBEpg9wjUYW736mTpHB3mMEdhsTjKMe0wBwCDuwUxzgOUOgenBwelg5JBwaAUUeYoRA4KgAVYC52LAgWLk4IFP4ttvlucFKdFL/FYbONYcdWV0x2IzlzekUKeKMYXpjuecNTgXCD/gnJ62jlPDkiDgjQvZMGMQVUcjJ3KLm8WFylSeSUrhn7fxaCeLJZfboc57WGTMmgn2oE24IkEC/C4iLehBNYhoE2ZPJrUcDAiz7RR/Zns4uhiRy67lJE/8xYd1kl3m/eYNMbBydkESBk4lpJFJsImTmTlKkG/5iSAjYU2Dl4WqcRtUcrkmqOKhR8y7rC+bRJHZj685VeykpfkpnKbMJJB5Lv8xJkGAwtUnjxTpyaSZIZrxgtVh+V/FW6LrtWm0taMG9qa/MsHnhb8TPjDc1E8l/sdDxN1xhE8OLCoS2XFQDp2sTts56K0pzrMXY72PxzB1aV2To5YwFQP+iYZow2qV+OWvqjO5wVtVxz75DCnfNqXhVvGLPKCE7L+RGZq28rOcdX3dcfrF//y4qVXff7iwi8GXU5ftkMLHY4DE5lkAT1hbOSLw5y8yrN6IN/tEubpVLJ9Ub0kvqM675LDHDYOsp/8YOwjr+i9GNG54jA37/UXg3PGb56dPPrdRweHXTsP4kr2GkeNp3gvu6uOfmVuQJ7JB+cD9Rpdm8MHxzW69jaDvrvIYY4DNjmqXTuvE8SPsTHR/ITznHaAPQP0Rz/y0clXv/bVS86FRz0+rVOG3lMCp50AuWjM3kWHObJE4FjFDuJV7GQ6+SmQ52OHOWP1bBCHMr7TDnPyzM5gLiIvdFw6tXGAExp7SfQrTvTR86PrZ2w/DjkqX+ZLFhYxl1djGJYYx0GRvi9fydss+8t9x8Q8IvUqHfqNMmJgbDo/dWz3me41G+SnDnOzVPq9BEqgBEqgBEqgBJYjQBfbRYe5zAHohR78Y8/JDs30VPazc9MH9KwTsJPPmwMgoHx5W8M75TAnD3R/D3KyNVpDVS5zAOuCbHrWch2+5yFY8xzX0IuPQ//HS75+99vfTR57/LHJUz99avKzp3826PnyIl9sVrHDs6mtG8wrxnMzNqUwofeb37G5mRvNKzum3WFuXfq9rwRKoASOj0Ad5o6P9dZTOsxhLsoJY+C8gTqZodA5KG8Ms4J7KDiUCwugjH288C1UWeS08J8F/0Vxi8siFOWQcuXwm7TEz1mO0mLxT3yL4kk+x2dKCeXRIp6tdhnMKSnioLSJ2y5znPrkf17c7mHIZuzkoOCzxUHXWlwUD4e5KH/z4hjn6Z36rNyLHObUH8M1JZxifrm2cLkyuD9PaKu7KMSXu69/L4F5BMgCr0WziMShwoKDQE5ot/qvRQ3OHvrkus4e89Le1m+n2WFOPZGjdlcwieZ4YBLtd7KCfMgup3Y1Mok2OSVDjClks3HGtUc9oTa+cerjhObgSOI3bYps1M5MbhdNbJdtL9qDuD3piAmnD44YymcM5bz10EMPHbpzUR3m3k6bnEjIZ2eHtiYYuxkfGKUeeeSRSw5z/j52mIuuMW8831eHOUYbclTZOQ5wnlM+fUyfo7Np10Nfe9d8Z8HwXXTWrulzP37ix8NOM1jrO+Ln9CZ+jljLOMypk+h/jElkAxnwToXjdJhL+01Z0459xyXf6bh2qCQL6Lj+Rp8bO8wxyM0a+zhV0wfttMiwao4gTuNpHiQh5zhwX465+9Rz5Bn5bozW19SzOic3PUFLzm8zaG+Xc5iz86z5wzpBGaThgRnl4mDhuxAj9oMPPjgYsTkXzpMX66Tbe0qgBLZHgFx8pxzmyMdxiOz2W2S5z+RnnM3IZDJVIM/JUHLZfHreYpk4xTVelGH7ILvJdDrl17/+9SEeD+kY4+eFyHJ6ApsHR63Mt8yt6EV2Q7CIFL18HA95yUZCrzVfi14rXnmhB3CaEw/naTq+g+yUT3G6btBBpmfy9KhkKpuIXVbp3+aV8p36ME5x8DcG0gs3yYf5g7rlRGhuYQc+uoQQvSu6PhazAbs6zM1S6fcSKIESKIESKIESWI4AnfqdcJijwyXk8/hM7xTopDYNYXvODnP0Rzr7uanDHL171x3m6LZsi/R/D0+yB5p7KS9bIHu/h2aVx7zk7C1nJ9ffcP3wu7/R/XNE7z6KOYD8mK+YA7AFs4nSzQVzEfMsD1tm0w/zn3VD0jHXsB5troGTcmXTBGmZWwmz5dVu6zC3Lv3eVwIlUALHR6AOc8fHeuspHeYwJ7EoJcskTMlg0HRQfhzDguvNt0xuve1AqbOwyeDICOrvs4N/0onCkt3lKFjZ9UT8jKcMlnZIspsDxWJRXIlzfKaQUNQoKJRQHLLYxVDJYGu7XQt5vs8zIrveoiTjsac+GI+j6MifOBiiGVUpf/K9i+Ewhzn1hy0OeMdYvW451BEjP+MvJj7vKpd1y9j7jo8AOfHsdDec//N//8/QDy1am0SaZJrYWICxIO9VcmSOfrlrgRyJg5OzCZB+ksnSSd5hbiznLQSSx88//8LFBcGD8cQ4oe44dnBE4ORBtuLDaUM954g8wW+V8WCZNkFOcjxRRxYf7UCWdiZPnH7sCMHpZ5O0TaA5DdqmnXFEWhyalE15OQpxxLawiMu8kPbkfkYggcEhr7Akz+V53rg2L778prziyzb1nNjJb3kzVuhrFvjI9llHnMRxnGftS3DOoQw51Cl5kXao7+FuUdfYjrtrOYxxrrE4fdIc5pSPLsQIxzhkQZwuk3olM+lA0bGmPWutKnzjzTcGntqP9q2d69viZgyiX+g7+nKC+shTseMd5uTZgwj6XB6YeCf1CHofHZLj1De/+c1L5ZMnsooOaHe2dXczC4+/v3WxHU/+Z3vWjnPggzGdWV8lD/ymn17OYc54ZBGfwxwjHkOhunLvuakhk+xQFnq8tnGYrJMmoygjqX4lPv1KILtiCNSnVt1BNUwWnZXjKB3myA71rs+Qg4yr+hEeymKuc366OxCnfd8P47SoDP29BErgaAmQUfrtce4wF13EWch3eXGM5bjv412HPVxCtglksnEzO57tssOcMhqT6LPsORkLyFB/UxYLT8YF9oHo+h6yVC7jVvR859gijkKuGu+0B05s5iT4C9IynptPGgeNh5sE5TYeGmvp6hembz1I3eJAH6JPm8cuqts6zG1SA723BEqgBEqgBErgNBNgazpuhzn6H/1eyOecMwdw9ht7AzsD3ZmNji7tb3k4bx92mJNf+i67FBsruxBbYxiwl7FT0/8He/+tt03efde7B1s1exO9n53ZwfZkzkAn3/YcAG96OH384ekbBPDOg6PmKOzobJbWiOV3k3Ul9crGqV6tJY/XotmN2I/Y2zykg89sWbXbOsydZsnVspdACewLgTrM7UtNzcmnwdlCj0U+SgxDIWXBoEwZsaB53bXXHTg1HbJOGqWFEdN9jH0UnlvP3jo5e+vZwdhH4aHcUThcc1igQFGsKCwMls6esJZOHCgslFIkLLhSWlYJ4qeocHSjhIqbAirIH+OkXakskNspbryIm3QYehnavcJ
NPBYWKVlhxyHBEwgWSTn1vZMLusnzvDMOyk9hsyitTVDClANX5aAcUtjU26blcL94stgqnYYSWIcAWfXc758bFjce/tbDl57Q9zs5oe3agUg/NPkw0dq1QGbEwck5fe80OMypC7LYghTnBg4nv/71r6cTSM7Hrw4y2jWRGZlIZwHNd/XqSTS8brjxhkF+k9fqf1NZJe0EY6NJPllvMc14Ie/GC4to5CPHKk/IbSLTGBW0AelYRLNop40YW5XXBJojNocMY+y8kPZ0Gh3m9H314sBSvTmMczl85ww3HH+btrOpM5ffjOkcx372/342MH/t9deGeE66w5y2rE3b6UT/YzjUd7Q5h7F6GK/XdJbTRtXLm2+9OTBWD+pHH9V/6Rf0OQvEY+OTfnCYw1yc0DhcbbOvz+tTh/2m3dCbtu0wh9m4Pb/+2kFbfv2N1y85VUjbcak9X3TyJDM8cKI+6c9407vIDKzPT525jI+zMkSfodcqC2c7Dhq+ywdHWPKN8yjmvqeNjPkMdT2NR57oxZwk7HaXXYXITPMBDo+c5sjzbY/Nyn+UDnM44W5XRnMIjgv6EdlPTuPD2UGbNkfZZEwYs+3nEiiB7REgF4/DYS6y3Hmsh9A7yBK/RQchN8kW3/0uf3b+JEMtjvhdIM/3xWFOfpXJeE7Hotd6wIlDtfK89eZbk3ededcwjtMLLun207HBZ0cc9wddf2pjMma41hi0zfFfPo1Z9GgPVEauS4c9yG4enODJ+E2DujWvoKvTweg82ogx0sMxxmljLtvZbHBdHeZmqfR7CZRACZRACZRACSxHgN511A5z4zmAeQc900HHzxwgv9GJMw/I39hxDuzjzw32Sb/vk8Oc8isXvZ9+bc2ZMxodWFkxYSehZ7Ph33D9DZNbzt4y6P3shNaVnZXZ2ZwgznPu2ZaNRT61B/mzNm6N3M5vfrcm6gEWdqsHHnjg0hxkuVb2P69Sh+LGg3Ne1qKlpYx0f2sL1qPnlVE+6zD3P7n2lxIogRLYNQJ1mNu1GlkhP4sc5hgfKSIW026/7fbBKMmYOS9QUnIwXlrwpNBYjGNQzGLYKgZNSgTDKiXCK498tgBGiaBIidNCFGOx9CgS6wQKkQVBC4sc9MTPCE0pslvOfZ+6b3B4sy3wrDIWhTZOFAyXlBfldC2jrh3wON1xclg3j+uUa5V7KOuLHOYoqJ7wpqx5mgKbVepxlXz02hJYh4BJrr6XiY0+mH5MfnGYs3BNJq3qWLtOfla9h1yLg9NpdJjDS32pRwtG5DG5zIlOXRoLTKRdExnsTJ4ab8gnDmsfeP/B7qVx0DaZJq9yz6r1Mr5e2uS9nSA4ssU5Qr6kYxGNjLTrlc+bpCktbcKCovYgPQYF5SWPz013teAwZyJtQj0vpD2dNoc57FJXGZ+xM7YzyDiw1a4sgvr95Zdenrw6dc50vfp88S8vDjtscaJL2zvpDnM46HeecPTUJwOOcd6h3Tm2Ne7rG6knups2zAHLgwVeKbGKw9xJ32FOe6Sf5Rja67TdasMxpmrf2vL40G7dk79xtsBcPV7OYY480RfIYA9RcAYjm6WnbjjJ3fvxewf9+8Mf+fAlg+ElmTfdNEm+OZvaqU67srscHdN37cicQl1/4YtfGMbnOD3Mk2Xr/obRUTvMYcyhAiNjl/rBgZ5hHKB3GBPMBS7xWbdAva8ESmDrBMgqclL/ZWsw/nHiIi/JO/qdOYRFCw++cdpaJ0gn8pxskmYOi2Tka77nTE/xO3nsGvLT38h38mTfHOYwlXf2FjtKc5wjowcdbFrOt/5+wMh1ypdDOdl98CdX86aCOM5xLjO2bUPGShtrO+DRn41dxlbjlnxYLDs/dWJjfzL32DSoY2lIK6/b1U7Y38YOc8bd2SCvdZibpdLvJVACJVACJVACJbAcAXrYUTrM0dUcsc2wHbAXsHO/zZ7z8sV5wSsHZ7rn2P7jHtfHXrlPDnOpCXMYrNmXHNZ32aiUCR/6rxD93zm2Kw9asn2bC9jpLQ/Pmx/Q0bc1B5BHc8J///d/H+aEsaGZD1rP/exnPzvMCaVrXrBuUFZ16mEoa1jmRtqEtiItcx1zT8558+yw2m0d5tal3/tKoARK4PgI1GHu+FhvPaXDHOaymwSntJtuvGly5oozc9OPkkJR8dmgTolg8LPAz+i8qjGT4kRZ4TTgXfeUFYqUIA1xM1xTFn13rBMYoB2UUkZSQTnk+SMf/siwE4dFWU4YFvXGIYqvxTLGTrtxUHSSFwvBDO0Or0yhVG1DmRvnYRufKeN1mNsGycbxThAw2bDoYrHL4oHFLiFyKLvpcKoyudq1YPIbB6fT6jCnTnAg5y1YeqLLxNGkWv2avPq7yaWJpJDxxljDQeHGG24c5LRxy4TWwpYxwlgRmTzcuMY/0jQ+2IGTA5v2Jk/yU4e5d/6VrOpHXVhc1k6M6Zy+nMkD7cgY7+/G7RwxRPkuDsewOP3qdGe5i4u34rWbyEl+Jas+Ns9hjh5Ej3Gs6yiwqLthTafSfzgVZRF8bHxiDGJMs4vXaXkla9rhm28c7DT557/8edAryUb6uvasXceRIm3ZOe1ZHNqt79qzw2/k4DIOc+6VFjlH3pHHdERxaBN2jn7f+983OL0ZU+n54p66OAzXkJUv//Xl4Uni7HCnLsWrvj/4gQ9OPnXfwSt4yenh3qnevc1ADsi/OYT2o43jIC2ODhw09Wn6wTohvGcd5sRvTGLQ5dhMdpin7KLuv065e08JnCQCZFIWR47CYU78ZIU06CBsBHQTryI6TJaP5bo4HGQ+me4zeWKs3Kcd5rQbecdC2Y0NxhZjvO9ZPDR+ZBx0T3R9er6xxhhm4cyczkOZ5DkdhdzdVM5KF+d5czK86zB3YB8zrtkZmO62DcdB9dxQAiVQAiVQAiVQAsdJ4Kgc5qLHsj3QK+m4DvYINhW6r8/sFXT7sd4fGwOdWTyx5eQ6v++jw5z8Y6H8nL3sMudzuJgnKbvyCcousBdmXZk90gMz9PHMAdiW2Kc2nQNIVx44sf3bv/3bkEff5UMa1hfqMHf7wP3BBx8cNoXZ1fXtoeH0nxIogRLYAQJ1mNuBSlg3C/Mc5igLnE08yWsxyesnKCZ+mxey2LWpkpK4pU95nH3qdhy/NJNu7lvnHGVWmvksHnHf9e67Jvd8/J7Bs59BkJF2HFzvPk8FW5R7/PHHB6OvewXGXU8If/rTn5ncf///2orjxjj9bX2mvFJW572SlXFa2e+///5LO8wtagfbyk/jKYFVCJAVJl2PPPLIMMHxCjiyQj/UVu0E8JWvfGVybrozlwWWsRxZJZ2julb+5y3OkLkWJb72ta8NstiC0CpBvzYR58jrVYu2Qed0ofycHDylZQdMW4tfTpZa4OJ09PB0y3DOweIxeRfk0aKd1yNaxBo7vKyS31xr4cxiIoc5xgSf4/w0z2AQ2e1+MlfZ7r333mEhx2TadztQbFLvZL1FPBw5SFvkw8TvZKQJ+7Z2mFMexgSc0y6kpS1LSzvuDnNpLQfGFG2dwUu71H60dWOaduO7IwapyAU8c/wjto
PXhqlr9ZBDGz8NDnPad3bY0V+0NzJTP3JsO5AVDGCeFM1uMdGfpHXaHObIE4bCweFsaqDTbo1tZK/P9HVtmmGRLHS9NoyjI3Jc3fmbfuG6OCD4+zIOc+51H9lrl2fOlHYDIofEJaivOKFrJ5H77iW/GBjJb3nXL/2dHKZPWmRncDRezOrVQ+Rb+Ed/P2qHOYw4zHEqzA5zGNdhbgsV2ChK4BgIGOPJtW3vMEcORpcTPzlIfpOpOZPlZCM57Xqyg5wk0312RJYnLjLHZ39z3b45zKlSZXVgEV2NvpZFM2MHJjmyiKjcgnLT673FgI2FTkw/idPccNGa/8iXsYNMp4M76CHqQd1IxyLRthzFxD1r61JOY6WFufPT3ezM1brD3JoV2ttKoARKoARKoARKYAEBehgd3Rujvvvd7w62c3YMuhjnLLYOdlf69rIPYdMlPfhot/2XXnxp4uFH+m7sOX947g+TPzz/h8FWQq8XYs+J/p85gL+xvbiOXhyHsn10mFMWAV+2LPZ0XNi4Mj8alzVlxlN9ONhY2J08aEv/z47P7FJjZgcprfav+K2dxGHOWoQ5nN85zEmPPdjOb/IR29dqqRxcnbTY2ewwhwMmyiqt7jC3DtXeUwIlUAK7R6AOc7tXJ0vn6HIOc14n+s///M/DUwwUuUWBMXFbgaLEiGxHEY5oFgwpTAdpTPewmKZ1ZsHrYTfJQ5QxZ8FCntdGcQShINkxYracrr1w4cLgNPed73xnWCCkyLqOImUh2A5znF7E57ddC5TvOsztWq00P8sS0H4tzNuJknOYxf0DGXHguMrx1w6P+qFJ1aaTqWXztex1dZh7Oyn1aQzIOEA2ZRJtEuswoWTgiBNUjAepd3LXwpZ6J8N9N37Nyu+3p7z4GzlvDCLjOcyR+ZlAm7RrYxzmOFb5vG46cmACnTaRBTtpyb+0jEX/8i//MkykFzlRxtHO/YwzAmdCHIxFHAotNh42pg83zfwjb+LTzywoYqE/MRgwInmNudcQWtw7KkeYmSwN7cQCq3bC2cuuJZxm6TbakjyrD2X1hKKFSHl1GI/HTyS6VjwMJDHS+O00OMxxGKBz4Zc2Y1Haom12ysV+k7Y9W3fi0n7Ui2NWNuvjZPtp2GEu+qe2bAc5bZghkazhWOH36Ka44aXtpj07+01f9Hdtnx6tLrVp9+oD+mV2XSUDFvVT7Z4cYsRkONTn5cl3fxOXtJwjA9Svv40P6coPWcVBzhisTZHL+qD7jyLI+3E4zDGwx2FOHWFB1hoH8kpW/QiDhhIogd0iQFbRr7btMEcntSBE/pKb0UuMZ3RYfxfmyXJyMbKcPCHLyTN6Cfnru/vIzn10mFNu4wIdi56fcSbji3GLLMXJZ393RM93b8YcC5nGEjoKnX/T3TzFrd6Md/ToH/3oR0Nekp6HY+IwJ+1NAx3H3JWubpz1XZusw9ymZHt/CZRACZRACZRACRxOgN5FV9+Wwxw9kr5qbsF+Q7eNfZJtkm5Lz5tMlxvfNV3PjA019hy2SXMAB91TfOzg8sg+RG92/z47zMl/5kn0fbo+hzHzHN8dbP1Y+Ty2yapNcyC2FvZmNm12Wge7GJ7rBqylZ07olazqTj7klxOb+cbYYU4drRvEqdwc5mxIoPzKKw9xmLNhiVeyageOcdBurZGzkYqDzRAXc0j2fvlkh9KuVmWCt/brgR62Ye0WF3nDPHMhfgLilm5DCZRACZTAfAJ1mJvPZS9+vZzDHGMshzle+6sOtusCoAxSmAzQFACKCqWKonDNNRa5r55cfdV0290tOs2JnwJKOfCZQkAJshsGBhSCj3/840MexkqB6xjAKVbZnUUc8ooXheeeez4+daa4f1hAo2SM71+X0TbvY3y3oNod5rZJtXEdFwETDjLDTo8WODjx+E3fFCxWW0jh+Kovm2BtMsHZdrksBMXBydkEiIwgc03+TtsOc+Gr/ixcmUyaQBoHyFaf1fcwiX5pugvTn/44XONvJnjkt8kiQ4JJtMPr9/y2yRgm7jhHPP3000P62pmJqIUzk1r1ZTeITWS8/CuLdmwRzcKdNmJxlHMLZxMOc9q08WVeSHs6yQ5z2gf+cXDjWMRwwOCgzWg76oajDhmg3/t+/XUH52uve7vDnPiMheTIk08+OfTDGKXU60neYU57s4MYh1CvR6DTCJ6sPTd10CQ7PU2pDW7Stue11cN+IwtPi8Mc+aK8jKocvZy1ZeX3N0F9OPR77ZlM06aHdn1xXEsdaf8MfV5TbiFeXyH/lnWYk550tQ33kynyxWArjRjvXENmJehH/m6MlS/p6YPkMYc5coujnj45a/xLHNs4k5lH6TBHVpAPnkYfO1HrH+rIk8EcK+yAhMFx9ptt8GscJXAaCJCLZNw2Heaiw7FjxOmZHCc7LTiQj5GLzpHh4zP5SV6TG2QZncYDhGQauUPO+vu+OsyN21bqgDMgPurDEV0/i2bZXdU4adzBwFhoYQgHNho7v2V8Gqex7GdxituYR3+m88uHehCvBye9dUFaFow2DcppsUta5hXKJg8ejjF2nJ/uMGcsMR+cDa4zvpsj0N0sqArGVbYmtjvO8cbcdcZa8Rn3tTt6sXaNgwVJOnFfyTpbI/1eAiVQAiVQAiWwTwToXdt0mGMfoLey4cSeYz4gDfo83Y0NxEHXi+4fO2Ue5qXj093oXvRDdk52Hfn12z47zI3bB15sVnR89n2M6MY+4+gwB8rhWno6fRQzejldl05KV8Z03aBu8GUTteubOZx8madIiz7tzTyc2NjgNllPUofipl9/61vfGuzZyi0P5obnLtpfPZCvHSjvOMhnHebGRPq5BEqgBHaTQB3mdrNelsrVMg5zjIOUMorbUQdKgoVCiiEjosVbCoXfKQqMgBajbrrxpsF4uXF+prqHuBmgKWIW331Omhb6KCxf/OIXh0XjKK/jdBlTKVSeDqBgUYgpNjHaUq4Ycx2cHeYpPeP4jvszRbUOc8dNvelti4D+a9JAbtjpxatZx5Mpxn2THJMb/dhive+7EkwM4+DkrCxk3Wl3mEv9qF/BmazCa5hA/+VgAv30z58eDBIWJE00XWNiaxLLkS07fJLlJrfrBvHabUIdmdzKg3GCYcOCnbbFMc8i1+ykdpU0GQGMKXlNk4U77dnYw7hihyYOc3YwYmSZF9KeTrLDHPZYcW7j7I0TfUb70OcZTNSL8dcTgep/MExde91QZ9dc+48nODEUn7Gf/DCWq199UVuyOHpSHeb0K8yeufDMoHPRvX7z7G+GNmwxWh/iLPfVr3516FP0muMK+J8Ghzl1QHZpv/qucUy5OQ/QF/Vz7Vc71p49yKFuyJ4cronOqX7c64ETzlx0ae2YDGGEW2aHOXnSv+RLPGQJwy89WT8yhpKn6ijjrXvk1+/+bgyTV7KK7u673zcxMC7b9rTpo3SYw0YaHuwxLjBaYiFwDDRv4LCgzpR5kzFh2TL3uhIogdUIkItk3DYd5shDspwcpyuyC9AtpEVek4XkuMP4Sj6Q3+Rm5DlZnbGWLsJ56Rvf+MYlZ36y1jUnwWEuNaZMg
[Inline base64-encoded PNG image removed; it contained the model-identification table referenced in the next sentence.]

Looking at the table above, we can conclude that we have an ARMA model => we have to specify both p and q. When both terms (p and q) are supposed to be nonzero, we can use the AIC (Akaike information criterion) and the BIC (Bayesian information criterion). AIC is the better choice for a forecasting model; BIC is better when searching for an explanatory model. I will write a loop which will help to determine the order of the model.
###Code
for p in range(6):
    for q in range(6):
        model_tr = SARIMAX(data, order=(p,1,q))
        model_tr_fit = model_tr.fit()
        print(p, q, model_tr_fit.aic, model_tr_fit.bic)
###Output
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/base/tsa_model.py:527: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
  % freq, ValueWarning)
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/statespace/sarimax.py:977: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
...
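###Markdown
Rather than reading the best order off the printed table, the same grid search can collect its results and pick the minimum-AIC order programmatically. A minimal sketch (it reuses the `data` series and the `SARIMAX` import from above; the variable names are illustrative):
###Code
# Collect the AIC of every (p, q) combination in a dictionary
aic_results = {}
for p in range(6):
    for q in range(6):
        fit = SARIMAX(data, order=(p, 1, q)).fit()
        aic_results[(p, q)] = fit.aic

# The order with the lowest AIC wins
best_p, best_q = min(aic_results, key=aic_results.get)
print(best_p, best_q, aic_results[(best_p, best_q)])
###Output
_____no_output_____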
###Markdown
The best AIC result is obtained for p = 5 and q = 4.
###Code
model = SARIMAX(data, order=(5,1,4))
model_fit = model.fit()
print(model_fit.summary())
###Output
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/base/tsa_model.py:527: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
  % freq, ValueWarning)
###Markdown
Interpretation of our ARIMA model: $Y_t = Y_{t-1} + e_t - 0.8205\,e_{t-1}$ (for the auto-ARIMA model).
###Code
residuals = DataFrame(model_fit.resid)
residuals.plot()
pyplot.show()
residuals.plot(kind='kde')
pyplot.show()
print(residuals.describe())
print(model_fit.plot_diagnostics(figsize=(7,7)))
###Output
Figure(504x504)
###Markdown
Interpretation of residual plots:
1. Standardized residuals: they should fluctuate around zero with no visible trend or seasonality, like white noise.
2. Histogram plus estimated density: the KDE curve should roughly follow the N(0,1) reference density.
3. Normal Q-Q: all the dots should fall in line with the red line. Any significant deviations would imply the distribution is skewed. => We can conclude that our data distribution may be a little skewed.
4. Correlogram: the residual autocorrelations should be insignificant at all lags.

**Accuracy metrics**

Here are some of the commonly used accuracy metrics for time-series data:
1. Mean Absolute Percentage Error (MAPE)
2. Mean Error (ME)
3. Mean Absolute Error (MAE)
4. Mean Percentage Error (MPE)
5. Root Mean Squared Error (RMSE)
6. Min-Max Error (minmax)

Below we will get those metrics for our models.
###Code
def forecast_accuracy(forecast, actual):
    mape = mean_absolute_percentage_error(actual, forecast)  # MAPE
    me = np.mean(forecast - actual)                          # ME
    mae = mean_absolute_error(actual, forecast)              # MAE
    mpe = np.mean((forecast - actual)/actual)                # MPE
    rmse = sqrt(mean_squared_error(actual, forecast))        # RMSE
    mins = np.amin(np.hstack([forecast[:,None], actual[:,None]]), axis=1)
    maxs = np.amax(np.hstack([forecast[:,None], actual[:,None]]), axis=1)
    minmax = 1 - np.mean(mins/maxs)                          # minmax
    return({'mape': mape, 'me': me, 'mae': mae,
            'mpe': mpe, 'rmse': rmse, 'minmax': minmax})
###Output
_____no_output_____
###Markdown
**Rolling Forecast ARIMA**
###Code
X = data.values
size = int(len(X) * 0.66)
train, test = X[0:size], X[size:len(X)]
history = list(train[:])
predictions = list()
for t in range(len(test)):
    model = ARIMA(history, order=(5,1,0))
    model_fit = model.fit()
    output = model_fit.forecast()
    yhat = output[0]
    predictions.append(yhat)
    obs = test[t]
    history.append(obs)
    print('predicted = %f, expected = %f' % (yhat, obs))
pyplot.plot(test)
pyplot.plot(predictions, color='red')
pyplot.show()
###Output
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/arima_model.py:472: FutureWarning:
statsmodels.tsa.arima_model.ARMA and statsmodels.tsa.arima_model.ARIMA have been deprecated in favor of statsmodels.tsa.arima.model.ARIMA (note the . between arima and model) and statsmodels.tsa.SARIMAX. These will be removed after the 0.12 release.

statsmodels.tsa.arima.model.ARIMA makes use of the statespace framework and is both well tested and maintained.

To silence this warning and continue using ARMA and ARIMA until they are removed, use:

import warnings
warnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARMA', FutureWarning)
warnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARIMA', FutureWarning)

  warnings.warn(ARIMA_DEPRECATION_WARN, FutureWarning)
###Markdown
**Forecast Accuracy**
###Code
print(forecast_accuracy(np.array(predictions), np.array(test)))

pred = list()
test1 = list()
for i in range(len(test)):
    pred.append(int(predictions[i]))
    test1.append(int(test[i]))

print(f"Accuracy score: {accuracy_score(test1, pred)}")
print(f"R-squared: {round(sm.r2_score(test1, pred), 2)}")
###Output
Accuracy score: 0.12403100775193798
R-squared: 0.19
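###Markdown
For reference, the quantities returned by `forecast_accuracy` above correspond to the following definitions, writing $y_t$ for the actual values, $\hat{y}_t$ for the forecasts, and $n$ for the number of test points (the minmax form mirrors the element-wise min/max construction in the code):

$$\mathrm{MAPE} = \frac{1}{n}\sum_{t=1}^{n}\left|\frac{y_t - \hat{y}_t}{y_t}\right| \qquad \mathrm{ME} = \frac{1}{n}\sum_{t=1}^{n}(\hat{y}_t - y_t) \qquad \mathrm{MAE} = \frac{1}{n}\sum_{t=1}^{n}\left|\hat{y}_t - y_t\right|$$

$$\mathrm{MPE} = \frac{1}{n}\sum_{t=1}^{n}\frac{\hat{y}_t - y_t}{y_t} \qquad \mathrm{RMSE} = \sqrt{\frac{1}{n}\sum_{t=1}^{n}(\hat{y}_t - y_t)^2} \qquad \mathrm{minmax} = 1 - \frac{1}{n}\sum_{t=1}^{n}\frac{\min(y_t, \hat{y}_t)}{\max(y_t, \hat{y}_t)}$$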
Machine Learning/Course files/stdDevVariance/StdDevVariance.ipynb
###Markdown
Standard Deviation and Variance
###Code
%matplotlib inline

import numpy as np
import matplotlib.pyplot as plt

incomes = np.random.normal(100.0, 50.0, 10000)

plt.hist(incomes, 50)
plt.show()

incomes.std()
incomes.var()
###Output
_____no_output_____
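###Markdown
For reference, both quantities can also be computed directly from their definitions: the variance is the mean squared deviation from the mean, and the standard deviation is its square root. A minimal sketch reusing the `incomes` array from above (NumPy's defaults use the population form, i.e. `ddof=0`, so the results should match):
###Code
# Mean squared deviation from the mean (population variance, ddof=0)
manual_var = np.mean((incomes - incomes.mean()) ** 2)
manual_std = manual_var ** 0.5

# These should agree with NumPy's built-in results
print(manual_var, incomes.var())
print(manual_std, incomes.std())
###Output
_____no_output_____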
programcao_python.ipynb
###Markdown
1 - Introduction to Python

Python is one of the most popular programming languages in the world. Created in the early 1990s, it has a wide variety of uses, from automating repetitive tasks and writing web applications to building machine learning models and implementing neural networks. Data scientists, mathematicians, and researchers in particular like Python because of its powerful, easy-to-understand syntax and the wide variety of open-source packages available. Packages are commonly used shared code libraries that are freely available to anyone.

Python has a simple, easy-to-learn syntax that emphasizes readability. Applications written in Python can run on virtually any computer, including those running Windows, macOS, and popular Linux distributions. In addition, the ecosystem contains a rich set of development tools for writing, debugging, and publishing Python applications.

Finally, Python is backed by an active community of users eager to help new programmers learn the Python style, in which you not only get the syntax right, but use the language the way it was designed to be used.

2 - Programming Logic

Algorithms

A set of perfectly defined rules and logical procedures that lead to the solution of a problem in a finite number of steps.

**But what does that mean?**

When we go to the bakery to buy bread, for example, we need to create an algorithm to carry out that action. In this sequence, we order the steps in a succession that makes sense and is able to reach the initial goal; after all, it makes no sense to first go out to the street and only then come back to get your wallet, for example. It is also important to understand that there is more than one algorithm capable of solving the same problem and, as long as it works, there is no right or wrong, but the notion of more or less efficient does apply here.

Below are two examples of sequences where the goal of both is, in the end, to have a fried egg on the plate.

![Frying an Egg - 1](https://labs.bluesoft.com.br/wp-content/uploads/2018/12/Captura-de-Tela-2018-12-07-a%CC%80s-10.51.39.png)
![Frying an Egg - 2](https://labs.bluesoft.com.br/wp-content/uploads/2018/12/Captura-de-Tela-2018-12-07-a%CC%80s-10.51.22.png)

While the first example is more careful and prevents the person from frying a rotten egg or accidentally splashing oil when dropping the egg into the hot fat, the second is faster and more practical. Both are correct because both reach the final goal, but perhaps the first is more efficient because it avoids problems along the way.

What are variables?

In mathematics we use variables to do calculations, for example:

x = 5 + 10
x = 15

In the example above, **x** is the variable and it is being used to store something we can use later.

Programming is not very different: we also use variables to store what we need, as if they were little boxes for organizing everything we are going to use, so we always know where it is.
###Code
numero = 5 + 10
print(numero)

caixa_de_sapatos = 15
print(caixa_de_sapatos)
###Output
15
###Markdown
In the example above we are storing **"Sapatos"** (shoes) in the variable **caixa_de_sapatos** and **"Camisas"** (shirts) in the variable **caixa_de_camisas**.

Each variable only holds one thing at a time: if I try to put **"Sapatos"** into the variable **caixa_de_camisas**, there will be no room left for the **"Camisas"**.
See the example:
###Code
caixa_de_sapatos = "Camisas"
print(caixa_de_sapatos)
###Output
Camisas
###Markdown
3 - Basic Commands

Print

As you may have noticed in the previous examples, we use "print" to display things on the screen.

"print" is one of the many functions we have in Python, and its main job is to display what we call data output. We normally use it to show messages to the end user, but it is also a good tool for testing whether your code is behaving as you planned, since it lets you display the contents of variables while the program is running (at runtime).
###Code
caixa_de_fruta1 = "b"
print("Passo 1:" + caixa_de_fruta1)

caixa_de_fruta2 = caixa_de_fruta1 + "ana"
print("Passo 2:" + caixa_de_fruta2)

caixa_de_fruta4 = caixa_de_fruta2 + "na"
print("Final:" + caixa_de_fruta4)
###Output
Passo 1:b
Passo 2:bana
Final:banana
###Markdown
Type

The type function displays the type of a value or variable. The value or variable, which is called the function's argument, has to come between parentheses. It is common to say that a function 'receives' one or more values and 'returns' a result. The result is called the return value.
###Code
x = True
tipo = type(x)
print(tipo)
###Output
<class 'bool'>
###Markdown
Exercises

Create a variable that receives your name and another that receives your e-mail, and display them on the screen.
###Code
# Here goes my name
nOme = "Russel Franco"

# Here goes my email
emAil = "[email protected]"

# Here I display my data
print(nOme)
print(emAil)

'''
In this example I will show how multi-line comments work
print(123)
'''
print("Texto")
###Output
Texto
###Markdown
Importing a library

One of the things that makes Python so powerful is the ability to import libraries that provide functions not included in Python by default, but which can be brought in as needed. As an example we have the "math" library, which is used for various mathematical calculations such as square roots.

See the example:
###Code
import math

x = math.sqrt(4)
print(x)
###Output
2.0
###Markdown
Exercise

Use the math library to compute the square root of 25 and display it on the screen.
###Code
## Write your solution here
import math

raiz_quadrada = math.sqrt(25)
print(raiz_quadrada)
###Output
5.0
###Markdown
4 - Variables and Data Types

Python has several basic or fundamental types: integers, floating-point numbers, booleans, complex numbers, character strings, etc.

Integers

Variable types in Python are inferred automatically by the interpreter, with no need to specify the type of a variable at the moment it is declared. The examples below will make these concepts clearer.
###Code
# Integers
x = 1
y = 2
resultado = x + y
print(resultado)
print(type(resultado))
###Output
3
<class 'int'>
###Markdown
Floating-point numbers

If the name floating point scares you, don't worry. Floating-point numbers are nothing more than numbers with a decimal point (or, as we often say, "broken numbers"), in contrast to the integers we are more used to. Let's look at some examples of expressions with floating-point numbers.
###Code
# Floating-point numbers
x = 1.1
y = 2.2
resultado = x + y
print(resultado)
print(type(resultado))
###Output
3.3000000000000003
<class 'float'>
###Markdown
It is important to note that if at least one of the values involved in a numeric expression is a floating-point number, the result of the expression will also be a floating-point number, as shown in the example below.
###Code
x = 10
y = 2.5
resultado = x * y
print(resultado)
print(type(resultado))
###Output
25.0
<class 'float'>
###Markdown
Strings, or character sequences

Strings are nothing more than sequences (chains) of characters. In other words, a string is simply a sequence of zero or more letters together. Let's look at some examples.
###Code
string_vazia = ''
uma_letra = 'a'
varias_letras = 'abacate'
print(type(string_vazia))
print(type(uma_letra))
print(type(varias_letras))
###Output
<class 'str'>
<class 'str'>
<class 'str'>
###Markdown
You may have noticed that in the examples above, the sequence of letters in the strings appears between single quotes. In Python, as in other languages, we need the quotes to declare a variable of the string type. Both single and double quotes work. The reason the quotes are necessary is simple: they distinguish the declaration of a string variable from an assignment between two variables. The example below will make this clearer.
###Code
v = 'var'
s = v
print(v)
print(s)
###Output
var
var
###Markdown
Boolean values

A boolean (or ***bool*** type), in computer science, is a logical data type that can have only one of two possible values: true or false. In Python, boolean conditions are used to decide which parts of the code will be executed or repeated. These values are useful for representing, for example, the result of a comparison. Try it:
###Code
# Creating the variables
a = 5
b = 6
print("Valor de a: ", a)
print("Valor de b: ", b)

c = a < b  # c holds the result of comparing a < b
print("Valor de c: ", c)

d = a > b  # d holds the result of comparing a > b
print("Valor de d: ", d)

e = a == b  # e holds the result of comparing a == b
print("Valor de e: ", e)

a = True
true = True
true = "True"
type(true)
###Output
_____no_output_____
###Markdown
Keep in mind that True and False are not the same thing as strings (text or phrases): these values are of type `bool` and are used to express a truth value (true or false), so `True` (**bool**) is not the same thing as "True" (**string**).

Logical operators and expressions

Just as arithmetic expressions can be formed with arithmetic operators (such as 2 + 3 * 4), logical expressions are formed with logical operators. To build logical expressions you can use three logical operators: and, or and not. The meaning of these operators is quite intuitive, since it matches the way we interpret them in everyday English (`and`, `or`, `not`).

1. **Operator** `and`

Given two boolean values A and B, the logical operator and results in True only when A and B are both True, and returns False otherwise. Run the program and look at the result, then change the value `False` to `True` and run it again:
###Code
C = False
D = False  # After running once, change this value from False to True and run again.
print ("Os valores de C e D são: ", C and D)  # Logical comparison between the two variables.

c = True
print(c)
###Output
True
###Markdown
2. **Operator** `or`

Given two boolean values A and B, the logical operator `or` results in `False` only when A and B are both False, and returns True otherwise. Run the program and look at the result, then change the value `True` to `False` and run it again:
###Code
A = True  # After running once, change this value from True to False and run again.
B = False
print ("Os valores de A ou B são: ", A or B)
###Output
Os valores de A ou B são: True
###Markdown
Here is a table showing the result of the `or` operator for all combinations of A and B:

or | A = True | A = False
:------ | :-----: | :----:
B = True | True | True
B = False | True | False

3. **Operator** `not`

The logical operator not flips the value of its argument: if the argument is `True`, it returns `False`, and if the argument is `False`, it returns `True`.

5 - Conditionals

Conditional Structures

In this topic we will talk about conditional structures in Python, but before showing an example of a conditional structure in Python, let's cover a bit of theory. A conditional structure, as its name says, checks the condition of the arguments passed to it and executes a command if the condition is true, as can be seen in the algorithm below:

`IF condition`
`THEN command`

For the Python version, let's start by getting to know Python's conditional operators. Conditional operators are used to compare the values passed in, and they return the value True or False.

Operator | Type | Description
--- | --- | ---
== | Equality | Checks whether two values are equal.
!= | Equality | Checks whether two values are different.
> | Comparison | Checks whether value A is greater than value B.
< | Comparison | Checks whether value A is less than value B.
>= | Comparison | Checks whether value A is greater than or equal to value B.
<= | Comparison | Checks whether value A is less than or equal to value B.
in | Sequence | Checks whether value A is contained in a collection.

1. Simple Conditional Structure

Now that we know the operators, let's see how to write a conditional structure in Python. Building on our earlier example, we will check whether the value entered by the user is greater than zero and display the result on the screen. Below we can see the structure of the IF conditional; run the program and look at the result, then change the value of the variable `valor` from **0** to **1** and run it again:
###Code
valor = 0

if valor > 0:
    print ("Maior que Zero.")
###Output
_____no_output_____
###Markdown
Notice that when the command was run with the variable `valor` set to **0**, the system returned absolutely nothing: since the comparison requires the variable to be a number greater than **0**, as long as the value stays at 0 the system evaluates the comparison as the boolean `False`. If you change the variable `valor` to any value greater than **0** (1, 2, 3, etc.), the system evaluates the comparison as true, that is, `True`, because the variable is greater than **0**.

2. Compound Conditional Structure

The compound conditional structure executes one command when the condition is true and another when it is false. Let's improve our previous example: now we also show the message **"Menor que um."** when the condition fails, as we can see below:
###Code
valor = 0

if valor > 1:
    print ("Maior que um.")
else:
    print ("Menor que um.")
###Output
Menor que um.
###Markdown
Now, using a compound conditional structure, we see that there is a result for both cases: when the value equals **0** and also for values greater than **0**.
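###Markdown
The logical operators from section 4 combine naturally with these conditional structures. A small illustrative sketch (the variable and the messages are made up for this example):
###Code
valor = 8

# 'and' joins two comparisons into a single condition
if valor > 0 and valor % 2 == 0:
    print("Positivo e par.")

# 'not' inverts the result of a comparison
if not valor > 10:
    print("Não é maior que dez.")
###Output
Positivo e par.
Não é maior que dez.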
###Markdown
3. Chained Conditional Structure

A chained conditional structure is used when we need to check more than one condition, that is, an IF inside another IF. Extending our example, we now display a different message for each range the value falls into.
###Code
valor = 5

if valor > 4:
    print ("Maior que Quatro.")
elif valor > 3:
    print ("Maior que Três.")
elif valor > 2:
    print ("Maior que Dois.")
elif valor > 1:
    print ("Maior que Um.")
else:
    print ("Menor que Um.")
###Output
Maior que Quatro.
###Markdown
In this example we can see a different command, elif. This command is the fusion of ELSE+IF ("senão se" in Portuguese) and is used in chained conditional structures. Each condition is checked in order. If the first is false, the next is checked, and so on. If one of them is true, the corresponding branch is executed and the statement ends. Even if more than one condition is true, only the first true branch is executed.

4. Nested Conditional Structure

A conditional can also be nested inside another. We could have written the example in the previous section this way:
###Code
valor = 0

if valor > 0:
    print ("Maior que Zero.")
else:
    if valor == 0:
        print ("Igual a Zero.")
    else:
        print ("Menor que Zero.")
###Output
Igual a Zero.
###Markdown
The outer conditional contains two branches. The first branch contains a simple statement. The second branch contains another if statement, which has two branches of its own. Those two branches are simple statements, although they could be conditional statements as well.

Although the indentation of the statements makes the structure of the conditionals evident, nested conditionals quickly become hard to read. It is a good idea to avoid them when possible (one way to do so is shown in the sketch below).
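###Markdown
One way to avoid the nesting above is to rewrite it with `elif`, which keeps every branch at the same indentation level. A minimal sketch, equivalent to the nested version:
###Code
valor = 0

if valor > 0:
    print ("Maior que Zero.")
elif valor == 0:
    print ("Igual a Zero.")
else:
    print ("Menor que Zero.")
###Output
Igual a Zero.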
###Code
###Output
_____no_output_____
###Markdown
6.1 - Strings

String methods
###Code
string_pcdas = 'plataforma DE cieNcia de dAdos aplicada à saúde'

string_pcdas[11]
string_pcdas[11:13]

print('Maiúsculo:', string_pcdas.upper())
print()
print('Minúsculo:', string_pcdas.lower())
print()
print('Primeira letra da frase:', string_pcdas.capitalize())
print()
print('Primeira letra de cada palavra:', string_pcdas.title())

if 'CIENCIA' in string_pcdas:
    print("Valor encontrado")
else:
    print("Valor não encontrado")

if 'ciencia' in string_pcdas.lower():
    print("Valor encontrado")
else:
    print("Valor não encontrado")

if 'for' in string_pcdas.lower():
    print("Valor encontrado")
else:
    print("Valor não encontrado")

string_frutas = 'banana,maçã,pera,kiwi,uva,morango'
print(string_frutas.split(sep='k'))

string_espacos = 'Meu Email [email protected] '
print(string_espacos)
print(string_espacos.strip())
print(len(string_espacos))
print(len(string_espacos.strip()))

num_string = '1,564'
print(num_string.replace(',', '.'))

num_string = '1.564'
float(num_string)

num_string = '1,564'
num_string = num_string.replace(',', '.')
float(num_string)

nome = "Balthazar"
idade = 21
print(f'Olá meu nome é {nome} e tenho {idade} anos.')
###Output
_____no_output_____
###Markdown
6.2 - Lists

Subsets of a list (access by index and slice notation):
###Code
lista_conteudos_do_curso = ["introdução", "python", "listas", "funções", 'pandas']
print(lista_conteudos_do_curso)

lista_conteudos_do_curso[1]
lista_conteudos_do_curso[1:4]
###Output
_____no_output_____
###Markdown
Adding an item to my list
###Code
lista_conteudos_do_curso.append("graficos")
lista_conteudos_do_curso.append("Machine Learning")
print(lista_conteudos_do_curso)
###Output
['introdução', 'python', 'listas', 'funções', 'pandas', 'graficos', 'Machine Learning']
###Markdown
Removing an item from my list
###Code
lista_conteudos_do_curso.remove("funções")
print(lista_conteudos_do_curso)

lista_conteudos_do_curso.pop(0)
print(lista_conteudos_do_curso)
###Output
['python', 'listas', 'pandas', 'graficos']
###Markdown
Length of a list
###Code
len(lista_conteudos_do_curso)
###Output
_____no_output_____
###Markdown
7 - Dictionaries

I am programming a phone contact book system in Python. For that, I need to store the contacts' numbers. At first, we might think of using a list:
###Code
telefones = ['1234-5678', '9999-9999', '8765-4321', '8877-7788']
###Output
_____no_output_____
###Markdown
Fine, we have the phone numbers stored. But... what is the point of having a list of loose numbers? Whose number is in the second position?

We need, somehow, to connect the phone numbers to their respective contacts. We already know a type that can help us with this, the tuple:
###Code
contato = ('Yan', '1234-5678')
###Output
_____no_output_____
###Markdown
So that we don't need one variable per contact, we can put them directly into a contact list:
###Code
contatos_lista = [('Yan', '1234-5678'), ('Pedro', '9999-9999'), ('Ana', '8765-4321'), ('Marina', '8877-7788')]
###Output
_____no_output_____
###Markdown
OK! If we want to access Marina's phone number, we can do:
###Code
print(contatos_lista[3][1])
###Output
8877-7788
###Markdown
We did it!
Now, Pedro's number: ... but wait, what exactly is Pedro's position in our contact list?

Notice that, the way things are, it barely makes a difference to have the contacts' names saved, because we can only access each contact by its position in the list. Isn't there a better way?

Mapping contacts with a dictionary

So far we have a contact list in which, at least, each contact has its name and phone number connected. However, for now, we can only access a contact individually by its position in the list, not by its own name. The ideal would be to map each contact's name to its phone number, avoiding the other problems.

For example, we can say that the contact Yan has the phone number 1234-5678. Then, when we want to know Yan's phone number, we just go to his name. That way, we don't need to memorize which position in the list the number sits at; we only need to know the contact's name.

Notice that, in this case, we are creating a kind of dictionary, similar to a Portuguese or English language dictionary. In those dictionaries, we have a key, which is the word we are looking up; in our case, the contact's name. When we find that word, we can see its meaning, that is, the value of that word in the language; in our case, the phone number.

This kind of structure is widely used in many programming languages (though it usually goes by another name, such as associative array). With it, we get behavior much like that of a dictionary.

Well, let's tell Python to create one of these dictionaries for us. In Python, we use curly braces ({}) to build our dictionaries. In this case, we tell Python that the key 'Yan' has (:) the value 1234-5678 as its phone number:
###Code
contatos = {'Yan': '1234-5678'}
print(type(contatos))
###Output
<class 'dict'>
###Markdown
And look at the type of the contatos variable we just created: dict, indeed a dictionary. But will we have to retype all the contact data we already put in our contact list? We can also create a dictionary using its constructor function dict(), passing a list of tuples as a parameter, as in our case:
###Code
contatos_lista = [('Yan', '1234-5678'), ('Pedro', '9999-9999'), ('Ana', '8765-4321'), ('Marina', '8877-7788')]

contatos = dict(contatos_lista)
print(contatos)
###Output
{'Yan': '1234-5678', 'Pedro': '9999-9999', 'Ana': '8765-4321', 'Marina': '8877-7788'}
###Markdown
Right, our structure is ready! But hold on, our dictionary is not in alphabetical order; it has no order at all... How can we access its items?

Accessing the items of a dictionary

We can access its values much the way we access the values of a list, for example, with the difference that we use the keys we defined instead of numeric indices:
###Code
print(contatos['Ana'])
###Output
8765-4321
###Markdown
All good! Until, after a while, I wanted to see if I could find the phone number of an old friend, João. I did the following:
###Code
print(contatos['João'])
###Output
_____no_output_____
###Markdown
Hmm... a KeyError exception indicating that the key 'João' was not found. But it is a bit odd to print that whole message for the user, isn't it? It can be confusing...
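###Markdown
One way to keep the program from stopping here would be to catch the exception ourselves. A quick sketch (the fallback message is made up for this example):
###Code
try:
    print(contatos['João'])
except KeyError:
    # Runs only when the key is missing from the dictionary
    print('Contato não encontrado')
###Output
Contato não encontrado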
Can't we replace that? Dictionaries have a specific method for looking up values, get(), to which we can pass as parameters the key we want and a default value to return in case that key is not found: ###Code print(contatos.get('Yan', 'Contato não encontrado')) print(contatos.get('João', 'Contato não encontrado')) ###Output 1234-5678 Contato não encontrado ###Markdown Much better now! We can also check whether a contact is in our dictionary using the in keyword: ###Code print('Yan' in contatos) ###Output True ###Markdown As expected! The other day, I found a loose number and wanted to check whether it was in my contact book: ###Code print('9999-9999' in contatos) ###Output False ###Markdown Huh! But that number really is in the contact book, it's Pedro's number! So why was the result False? It turns out that in, used this way, checks only the dictionary's keys, not its values. To look at the values, we can use the values() method: ###Code print('9999-9999' in contatos.values()) ###Output True ###Markdown There we go! We have our mapping structure and we can already view its data. But now, what else can we do? Adding values to the dictionary I met my friend João and finally decided to add his number to my contact book. But… how can I do that with our contacts dictionary? I tried using an append() method, like with lists, and look what came up: That method doesn't exist… I then tried creating another dictionary and adding the two together, but the result was this: Traceback (most recent call last): File "", line 1, in TypeError: unsupported operand type(s) for +: 'dict' and 'dict' That doesn't work either! The syntax for adding an item to a dictionary is a bit different from other Python types, but also quite direct. For example, if we want to add João to our contacts dictionary, we just assign his phone number to the key 'João': ###Code contatos['João'] = '8887-7778' print(contatos) ###Output {'Yan': '1234-5678', 'Pedro': '9999-9999', 'Ana': '8765-4321', 'Marina': '8877-7788', 'João': '8887-7778'} ###Markdown It worked! Removing items from the dictionary Unfortunately, my friend Marina lost her cell phone and, consequently, no longer owns the number saved in my contacts dictionary. We now need to delete the item that corresponds to her. But how? A simple way is using the del statement, like this: ###Code del contatos['Marina'] print(contatos) ###Output {'Yan': '1234-5678', 'Pedro': '9999-9999', 'Ana': '8765-4321', 'João': '8887-7778'} ###Markdown Right! But what if we try to remove an item that doesn't exist? ###Code del contatos['Catarina'] ###Output _____no_output_____ ###Markdown A KeyError, like the one we got when trying to fetch an item that didn't exist! To avoid this exception, there is also a dictionary method that can help us: pop(). The pop() method, besides removing the element with the specified key from the dictionary, returns that element's value.
We can also define a default return value for when the key is not found: ###Code contatos = {'Yan': '1234-5678', 'Pedro': '9999-9999', 'Ana': '8765-4321', 'Marina': '8877-7788', 'João': '8887-7778'} print(contatos.pop('Marina', 'Contato não encontrado')) print(contatos.pop('Catarina', 'Contato não encontrado')) print() print(contatos) ###Output 8877-7788 Contato não encontrado {'Yan': '1234-5678', 'Pedro': '9999-9999', 'Ana': '8765-4321', 'João': '8887-7778'} ###Markdown Create a list that holds your age and your height and display it on the screen ###Code meus_dados = [32, 1.77] meus_dados ###Output _____no_output_____
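###Markdown As a closing recap (an extra sketch, not part of the original exercise), we can combine what we saw about get(), pop(), and items() in a tiny contact book: ###Code agenda = {'Yan': '1234-5678', 'Ana': '8765-4321'}
agenda['João'] = '8887-7778'  # add a contact
print(agenda.get('Marina', 'Contato não encontrado'))  # safe lookup with a default
agenda.pop('Ana', None)  # safe removal, no KeyError if the key is missing
for nome, telefone in agenda.items():  # iterate over key/value pairs
    print(nome, telefone) ###Output Contato não encontrado Yan 1234-5678 João 8887-7778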
Untitled-Copy1 (1).ipynb
###Markdown ASSIGNMENT-1 DAY-3 ###Code altitude=6000 if altitude<=1000: print("safe to land") elif altitude<=5000: print("come down to 1000ft") else: print("go around and try later") altitude=2500 if altitude<=1000: print("safe to land") elif altitude<=5000: print("come down to 1000ft") else: print("go around and try later") altitude=1000 if altitude<=1000: print("safe to land") elif altitude<=5000: print("come down to 1000ft") else: print("go around and try later") ###Output safe to land ###Markdown ASSIGNMENT-2 DAY-3 ###Code lower = 1 upper = 200 for num in range(lower, upper + 1): if num > 1: for i in range(2,num): if (num % i) == 0: break else: print(num,"is a prime") ###Output 2 is a prime 3 is a prime 5 is a prime 7 is a prime 11 is a prime 13 is a prime 17 is a prime 19 is a prime 23 is a prime 29 is a prime 31 is a prime 37 is a prime 41 is a prime 43 is a prime 47 is a prime 53 is a prime 59 is a prime 61 is a prime 67 is a prime 71 is a prime 73 is a prime 79 is a prime 83 is a prime 89 is a prime 97 is a prime 101 is a prime 103 is a prime 107 is a prime 109 is a prime 113 is a prime 127 is a prime 131 is a prime 137 is a prime 139 is a prime 149 is a prime 151 is a prime 157 is a prime 163 is a prime 167 is a prime 173 is a prime 179 is a prime 181 is a prime 191 is a prime 193 is a prime 197 is a prime 199 is a prime
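###Markdown A note on efficiency (an added sketch, beyond the assignment): trial division only needs to test divisors up to the square root of the number, since any factor above the square root pairs with one below it. This matters once the upper bound grows: ###Code def is_prime(n):
    # test divisors only up to sqrt(n)
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True

print([n for n in range(1, 50) if is_prime(n)]) ###Output [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]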
examples/basic/tutorial.ipynb
###Markdown WARP TutorialThis notebook takes the pedagogy of learn by example, showing how to use WARP to organize your project's pipeline and keep track of hyperparameters therein. ###Code %load_ext autoreload %autoreload 2 ###Output _____no_output_____ ###Markdown Declaring pipesThe following cell(s) will show how to define a pipe that can be recognized by WARP.Pipes currently must be declared in external files (i.e. not within a notebook) with a strict one-pipe-per-file rule.This next cell shows the contents of the file `./example/A.py`, which defines an initial pipe for this pipeline.The following features of WARP are showcased:- **Parameters:** Parameters are intended to be tunable values necessary to reproduce a pipe's output (e.g. learning rate of a neural network). Parameters are limited to simple types (string, numeric types) as well as basic iterables (list, dict) containing simple types.The first argument of the `Parameter` constructor is always the parameter name and the keyword argument `default` can be passed to specify a default value.If no `default` argument is passed (e.g. `Parameter('text')`), then the parameter assumes the default value of the string equal to its name (concretely, the value contained in `Parameter('text')` is `'text'`).A parameter can be treated within `Main` like a variable containing the value that `Parameter` wraps.- **Parameter config files:** A config file for the pipe parameters can be specified using the `ParameterFile` class, which takes one string argument corresponding to the file path of the config file (relative to the working directory).This config file can be in yaml or json format -- the codeblock below shows a yaml example:``` contents of the file `config/A.yml`message: text_yaml```Parameters in the config file must have names that match their variable names (e.g. `message: text_yaml` will be matched to an attribute `message = Parameter(...)`).Config files can be used to override the default values for a parameter (in this case, we override the `message` string value `'text'` with `'text_yaml'`), thus you don't need to specify every parameter of the pipe if the default values suffice. All parameter values used at runtime are logged by WARP automatically.- **Product specification:** you must specify a pipe's products using the `Product` class, which takes one string argument corresponding to the file path for the product to be generated.You can treat instances of this object as the string it contains.The `Product` class accepts strings, lists, and dicts.- **Lazy imports:** the preference when declaring WARP pipes is to lazily import modules that are used within the pipe. You don't have to adhere to this rule of thumb, but doing so will significantly speedup the launching of sessions.`warp.utils.GlobalImport` provides a convenient way of doing lazy imports for your pipe, as shown in the next cell.Using `GlobalImport` will allow you to import any number of modules which will be available to any object within scope. ###Code """example/A.py Creates two output files `A1.txt` and `A2.txt` that both contain the value of the `message` parameter. The `message` parameter value is stored in the `config/A.yml` file. 
""" from warp import Pipe, Product, Parameter, ParameterFile from warp.utils import GlobalImport class Main(Pipe): ### parameters config_file = ParameterFile('config/A.yml') message = Parameter('text') ### products products = Product([ 'data/A1.txt', 'data/A2.txt']) @staticmethod def makedirs(path :str) -> None: # `os` module is available due to GlobalImport os.makedirs(path, exist_ok=True) def run(self) -> None: # lazy imports with GlobalImport(globals()): import os self.makedirs('data') for p in self.products: with open(p, 'w') as f: f.write(self.message) ###Output _____no_output_____ ###Markdown Instantiating the graph ###Code from warp import PipeGraph, WorkSpace from example import A, B, C, D pg = PipeGraph() ###Output _____no_output_____ ###Markdown The next cell shows how external files can be included in a WARP pipeline.These kinds of nodes in the graph are considered source nodes from a flow perspective -- nodes with no parents.Note that source nodes are not a required component of a WARP pipeline, they are only a convenience for tracking externally generated data in your workflow.Intuitively, one only needs to specify a product with no pipe file and no parent_products.Here, we add a config file for a downstream pipe -- this is not necessary in practice since WARP will log parameter values later anyways. ###Code pg.add(products='config/A.yml') ###Output _____no_output_____ ###Markdown Next, we can add a pipe to the graph that depends on the product of previous pipe (in this case, the source pipe).We only need to name a product in the `parent_products` argument that was generated by pipe already attached to the graph.Specifying a config file as a product Since a pipe's products are always specified locally using the `Product` class (as seen above), specifying them via the `products` argument to `pg.add` is unnecessary. The only reason to specify products redundantly is for verbosity purposes, which can be handy when developing a pipeline.- **Note:** specifying products at `PipeGraph` creation time can lead to excessive verbosity -- prefer implicit product specifications, similar to parameters. ###Code pg.add( parent_products='config/A.yml', pipe=A) ###Output _____no_output_____ ###Markdown You can pass a module to the `parents` argument to automatically add all products of the pipe as dependencies.In this case, by passing `parents=A`, WARP adds `'data/A1.txt'` and `'data/A2.txt'` as dependencies of `B`. ###Code pg.add( parents=A, pipe=B) ###Output _____no_output_____ ###Markdown Since all of a pipe's products are specified as attributes, we can directly pass them in to the `parent_products` argument.This allows us to exclude certain products of an upstream pipe that we don't need. ###Code pg.add( parent_products=A.Main.products[1], # data/A2.txt pipe=C) ###Output _____no_output_____ ###Markdown **Note:** terminal pipes in the pipeline don't have to have products -- this is a choice left to the user.Here, we indicate optionality using the commented line. ###Code pg.add( parents=[A, C], pipe=D) ws = WorkSpace( pathgraph=pg, config_dir='./config') ###Output _____no_output_____ ###Markdown To make sure that we're working in a fresh environment, we can clear the cache.You can clear all workspace sessions by passing the `clear_all=True` flag.You can clear the cache for a particular session by passing its id number (e.g. `ws.clear_cache(0)`). 
###Code ws.clear_cache(clear_all=True) ###Output _____no_output_____ ###Markdown Visualizing the graph We can visually inspect the graph we built using `show`. This can let us check for discrepancies in the graph at a glance. The `__source__` pipe corresponds to the external artifact that we attached using `pg.add(products='config/A.yml')`. Notice that `B` is a terminal node -- pipes `C` and `D` do not depend on this pipe in any way. ###Code ws.show() ###Output _____no_output_____ ###Markdown You can get a view that is more verbose (i.e. provides relative paths for everything): ###Code ws.show(verbose=True) ###Output _____no_output_____ ###Markdown Running pipes in the graph Now that we've defined our graph and instantiated our workspace, we're ready to start running pipes. ###Code ws.methods ws.pipes ###Output _____no_output_____ ###Markdown We can view the source code of a pipe using `ws.view_pipe(...)`. Notice that this matches the example pipe declaration code written in the cell above. ###Code ws.view_pipe('A') ###Output _____no_output_____ ###Markdown We want to get the products of the `example.D` pipe but we don't want to think about which pipes we need to run in which order to get there (e.g. `example.B` is not necessary for `example.D`). We also don't want to remember which of the intermediate pipes we might need to rerun to preserve data provenance. This is the core functionality of WARP; we can use the `backfill` operation to find the ordered sequence of pipes that need to be rerun. Critically, WARP will only include a pipe in this sequence if an upstream pipe has been run more recently. When we run `ws.backfill('D')` in the following cell, we haven't actually run any of the pipes before, so WARP will choose the build sequence `A -> C -> D` (skipping `B`, as explained below). ###Code %%time ws.backfill('D') ###Output _____no_output_____ ###Markdown When we ran `backfill` on the pipe `D`, WARP automatically detected that the ancestral pipes of `D` were out of sync (in this case, they had never been built). WARP also noticed that `D` has no dependency on `B`, so `B` was left unbuilt. We can confirm this by checking the `status` of `B`. ###Code ws.status('B') ###Output _____no_output_____ ###Markdown The `backfill` function is idempotent -- running backfill again does nothing. ###Code ws.backfill('D') ###Output _____no_output_____ ###Markdown ---Suppose we want to regenerate the products of `A` with different parameter values. We can do this using `build` and passing in keyword arguments that correspond to parameter names. In this case, `A` has a parameter called `message`. ###Code ws.build('A', message='new message') ###Output _____no_output_____ ###Markdown If we try to build `D`, WARP will detect that the graph is now out-of-sync and will throw an error. The methodology of WARP is to put hard stops in place to prevent bugs that arise from using out-of-sync data. For dev purposes, you can force a pipe to run by passing the flag `force_build=True`. You probably shouldn't do this. ###Code ws.build('D') ###Output _____no_output_____ ###Markdown Per the hint, we can resync the graph via `backfill`.
###Code ws.backfill('D') ###Output _____no_output_____ ###Markdown Loading a sessionYou can inspect sessions ###Code ws.load_session(0) ###Output _____no_output_____ ###Markdown You can inspect metadata associated with a pipe in the session using `view`- **pipe cache directory** is the relative path to where the specified pipe's metadata and logs are cached- **Commit hash** is the hash of the git commit of the code that was used to run the pipe- **Last build** is a timestamp of when a pipe build was last initiated- **Elapsed time** is the number of seconds the last pipe build took to complete- **Parameters** shows the parameter values that were used in the last pipe build ###Code ws.status('A') ###Output _____no_output_____
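###Markdown One last sketch (an addition, with assumptions noted in comments): the tutorial states that `Product` also accepts dicts, so a hypothetical pipe `example/E.py` with a dict-valued product might look like the following -- assuming dict-valued `Product` instances iterate like plain dicts, which is not verified here: ###Code """example/E.py (hypothetical sketch, not part of the example repo)"""
from warp import Pipe, Product, Parameter

class Main(Pipe):

    message = Parameter('text')

    # assumption: Product wraps a dict the same way it wraps a list
    products = Product({
        'first': 'data/E1.txt',
        'second': 'data/E2.txt'})

    def run(self) -> None:
        for name, path in self.products.items():
            with open(path, 'w') as f:
                f.write(f'{name}: {self.message}') ###Output _____no_output_____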
week12/qlearn.ipynb
###Markdown Q-LearningAgent ###Code import random import numpy as np #package for defining abstract base classes from abc import ABC, abstractmethod class AbstractQLearningAgent(ABC): def __init__(self, env, epsilon_start, alpha, gamma): self.env = env self.epsilon = epsilon_start # exploration constant self.alpha = alpha # learning rate self.gamma = gamma # discount factor self.actions = range(env.action_space.n) #actions @abstractmethod def get_q_value(self, state, action): raise Exception("Not Implemented") def choose_action(self, state): #epsilon-greedy policy if random.random() < self.epsilon: action = random.choice(self.actions) else: q_values = [self.get_q_value(state, action) for action in self.actions] action = np.argmax(q_values) return action @abstractmethod def update(self, state, action, reward, next_state): raise Exception("Not Implemented") ###Output _____no_output_____ ###Markdown Tabular method with discretized states ###Code class QLearningAgentTabular(AbstractQLearningAgent): def __init__(self, env, epsilon_start, alpha, gamma, discretisations=10): super().__init__(env, epsilon_start, alpha, gamma) self.q_table = {} self.discretisations = discretisations def _discretize(self, state): #returns the discrete_state? low = self.env.observation_space.low high = self.env.observation_space.high diff = (high - low) / self.discretisations discrete_state = (state - low) // diff return tuple(discrete_state.tolist()) def get_q_value(self, state, action): discrete_state = self._discretize(state) if (discrete_state, action) not in self.q_table: return 0.0 else: return self.q_table[(discrete_state, action)] def update(self, state, action, reward, next_state): #do one learning step discrete_state = self._discretize(state) q_value_old = self.q_table.get((discrete_state, action)) q_value_new = reward + self.gamma * max([self.get_q_value(next_state, a) for a in self.actions]) if q_value_old is None: self.q_table[(discrete_state, action)] = reward else: self.q_table[(discrete_state, action)] = q_value_old + self.alpha * (q_value_new - q_value_old) ###Output _____no_output_____ ###Markdown Linear Approximation ###Code class QLearningAgentApproximator(AbstractQLearningAgent): def __init__(self, env, epsilon, alpha, gamma, basis_functions_per_dimension=10): super().__init__(env, epsilon, alpha, gamma) low = env.observation_space.low high = env.observation_space.high xx, yy = np.meshgrid(np.linspace(low[0], high[0], basis_functions_per_dimension), np.linspace(low[1], high[1], basis_functions_per_dimension)) radials = np.append(xx.reshape(xx.shape + (1,)), yy.reshape(yy.shape + (1,)), axis=2) self.radials = radials.reshape((radials.size // 2, 2)) low = env.observation_space.low high = env.observation_space.high self.sigma_inv = 1 / (high - low) * basis_functions_per_dimension self.weights = np.random.random((len(self.actions), basis_functions_per_dimension ** 2)) * 0.01 #shape of weights = (3, 100) def _feature_vector(self, state): r = self.sigma_inv * (self.radials - state) return np.exp(-0.5 * np.sum(r * r, axis=1)) def get_q_value(self, state, action): x = self._feature_vector(state) return np.dot(np.transpose(x), self.weights[action]) def update(self, state, action, reward, next_state): # do one learning step x = self._feature_vector(state) #gradient! 
approx = self.get_q_value(state, action) target = reward + self.gamma * max([self.get_q_value(next_state, a) for a in self.actions]) #Stochastic gradient descent self.weights[action] += self.alpha * (target - approx)*x ###Output _____no_output_____ ###Markdown Main ###Code %matplotlib notebook import gym from mpl_toolkits.mplot3d import axes3d from matplotlib import pyplot as plt import numpy as np def episode(env, agent, gamma, render=False): state = env.reset() discounted_return = 0 done = False time_step = 0 while not done: action = agent.choose_action(state) next_state, reward, done, _ = env.step(action) agent.update(state, action, reward, next_state) if render: env.render() discounted_return += reward*(gamma**time_step) state = next_state time_step +=1 return discounted_return def train(env, agent, gamma, nr_episodes, epsilon_start, fig, ax, fig2, ax2): returns = [] test_returns = [] for i in range(nr_episodes): agent.epsilon = max(0.1, agent.epsilon - epsilon_start/nr_episodes) episode_return = episode(env, agent, gamma) returns.append(episode_return) if i % (nr_episodes//20) == 0: print("episode {:5d}, return {}, epsilon {:.2f}".format(i, episode_return, agent.epsilon)) epsilon = agent.epsilon agent.epsilon = 0 test_returns.append(sum([episode(env, agent, gamma) for _ in range(10)]) / 10.) agent.epsilon = epsilon ax.plot(returns) fig.canvas.draw() ax2.plot(test_returns) fig2.canvas.draw() def plot_values(env, agent): fig = plt.figure() ax = fig.add_subplot(111, projection='3d') low = env.observation_space.low high = env.observation_space.high xs = np.linspace(low[0], high[0], 500) ys = np.linspace(low[1], high[1], 500) X, Y = np.meshgrid(xs, ys) states = np.append(X.reshape(X.shape + (1,)), Y.reshape(Y.shape + (1,)), axis=2) states = states.reshape((states.shape[0]*states.shape[1], 2,)) values = np.array(list(map(lambda x: max([agent.get_q_value(x, a) for a in agent.actions]), states))) #state value Z = -values.reshape(X.shape) # plot reward function (multiplied by -1) ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10) ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel("state value") plt.show() env = gym.make('MountainCar-v0') eps_start = 0.9 alpha = 0.01 gamma = 0.999 fig,ax = plt.subplots(1,1) ax.set_title("Training returns") ax.set_xlabel('episode') fig2,ax2 = plt.subplots(1,1) ax2.set_title("Test returns") ax2.set_xlabel('episode') #agent = QLearningAgentTabular(env, eps_start, alpha, gamma) agent = QLearningAgentApproximator(env, eps_start, alpha, gamma) nr_episodes = 3000 train(env, agent, gamma, nr_episodes, eps_start, fig, ax, fig2, ax2) #Plot the state values plot_values(env, agent) #visualize learned policy within domain for 3 episodes agent.epsilon = 0 for _ in range(3): episode(env, agent, render = True) ###Output _____no_output_____
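###Markdown For reference (an added note), the tabular `update` above implements the standard Q-learning rule $$Q(s,a) \leftarrow Q(s,a) + \alpha \left[ r + \gamma \max_{a'} Q(s',a') - Q(s,a) \right]$$ with learning rate $\alpha$ (`alpha`) and discount factor $\gamma$ (`gamma`); the linear-approximation agent applies the same temporal-difference error as a stochastic gradient step on the weights of the chosen action.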
Summary/Python_04.ipynb
###Markdown for loops - Used to process values while iterating over an iterable object such as a list or a string - Used with the syntax below - Here, i is sequentially set to each item of a on every pass - The for statement ends once all items have been iterated over ###Code a = [1,2,3,4,5] for i in a : print(i, i*2) a = [1,2,3,4,5] for i in a : print(i, i*2) print('hahaa') ###Output _____no_output_____ ###Markdown Printing the items of a string - Strings are iterable and can be traversed much like lists ###Code for x in 10: print(x) # an integer is not iterable a = 'hello world' for character in a : print(character) ###Output _____no_output_____ ###Markdown Printing list items ###Code a = [1,2,10,3,5,6] for num in a : if num % 2 == 0: print(num/2) else: print(num+1) print(num) ###Output _____no_output_____ ###Markdown Printing dict items - By default, iterating over a dictionary references the key values - You can iterate over just the keys with the keys() function - You can iterate over just the values with the values() function - You can iterate over key, value pairs as tuples with the items() function ###Code a = {'korea':'seoul', 'japan':'tokyo'} for k in a : print(k, a[k]) for value in a.values(): print(value) list(a.items()) for key , value in a.items(): print(key,value) ###Output _____no_output_____ ###Markdown Using an index in a for loop - By default, iterating over a list with for extracts only the values - With code like the one below, you can use both the index and the value (using the enumerate function) ###Code a = [1,2,3,4,5] for i , val in enumerate(a): print(i, val) a = [1,2,3,4,5] for num in a: print(num) ###Output _____no_output_____ ###Markdown break - In a for statement too, the loop can be terminated when a specific condition is met ###Code a = [100,90,80,70,60,50] for num in a: if num < 80: break print(num) ###Output _____no_output_____ ###Markdown continue - Skips the rest of the current iteration and moves on to the next item ###Code a = [100,90,80,70,60,50] for num in a: if num > 60 and num <= 70: continue print(num) ###Output _____no_output_____ ###Markdown Nested loops - Loops can also be nested - Nesting means writing another loop inside the code block of a loop - In this case, the inner loop runs once for every iteration of the outer loop - Nesting can go arbitrarily deep ###Code a = [1,2,4] for i in a : for j in a: print (i*j) ###Output _____no_output_____ ###Markdown Printing the multiplication table ###Code x = [2,3,4,5,6,7,8,9] y = [1,2,3,4,5,6,7,8,9] for i in x : for j in y: print(i, 'x', j,'=',i*j) ###Output _____no_output_____
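###Markdown One small extra example (added, not part of the original notes): enumerate can start counting from a value other than 0, and f-strings make the multiplication table output easier to read ###Code a = ['korea', 'japan']
for i, country in enumerate(a, start=1):  # index starts at 1
    print(i, country)

for i in range(2, 4):
    for j in range(1, 10):
        print(f'{i} x {j} = {i*j}') ###Output _____no_output_____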
notebooks/Somers2017.ipynb
###Markdown Somers2017`Title`: A Measurement of Radius Inflation in the Pleiades and Its Relation to Rotation and Lithium Depletion `Authors`: Somers & StassunData is from this paper: http://iopscience.iop.org/article/10.3847/1538-3881/153/3/101/meta ###Code %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import pandas as pd pd.options.display.max_columns = 150 %config InlineBackend.figure_format = 'retina' import astropy from astropy.io import ascii from astropy.table import Table import numpy as np ###Output _____no_output_____ ###Markdown Table 1: Basic Pleiades data Currently behind a paywall, requires institutional access. ###Code #! mkdir ../data/Somers2017 #! wget http://iopscience.iop.org/1538-3881/153/3/101/suppdata/ajaa4ef4t1_mrt.txt #! mv ajaa4ef4t1_mrt.txt ../data/Somers2017/ ! head ../data/Somers2017/ajaa4ef4t1_mrt.txt tab1 = ascii.read('../data/Somers2017/ajaa4ef4t1_mrt.txt') #tab1.show_in_notebook(display_length=5) df1 = tab1.to_pandas() df1.head() ###Output _____no_output_____ ###Markdown Table 2: Derived stellar properties ###Code #! wget http://iopscience.iop.org/1538-3881/153/3/101/suppdata/ajaa4ef4t2_mrt.txt #! mv ajaa4ef4t2_mrt.txt ../data/Somers2017/ tab2 = ascii.read('../data/Somers2017/ajaa4ef4t2_mrt.txt') df2 = tab2.to_pandas() df2.head() df1.to_csv('../data/Somers2017/tb1.csv', index=False) df2.to_csv('../data/Somers2017/tb2.csv', index=False) ###Output _____no_output_____
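###Markdown A possible follow-up (sketch only; the join key is an assumption, and the MRT headers would give the real column name): ###Code # hypothetical: assumes both tables share a star identifier column,
# called 'Name' here purely as a placeholder
stars = pd.merge(df1, df2, on='Name', how='inner', suffixes=('_tb1', '_tb2'))
stars.head() ###Output _____no_output_____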
notebooks/1.0_biiweeklyforecast.ipynb
###Markdown 1.0 Import Library ###Code import pandas as pd import numpy as np import datetime ###Output _____no_output_____ ###Markdown 1.1 Load Dataset ###Code df = pd.read_csv("../data/interim/df_complete.csv", parse_dates=['transaction_date']) df['mnth_yr'] = df['transaction_date'].apply(lambda x: x.strftime('%B-%Y')) df['mnth'] = df['transaction_date'].dt.month df.head(1) df.info() dfx = df.groupby(['user_id','direction','mnth_yr', 'mnth']).agg(amount = ('amount_n26_currency','sum'), num_trx = ('amount_n26_currency','count')).reset_index() dfx.head() ###Output _____no_output_____ ###Markdown Remove customers with only one month of data ###Code test = dfx.groupby(['user_id','direction'])['mnth_yr'].nunique().reset_index() dfx = dfx[~dfx.user_id.isin(test[test.mnth_yr == 1]['user_id']) ] ###Output _____no_output_____ ###Markdown 2.0 Exploratory Data Analysis ###Code def check_df(dataframe): print("##################### Shape #####################") print(dataframe.shape) print("##################### Types #####################") print(dataframe.dtypes) print("##################### Head #####################") print(dataframe.head(3)) print("##################### Tail #####################") print(dataframe.tail(3)) print("##################### NA #####################") print(dataframe.isnull().sum()) print("##################### Quantiles #####################") print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T) check_df(dfx) ###Output ##################### Shape ##################### (66823, 6) ##################### Types ##################### user_id object direction object mnth_yr object mnth int64 amount int64 num_trx int64 dtype: object ##################### Head ##################### user_id direction mnth_yr mnth amount \ 0 000295594379774ab9ac2c78c946d615 In February-2016 2 291 1 000295594379774ab9ac2c78c946d615 In June-2016 6 115 2 000295594379774ab9ac2c78c946d615 In March-2016 3 381 num_trx 0 1 1 1 2 1 ##################### Tail ##################### user_id direction mnth_yr mnth amount \ 73296 fffde1ba10b4040deb651162f56c9fc4 Out June-2016 6 217 73297 fffde1ba10b4040deb651162f56c9fc4 Out March-2016 3 345 73298 fffde1ba10b4040deb651162f56c9fc4 Out May-2016 5 336 num_trx 73296 12 73297 10 73298 13 ##################### NA ##################### user_id 0 direction 0 mnth_yr 0 mnth 0 amount 0 num_trx 0 dtype: int64 ##################### Quantiles ##################### 0.00 0.05 0.50 0.95 0.99 1.00 mnth 2.0 2.0 5.0 7.0 7.00 7.0 amount 3.0 11.0 156.0 1035.0 1887.78 8528.0 num_trx 1.0 1.0 3.0 21.0 33.00 91.0 ###Markdown 3.0 Outlier Check For outlier detection, I will use the IQR method with Q1 as the 0.05 quantile and Q3 as the 0.95 quantile. I will compute the low limit and up limit from the IQR and check whether the amount variable contains values above/below these limits. It will return a boolean.
###Code def outlier_thresholds(dataframe, col_name, q1_perc=0.05, q3_perc=0.95): """ given dataframe, column name, q1 percentage and q3 percentage, function calculates low_limit and up_limit """ quartile1 = dataframe[col_name].quantile(q1_perc) quartile3 = dataframe[col_name].quantile(q3_perc) interquantile_range = quartile3 - quartile1 up_limit = quartile3 + 1.5 * interquantile_range low_limit = quartile1 - 1.5 * interquantile_range return low_limit, up_limit def check_outlier(dataframe, col_name, q1_perc=0.01, q3_perc=0.99): low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1_perc=q1_perc, q3_perc=q3_perc) if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None): return True else: return False check_outlier(dfx, 'amount') dfx.groupby(["user_id","direction"]).agg({"amount": ["sum", "mean", "median", "std"]}) ###Output _____no_output_____ ###Markdown 4.0 Feature Engineering In order to search for seasonalities, the date variable will be used to derive new features, and different time periods will be created. ###Code # def create_date_features(df): # df['month'] = df.date.dt.month # df['quarter'] = df.date.dt.quarter # df['day_of_year'] = df.date.dt.dayofyear # df['week_of_year'] = df.date.dt.weekofyear # # 1.1.2013 is Tuesday, so our starting point is the 2nd day of week # df['day_of_week'] = df.date.dt.dayofweek + 1 # df['year'] = df.date.dt.year # df["is_wknd"] = df.date.dt.weekday // 4 # df['is_month_start'] = df.date.dt.is_month_start.astype(int) # df['is_month_end'] = df.date.dt.is_month_end.astype(int) # return df def create_num_days_month(m): if m==2: return 28 long_months = [1,3,5,7,8,10,12] if m in long_months: return 31 return 30 dfx["num_days_month"] = dfx['mnth'].apply(lambda x: create_num_days_month(x)) # df.rename(columns= {'transaction_date':'date'}, inplace= True) # df = create_date_features(df) dfx.head() ###Output _____no_output_____ ###Markdown 4.1 Add Random Noise For small datasets like this one, in order to avoid overfitting, random noise can be added to the values. I will add Gaussian random noise which is normally distributed with a standard deviation of 1 and mean of 0. ###Code def random_noise(dataframe): return np.random.normal(size=(len(dataframe),)) ###Output _____no_output_____ ###Markdown 4.2 Lag/Shifted Features Time series theory states that the value at time t highly depends on the value at time t-1. That is why I will be shifting all the amount values by 1 and adding noise. ###Code # sort the values per user, direction and month so that values would be shifted equally dfx.sort_values(by=['user_id','direction', 'mnth'], axis=0, inplace=True) dfx # the feature name will be created dynamically with regards to the lag value for a given list of lags def lag_features(dataframe, lags): dataframe = dataframe.copy() for lag in lags: dataframe['amount_lag_' + str(lag)] = dataframe.groupby(['user_id','direction'])['amount'].transform( lambda x: x.shift(lag)) + random_noise(dataframe) return dataframe dfx = lag_features(dfx, [1, 2, 3]) dfx.head() ###Output _____no_output_____ ###Markdown 4.3 Rolling Mean / Moving Average In order to find out possible seasonalities, I will be creating moving averages for specified time intervals. This function takes the window size given as the window parameter and averages the values, but one of the values is the value of this specific observation.
In order to eliminate today's effect on the moving average values, I will take 1 shift and use this function ###Code def roll_mean_features(dataframe, windows): dataframe = dataframe.copy() for window in windows: dataframe['amount_roll_mean_' + str(window)] = dataframe.groupby(['user_id','direction'])['amount']. \ transform( lambda x: x.shift(1).rolling(window=window, min_periods=2, win_type="triang").mean()) + random_noise(dataframe) return dataframe dfx = roll_mean_features(dfx, [2,3,4]) dfx.head() ###Output _____no_output_____ ###Markdown 4.4 Exponentially Weighted Mean Features The value at time t highly depends on the value at time t-1, so in order to get a better prediction, the values should not be weighted equally when computing the average. ###Code def ewm_features(dataframe, alphas, lags): dataframe = dataframe.copy() for alpha in alphas: for lag in lags: dataframe['amount_ewm_alpha_' + str(alpha).replace(".", "") + "_lag_" + str(lag)] = \ dataframe.groupby(["user_id", "direction"])['amount']. \ transform(lambda x: x.shift(lag).ewm(alpha=alpha).mean()) return dataframe alphas = [0.95, 0.9, 0.8, 0.7, 0.5] lags = [1, 2, 3] dfx = ewm_features(dfx, alphas, lags) dfx.tail() dfx['amount'] = np.log1p(dfx["amount"].values) def smape(preds, target): n = len(preds) masked_arr = ~((preds == 0) & (target == 0)) preds, target = preds[masked_arr], target[masked_arr] num = np.abs(preds-target) denom = np.abs(preds)+np.abs(target) smape_val = (200*np.sum(num/denom))/n return smape_val def lgbm_smape(preds, train_data): labels = train_data.get_label() smape_val = smape(np.expm1(preds), np.expm1(labels)) return 'SMAPE', smape_val, False train = dfx.loc[(dfx["mnth"] < 7), :] val = dfx.loc[(dfx["mnth"] == 7) , :] val.head(2) cols = [col for col in train.columns if col not in ['user_id', "direction", "mnth","mnth_yr","amount","num_trx"]] Y_train = train['amount'] X_train = train[cols] Y_val = val['amount'] X_val = val[cols] import lightgbm as lgb lgb_params = {'metric': {'mae'}, 'num_leaves': 10, 'learning_rate': 0.02, 'feature_fraction': 0.8, 'max_depth': 5, 'verbose': 0, 'num_boost_round': 15000, 'early_stopping_rounds': 200, 'nthread': -1} lgbtrain = lgb.Dataset(data=X_train, label=Y_train, feature_name=cols) lgbval = lgb.Dataset(data=X_val, label=Y_val, reference=lgbtrain, feature_name=cols) type(lgbtrain) model = lgb.train(lgb_params, lgbtrain, valid_sets=[lgbtrain, lgbval], num_boost_round=lgb_params['num_boost_round'], early_stopping_rounds=lgb_params['early_stopping_rounds'], feval=lgbm_smape, verbose_eval=200) y_pred_val = model.predict(X_val) smape(np.expm1(y_pred_val), np.expm1(Y_val)) from sklearn.metrics import mean_squared_error print(mean_squared_error(np.expm1(y_pred_val), np.expm1(Y_val))) def mean_absolute_percentage_error(y_true, y_pred): y_true, y_pred = np.array(y_true), np.array(y_pred) return np.mean(np.abs((y_true - y_pred) / y_true)) * 100 print(mean_absolute_percentage_error(np.expm1(y_pred_val), np.expm1(Y_val))) def plot_lgb_importances(model,plot=True,num=10): from matplotlib import pyplot as plt import seaborn as sns gain = model.feature_importance('gain') feat_imp = pd.DataFrame({'feature': model.feature_name(), 'split': model.feature_importance('split'), 'gain': 100 * gain / gain.sum()}).sort_values('gain', ascending=False) if plot: plt.figure(figsize=(10, 10)) sns.set(font_scale=1) sns.barplot(x="gain", y="feature", data=feat_imp[0:25]) plt.title('feature') plt.tight_layout() plt.show() else:
print(feat_imp.head(num)) plot_lgb_importances(model, num=30) # this one is the built-in plot function of the LightGBM library lgb.plot_importance(model, max_num_features=20, figsize=(10, 10), importance_type="gain") plt.show() np.expm1(y_pred_val).shape np.expm1(Y_val).shape ###Output _____no_output_____
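###Markdown For reference (an added note), the `smape` function used for evaluation above corresponds to $$\mathrm{SMAPE} = \frac{200}{n}\sum_{t=1}^{n}\frac{|F_t - A_t|}{|F_t| + |A_t|}$$ where $F_t$ are the back-transformed forecasts and $A_t$ the actuals; pairs where both are zero are masked out, exactly as in the code.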
Asteroid Light Curve Examples - Part 1.ipynb
###Markdown Asteroid Light Curve Examples - Part 1This notebook contains examples deep learning techniques applied to the asteroid light curve data from http://alcdef.org. Objectives- Understand when a convolutional neural network (CNN) might be applicable.- See how to apply a 1D-CNN to time-series data.- See how to build a more complex model that takes both time-series and categorical inputs. Parameters ###Code # Path to the ALCDEF_ALL dataset downloaded from http://alcdef.org # Download the full archive as a .zip file. Extract its contents to this # directory. It should be ~14K .txt files. data_dir = 'data/ALCDEF_ALL' # Discard any light curves with fewer than this many samples min_samples = 100 # Resample light curves to common number of samples nb_samples = 100 # Discard any light curve that isn't among the nb_classes most common objects nb_classes = 20 ###Output _____no_output_____ ###Markdown Imports ###Code %matplotlib inline import matplotlib.pyplot as plt import numpy as np from glob import glob from scipy.signal import resample from collections import Counter import random from keras.models import Sequential, Model from keras.layers import Dense, Dropout, LSTM from keras.utils import to_categorical from keras.optimizers import SGD, Adam from keras import regularizers from keras.callbacks import EarlyStopping from keras.layers import Conv1D, MaxPooling1D, Flatten, GlobalAveragePooling1D, Input, concatenate from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split from sklearn.manifold import TSNE from sklearn.decomposition import PCA import umap import pandas as pd import seaborn as sns from ml4ssa_utils import visualize_embedding, load_alcdef_data, plot_alcdef_examples, normalize_features, plot_confusion_matrix ###Output _____no_output_____ ###Markdown Load DatasetLoad data from Astroid Lightcurve Photometry Database (http://alcdef.org/) ###Code data = load_alcdef_data( data_dir=data_dir, min_samples=min_samples, resample_to=nb_samples, reduce_to_top=nb_classes ) # Gather a list of the object names we'll be working with names = list(set([item['OBJECTNAME'] for item in data])) ###Output _____no_output_____ ###Markdown Visualize Examples ###Code plot_alcdef_examples(data) ###Output _____no_output_____ ###Markdown Generate Train and Test Data Sets ###Code X = np.array([ item['DATA_RESAMPLED'][:,1] for item in data ]) y = np.array([ names.index(item['OBJECTNAME']) for item in data ]) # Reserve 20% of the data for testing # Startify the data split so that the train and test sets have the same class distribution X_train, X_test, y_train, y_test, data_train, data_test = train_test_split(X, y, data, test_size=0.20, stratify=y) print('Generated train and test sets with the following sizes:') print('Train X (features) {}, y (targets) {}'.format(X_train.shape, y_train.shape)) print('Test X (features) {}, y (targets) {}'.format(X_test.shape, y_test.shape)) ###Output Generated train and test sets with the following sizes: Train X (features) (615, 100), y (targets) (615,) Test X (features) (154, 100), y (targets) (154,) ###Markdown Review Class Distributions to Understand Performance of Random ClassifierIt's always helpful to understand how well a random classifier should perform. This sets a worst case baseline. If you're doing better than this performance, you know at least something is working. If your classifier is performing worse than random, something is broken. 
If it's performing at the same level as random, it's either broken or you have a very hard problem (at least with your current size and distribution of training data). ###Code class_counts = np.sum(to_categorical(y_test), axis=0) class_proportions = class_counts / np.sum(class_counts) max_proportion = np.max(class_proportions) random_performance = 1./nb_classes print('Random Performance: {:.3f}'.format(random_performance)) print('Mode Collapse Performance: {:.3f}'.format(max_proportion)) print('-'*65) for name, proportion in zip(names, class_proportions): print('{:15} {:.3f} {}'.format(name, proportion, 'largest' if proportion == max_proportion else '')) ###Output Random Performance: 0.050 Mode Collapse Performance: 0.110 ----------------------------------------------------------------- Eudora 0.052 Euterpe 0.039 Pales 0.039 Aline 0.039 Aurelia 0.039 Scania 0.045 Parthenope 0.104 Zelinda 0.058 Cupido 0.045 Eukrate 0.039 Thora 0.039 Ate 0.058 Lachesis 0.039 Ganymed 0.110 largest Ounas 0.039 Polyhymnia 0.039 Norma 0.039 Melanie 0.045 Leukothea 0.052 Philia 0.039 ###Markdown Try an MLP (multi-layer perceptron) similar to the TLE Example ###Code metric='acc' nb_classes = len(names) model = Sequential() model.add(Dense(units=100,activation='relu', input_shape=(100,))) model.add(Dense(units=100, activation='relu')) model.add(Dense(units=nb_classes, activation='softmax')) model.compile( loss='categorical_crossentropy', optimizer='sgd', metrics=[metric] ) # keras is complaining that I need to evaluate the model before printing a summary # model.predict(np.zeros((16,100))) model.summary() plot_confusion_matrix(model, X_test, y_test, 'Untrained MLP on Test Data', names=names) # Prepare data to pass to model repeats = 100 # The repeats value here is used to artificially increase the size of our training set.
# This forces keras to treat <repeats> passes through the training set as a single epoch and we # get to avoid a huge number of progress bars and short-term variance in metrics. train_features = normalize_features(X_train.repeat(repeats, axis=0)) train_targets = to_categorical(y_train.repeat(repeats, axis=0)) test_features = normalize_features(X_test) test_targets = to_categorical(y_test) # Fit model to data model.fit( train_features, train_targets, validation_data=(test_features, test_targets), epochs=10, batch_size=16, callbacks=[EarlyStopping(patience=3, monitor='val_loss')], verbose=1 ) plot_confusion_matrix(model, X_test, y_test, 'Test Data', names=names) plot_confusion_matrix(model, X_train, y_train, 'Train Data', names=names) ###Output _____no_output_____ ###Markdown CNN Model Now that we have two baselines (random performance and the MLP we used for TLE data), let's look at improving our performance with a different model. ###Code metric='acc' nb_classes = len(names) model = Sequential() model.add(Conv1D(filters=64, kernel_size=5, activation='relu')) model.add(MaxPooling1D()) model.add(Conv1D(filters=32, kernel_size=5, activation='relu')) model.add(MaxPooling1D()) model.add(Conv1D(filters=16, kernel_size=5, activation='relu')) model.add(GlobalAveragePooling1D()) model.add(Dense(units=nb_classes, activation='softmax')) model.compile( loss='categorical_crossentropy', optimizer='adam', metrics=[metric] ) # keras is complaining that I need to evaluate the model before printing a summary model.predict(np.zeros((16,100,1))) model.summary() # Prepare data to pass to model repeats = 100 # The repeats value here is used to artificially increase the size of our training set. # This forces keras to treat <repeats> passes through the training set as a single epoch and we # get to avoid a huge number of progress bars and short-term variance in metrics. train_features = normalize_features(X_train.repeat(repeats, axis=0)) train_targets = to_categorical(y_train.repeat(repeats, axis=0)) test_features = normalize_features(X_test) test_targets = to_categorical(y_test) # The convolutional layers will expect a "channels" dimension at the end. train_features = np.expand_dims(train_features, axis=-1) test_features = np.expand_dims(test_features, axis=-1) model.fit( train_features, train_targets, validation_data=(test_features, test_targets), epochs=10, batch_size=16, callbacks=[EarlyStopping(patience=5, monitor='val_loss')], verbose=1 ) ###Output Train on 61500 samples, validate on 154 samples Epoch 1/10 61500/61500 [==============================] - 1090s 18ms/step - loss: 2.2010 - acc: 0.2361 - val_loss: 2.2053 - val_acc: 0.2143 Epoch 2/10 61500/61500 [==============================] - 1129s 18ms/step - loss: 2.0066 - acc: 0.2917 - val_loss: 2.2793 - val_acc: 0.2078 Epoch 3/10 61500/61500 [==============================] - 1184s 19ms/step - loss: 1.9634 - acc: 0.3049 - val_loss: 2.4039 - val_acc: 0.2273 Epoch 4/10 61500/61500 [==============================] - 1178s 19ms/step - loss: 1.9344 - acc: 0.3122 - val_loss: 2.2558 - val_acc: 0.2143 Epoch 5/10 61500/61500 [==============================] - 1186s 19ms/step - loss: 1.9140 - acc: 0.3167 - val_loss: 2.4238 - val_acc: 0.2208 Epoch 6/10 61500/61500 [==============================] - 1181s 19ms/step - loss: 1.9041 - acc: 0.3178 - val_loss: 2.6379 - val_acc: 0.2078 ###Markdown Light Curve Embedding Based on Extracted Features ###Code # Here we extract the intermediate features/activations from the layer named penultimate layer_name = 'penultimate' intermediate_layer_model = Model(inputs=model.input, outputs=model.layers[-2].output) X_penultimate_test = intermediate_layer_model.predict(test_features) visualize_embedding(X_penultimate_test, y_test) plot_confusion_matrix(model, test_features, y_test, '', names=names) ###Output _____no_output_____ ###Markdown Multi-Modal Input The above models use features from a single modality (sampled light curves). In real-world problems, we often have multiple data types that will be relevant to our problem. For example, we typically at least have metadata associated with sampled data. The convolutional layers were motivated by the assumption that our sampled data is translationally invariant. As we have no reason to believe this holds for our metadata (it's not even clear what that would mean), we'll need to think about how best to incorporate the additional data. ###Code def generate_metadata_vector(item): '''Generate a metadata vector of the form <one-hot-encoded filter value> | <phase>. The data appears to have 3 different filter codes and a single phase value so the metadata vector will be of length 4. ''' v = np.zeros(4) filter_codes = ['V', 'R', 'C'] filter_ndx = filter_codes.index(item['FILTER']) v[filter_ndx] = 1 v[-1] = float(item['PHASE']) / 60.
# 60 was chosen as it was the largest value observed in a chunk of the data return v metric='acc' nb_classes = len(names) nb_metadata_inputs = 4 cnn_input = Input(shape=(nb_samples,1), name='cnn_input') x = Conv1D(filters=64, kernel_size=5, activation='relu')(cnn_input) x = MaxPooling1D()(x) x = Conv1D(filters=32, kernel_size=5, activation='relu')(x) x = MaxPooling1D()(x) x = Conv1D(filters=16, kernel_size=5, activation='relu')(x) cnn_output = GlobalAveragePooling1D()(x) metadata_input = Input(shape=(nb_metadata_inputs,), name='metadata_input') x = Dense(units=10, activation='relu')(metadata_input) metadata_output = Dense(units=10, activation='relu')(x) merged = concatenate([cnn_output, metadata_output]) final_output = Dense(units=nb_classes, activation='softmax')(merged) model = Model([cnn_input, metadata_input], final_output) model.compile( loss='categorical_crossentropy', optimizer='adam', metrics=[metric] ) model.summary() # Prepare data to pass to model repeats = 10 # The repeats value here is used to artificially increase the size of our training set. # This forces keras to treat <repeats> passes through the training set as a single epoch and we # get to avoid a huge number of progress bars and short-term variance in metrics. train_features = normalize_features(X_train.repeat(repeats, axis=0)) train_targets = to_categorical(y_train.repeat(repeats, axis=0)) test_features = normalize_features(X_test) test_targets = to_categorical(y_test) # The convolutional layers will expect a "channels" dimension at the end. train_features = np.expand_dims(train_features, axis=-1) test_features = np.expand_dims(test_features, axis=-1) # Generate the metadata features train_metadata_features = np.stack([ generate_metadata_vector(item) for item in data_train ]).repeat(repeats, axis=0) test_metadata_features = np.stack([ generate_metadata_vector(item) for item in data_test ]) model.fit( [train_features, train_metadata_features], train_targets, validation_data=([test_features, test_metadata_features], test_targets), epochs=10, batch_size=16, callbacks=[EarlyStopping(patience=5, monitor='val_loss')], verbose=1 ) ###Output Train on 6150 samples, validate on 154 samples Epoch 1/10 6150/6150 [==============================] - 115s 19ms/step - loss: 2.4147 - acc: 0.2434 - val_loss: 2.1445 - val_acc: 0.3182 Epoch 2/10 6150/6150 [==============================] - 111s 18ms/step - loss: 1.9617 - acc: 0.3660 - val_loss: 1.9389 - val_acc: 0.3896 Epoch 3/10 6150/6150 [==============================] - 112s 18ms/step - loss: 1.7892 - acc: 0.4106 - val_loss: 1.8477 - val_acc: 0.4026 Epoch 4/10 6150/6150 [==============================] - 111s 18ms/step - loss: 1.6687 - acc: 0.4320 - val_loss: 1.8205 - val_acc: 0.3701 Epoch 5/10 6150/6150 [==============================] - 118s 19ms/step - loss: 1.5877 - acc: 0.4421 - val_loss: 1.7468 - val_acc: 0.4091 Epoch 6/10 6150/6150 [==============================] - 112s 18ms/step - loss: 1.5243 - acc: 0.4698 - val_loss: 1.7314 - val_acc: 0.4221 Epoch 7/10 6150/6150 [==============================] - 111s 18ms/step - loss: 1.4681 - acc: 0.4928 - val_loss: 1.7473 - val_acc: 0.4156 Epoch 8/10 6150/6150 [==============================] - 111s 18ms/step - loss: 1.4310 - acc: 0.5176 - val_loss: 1.7497 - val_acc: 0.3766 Epoch 9/10 6150/6150 [==============================] - 110s 18ms/step - loss: 1.3982 - acc: 0.5226 - val_loss: 1.7517 - val_acc: 0.3831 Epoch 10/10 6150/6150 [==============================] - 110s 18ms/step - loss: 1.3766 - acc: 0.5285 - val_loss: 1.7583 - val_acc: 0.3766 ###Markdown Recurrent Neural Network A CNN processes an entire sequence of data in one step. An RNN, on the other hand, processes a sequence one element at a time. At each time step, it has access to the current sequence element and whatever information it has extracted from the preceding elements. ###Code metric='acc' nb_classes = len(names) model = Sequential() model.add(Conv1D(filters=16, kernel_size=5, activation='relu', padding='same')) model.add(LSTM(16, return_sequences=True)) model.add(LSTM(16, return_sequences=False)) model.add(Dense(units=nb_classes, activation='softmax')) model.compile( loss='categorical_crossentropy', optimizer='adam', metrics=[metric] ) # keras is complaining that I need to evaluate the model before printing a summary model.predict(np.zeros((16,100,1))) model.summary() # Prepare data to pass to model repeats = 10 # The repeats value here is used to artificially increase the size of our training set. # This forces keras to treat <repeats> passes through the training set as a single epoch and we # get to avoid a huge number of progress bars and short-term variance in metrics. train_features = normalize_features(X_train.repeat(repeats, axis=0)) train_targets = to_categorical(y_train.repeat(repeats, axis=0)) test_features = normalize_features(X_test) test_targets = to_categorical(y_test) # The convolutional layers will expect a "channels" dimension at the end. train_features = np.expand_dims(train_features, axis=-1) test_features = np.expand_dims(test_features, axis=-1) model.fit( train_features, train_targets, validation_data=(test_features, test_targets), epochs=10, batch_size=16, callbacks=[EarlyStopping(patience=5, monitor='val_loss')], verbose=1 ) ###Output Train on 6150 samples, validate on 154 samples Epoch 1/10 6150/6150 [==============================] - 26s 4ms/step - loss: 2.4989 - acc: 0.1748 - val_loss: 2.4395 - val_acc: 0.2208 Epoch 2/10 6150/6150 [==============================] - 28s 5ms/step - loss: 2.3488 - acc: 0.2062 - val_loss: 2.3965 - val_acc: 0.2078 Epoch 3/10 6150/6150 [==============================] - 27s 4ms/step - loss: 2.2785 - acc: 0.2195 - val_loss: 2.3793 - val_acc: 0.2208 Epoch 4/10 6150/6150 [==============================] - 29s 5ms/step - loss: 2.2390 - acc: 0.2320 - val_loss: 2.3788 - val_acc: 0.1753 Epoch 5/10 6150/6150 [==============================] - 31s 5ms/step - loss: 2.2513 - acc: 0.2294 - val_loss: 2.3380 - val_acc: 0.1753 Epoch 6/10 6150/6150 [==============================] - 29s 5ms/step - loss: 2.2498 - acc: 0.2228 - val_loss: 2.4141 - val_acc: 0.1753 Epoch 7/10 6150/6150 [==============================] - 35s 6ms/step - loss: 2.2734 - acc: 0.2293 - val_loss: 2.3195 - val_acc: 0.2078 Epoch 8/10 6150/6150 [==============================] - 35s 6ms/step - loss: 2.2185 - acc: 0.2372 - val_loss: 2.2921 - val_acc: 0.2078 Epoch 9/10 6150/6150 [==============================] - 29s 5ms/step - loss: 2.2059 - acc: 0.2390 - val_loss: 2.3672 - val_acc: 0.2143 Epoch 10/10 6150/6150 [==============================] - 30s 5ms/step - loss: 2.2427 - acc: 0.2285 - val_loss: 2.2659 - val_acc: 0.2468
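###Markdown As with the earlier models, we can sanity-check the RNN using the helpers already imported above (an added step, reusing the same evaluation call as the CNN section): ###Code plot_confusion_matrix(model, test_features, y_test, 'RNN on Test Data', names=names) ###Output _____no_output_____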
nepali-word2vec.ipynb
###Markdown Importing the dataset ###Code import string import time # ~~~~~~~~~~~~~~~IMPORTING THE DATASET~~~~~~~~~~~~~~~~ start = time.process_time() print("Reading the file .......") f = open("../input/nepdata/clean.txt" , encoding= 'utf-8' , buffering= 10000) lines = f.read().strip().split(u"।") sentences = [sentence.translate(str.maketrans('', '', string.punctuation)) for sentence in lines] f.close() print(f"Total number of lines in text file {len(sentences)}") print(f"Time required to read the file {time.process_time() - start}") ###Output Reading the file ....... Total number of lines in text file 5891518 Time required to read the file 101.750362739 ###Markdown Processing Dataset for Training ###Code !pip install snowballstemmer # ~~~~~~~~~~~~~~ Getting the dataset ready for training word2vec model ~~~~~~~~~~ import re import snowballstemmer mainlist = list() class Main_Data_list: def __init__(self, dataset): self.dataset = dataset self.stop_word_list = [] self.mainlist = [] a_file = open("../input/stopwords/stopwords.txt", "r" ,encoding= 'utf-8') for line in a_file: stripped_line = line.strip() self.stop_word_list.append(stripped_line) a_file.close() self.stemmer = snowballstemmer.NepaliStemmer() def simple_tokenizer(self,text) -> list: line = re.sub('[।]',"", text) devanagari_range = r'[\u0900-\u097F\\]' def getDevanagariCharCount(token): return len(list(filter(lambda char: re.match(devanagari_range, char), (char for char in token)))) def isDevanagari(token): return True if getDevanagariCharCount(token) >= len(token)/2 else False tokens = list(filter(lambda t: isDevanagari(t), line.split(" "))) return tokens def get(self): for i,line in enumerate(self.dataset[0:2000000]): wordsList = self.simple_tokenizer(line) words = [w for w in wordsList if not w in self.stop_word_list] words = self.stemmer.stemWords(words) if len(words) > 3: self.mainlist.append(words) if i % 100000 == 0: print(f"DONE FOR {i/100000} LAKHS LINES") return self.mainlist final = Main_Data_list(sentences) mainlist = final.get() ###Output DONE FOR 0.0 LAKHS LINES DONE FOR 1.0 LAKHS LINES DONE FOR 2.0 LAKHS LINES DONE FOR 3.0 LAKHS LINES DONE FOR 4.0 LAKHS LINES DONE FOR 5.0 LAKHS LINES DONE FOR 6.0 LAKHS LINES DONE FOR 7.0 LAKHS LINES DONE FOR 8.0 LAKHS LINES DONE FOR 9.0 LAKHS LINES DONE FOR 10.0 LAKHS LINES DONE FOR 11.0 LAKHS LINES DONE FOR 12.0 LAKHS LINES DONE FOR 13.0 LAKHS LINES DONE FOR 14.0 LAKHS LINES DONE FOR 15.0 LAKHS LINES DONE FOR 16.0 LAKHS LINES DONE FOR 17.0 LAKHS LINES DONE FOR 18.0 LAKHS LINES DONE FOR 19.0 LAKHS LINES ###Markdown Training ###Code import gensim model = gensim.models.Word2Vec( vector_size = 200 , window= 5, min_count=2, workers= 4 ) model.build_vocab(mainlist, progress_per=1000 ) model.train(mainlist, total_examples= model.corpus_count, epochs= model.epochs) ###Output _____no_output_____ ###Markdown Testing ###Code model.wv.most_similar('ठमेल') model.wv.most_similar('चितवन') model.save("nepaliW2V_5Million.model") ###Output _____no_output_____
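###Markdown A quick sketch (added) showing how the saved model can be reloaded later with gensim's standard load API: ###Code from gensim.models import Word2Vec

# reload the vectors trained above and query them again
reloaded = Word2Vec.load("nepaliW2V_5Million.model")
print(reloaded.wv.most_similar('चितवन', topn=5))
print(reloaded.wv.similarity('ठमेल', 'चितवन')) ###Output _____no_output_____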
.ipynb_checkpoints/P1_GoogleSearch-checkpoint.ipynb
###Markdown Use what I've learned from the other two to test out a final scraper that can iterate through a list of several radiologists. ###Code # Getting to a radiologist's webpage try: from googlesearch import search, get_random_user_agent except ImportError: print("No module named 'google' found") import re from random import randint from time import sleep import random # create test list of first 15 or so names radiologists = [ "JUSTIN OWENS", "MATTHEW HARTMAN", "VINCENT GRAZIANO", "SABA HASAN", "DANIEL MENDEZ", "PATRICK HURLEY", "ANDREW PICEL", "NORNA KARP", "DAVID SHEEHAN", "ROBERT BURMAN", "KEVIN SAWYER", "RAKESH BARAK", "MICHAEL FISHMAN", "STEVEN REIMAN", "BRIAN MOON" ] site = "https://health.usnews.com/doctors/" url_list = [] for radiologist in radiologists: query = "site:"+site+' "'+radiologist+" radiologist"'"' for j in search(query, num=1, stop=1, tld="co.in", pause=2, user_agent= 'Mozilla/5.0'): sleep(randint(10,20)) url_dict = {} url_j = j url_dict['radiologist_name'] = radiologist url_dict['url'] = url_j url_list.append(url_dict) # This is how to access the names and urls after entirely populating the list url_list[0]['radiologist_name'] url_list[0]['url'] url_list # Proof that it did work for the short list at one point url_list ###Output _____no_output_____ ###Markdown Testing 2-7-22 ###Code from bs4 import BeautifulSoup import urllib.request # List with google queries I want to make dash_radiologists = [] for radiologist in radiologists: string = radiologist string=string.replace(' ','-') dash_radiologists.append(string) desired_google_queries = dash_radiologists for query in desired_google_queries: # Construct the HTTP query url = 'http://google.com/search?q=' + query + '-radiologist-healthusnews' # To avoid a 403 error, set a User-Agent header req = urllib.request.Request(url, headers={'User-Agent' : "Magic Browser"}) response = urllib.request.urlopen( req ) html = response.read() # Parsing response soup = BeautifulSoup(html, 'html.parser') # Extract the 'experience' section of the page results = soup.find(id="experience") print(results.prettify()) # Delay sleep(randint(10,20)) from bs4 import BeautifulSoup import urllib.request import requests zipcode = '30342' url = 'https://health.usnews.com/doctors/search?specialty=Radiology&location='+zipcode user_agent = {'User-agent' : 'Mozilla/5.0'} page = requests.get(url, headers=user_agent) soup = BeautifulSoup(page.text, "html.parser") # Find all list items in the search results (each one links to a doctor profile)
li_elements = soup.find_all("li", class_= "item-list__ListItemStyled-sc-18yjqdy-1 fRQMSd") # Take a look at all the elements for li_element in li_elements: url_element_a = li_element.find('a') if url_element_a is not None: url_element = url_element_a.get('href') print(url_element) print() # v-space between elements ###Output /doctors/michael-dille-447907 /doctors/neil-amin-862965 /doctors/pamela-donlan-256487 /doctors/pamela-donlan-256487 /doctors/john-gay-137869 /doctors/kim-gray-67337 /doctors/travis-langley-729504 /doctors/travis-langley-729504 /doctors/jason-oppenheimer-734909 /doctors/neel-patel-690987 /doctors/nirav-patel-119277 /doctors/nirav-patel-119277 /doctors/neil-shah-784970 /doctors/prashant-shankar-779331 /doctors/ashishkumar-parikh-996150 /doctors/ashishkumar-parikh-996150 /doctors/john-grattan-smith-271480 /doctors/courtney-stewart-639821 /doctors/stephen-little-270734 /doctors/stephen-little-270734 /doctors/ariane-neish-269318 /doctors/jane-share-269455 /doctors/thai-trinh-269238 /doctors/thai-trinh-269238 /doctors/amanda-bauer-483431 /doctors/richard-barlow-272286
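###Markdown The hrefs printed above are relative paths. A small follow-up sketch (reusing the `li_elements` from the cell above) that turns them into absolute profile URLs and drops the duplicates visible in the listing: ###Code
from urllib.parse import urljoin

base = 'https://health.usnews.com'
profile_urls = []
for li_element in li_elements:
    url_element_a = li_element.find('a')
    if url_element_a is not None and url_element_a.get('href'):
        full_url = urljoin(base, url_element_a.get('href'))
        if full_url not in profile_urls:  # the search results repeat some doctors
            profile_urls.append(full_url)
print(len(profile_urls))
###Output _____no_output_____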
handy_syntax/file.py.ipynb
###Markdown Write a Python Script ###Code %%file zenofpython.py '''new file''' import this import zenofpython ###Output The Zen of Python, by Tim Peters Beautiful is better than ugly. Explicit is better than implicit. Simple is better than complex. Complex is better than complicated. Flat is better than nested. Sparse is better than dense. Readability counts. Special cases aren't special enough to break the rules. Although practicality beats purity. Errors should never pass silently. Unless explicitly silenced. In the face of ambiguity, refuse the temptation to guess. There should be one-- and preferably only one --obvious way to do it. Although that way may not be obvious at first unless you're Dutch. Now is better than never. Although never is often better than *right* now. If the implementation is hard to explain, it's a bad idea. If the implementation is easy to explain, it may be a good idea. Namespaces are one honking great idea -- let's do more of those!
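###Markdown One caveat worth noting: Python caches imports, so rewriting `zenofpython.py` with `%%file` and importing it again in the same kernel will not re-execute the file. `importlib.reload` forces that (a small follow-up sketch): ###Code
import importlib
import zenofpython

# Re-execute the module after the file on disk has been rewritten with %%file
importlib.reload(zenofpython)
###Output _____no_output_____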
notebooks/magr_calibration.ipynb
###Markdown Load SDSS sample ###Code from cosmodc2.sdss_colors import load_umachine_processed_sdss_catalog sdss = load_umachine_processed_sdss_catalog() print(sdss.keys()) ###Output /Users/aphearin/anaconda/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. from ._conv import register_converters as _register_converters ###Markdown Load $z=0$ baseline ${\rm UniverseMachine}$ mock with $M_{\ast}\ \&\ {\rm SFR}$ ###Code import os # MDPl2-based mock # dirname = "/Users/aphearin/work/random/0330/testing_mock" # basename = "sfr_catalog_1.000000_value_added.hdf5" # fname = os.path.join(dirname, basename) # Bpl-based mock dirname = "/Users/aphearin/work/random/0331" basename = "testing_bpl_based_v4.hdf5" fname = os.path.join(dirname, basename) from astropy.table import Table mock = Table.read(fname, path='data') # Impute sSFR=0 values in the quenched sequence for plotting convenience mock['obs_ssfr'] = mock['obs_sfr']/mock['obs_sm'] zero_mask = mock['obs_ssfr'] == 0 num_zeros = np.count_nonzero(zero_mask) mock['obs_ssfr'][zero_mask] = 10**np.random.normal(loc=-13, scale=0.25, size=num_zeros) print(mock.keys()) ###Output ['halo_id', 'upid', 'vpeak', 'mpeak', 'mvir', 'vmax', 'sm', 'sfr', 'obs_sm', 'obs_sfr', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'hostid', 'host_halo_x', 'host_halo_y', 'host_halo_z', 'host_halo_vx', 'host_halo_vy', 'host_halo_vz', 'host_halo_mvir', 'host_centric_x', 'host_centric_y', 'host_centric_z', 'host_centric_vx', 'host_centric_vy', 'host_centric_vz', 'obs_sfr_percentile', 'sfr_percentile', 'obs_ssfr'] ###Markdown Load ${\rm protoDC2\ v3}$ at $z=0$ ###Code dirname = "/Users/aphearin/Dropbox/protoDC2/umachine_z0p1_color_mock" basename = "umachine_z0p1_color_mock.hdf5" fname = os.path.join(dirname, basename) from astropy.table import Table v3_mock = Table.read(fname, path='data') print(v3_mock.keys()) ###Output ['id', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'obs_sm', 'obs_sfr', 'mpeak', 'mvir', 'vmax', 'vmax_at_mpeak', 'upid', 'hostid', 'has_matching_host', 'host_halo_x', 'host_halo_y', 'host_halo_z', 'host_halo_vx', 'host_halo_vy', 'host_halo_vz', 'host_halo_mvir', 'host_centric_x', 'host_centric_y', 'host_centric_z', 'host_centric_vx', 'host_centric_vy', 'host_centric_vz', 'obs_ssfr', 'sfr_percentile_fixed_sm', 'rmag', 'sdss_petrosian_gr', 'sdss_petrosian_ri', 'size_kpc', 'dr7_photoobj_id'] ###Markdown Assign Absolute r-band magnitude ###Code from cosmodc2.sdss_colors import median_magr_from_mstar # y_table=[-18.8, -20.1, -22.8] median_magr = median_magr_from_mstar(np.log10(mock['obs_sm'])) mock['restframe_extincted_sdss_abs_magr'] = np.random.normal( loc=median_magr, scale=0.25, size=len(mock)) median_magr2 = median_magr_from_mstar(np.log10(mock['obs_sm'])) mock['rmag2'] = np.random.normal( loc=median_magr2, scale=0.15, size=len(mock)) median_magr3 = median_magr_from_mstar(np.log10(mock['obs_sm']), y_table=[-18.9, -20.2, -22.55]) mock['rmag3'] = np.random.normal( loc=median_magr3, scale=0.2, size=len(mock)) from cosmodc2.sdss_colors import dim_satellites log_mhost = np.log10(mock['host_halo_mvir']) log_mpeak = np.log10(mock['mpeak']) upid = mock['upid'] mock['restframe_extincted_sdss_abs_magr'] = dim_satellites( mock['restframe_extincted_sdss_abs_magr'], log_mpeak, log_mhost, upid) mock['rmag2'] = dim_satellites(mock['rmag2'], log_mpeak, log_mhost, upid) mock['rmag3'] = dim_satellites(mock['rmag3'], log_mpeak, 
log_mhost, upid) fig, ax = plt.subplots(1, 1) nskip_sdss = 10 __=ax.scatter(sdss['sm'][::nskip_sdss], sdss['restframe_extincted_sdss_abs_magr'][::nskip_sdss], s=0.1) from scipy.stats import binned_statistic logsm_bins = np.linspace(9, 11.7, 40) logsm_mids = 0.5*(logsm_bins[:-1] + logsm_bins[1:]) median_magr_model1, __, __ = binned_statistic( np.log10(mock['obs_sm']), mock['restframe_extincted_sdss_abs_magr'], bins=logsm_bins, statistic='median') median_magr_model2, __, __ = binned_statistic( np.log10(mock['obs_sm']), mock['rmag2'], bins=logsm_bins, statistic='median') median_magr_model3, __, __ = binned_statistic( np.log10(mock['obs_sm']), mock['rmag3'], bins=logsm_bins, statistic='median') median_magr_v3, __, __ = binned_statistic( np.log10(v3_mock['obs_sm']), v3_mock['rmag'], bins=logsm_bins, statistic='median') nskip_mock = 50 __=ax.plot(logsm_mids, median_magr_model1, color='blue') __=ax.plot(logsm_mids, median_magr_model2, color='green') __=ax.plot(logsm_mids, median_magr_model3, color='red') __=ax.plot(logsm_mids, median_magr_v3, ':', color='k') ylim = ax.set_ylim(-19, -23.25) xlim = ax.set_xlim(9.5, 11.5) ylim = ax.set_ylim(-14, -24) xlim = ax.set_xlim(8, 12) fig, ax = plt.subplots(1, 1) __=ax.hist(mock['restframe_extincted_sdss_abs_magr'], bins=50, normed=True, alpha=0.8, color='blue') __=ax.hist(mock['rmag2'], bins=50, normed=True, alpha=0.8, color='green') __=ax.hist(mock['rmag3'], bins=50, normed=True, alpha=0.8, color='red') xlim = ax.set_xlim(-16, -22.8) ###Output _____no_output_____ ###Markdown Compare cumulative number density to Zehavi+11 ###Code from cosmodc2.sdss_colors import zehavi11_cumulative_nd lumthresh_h1p0, cumnd_sdss = zehavi11_cumulative_nd() from cosmodc2.mock_diagnostics import cumulative_nd volume_v4 = 250.**3. protoDC2_littleh = 0.7 cumnd_pdc2_v4 = cumulative_nd(mock['restframe_extincted_sdss_abs_magr'], volume_v4, protoDC2_littleh, lumthresh_h1p0) cumnd_pdc2_v4_model2 = cumulative_nd(mock['rmag2'], volume_v4, protoDC2_littleh, lumthresh_h1p0) cumnd_pdc2_v4_model3 = cumulative_nd(mock['rmag3'], volume_v4, protoDC2_littleh, lumthresh_h1p0) volume_v3 = 250.**3. 
cumnd_pdc2_v3 = cumulative_nd(v3_mock['rmag'], volume_v3, protoDC2_littleh, lumthresh_h1p0) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4), sharex=True) __=ax1.plot(lumthresh_h1p0, np.log10(cumnd_sdss), label=r'${\rm Zehavi+11}$', color='blue') __=ax1.plot(lumthresh_h1p0, np.log10(cumnd_pdc2_v4), label=r'${\rm protoDC2\ v4}$', color='k') __=ax1.plot(lumthresh_h1p0, np.log10(cumnd_pdc2_v4_model2), label=r'${\rm protoDC2\ v4\ model\ 2}$', color='green') __=ax1.plot(lumthresh_h1p0, np.log10(cumnd_pdc2_v4_model3), label=r'${\rm protoDC2\ v4\ model\ 3}$', color='red') fracdiff_pdc2_v4 = (cumnd_pdc2_v4 - cumnd_sdss)/cumnd_sdss fracdiff_pdc2_v4_model2 = (cumnd_pdc2_v4_model2 - cumnd_sdss)/cumnd_sdss fracdiff_pdc2_v4_model3 = (cumnd_pdc2_v4_model3 - cumnd_sdss)/cumnd_sdss fracdiff_pdc2_v3 = (cumnd_pdc2_v3 - cumnd_sdss)/cumnd_sdss __=ax2.plot(lumthresh_h1p0, fracdiff_pdc2_v4, label=r'${\rm protoDC2\ v4}$', color='blue') __=ax2.plot(lumthresh_h1p0, fracdiff_pdc2_v4_model2, label=r'${\rm protoDC2\ v4\ model\ 2}$', color='green') __=ax2.plot(lumthresh_h1p0, fracdiff_pdc2_v4_model3, label=r'${\rm protoDC2\ v4\ model\ 3}$', color='red') __=ax2.plot(lumthresh_h1p0, fracdiff_pdc2_v3, ':', color='k', label=r'${\rm protoDC2\ v3}$') __=ax2.plot(np.linspace(-100, 100, 100), np.zeros(100), ':', color='k') legend = ax1.legend() xlabel = ax1.set_xlabel(r'${\rm M_{r}}$') ylabel = ax1.set_ylabel(r'$n(<{\rm M_r})\ [(h/{\rm Mpc})^3]$') xlim1 = ax1.set_xlim(-18, -22) ylim2 = ax2.set_ylim(-1, 2) fig, ax = plt.subplots(1, 1) nskip_sdss = 10 # __=ax.scatter(sdss['sm'][::nskip_sdss], # sdss['restframe_extincted_sdss_abs_magr'][::nskip_sdss], s=0.1) from scipy.stats import binned_statistic logsm_bins = np.linspace(8, 11.7, 40) logsm_mids = 0.5*(logsm_bins[:-1] + logsm_bins[1:]) median_magr_model1, __, __ = binned_statistic( np.log10(mock['obs_sm']), mock['restframe_extincted_sdss_abs_magr'], bins=logsm_bins, statistic='median') median_magr_model2, __, __ = binned_statistic( np.log10(mock['obs_sm']), mock['rmag2'], bins=logsm_bins, statistic='median') median_magr_model3, __, __ = binned_statistic( np.log10(mock['obs_sm']), mock['rmag3'], bins=logsm_bins, statistic='median') nskip_mock = 50 # __=ax.plot(logsm_mids, median_magr_model1, color='k') # __=ax.plot(logsm_mids, median_magr_model2, color='green') # __=ax.plot(logsm_mids, median_magr_model3, color='red') nskip = 200 __=ax.scatter(np.log10(mock['obs_sm'][::nskip]), mock['restframe_extincted_sdss_abs_magr'][::nskip], s=0.1) ylim = ax.set_ylim(-15, -24) xlim = ax.set_xlim(7, 12) ###Output _____no_output_____ ###Markdown Compare luminosity threshold clustering to Zehavi+11 ###Code from cosmodc2.mock_diagnostics import zehavi_wp period = 250. 
x, y, z, vz = mock['x'], mock['y'], mock['z'], mock['vz'] magr = mock['restframe_extincted_sdss_abs_magr'] rp_mids, wp_um_19p0 = zehavi_wp(x, y, z, vz, period, magr, -19.0, protoDC2_littleh) rp_mids, wp_um_19p5 = zehavi_wp(x, y, z, vz, period, magr, -19.5, protoDC2_littleh) rp_mids, wp_um_20p5 = zehavi_wp(x, y, z, vz, period, magr, -20.5, protoDC2_littleh) rp_mids, wp_um_21p5 = zehavi_wp(x, y, z, vz, period, magr, -21.5, protoDC2_littleh) x, y, z, vz = v3_mock['x'], v3_mock['y'], v3_mock['z'], v3_mock['vz'] magr = v3_mock['rmag'] rp_mids, wp_v3_19p0 = zehavi_wp(x, y, z, vz, period, magr, -19.0, protoDC2_littleh) rp_mids, wp_v3_19p5 = zehavi_wp(x, y, z, vz, period, magr, -19.5, protoDC2_littleh) rp_mids, wp_v3_20p5 = zehavi_wp(x, y, z, vz, period, magr, -20.5, protoDC2_littleh) rp_mids, wp_v3_21p5 = zehavi_wp(x, y, z, vz, period, magr, -21.5, protoDC2_littleh) x, y, z, vz = mock['x'], mock['y'], mock['z'], mock['vz'] magr = mock['rmag2'] rp_mids, wp_um2_19p0 = zehavi_wp(x, y, z, vz, period, magr, -19.0, protoDC2_littleh) rp_mids, wp_um2_19p5 = zehavi_wp(x, y, z, vz, period, magr, -19.5, protoDC2_littleh) rp_mids, wp_um2_20p5 = zehavi_wp(x, y, z, vz, period, magr, -20.5, protoDC2_littleh) rp_mids, wp_um2_21p5 = zehavi_wp(x, y, z, vz, period, magr, -21.5, protoDC2_littleh) x, y, z, vz = mock['x'], mock['y'], mock['z'], mock['vz'] magr = mock['rmag3'] rp_mids, wp_um3_19p0 = zehavi_wp(x, y, z, vz, period, magr, -19.0, protoDC2_littleh) rp_mids, wp_um3_19p5 = zehavi_wp(x, y, z, vz, period, magr, -19.5, protoDC2_littleh) rp_mids, wp_um3_20p5 = zehavi_wp(x, y, z, vz, period, magr, -20.5, protoDC2_littleh) rp_mids, wp_um3_21p5 = zehavi_wp(x, y, z, vz, period, magr, -21.5, protoDC2_littleh) from cosmodc2.sdss_colors.sdss_measurements import rp as rp_zehavi from cosmodc2.sdss_colors import zehavi11_clustering wp_zehavi_18p5 = zehavi11_clustering(-18.5) wp_zehavi_19p0 = zehavi11_clustering(-19.0) wp_zehavi_19p5 = zehavi11_clustering(-19.5) wp_zehavi_20p0 = zehavi11_clustering(-20.0) wp_zehavi_20p5 = zehavi11_clustering(-20.5) wp_zehavi_21p0 = zehavi11_clustering(-21.0) wp_zehavi_21p5 = zehavi11_clustering(-21.5) fig, _axes = plt.subplots(2, 2, figsize=(10, 8)) ((ax1, ax2), (ax3, ax4)) = _axes axes = ax1, ax2, ax3, ax4 for ax in axes: __=ax.loglog() # __=ax.plot(rp_mids, rp_mids*wp_v3_20p5, '--', color='red') __=ax1.plot(rp_mids, rp_mids*wp_um_19p0, color='blue') __=ax1.plot(rp_mids, rp_mids*wp_um2_19p0, '--', color='green') __=ax1.plot(rp_mids, rp_mids*wp_um3_19p0, '--', color='red') __=ax1.plot(rp_mids, rp_mids*wp_v3_19p0, ':', color='k') __=ax1.errorbar(rp_zehavi, rp_zehavi*wp_zehavi_19p0, rp_zehavi*0.2*wp_zehavi_19p0, fmt='.', color='green') __=ax2.plot(rp_mids, rp_mids*wp_um_19p5, color='blue') __=ax2.plot(rp_mids, rp_mids*wp_um2_19p5, '--', color='green') __=ax2.plot(rp_mids, rp_mids*wp_um3_19p5, '--', color='red') __=ax2.plot(rp_mids, rp_mids*wp_v3_19p5, ':', color='k') __=ax2.errorbar(rp_zehavi, rp_zehavi*wp_zehavi_19p5, rp_zehavi*0.2*wp_zehavi_19p5, fmt='.', color='green') __=ax3.plot(rp_mids, rp_mids*wp_um_20p5, color='blue') __=ax3.plot(rp_mids, rp_mids*wp_um2_20p5, '--', color='green') __=ax3.plot(rp_mids, rp_mids*wp_um3_20p5, '--', color='red') __=ax3.plot(rp_mids, rp_mids*wp_v3_20p5, ':', color='k') __=ax3.errorbar(rp_zehavi, rp_zehavi*wp_zehavi_20p5, rp_zehavi*0.2*wp_zehavi_20p5, fmt='.', color='green') __=ax4.plot(rp_mids, rp_mids*wp_um_21p5, color='blue') __=ax4.plot(rp_mids, rp_mids*wp_um2_21p5, '--', color='green') __=ax4.plot(rp_mids, rp_mids*wp_um3_21p5, '--', color='red') 
__=ax4.plot(rp_mids, rp_mids*wp_v3_21p5, ':', color='k') __=ax4.errorbar(rp_zehavi, rp_zehavi*wp_zehavi_21p5, rp_zehavi*0.2*wp_zehavi_21p5, fmt='.', color='green') title1 = ax1.set_title(r'${\rm M_{r} < -19}$') title2 = ax2.set_title(r'${\rm M_{r} < -19.5}$') title3 = ax3.set_title(r'${\rm M_{r} < -20.5}$') title4 = ax4.set_title(r'${\rm M_{r} < -21.5}$') __=ax1.xaxis.set_ticks_position('none') __=ax1.set_xticklabels(['']) __=ax2.xaxis.set_ticks_position('none') __=ax2.set_xticklabels(['']) __=ax2.yaxis.set_ticks_position('none') __=ax2.set_yticklabels(['']) __=ax4.yaxis.set_ticks_position('none') __=ax4.set_yticklabels(['']) from cosmodc2.sdss_colors.sdss_measurements import rp as rp_zehavi from cosmodc2.sdss_colors import zehavi11_clustering wp_zehavi_18p5 = zehavi11_clustering(-18.5) wp_zehavi_19p0 = zehavi11_clustering(-19.0) wp_zehavi_19p5 = zehavi11_clustering(-19.5) wp_zehavi_20p0 = zehavi11_clustering(-20.0) wp_zehavi_20p5 = zehavi11_clustering(-20.5) wp_zehavi_21p0 = zehavi11_clustering(-21.0) wp_zehavi_21p5 = zehavi11_clustering(-21.5) fig, _axes = plt.subplots(2, 2, figsize=(10, 8)) ((ax1, ax2), (ax3, ax4)) = _axes axes = ax1, ax2, ax3, ax4 for ax in axes: __=ax.loglog() # __=ax.plot(rp_mids, rp_mids*wp_v3_20p5, '--', color='red') __=ax1.plot(rp_mids, wp_um_19p0, color='green') __=ax1.plot(rp_mids, wp_um2_19p0, '--', color='green') __=ax1.plot(rp_mids, wp_um3_19p0, '--', color='red') __=ax1.plot(rp_mids, wp_v3_19p0, ':', color='k') __=ax1.errorbar(rp_zehavi, wp_zehavi_19p0, 0.2*wp_zehavi_19p0, fmt='.', color='green') __=ax2.plot(rp_mids, wp_um_19p5, color='green') __=ax2.plot(rp_mids, wp_um2_19p5, '--', color='green') __=ax2.plot(rp_mids, wp_um3_19p5, '--', color='red') __=ax2.plot(rp_mids, wp_v3_19p5, ':', color='k') __=ax2.errorbar(rp_zehavi, wp_zehavi_19p5, 0.2*wp_zehavi_19p5, fmt='.', color='green') __=ax3.plot(rp_mids, wp_um_20p5, color='green') __=ax3.plot(rp_mids, wp_um2_20p5, '--', color='green') __=ax3.plot(rp_mids, wp_um3_20p5, '--', color='red') __=ax3.plot(rp_mids, wp_v3_20p5, ':', color='k') __=ax3.errorbar(rp_zehavi, wp_zehavi_20p5, 0.2*wp_zehavi_20p5, fmt='.', color='green') __=ax4.plot(rp_mids, wp_um_21p5, color='green') __=ax4.plot(rp_mids, wp_um2_21p5, '--', color='green') __=ax4.plot(rp_mids, wp_um3_21p5, '--', color='red') __=ax4.plot(rp_mids, wp_v3_21p5, ':', color='k') __=ax4.errorbar(rp_zehavi, wp_zehavi_21p5, 0.2*wp_zehavi_21p5, fmt='.', color='green') title1 = ax1.set_title(r'${\rm M_{r} < -19}$') title2 = ax2.set_title(r'${\rm M_{r} < -19.5}$') title3 = ax3.set_title(r'${\rm M_{r} < -20.5}$') title4 = ax4.set_title(r'${\rm M_{r} < -21.5}$') __=ax1.xaxis.set_ticks_position('none') __=ax1.set_xticklabels(['']) __=ax2.xaxis.set_ticks_position('none') __=ax2.set_xticklabels(['']) __=ax2.yaxis.set_ticks_position('none') __=ax2.set_yticklabels(['']) __=ax4.yaxis.set_ticks_position('none') __=ax4.set_yticklabels(['']) # wp_um_19p0 = wp(um_pos_19p0, rp_bins, pi_max, period=250, num_threads='max') # wp_um_20p0 = wp(um_pos_20p0, rp_bins, pi_max, period=250, num_threads='max') # wp_um_21p0 = wp(um_pos_21p0, rp_bins, pi_max, period=250, num_threads='max') # wp_um_21p5 = wp(um_pos_21p5, rp_bins, pi_max, period=250, num_threads='max') print(mock.keys()) satmask = mock['upid'] != -1 cluster_mask = mock['host_halo_mvir'] > 10**14 mw_mask = (mock['host_halo_mvir'] > 10**11.75) & (mock['host_halo_mvir'] < 10**12.5) sm_mask = (mock['obs_sm'] > 10**10) & (mock['obs_sm'] < 10**10.25) fig, ax = plt.subplots(1, 1) __=ax.hist(np.log10(mock['obs_ssfr'][satmask & mw_mask & 
sm_mask]), bins=50, normed=True, alpha=0.8, color='blue') __=ax.hist(np.log10(mock['obs_ssfr'][satmask & cluster_mask & sm_mask]), bins=50, normed=True, alpha=0.8, color='red') ###Output ['halo_id', 'upid', 'vpeak', 'mpeak', 'mvir', 'vmax', 'sm', 'sfr', 'obs_sm', 'obs_sfr', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'hostid', 'host_halo_x', 'host_halo_y', 'host_halo_z', 'host_halo_vx', 'host_halo_vy', 'host_halo_vz', 'host_halo_mvir', 'host_centric_x', 'host_centric_y', 'host_centric_z', 'host_centric_vx', 'host_centric_vy', 'host_centric_vz', 'obs_sfr_percentile', 'sfr_percentile', 'obs_ssfr', 'restframe_extincted_sdss_abs_magr', 'rmag2', 'rmag3']
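###Markdown For reference, the cumulative number density compared against Zehavi+11 above is conceptually just a count of galaxies brighter than each magnitude threshold, divided by the box volume. A minimal numpy sketch (the little-h conversions inside the actual `cosmodc2.mock_diagnostics.cumulative_nd` helper are omitted here, so treat this as an approximation): ###Code
import numpy as np

def simple_cumulative_nd(magr, volume, lumthresh):
    """n(<M_r): galaxies brighter (more negative) than each threshold, per unit volume."""
    sorted_magr = np.sort(magr)
    # number of entries with magr < threshold at each threshold
    counts = np.searchsorted(sorted_magr, lumthresh)
    return counts / volume

# e.g. simple_cumulative_nd(mock['restframe_extincted_sdss_abs_magr'], 250.**3, lumthresh_h1p0)
###Output _____no_output_____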
1-HelloWorld.ipynb
###Markdown 1. Hello World This practical is for you to get used to the Jupyter interface. The cell below prints "Hello World". ###Code print("Hello World") ###Output _____no_output_____ ###Markdown Printing something is a way to confirm that the setup is done and the environment is usable. Whenever we start using a new language, this is usually the first thing that we do. Try "printing" your name through the code below. ###Code #Try printing your name. print(None) ###Output _____no_output_____ ###Markdown Comments Comments are a way to document your code. Comments usually start with a hash (#) symbol. If you try to execute the comment below, nothing happens. ###Code #print("Hello World") ###Output _____no_output_____
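###Markdown For example, one possible way to complete the exercise above (any name works here): ###Code
name = "Ada"  # replace with your own name
print("Hello, " + name)
###Output _____no_output_____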
Deploy Steam Savings Calc.ipynb
###Markdown Tutorial: Creating and pushing calculated signalsHow to push calculated signals and scalars using Seeq Data Lab and manipulate worksheet items.- **Author:** Siang Lim- **Date:** June 6th 2022 Background**Steam savings** is one component in a typical refinery energy dashboard. In this notebook, we will demonstrate how to use Seeq Data Lab to calculate steam savings and push the results back to the Workbench, using the following tags in the Splitter as a demo:- **53FFR412** - Stripping Steam/Bottoms Ratio- **53FC128** - Bottoms to FCC flow CalculationsSteam savings is defined relative to a baseline steam flow to bottoms. For this tag, the baseline ratio is $R_b = 7.1$.$$\begin{align}F, \text{Steam Saved [lb/h]} &= (R_b - R_s) * F_B \\C, \text{Steam Cost [\$/(klb/h)]} &= \$13.51 \quad \text{ (from Planning dept or market prices)}\\S, \text{Savings [\$/day]} &= F \text{ [lb/h]} \cdot C \text{ [\$/(klb/h)]} \cdot 24 \text{ [h/day]} \cdot 1/1000 \text{ [(klb/lb)]}\end{align}$$Where,$$\begin{align}R_s &= \text{Current steam/bottoms ratio, 53FFR412, [unitless]} \\F_B &= \text{Bottoms flow, 53FC128, [lb/h]}\end{align}$$ Set up variables ###Code R_b = 7.1 cost = 13.51 ###Output _____no_output_____ ###Markdown Seeq formulaCheck out the Seeq training courses if you need a review of Seeq formulas. **Note that:** Seeq variables must start with a letter: `$f128` is valid, `$53ffr412` is not. **First, a signal:** Steam saved is a `signal`, and will be defined as follows. ###Code steam_saved = f"(({R_b}-$f412)*$f128/24).setUnits('lb/h')" steam_saved ###Output _____no_output_____ ###Markdown **Aside:** We've used the Python f-string syntax (as indicated by the `f` before the string) to embed expressions inside strings. In this case, our expression was `R_b`. For more information on f-strings, see https://realpython.com/python-f-strings/ **Second, a scalar:** Steam cost is a `scalar` (it has a single value), and will be defined as: ###Code steam_cost = f"({cost}).setunits('$/klb/h')" steam_cost ###Output _____no_output_____ ###Markdown **Third, another signal:** Finally, the savings per day is calculated as: ###Code savings = f"($ss*$co*24*(1/1000)).setunits('$/day').remove(islessthan(0))" savings ###Output _____no_output_____ ###Markdown Variable assignmentWe'll need to tell Seeq what those variables are: - `$f412`- `$f128`- `$ss`- `$co`Since `$ss` and `$co` in the savings calculation depend on previously calculated values, we will need to push the calculations to Seeq in 2 separate steps.We will see how to do this below. Step 1 - Import libraries ###Code from seeq import spy import pandas as pd pd.set_option('display.max_colwidth', None) ###Output _____no_output_____ ###Markdown Step 2 - Define tags and data source Replace `YOUR PI SERVER` with your PI server. ###Code my_items = pd.DataFrame({ 'Name': ['53FFR412', '53FC128'], 'Datasource Name': 'YOUR PI SERVER' }) my_items ###Output _____no_output_____ ###Markdown Step 3 - Search for tags using `spy.search` ###Code metadata_df = spy.search(my_items) metadata_df ###Output _____no_output_____ ###Markdown Step 4 - First, push PI tags back to Seeq WorkbenchYou can tell Seeq which workbook and worksheet you want to push the signal back to.
Leave it blank and it will push it to a default SDL workbook called `Data Lab >> Data Lab Analysis` and worksheet called `From Data Lab`. More information can be found in the SPy.push documentation below using `help(spy.push)`. ###Code help(spy.push) workbook_ID = '7836C665-2B4C-4B36-8262-BE5230E102A5' # Change this to where you want it worksheet_name = 'Dev 1' push_results = spy.push( workbook=workbook_ID, worksheet=worksheet_name, metadata=metadata_df) push_results ###Output _____no_output_____ ###Markdown > Navigate to the link above to see the results. You may need to replace the IP address with your Seeq server if it's not pointing to the right place (still trying to figure out how to fix this) WRONG: - http://`1.2.3.4`/892DF617-6104-4BFF-BCFC-6256FE4DFA7A/workbook/7836C665-2B4C-4B36-8262-BE5230E102A5/worksheet/9F20FD2B-F56E-4D04-8FCB-FF9A661EA39D RIGHT: - http://`MYSEEQSERVER`/892DF617-6104-4BFF-BCFC-6256FE4DFA7A/workbook/7836C665-2B4C-4B36-8262-BE5230E102A5/worksheet/9F20FD2B-F56E-4D04-8FCB-FF9A661EA39D Step 5 - Define calculationsIf you have multi-step calculations where a step depends on a previous calculation, you will need to break them down into several calculations.Here, we will do the steam saved and cost of steam first. In `Formula Parameters`, we are telling Seeq to grab the IDs of `53FFR412` and `53FC128` to assign tags to the variables we've defined.> Notice that we passed an entire DataFrame row to it; Seeq will automatically parse the row to find the IDs. We could've also passed the `metadata_df` row instead of `push_results`. However, we want to display all the signals (tags + calculated values), which we will see later in this notebook. Using `push_results` makes this step easier to understand (code-wise). ###Code calc_signals = [{'Name': 'Steam Saved', 'Type': 'Signal', 'Formula': steam_saved, 'Formula Parameters': {'$f412': push_results[push_results['Name'] == '53FFR412'], '$f128': push_results[push_results['Name'] == '53FC128'], } }, {'Name': 'Cost of Steam', 'Type': 'Scalar', 'Formula': steam_cost }] df_calcs = pd.DataFrame(calc_signals) df_calcs ###Output NameError: name 'steam_saved' is not defined Error found at line 3 in cell 1. ###Markdown Step 6 - Push first 2 calculations to Seeq Now we push the first 2 calcs, then store the results as `push_results_2`. ###Code push_results_2 = spy.push( workbook=workbook_ID, worksheet=worksheet_name, metadata=df_calcs) push_results_2 ###Output _____no_output_____ ###Markdown Step 7 - Push third calculation to Seeq Notice in `push_results_2`, we now have IDs for the steam saved and steam cost variables. Now we can push the savings signal and then store the results as `push_results_3`. ###Code calc_signals = [{'Name': 'Savings per Day', 'Type': 'Signal', 'Formula': savings, 'Formula Parameters': {'$ss': push_results_2[push_results_2['Name'] == 'Steam Saved'], '$co': push_results_2[push_results_2['Name'] == 'Cost of Steam'], } }] df_calcs = pd.DataFrame(calc_signals) df_calcs push_results_3 = spy.push( workbook=workbook_ID, worksheet=worksheet_name, metadata=df_calcs) push_results_3 ###Output _____no_output_____ ###Markdown Step 8 - Getting all signals displayed (Method 1)You may have noticed that every time we pushed a signal back to the worksheet, the earlier pushed signals are no longer displayed in the Workbench. However, they are still available in the 'Recently Accessed' menu.To make Seeq display ALL signals, we just need to combine all 3 `push_results` DataFrames and push all signals together.
There are, of course, other ways to do this that may be computationally more efficient, but this method seems to be the most straightforward (code-wise, and to understand what's going on for novice users). Use `pd.concat()` to merge the 3 dataframes ###Code df_combined = pd.concat([push_results, push_results_2, push_results_3]).reset_index(drop=True) df_combined push_results_final = spy.push( workbook=workbook_ID, worksheet=worksheet_name, metadata=df_combined) push_results_final ###Output _____no_output_____ ###Markdown > Take a look at the Workbench; you will see that all 5 signals are now displayed. Step 9 - Getting all signals displayed (Method 2)Alternatively, you could also pull the worksheet using `spy.workbooks` and then modify the `display_items`. ###Code workbooks_df = spy.workbooks.search({ 'ID': workbook_ID }) workbooks_df workbooks = spy.workbooks.pull(workbooks_df) workbooks ###Output _____no_output_____ ###Markdown Check out the worksheets ###Code workbooks[0].worksheets ###Output _____no_output_____ ###Markdown We see that the worksheet we want is the 3rd one. Let's look at the display items: ###Code worksheet_items = workbooks[0].worksheets[2].display_items worksheet_items ###Output _____no_output_____ ###Markdown It shows all 5 signals as expected. As long as we know the name and ID of a signal (tag), we can add it to the display. You could also remove a signal by removing a row here. Let's remove the first 2 signals then push it back. ###Code new_worksheet_items = worksheet_items.loc[2:,:] new_worksheet_items ###Output _____no_output_____ ###Markdown Reassign the dataframe ###Code workbooks[0].worksheets[2].display_items = new_worksheet_items ###Output _____no_output_____ ###Markdown Then push it back to the workbench ###Code spy.workbooks.push(workbooks) ###Output _____no_output_____
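###Markdown As a plain-Python sanity check of the arithmetic in the Calculations section (no Seeq connection needed; the current ratio and bottoms flow below are illustrative numbers, not plant data): ###Code
R_b, cost = 7.1, 13.51      # baseline ratio and steam cost [$/(klb/h)]
R_s, F_B = 6.8, 50000.0     # hypothetical current ratio and bottoms flow [lb/h]

F = (R_b - R_s) * F_B       # steam saved [lb/h]
S = F * cost * 24 / 1000    # savings [$/day]
print(round(F), round(S, 2))  # 15000 4863.6
###Output _____no_output_____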
3_math_for_datascience/04_Random_Variable_with_SciPy/20180223_04_09_F_distribution.ipynb
###Markdown The F distributionThe F distribution can be obtained from two random variables $\chi^2_{1,2}(n_{1, 2})$ that each follow a chi-squared distribution: divide each chi-squared sample by its degrees-of-freedom parameter (n) and take the ratio of the two.$$\dfrac{x_1 / n_1}{x_2/ n_2} \sim F(n_1, n_2)$$(PDF formula)$$f(x; n_1,n_2) = \dfrac{\sqrt{\dfrac{(n_1\,x)^{n_1}\,\,n_2^{n_2}} {(n_1\,x+n_2)^{n_1+n_2}}}} {x\,\text{Beta}\!\left(\frac{n_1}{2},\frac{n_2}{2}\right)}$$ Simulating the F distribution ###Code import numpy as np import scipy as sp from scipy import stats import matplotlib as mpt import matplotlib.pyplot as plt import seaborn as sns xx = np.linspace(0.03, 3, 1000) plt.hold(True) plt.plot(xx, sp.stats.f(1,1).pdf(xx), label="F(1,1)") plt.plot(xx, sp.stats.f(2,1).pdf(xx), label="F(2,1)") plt.plot(xx, sp.stats.f(5,2).pdf(xx), label="F(5,2)") plt.plot(xx, sp.stats.f(10,1).pdf(xx), label="F(10,1)") plt.plot(xx, sp.stats.f(20,20).pdf(xx), label="F(20,20)") plt.legend() plt.show() ###Output /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/ipykernel_launcher.py:2: MatplotlibDeprecationWarning: pyplot.hold is deprecated. Future behavior will be consistent with the long-time default: plot commands add elements without first clearing the Axes and/or Figure. /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/__init__.py:805: MatplotlibDeprecationWarning: axes.hold is deprecated. Please remove it from your matplotlibrc and/or style files. mplDeprecation) /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/rcsetup.py:155: MatplotlibDeprecationWarning: axes.hold is deprecated, will be removed in 3.0 mplDeprecation)
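###Markdown To tie the construction above to the simulation, a short sketch (reusing the imports and the `xx` grid defined above; `density=True` assumes matplotlib ≥ 2.1) that draws the ratio of two scaled chi-squared samples and overlays the matching F pdf: ###Code
n1, n2 = 5, 2
x1 = sp.stats.chi2(n1).rvs(100000)
x2 = sp.stats.chi2(n2).rvs(100000)
samples = (x1 / n1) / (x2 / n2)   # should follow F(n1, n2)

plt.hist(samples, bins=200, range=(0, 3), density=True, alpha=0.5)
plt.plot(xx, sp.stats.f(n1, n2).pdf(xx), label="F(5,2)")
plt.legend()
plt.show()
###Output _____no_output_____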
Labs/5.1-Lab2b_exercises/HCDE411-Week-5-Basic-Visualizations.ipynb
###Markdown Basic Data VisualizationsThis module shows a few different techniques for retrieving and visualizing data using pandas and matplotlib. We will also work with the original cars dataset. You will need to add that dataset to your notebook for some of these examples to work. ###Code import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) ###Output _____no_output_____ ###Markdown To retrieve some data to work with, you'll use a library called `pandas_datareader`, which allows you to connect to multiple external datasources. The documentation is here: https://pydata.github.io/pandas-datareader/ To install the library in your server, use the `pip` tool. Open a terminal session on your server (It is in the Launcher tab. You may need to start a new Launcher from the File menu.). In the terminal session type: `pip install pandas-datareader`. ###Code import pandas_datareader.data as web ###Output _____no_output_____ ###Markdown You will need the matplotlib library so that we can make charts. It is common practice to import it **as** `plt` - that's fewer characters to type, every time you want to access the functions. You also import `datetime`, to provide useful functions for working with dates (like getting the current time). ###Code import matplotlib.pyplot as plt import datetime as dt ###Output _____no_output_____ ###Markdown Next you'll retrieve stock tickers as an easy-to-access source of data to practice with. Then create a list to store the stock tickers. ###Code # Define the instruments to download. We would like to see Apple, Microsoft and the S&P500 index. tickers = ['AAPL'] #, 'MSFT', 'IBM'] (you can add more tickers as a list) # We would like all available data from 01/01/2017 until 12/31/2017. start_date = '2017-01-01' # you can set this to whatever date you want end_date = dt.datetime.now() # this puts the current time into a variable called end_date # This next function creates a pandas dataframe containing the results of the DataReader query # The 'yahoo' datasource provides the stock ticker info. (google and morningstar no longer work). # The results are stored as a dataframe called df (nice and short!) df = web.DataReader(tickers, data_source='yahoo', start=start_date, end=end_date) # Inspect the first 5 rows df.head() ###Output _____no_output_____ ###Markdown Now for the first visualization! You use the matplotlib library's plot function to access a basic line graph. It can take many parameters, but it needs at least the data to work with and plot on the y-axis, which can be requested from the column headings you just retrieved into the new dataframe. You can plot the low closing value from the 'Low' column, for example. ###Code df.plot(y='Low') ###Output _____no_output_____ ###Markdown Plotting multiple values is easy. Just specify which columns of the dataframe you want to plot. ###Code df[["High", "Low"]].plot() ###Output _____no_output_____ ###Markdown You can also change the aesthetics of the plot to meet your needs. There are a lot of pre-set styles that you can choose from (easiest) or you can make your own by modifying specific parameters of the plot function (harder). To list the available styles, use the `style.available` function. ###Code plt.style.available ###Output _____no_output_____ ###Markdown To use a specific style, call the `style.use` function and set the parameter to the name of the style you want. You need to call this function every time you want to change the style.
###Code plt.style.use("fivethirtyeight") #need to reset this every time you want to change the template df[["High", "Low"]].plot() plt.style.use("ggplot") df[["High", "Low"]].plot() ###Output _____no_output_____ ###Markdown Bar ChartsYou can also easily plot bar charts using matplotlib. Bar charts are good representations for ranking categorical and nominal data. This example uses Google stock data to create categories of how many closing days were Poor, Good, or Stellar, depending on how they compare to the average closing value over the whole time period. Suppose you want to answer the question: *"How many closing stock prices were low, medium, or high compared to the average closing price?"* To do this, you need to know the average price over that time period and to create three categories for the closing values, compared to that average. You can use python to create categories of data from the stock prices. First get stock prices for Google (over the same time period as above). Then calculate what the average (mean) price was over that time period. ###Code google = web.DataReader('GOOG', data_source='yahoo', start=start_date, end=end_date) google['Close'].mean() google ###Output _____no_output_____ ###Markdown You can use the mean price over that period to create three categories – depending upon whether the closing price on a day was lower, near it or above it. To do this, create a function that you use to evaluate each price and set its **rank performance**. You will pass this function the price on each row of the dataframe ###Code def rank_performance(stock_price): if stock_price <= 900: return "Poor" elif stock_price>900 and stock_price <=1200: return "Good" elif stock_price>1200: return "Stellar" ###Output _____no_output_____ ###Markdown You then run this custom function against each of the values in the **Close** column. ###Code google['Close'].apply(rank_performance) ###Output _____no_output_____ ###Markdown Note that the values haven't actually changed in the resulting data - you've simply computed a ranking for each value in the Close column; nothing is written back to the dataframe. To show the data hasn't changed, just view the object: ###Code google ###Output _____no_output_____ ###Markdown To finally create the bar chart of categories, you need to count how many times each ranking occurred. Conveniently, the `value_counts()` function does this. If you use dot "." notation to append this function to the other ones, you don't have to create an intermediate variable to store the counts. You can just pass along the results right on to the `.plot()` function. In this way, you are concatenating the results of each step with the "dot" notation. Note the `kind` parameter sets it to a bar chart.*get google 'Close' . -> apply the rank performance function . -> count the results . -> plot the results* ###Code google['Close'].apply(rank_performance).value_counts().plot(kind="bar") ###Output _____no_output_____ ###Markdown If for some reason, you wanted a horizontal bar chart, just set the `kind` parameter to `"barh"`. ###Code google["Close"].apply(rank_performance).value_counts().plot(kind="barh") ###Output _____no_output_____ ###Markdown Pie ChartsIt is similarly easy to plot categories with a pie chart, to create a part-to-whole comparison. First you load the results of the `DataReader` into a new variable to work with. Let's take Johnson & Johnson for example.
###Code jnj = web.DataReader('JNJ', data_source='yahoo', start='2016-01-01', end=dt.datetime.now()) jnj.head() ###Output _____no_output_____ ###Markdown How did the performance on each day compare to its average? First let's find out the average: ###Code jnj['Close'].mean() ###Output _____no_output_____ ###Markdown We can write another custom function to determine whether each value is above or below the average score over this time period. ###Code def above_or_below(stock_price): if stock_price >= 128.33: return "Above average" else: return "Below average" ###Output _____no_output_____ ###Markdown You can then create a pie chart based upon the values for the results of your custom function. Note the styling choices in this example. A full list of the styling parameters is in the matplotlib documentation: https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.pie.html ###Code labels='above','below' colors = ['mediumseagreen','lightcoral'] jnj["Close"].apply(above_or_below).value_counts().plot(kind='pie', legend=False, labels=labels, colors=colors) ###Output _____no_output_____ ###Markdown Scatter Plots Scatterplots require at least two columns of data, because you need to specify which axes to compare. To try out these examples, you need my `original cars.csv` dataset, on Kaggle. Use the `read_csv()` function to create a dataframe from the file. ###Code cars = pd.read_csv("data_cars_2004.csv") cars # show the head and tail of this file ###Output _____no_output_____ ###Markdown To show what a generic scatterplot might look like, you can create a bunch of random points and make them have random weights. ###Code N = 50 x = np.random.rand(N) y = np.random.rand(N) colors = np.random.rand(N) size = (30 * np.random.rand(N))**2 # 0 to 15 point radii plt.scatter(x, y, s=size, c=colors, alpha=0.5) plt.show() x=cars[['Hwy MPG']] y=cars[['HP']] cars[['Hwy MPG','HP']].plot(kind='scatter', x='Hwy MPG', y='HP', alpha=0.5) cars[['Hwy MPG','HP']].plot(kind='scatter',x='Hwy MPG', y='HP', alpha=0.5) ###Output _____no_output_____ ###Markdown You can access the `size` parameter to change how big the dots are, and the `figsize` parameter to adjust how big the graph is. ###Code #list(cars) size=cars[['Retail Price']] # we can use the size parameter to set the size of the marks cars[['Hwy MPG','HP']].plot(kind='scatter', x='Hwy MPG', y='HP', alpha=0.5, s=size*.005, figsize=(12,8)) ###Output _____no_output_____ ###Markdown DistributionsYou can easily plot the distribution of values in an axis (i.e., column) using the matplotlib `hist()` function. You can specify a list of only the columns you want. ###Code hist=cars.hist(column='Hwy MPG') ###Output _____no_output_____ ###Markdown You can plot several columns by passing a list to the `column` parameter. ###Code hist=cars.hist(column=['Hwy MPG', 'HP']) ###Output _____no_output_____ ###Markdown If you specify no parameters for which column, you get them all! In the example below, the figure is made larger so that the histograms don't overlap each other.
###Code hist=cars.hist(figsize=(16,12)) ###Output _____no_output_____ ###Markdown You can customize the histogram by providing the hist() method additional parameters and matplotlib styling: ###Code hist = cars.hist(column='Hwy MPG', bins=10, grid=False, figsize=(12,8), color='#4290be', zorder=2, rwidth=0.9) hist = hist[0] # cars.hist() returns a 2-D array of axes; take the first row so we can style each axes object for x in hist: # Switch off tickmarks x.tick_params(axis="both", which="both", bottom=False, top=True, labelbottom=True, left=False, right=False, labelleft=True) # Draw horizontal axis lines vals = x.get_yticks() for tick in vals: x.axhline(y=tick, linestyle='dashed', alpha=0.4, color='#eeeeee', zorder=1) # Set title (set to "" for no title!) x.set_title("Cars and MPG") # Set x-axis label x.set_xlabel("Miles per Gallon", labelpad=20, weight='bold', size=12) # Set y-axis label x.set_ylabel("Number of cars", labelpad=20, weight='bold', size=12) ###Output _____no_output_____ ###Markdown Exercises: Part 1Refer to the examples above to guide you in completing the following exercises. You may need to do some research in the Pandas or Matplotlib documentation to help you out. Exercise 1- Create a new dataframe that contains only the Name, Highway MPG rating, and Weight of each car in the dataset.- Display the last 15 entries in the dataframe- Use the new dataframe to create a bar chart that shows the number of cars for each MPG rating (e.g., there are 10 cars with an MPG of 32), with the values sorted in ascending order. ###Code ### Your code here ###Output _____no_output_____ ###Markdown Exercise 2- Create a table that shows the names of the top 10 heaviest cars.- Make a new dataframe for only these 10 "heaviest".- Create a horizontal bar chart that shows the top 5 "heaviest" in descending order, their labels and values. It should be sized at 10 x 8. The names of the vehicles should be shown to the left of the bars. Set the x and y labels to show "weight" and "Top 5 heaviest cars" (Hint: it may be easiest to do this with a new dataframe.) ###Code ### Your code here ###Output _____no_output_____ ###Markdown Exercise 3Flex your skills! Create your own arbitrary chart from any of the values in the Cars dataset. Document your approach in the markdown cells. Cite any external references. ###Code ### Your code here ###Output _____no_output_____ ###Markdown Exercises: Part 2Make sure that the lab file `Sample-Superstore-Orders.csv` is stored in your notebook before continuing with the following exercises. In Exercises 4-6, we'll use the Superstore Data we saw in the Tableau data cleaning exercises to create visualizations. Exercise 4Use the source data to create a histogram of the distribution of Sales in Illinois. Use your own judgement as to how to style and present the chart. ###Code ### Your code here ###Output _____no_output_____ ###Markdown Exercise 5Use the source data to create a time series of Sales data for Illinois, New York, and California. Your visualization should show data for all three states in the same chart. Use your own judgement as to how to style and present the chart. ###Code ### Your code here ###Output _____no_output_____ ###Markdown Exercise 6Locate your own data file for the final exercise. It can be any .csv file from any source that you have access to or that is online. Check Canvas for a list of data sources. Use your source data to create a new visualization.
Explain your chart, including choice of dimensions, values, and encodings in comments or a markdown text box.**Stretch Goal for one extra point: Create an additional visualization of a type that we did not cover in class. (e.g., bar chart, box plot, etc.)** ###Code ### Your code here ###Output _____no_output_____
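###Markdown If you get stuck on Exercises 4 and 5, the general pattern is the one used earlier in this lab: filter the rows you care about, then hand a column (or a pivot) to a plot call. A sketch with placeholder column names — check your CSV's actual headers before relying on `State`, `Sales`, or `Order Date`: ###Code
orders = pd.read_csv("Sample-Superstore-Orders.csv")

# Distribution of one column for a filtered subset (the Exercise 4 pattern)
orders[orders["State"] == "Illinois"]["Sales"].plot(kind="hist", bins=30)

# One line per state over time (the Exercise 5 pattern)
orders["Order Date"] = pd.to_datetime(orders["Order Date"])
subset = orders[orders["State"].isin(["Illinois", "New York", "California"])]
subset.pivot_table(index="Order Date", columns="State", values="Sales", aggfunc="sum").plot()
###Output _____no_output_____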
DecisionTree/C4.5DecisionTree.ipynb
###Markdown Dataset source: http://archive.ics.uci.edu/ml/datasets/Heart+Disease - this dataset was chosen because its features include both discrete (categorical) and continuous types ###Code
df = pd.read_csv("processed.cleveland.data",header=None,names=["age","sex","cp","trestbps","chol","fbs","restectg","thalach","exang","oldpeak","slope","ca","thal","num"])
df.head()
# numerical or categorical
for index in df:
    print(index,len(set(df[index])))
# drop ? values
print(len(df))
index = ["?" not in row for row in df.values]
# convert to np.float
dataset = df.values[index].astype(np.float32)
print(len(dataset))
# split to X,y
X,y = dataset[:,:-1],dataset[:,-1]
y = y.astype(np.int64)
print(X.shape)
print(y.shape)
kinds = ["categorical" if len(set(col))<6 else "numerical" for col in X.T]
print(len(kinds))
from sklearn.model_selection import train_test_split
from collections import Counter
X_train,X_test,y_train,y_test = train_test_split(X,y)
def entropy(y):
    precs = np.array(list(Counter(y).values()))/len(y)
    ent = np.sum(-1 * precs * np.log2(precs))
    return ent
# Decide which feature to split on
def decide_feature(X,y,fas,kinds):
    # fas refers to feature_available; if a feature can still be split on, its entry is True, else False
    (n_samples,n_features) = X.shape
    ers = np.zeros(n_features)
    bestfvs = np.zeros(n_features)
    for fi in range(n_features):
        if not fas[fi]:
            continue
        if kinds[fi] == "categorical":
            I,H = entropy(y),0
            for fv,c in Counter(X[:,fi]).items():
                splity = y[X[:,fi] == fv]
                proc = c/n_samples
                I -= proc * entropy(splity)
                H += -proc * np.log2(proc)
            ers[fi] = I/H
        else:
            for fv in list(sorted(set(X[:,fi])))[:-1]:
                splity_less = y[X[:,fi] <= fv]
                proc_less = len(splity_less) / n_samples
                splity_more = y[X[:,fi] > fv]
                proc_more = len(splity_more) / n_samples
                I = -proc_less * entropy(splity_less) - proc_more * entropy(splity_more) + entropy(y)
                H = -1*proc_less * np.log2(proc_less) - proc_more * np.log2(proc_more)
                if I/H > ers[fi]:
                    ers[fi] = I/H
                    bestfvs[fi] = fv
    return ers,bestfvs
fas = np.array([True]*len(y_train))
decide_feature(X_train,y_train,fas,kinds)
def build_tree(X,y,fas,kinds):
    counts = dict(Counter(y))
    if len(counts) == 1 or (fas==False).all():
        result = max(counts,key=counts.get)
        return {"counts":counts,"result":result}
    ers,bestfvs = decide_feature(X,y,fas,kinds)
    next_ = {}
    bestfi = np.argmax(ers)
    if kinds[bestfi] == "categorical":
        fas[bestfi] = False
        for fv in set(X[:,bestfi]):
            index = (X[:,bestfi] == fv)
            next_["{}{}".format("==",fv)] = build_tree(X[index],y[index],fas,kinds)
    else:
        bestfv = bestfvs[bestfi]
        index_less = X[:,bestfi] <= bestfv
        fas_less = fas.copy()
        if len(set(X[index_less][:,bestfi])) == 1:
            fas_less[bestfi] = False
        next_["{}{}".format("<=",bestfv)] = build_tree(X[index_less],y[index_less],fas_less,kinds)
        index_more = X[:,bestfi] > bestfv
        fas_more = fas.copy()
        if len(set(X[index_more][:,bestfi])) == 1:
            fas_more[bestfi] = False
        next_["{}{}".format(">",bestfv)] = build_tree(X[index_more],y[index_more],fas_more,kinds)
    return {"fi":bestfi,"counts":counts,"result":None,"next":next_}
fas = np.array([True]*X_train.shape[-1])
tree = build_tree(X_train,y_train,fas,kinds)
tree
# build_tree has two bugs: first, the divisor must not be 0 when computing the gain ratio; second, the termination condition needs improvement
def predict_one(x,kinds,tree):
    while tree["result"] == None:
        fi = tree["fi"]
        fv = x[fi]
        flag = False
        for condition in tree["next"]:
            if eval(str(fv)+condition):
                tree = tree["next"][condition]
                flag = True
                break
        if not flag:
            counts = tree["counts"]
            return max(counts,key=counts.get)
    return tree["result"]
class C45DecisionTree:
    @staticmethod
    def entropy(y):
        precs = np.array(list(Counter(y).values()))/len(y)
        ent = np.sum(-1 * precs * np.log2(precs))
        return ent

    # Decide which feature to split on
    def decide_feature(self,X,y,fas,kinds):
        # fas refers to feature_available; if a feature can still be split on, its entry is True, else False
        (n_samples,n_features) = X.shape
        ers = np.ones(n_features) * -1
        bestfvs = np.zeros(n_features)
        for fi in range(n_features):
            if not fas[fi]:
                continue
            if kinds[fi] == "categorical":
                I,H = self.entropy(y),0
                for fv,c in Counter(X[:,fi]).items():
                    splity = y[X[:,fi] == fv]
                    proc = c/n_samples
                    I -= proc * self.entropy(splity)
                    H += -proc * np.log2(proc)
                ers[fi] = I/(H+1e-7)
            else:
                # print(set(X[:,fi]))
                for fv in list(sorted(set(X[:,fi])))[:-1]:
                    splity_less = y[X[:,fi] <= fv]
                    proc_less = len(splity_less) / n_samples
                    splity_more = y[X[:,fi] > fv]
                    proc_more = len(splity_more) / n_samples
                    I = -proc_less * self.entropy(splity_less) - proc_more * self.entropy(splity_more) + self.entropy(y)
                    H = -1*proc_less * np.log2(proc_less) - proc_more * np.log2(proc_more)
                    if I/(H+1e-7) > ers[fi]:
                        ers[fi] = I/(H+1e-7)
                        bestfvs[fi] = fv
        return ers,bestfvs
    def build_tree(self,X,y,fas,kinds):
        counts = dict(Counter(y))
        result = max(counts,key=counts.get)
        # print("fas",fas,"len(counts)",len(counts))
        if len(counts) == 1 or (fas==False).all():
            return {"counts":counts,"result":result}
        ers,bestfvs = self.decide_feature(X,y,fas,kinds)
        if (ers == -1).all():
            return {"counts":counts,"result":result}
        next_ = {}
        bestfi = np.argmax(ers)
        # print(bestfi,ers)
        if kinds[bestfi] == "categorical":
            fas[bestfi] = False
            for fv in set(X[:,bestfi]):
                index = (X[:,bestfi] == fv)
                # print("next: {} {} {}, size:{}".format(bestfi,"==",fv,len(y[index])))
                next_["{}{}".format("==",fv)] = self.build_tree(X[index],y[index],fas,kinds)
        else:
            bestfv = bestfvs[bestfi]
            index_less = X[:,bestfi] <= bestfv
            fas_less = fas.copy()
            if len(set(X[index_less][:,bestfi])) == 1:
                fas_less[bestfi] = False
            # print("next: {} {} {}, size:{}".format(bestfi,"<=",bestfv,len(y[index_less])))
            next_["{}{}".format("<=",bestfv)] = self.build_tree(X[index_less],y[index_less],fas_less,kinds)
            index_more = X[:,bestfi] > bestfv
            fas_more = fas.copy()
            if len(set(X[index_more][:,bestfi])) == 1:
                fas_more[bestfi] = False
            # print("next: {} {} {}, size:{}".format(bestfi,">=",bestfv,len(y[index_more])))
            next_["{}{}".format(">",bestfv)] = self.build_tree(X[index_more],y[index_more],fas_more,kinds)
        return {"fi":bestfi,"counts":counts,"result":None,"next":next_}
    def fit(self,X,y,kinds):
        fas = np.array([True]*X.shape[-1])
        self.tree = self.build_tree(X,y,fas,kinds)
    def predict_one(self,x):
        tree = self.tree
        while tree["result"] == None:
            fi = tree["fi"]
            fv = x[fi]
            flag = False
            for condition in tree["next"]:
                if eval(str(fv)+condition):
                    tree = tree["next"][condition]
                    flag = True
                    break
            if not flag:
                counts = tree["counts"]
                return max(counts,key=counts.get)
        return tree["result"]
    def predict(self,X):
        # iterate over the argument X (the original looped over the global X_test by mistake)
        y_predicts = []
        for x in X:
            y_predicts.append(self.predict_one(x))
        return y_predicts
    def score(self,X_test,y_test):
        y_predicts = self.predict(X_test)
        return np.sum(y_predicts == y_test)/ len(y_test)
mytree = C45DecisionTree()
mytree.fit(X_train,y_train,kinds)
mytree.score(X_test,y_test) ###Output _____no_output_____
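###Markdown A quick hand check of the entropy helper on arrays whose entropy is known in closed form (the values below are exact, so they are easy to verify): ###Code
print(entropy(np.array([0, 0, 1, 1])))   # 1.0 bit for a 50/50 split
print(entropy(np.array([0, 0, 0, 1])))   # ~0.811 bits for a 75/25 split
print(entropy(np.array([1, 1, 1, 1])))   # -0.0: a pure node carries no entropy
###Output _____no_output_____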
201707104056+chenhe7.25.ipynb
###Markdown Lists (list)- A list can store a data collection of any size; you can think of it as a container ###Code
def b ():
    pass
a = [1,2,1,2,'ab',True,b,[1,2,3,]]
a
c = 'abd'
list(c)
a = [1,2,3,['ab']]
a ###Output _____no_output_____ ###Markdown First, a warm-up example ![](../Photo/115.png) Creating a list- a = [1,2,3,4,5] Common list operations ![](../Photo/116.png) ###Code
a = [100,200]
b = [1,2,3,4,[a]]
a in b
a = [100,200]
b = [1,2,3,4,a]
a in b
a = [1,2]
b = [1,2]
a+b
a = 'a'
a*5
a = [1,2,3,4,5,[100,200,[1000,[4000]]]]
a
a[5][2][1][0]
b = [1,2,3,4,5,6,7,8,9,10]
b
for i in range(0,10,2):
    b[i]=100
b
b=[1,2,3,4,5,6,7,8,9,10]
b
for i in range(0,10,3):
    print(b[i:i+2])
v = [1,2,3,[3,4]]
v
count = 0
for i in v:
    if type(i)==list:
        for j in i:
            count = count+1
    else:
        count =count+1
len(v) ###Output _____no_output_____ ###Markdown List indexing- Mylist[index]- forward and reverse indexing- always watch out for out-of-range indices- ![](../Photo/117.png) ###Code
a = [1,2,3]
max(a)
a.__iter__ # an object with this attribute can be iterated with a for loop
b = [4,3,2,1,]
b
def zwJ(b):
    n = len(b)
    for j in range(0,n-1):
        for i in range(0,n-1-j):
            if b[i]>b[i+1]:
                b[i],b[i+1]=b[i+1],b[i]
zwJ(b)
print(b) ###Output [1, 2, 3, 4] ###Markdown List slicing- Mylist[start:end]- forward and reverse slicing Lists and +, *, in, not in Traversing elements with a for loop- a for loop can traverse any iterable EP:- traverse a list with a while loop Comparing lists- >, >=, <=, ==, != List comprehensions: [x for x in range(10)] List methods ![](../Photo/118.png) ###Code
a = [1,2,3]
b = [100,200]
a.append(b)
a
a = [1,2,3]
b=[100,200]
b.extend(a)
c = [1,2,3,4]
c.insert(0,100)
c.insert(3,100)
c ###Output _____no_output_____ ###Markdown Splitting a string into a list- split splits on the separator you specify ###Code
c = [1,2,3,4,5,6]
for i in range(0,len(c)+3,3):
    c.insert(i,100)
c
a =[]
for i in range(10):
    if i%2==0:
        a.append(i)
a
[x for x in range(10) if x%2==0]
lst = [30,1,2,1,0]
lst.append(40)
lst
lst.insert(1,43)
lst
lst.remove(1)
lst
lst.pop(1)
lst
lst.pop()
lst
lst.sort()
lst
lst.reverse()
lst
a =[1,2,3]
b = a
b
a[0]=100
a
c =[1,2,3]
d = c.copy()
d
e = [1,2,3,[0,11]]
e
import copy
a = [1,2,3,[100,200]] ###Output _____no_output_____ ###Markdown EP: ![](../Photo/119.png)![](../Photo/120.png) ###Code
a
a.pop()
a = 'a b c d'
a.split(' ')
b = 'a!b!c!'
b.split('!')
b = 'a b c d'
b .split() ###Output _____no_output_____ ###Markdown Copying lists- copy: shallow copy- deepcopy (import copy): deep copy- http://www.pythontutor.com/visualize.html#mode=edit Sorting lists- sort- sorted- multi-level sorting of lists - anonymous (lambda) functions EP:- manually sort the list [5,3,8,0,17] in ascending or descending order - 1 ![](../Photo/121.png) ###Code
def best():
    a = eval(input(">>"))
    Best= max (a)
    print(Best)
    # grade each score numerically relative to the best score
    for s in a:
        if s >= Best-10:
            print('A')
        elif s >= Best-20:
            print('B')
        elif s >= Best-30:
            print('C')
        elif s >= Best-40:
            print('D')
best() ###Output >>[40,55,70,58] 70 C B A B ###Markdown - 2 ![](../Photo/122.png) ###Code
yuan = [1,2,35,41,51,64,7]
print (yuan)
fan = []
print('Output the list in reverse')
for i in range(len(yuan)):
    a = yuan.pop()
    fan.append(a)
print (fan) ###Output [1, 2, 35, 41, 51, 64, 7] Output the list in reverse [7, 64, 51, 41, 35, 2, 1] ###Markdown - 3 ![](../Photo/123.png) ###Code
def Count():
    lst = eval(input('>>'))
    x = eval(input('>>'))
    return lst.count(x)
Count() ###Output >>2,5,6,5,4,3,23,43,2 >>2 ###Markdown - 4 ![](../Photo/124.png) - 5 ![](../Photo/125.png) - 6 ![](../Photo/126.png) ###Code
def index():
    lst = eval(input('Enter a list of numbers'))
    print(lst.index(min(lst)))
index() ###Output Enter a list of numbers15,25,4,5,2,1,2 5 ###Markdown - 7 ![](../Photo/127.png)![](../Photo/128.png) ###Code
import random
lst =[1,2,3,4,5,6]
random.shuffle(lst)
print(lst)
def shuffle(lst):
    random.shuffle(lst)
    print(lst)
shuffle([1,2,3,4,5,6]) ###Output [2, 5, 4, 6, 3, 1] ###Markdown - 8 ![](../Photo/129.png) ###Code
def eliminateDuplicates(lst):
    # collect each value only the first time it appears
    result = []
    for i in lst:
        if i not in result:
            result.append(i)
    print(result)
eliminateDuplicates([1,2,3,2,1,6,3,4,5,2])
a = [1,2,3,4,5,6,1,2,3]
a ###Output _____no_output_____ ###Markdown - 9 ![](../Photo/130.png) ###Code
def isSort(lst):
    # True if the list is already in non-decreasing order
    for i in range(len(lst)-1):
        if lst[i] > lst[i+1]:
            return False
    return True
lst = eval(input('>>'))
print(isSort(lst)) ###Output >>[1,1,3,4,4,5,7,9,10,30,11] False ###Markdown - 10 ![](../Photo/131.png) ###Code
def bubbleSort(arr):
    n = len(arr)
    # iterate over all array elements
    for i in range(n):
        for j in range(0, n-i-1):
            if arr[j] > arr[j+1] :
                arr[j], arr[j+1] = arr[j+1], arr[j]
arr = [64, 34, 25, 12, 22, 11, 90,20]
bubbleSort(arr)
print ("Sorted array:")
for i in range(len(arr)):
    print ("%d" %arr[i]) ###Output Sorted array: 11 12 20 22 25 34 64 90
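###Markdown Following up on the shallow-copy cells above, a short sketch contrasting `copy.copy` with `copy.deepcopy` on a nested list: ###Code
import copy

a = [1, 2, 3, [100, 200]]
shallow = copy.copy(a)     # new outer list, but the inner list is shared
deep = copy.deepcopy(a)    # fully independent copy

a[3][0] = 999
print(shallow[3])          # [999, 200] - shares the inner list with a
print(deep[3])             # [100, 200] - unaffected
###Output _____no_output_____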
Where-2-relocate-4-job.ipynb
###Markdown Here is the reason if you need to relocate for the new job around the world In this project, I want to analyze the best place to start a new career across the continents as a Data Scientist, based on the data from the Stack Overflow 2017 survey.1- Where is a better place to move to find a job (North America, Asia, Europe or other places)?!2- What is the likelihood of an increase or decrease in salary?3- What about job satisfaction? ###Code import numpy as np import pandas as pd import math import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline ###Output _____no_output_____ ###Markdown Dataset ViewFirst let's look at the data set in hand to get some insights and learn about the available features ###Code df = pd.read_csv('survey_results_public.csv') df.head() ###Output _____no_output_____ ###Markdown The dataset is made up of 154 different features and there are some Null values which should be taken care of! Now let's see the histogram of the numeric features ###Code df.hist(figsize=(10, 10), bins=20, grid=False); ###Output _____no_output_____ ###Markdown What is the distribution of professions between individuals?There are 5 different professions, and most respondents (70%) are professional developers, which makes this dataset great to evaluate for the purpose of this project ###Code color = list('rgbkymc') profession_vals = df['Professional'].value_counts() print(len(profession_vals)) (profession_vals/df.shape[0]).plot(kind="bar", color=color); ###Output 5 ###Markdown Where are they from?As we expect most of them are from the USA, and if we omit India all others are mostly from Europe! ###Code country_vals = df['Country'].value_counts() print(len(country_vals)) (country_vals[:50]/df.shape[0]).plot(kind="bar", color=color, figsize=(15, 15)); ###Output 201 ###Markdown There are 18 countries with more than 500 developers. We can get an insight by grouping professions based on countries as follows ###Code profession_country = df.groupby('Professional')['Country'].value_counts().unstack().fillna(0) ###Output _____no_output_____ ###Markdown In later steps I consider putting all these 18 countries into their proper Location category, since developers in these countries should have an impact on the analysis. All these countries have more than 500 people attending the survey, and as can be seen below, in each country most of them are "professional developers". "Iran" is a surprise in this list. Did you spot China, Japan and South Korea... Where are they? ###Code profession_country_500 = profession_country.drop([col for col, val in profession_country.sum().iteritems() if val < 500], axis=1) profession_country_500 ###Output _____no_output_____ ###Markdown I suspect the result below is because developers are more conservative in East Asia, so we don't have much information on developers there ###Code profession_country[['South Korea', 'Japan', 'China', 'Iran']] empl_vals = df['EmploymentStatus'].value_counts() print(len(empl_vals)) (empl_vals/df.shape[0]).plot(kind="bar", color=color ); ###Output 7 ###Markdown At this point maybe it's better to think about changing your job, since most people think they are underpaid!!
###Code overpaid_vals = df['Overpaid'].value_counts() print(len(overpaid_vals)) (overpaid_vals/df.shape[0]).plot(kind="bar", color=color ); America = ['United States', 'Canada', 'Brazil', 'Argentina'] Europe = ['Liechtenstein', 'Switzerland', 'Iceland', 'Norway', 'Israel', 'Denmark', 'Ireland', 'United Kingdom', 'Germany', 'Netherlands', 'Sweden', 'Luxembourg', 'Austria', 'Finland', 'France', 'Belgium', 'Spain', 'Italy', 'Poland', 'Czech Republic', 'Romania'] Asia = ['Iran', 'Russian Federation','Israel', 'Australia', 'New Zealand', 'Thailand', 'Singapore', 'Hong Kong', 'South Korea', 'Japan', 'China', 'Taiwan', 'Malaysia', 'India', 'Indonesia', 'Vietnam', 'Qatar', 'Oman', 'United Arab Emirates'] df['Location'] = df['Country'].apply(lambda x: 'America' if x in America else ('Europe' if x in Europe else ('Asia' if x in Asia else 'Other'))) df['Location'].value_counts().plot(kind='bar', color=color); ###Output _____no_output_____ ###Markdown I will keep only the columns that are most suitable for this analysis, and consider only those who are professional developers and employed full-time. ###Code best_columns = ['Country', 'YearsCodedJob', 'EmploymentStatus', 'CareerSatisfaction', 'JobSatisfaction', 'JobSeekingStatus', 'HoursPerWeek', 'Salary', 'Location', 'Overpaid'] df = pd.DataFrame(df.query("Professional == 'Professional developer' and EmploymentStatus == 'Employed full-time'"))[best_columns] df overpaid_map = { 'Greatly underpaid' : 1, 'Somewhat underpaid' : 2, 'Neither underpaid nor overpaid' : 3, 'Somewhat overpaid' : 4, 'Greatly overpaid' : 5, np.nan: np.nan } # map handles missing values directly; unmapped entries become NaN df['Overpaid'] = df['Overpaid'].map(overpaid_map) df_comp = df.groupby(['Location','YearsCodedJob']).mean() year_map = {'1 to 2 years' : 1, '10 to 11 years' : 10, '11 to 12 years' : 11, '12 to 13 years' : 12, '13 to 14 years' : 13, '14 to 15 years' : 14, '15 to 16 years' : 15, '16 to 17 years' : 16, '17 to 18 years' : 17, '18 to 19 years' : 18, '19 to 20 years' : 19, '2 to 3 years' : 2, '20 or more years' : 20, '3 to 4 years' : 3, '4 to 5 years' : 4, '5 to 6 years' : 5, '6 to 7 years' : 6, '7 to 8 years' : 7, '8 to 9 years' : 8, '9 to 10 years' : 9, 'Less than a year' : 0} df_comp = df_comp.reset_index() df_comp df_comp['YearsCodedJob'] = df_comp['YearsCodedJob'].map(year_map) df_comp['YearsCodedJob'] = pd.to_numeric(df_comp['YearsCodedJob']) df_comp = df_comp.sort_values(by='YearsCodedJob') df_comp.set_index('YearsCodedJob', inplace=True) df_comp.groupby('Location')['Salary'].plot(legend=True, figsize=(10, 10)); plt.title("Range of Salary between different areas"); plt.xlabel('YearsCodedJob'); plt.ylabel('Average Salary'); df_comp.groupby('Location')['Overpaid'].plot(legend=True, figsize=(10, 10)) plt.title("Who thinks that they are Overpaid?"); plt.xlabel('YearsCodedJob') plt.ylabel('Overpaid'); df_comp.groupby('Location').mean().CareerSatisfaction df_comp.groupby('Location').mean().JobSatisfaction df_comp.groupby('Location').mean().Salary/12 plt.figure(figsize=(10, 8), dpi=80) # the third positional argument is the marker size, proportional to average monthly salary plt.scatter(df_comp.groupby('Location').mean().CareerSatisfaction, df_comp.groupby('Location').mean().JobSatisfaction, df_comp.groupby('Location').mean().Salary/12, c=['red','green','blue','yellow']) plt.title('Comparison of Career and Job Satisfaction\n(Red: America; Green: Asia; Blue: Europe; yellow: Other)') plt.xlabel('Career Satisfaction') plt.ylabel('Job Satisfaction'); ###Output _____no_output_____
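###Markdown To condense the comparison above into one table, here is a minimal sketch that ranks the four locations on the three metrics (1 = best on that dimension), using the df_comp frame built earlier: ###Code summary = df_comp.groupby('Location')[['Salary', 'CareerSatisfaction', 'JobSatisfaction']].mean() # rank each column so that 1 marks the best-performing location summary.rank(ascending = False).astype(int) ###Output _____no_output_____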
proyecto_6.ipynb
###Markdown Imports ###Code fig, ax = plt.subplots(figsize=(14,8)) sns.heatmap(basededatosimportaciones, cmap='jet') exportaciones=pd.read_excel("/content/export.xlsx") basededatosexportaciones=exportaciones.set_index("paises") basededatosexportaciones ###Output _____no_output_____ ###Markdown Exports ###Code fig, ax = plt.subplots(figsize=(14,8)) sns.heatmap(basededatosexportaciones, cmap='cool') balanza=pd.read_excel("/content/balanza.xlsx") basededatosbalanza=balanza.set_index("paises") basededatosbalanza ###Output _____no_output_____ ###Markdown Trade Balance ###Code fig, ax = plt.subplots(figsize=(14,8)) sns.heatmap(basededatosbalanza, cmap='inferno_r') ###Output _____no_output_____
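###Markdown As a quick consistency check, the trade balance should equal exports minus imports. A minimal sketch (it assumes the import and export tables share the same country index and year columns): ###Code # recompute the balance from the two tables and plot it the same way computed_balance = basededatosexportaciones - basededatosimportaciones fig, ax = plt.subplots(figsize=(14,8)) sns.heatmap(computed_balance, cmap='inferno_r') ###Output _____no_output_____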
examples/example-project_movie-likes.ipynb
###Markdown 2017-2018 Girls Who Code final project example: movies on FacebookBy: Rucheng Diao, diaorchThis iPython notebook hosts the code for the final project example for [Girls Who Code, DCMB UofM](http://umich.edu/~girlswc/) for school year 2017 - 2018. The [project data comes from Kaggle](https://www.kaggle.com/nazimamzz/imdb-dataset-of-5000-movie-posters/data). Input dataThis part imports Pandas and reads the input data from a previously downloaded dataset. ###Code import pandas as pd movieData = pd.read_csv('data/20170827-movie_stats-imdb_5000_movie_dataset_kaggle/movie stats - movie_metadata.csv') # preview of first several lines of data movieData.head() # checking what kinds of data there are in the table print(movieData.columns) # what is the range of title years of the movies movieData['title_year'].describe() ###Output _____no_output_____ ###Markdown Facebook likes vs title yearThis part plots the number of Facebook likes vs the title years of the movies, colored by the number of voting IMDB users. ###Code import matplotlib.pyplot as plt # iPython notebook setting: show figures inline %matplotlib inline # matplotlib setting: figure size plt.rcParams['figure.figsize'] = (16, 6) # values on x axis x = movieData.title_year # values on y axis y = movieData.movie_facebook_likes # values for color scale c = movieData.num_voted_users # plotting the scatter plot, using the x-s, y-s, and color values as set above fig = plt.scatter(x, y, alpha = 0.5, c = c, cmap = "autumn_r") # setting label for x-axis, including text and font size plt.xlabel("Title year", fontsize = 15) # setting label for y-axis, including text and font size plt.ylabel("Number of movie Facebook likes", fontsize = 15) # setting title for the whole figure, including text and font size plt.title("Relationship between title year and Facebook likes of movie", fontsize = 20) # setting title for color bar plt.colorbar(fig).set_label('Voted users', rotation = 270) x = movieData.title_year y = movieData.movie_facebook_likes c = movieData.num_voted_users fig = plt.scatter(x, y, alpha = 0.5, c = c, cmap = "autumn_r") # adding a vertical line for year 2004 plt.axvline(x = 2004, color = 'k', linestyle = '--') plt.xlabel("Title year", fontsize = 15) plt.ylabel("Number of movie Facebook likes", fontsize = 15) plt.title("Relationship between title year and Facebook likes of movie", fontsize = 20) plt.colorbar(fig).set_label('Voted users', rotation = 270) ###Output _____no_output_____ ###Markdown What are the best liked movies pre- and post-Facebook?We noticed that there are two dots in the figure above that have very high Facebook likes, one of which is from before Facebook came around, and the other one after. We might want to find out what those movies are. ###Code # How to find the dot that has over 250000 likes and is post Facebook? # Why do we need only one of the criteria? # finding movie that has over 250000 likes findDot_post = movieData[(movieData.movie_facebook_likes > 250000)] print(findDot_post.to_string()) # How to find the dot that has over 100000 likes and is pre Facebook? # Why do we need both criteria this time? 
# finding movie that has over 100000 likes AND the title year is before 2000 findDot_pre = movieData[(movieData.movie_facebook_likes > 100000) & (movieData.title_year < 2000)] print(findDot_pre.to_string()) ###Output movie_title color director_name num_critic_for_reviews duration director_facebook_likes actor_3_facebook_likes actor_2_name actor_1_facebook_likes gross genres actor_1_name num_voted_users cast_total_facebook_likes actor_3_name facenumber_in_poster plot_keywords movie_imdb_link num_user_for_reviews language country content_rating budget title_year actor_2_facebook_likes imdb_score aspect_ratio movie_facebook_likes 1937 The Shawshank Redemption Color Frank Darabont 199.0 142.0 0.0 461.0 Jeffrey DeMunn 11000.0 28341469.0 Crime|Drama Morgan Freeman 1689764 13495 Bob Gunton 0.0 escape from prison|first person narration|pris... http://www.imdb.com/title/tt0111161/?ref_=fn_t... 4144.0 English USA R 25000000.0 1994.0 745.0 9.3 1.85 108000 ###Markdown We are also curious which is the earliest movie that has a non-zero Facebook like count: ###Code findDot_first = movieData[(movieData.movie_facebook_likes > 0) & (movieData.title_year <=1925)] print(findDot_first.to_string()) ###Output movie_title color director_name num_critic_for_reviews duration director_facebook_likes actor_3_facebook_likes actor_2_name actor_1_facebook_likes gross genres actor_1_name num_voted_users cast_total_facebook_likes actor_3_name facenumber_in_poster plot_keywords movie_imdb_link num_user_for_reviews language country content_rating budget title_year actor_2_facebook_likes imdb_score aspect_ratio movie_facebook_likes 4810 Intolerance: Love's Struggle Throughout the Ages Black and White D.W. Griffith 69.0 123.0 204.0 9.0 Mae Marsh 436.0 NaN Drama|History|War Lillian Gish 10718 481 Walter Long 1.0 huguenot|intolerance|medicis|protestant|wedding http://www.imdb.com/title/tt0006864/?ref_=fn_t... 88.0 NaN USA Not Rated 385907.0 1916.0 22.0 8.0 1.33 691 4885 The Big Parade Black and White King Vidor 48.0 151.0 54.0 6.0 Renée Adorée 81.0 NaN Drama|Romance|War John Gilbert 4849 108 Claire Adams 0.0 chewing gum|climbing a tree|france|translation... http://www.imdb.com/title/tt0015624/?ref_=fn_t... 45.0 NaN USA Not Rated 245000.0 1925.0 12.0 8.3 1.33 226 ###Markdown What does the distribution of Facebook likes of movies look like?The histogram of movie Facebook likes is plotted below, and the summary statistics are printed too. Comparing the shape of the full movie data table with the table restricted to non-zero Facebook likes also shows that a lot of movies have 0 Facebook likes. 
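A quick way to check that directly is to compute the share of zero-like movies — a minimal sketch using the column already loaded: ###Code # fraction of movies with exactly zero Facebook likes (movieData.movie_facebook_likes == 0).mean() ###Output _____no_output_____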
###Code # plotting histogram of Facebook likes of movies plt.hist(movieData.movie_facebook_likes, bins = 200, color = 'r') plt.xlabel("Movie Facebook likes", fontsize = 15) plt.ylabel("Number of movies", fontsize = 15) plt.title("Distribution of movie Facebook likes", fontsize = 20) # checking the summary statistics of Facebook likes for movies movieData['movie_facebook_likes'].describe() movieData.shape # another way to check the summary statistics individually is to use functions in Numpy import numpy as np # checking the median value of movie Facebook likes movieFacebookLikesMedian = np.median(movieData['movie_facebook_likes']) print(movieFacebookLikesMedian) # subsetting data set, keeping only entries that have non-zero Facebook likes movieDataNonzero = movieData.loc[movieData.movie_facebook_likes != 0] # checking the dimension/shape of the subset data movieDataNonzero.shape ###Output _____no_output_____ ###Markdown Is there a relationship between movie Facebook likes and the gross income the movie makes?Here the relationship between movie Facebook likes and the gross that the movie made is explored. ###Code x = movieData.movie_facebook_likes y = movieData.gross c = movieData.num_voted_users fig = plt.scatter(x, y, alpha = 0.5, c = c, cmap = "autumn_r") plt.xlabel("Movie Facebook likes", fontsize = 15) plt.ylabel("Movie gross", fontsize = 15) plt.title("Relationship between movie Facebook likes and gross", fontsize = 20) plt.colorbar(fig) ###Output _____no_output_____ ###Markdown **But**, is it reasonable to assume that 7 million dollars made in 1990 is the same as 7 million dollars made in 2010? Advanced content: ###Code # new data input: CPI Urban from 1913 to 2016 cpi = {1913:9.9, 1914:10, 1915:10.1, 1916:10.9, 1917:12.8, 1918:15.1, 1919:17.3, 1920:20, 1921:17.9, 1922:16.8, 1923:17.1, 1924:17.1, 1925:17.5, 1926:17.7, 1927:17.4, 1928:17.1, 1929:17.1, 1930:16.7, 1931:15.2, 1932:13.7, 1933:13, 1934:13.4, 1935:13.7, 1936:13.9, 1937:14.4, 1938:14.1, 1939:13.9, 1940:14, 1941:14.7, 1942:16.3, 1943:17.3, 1944:17.6, 1945:18, 1946:19.5, 1947:22.3, 1948:24.1, 1949:23.8, 1950:24.1, 1951:26, 1952:26.5, 1953:26.7, 1954:26.9, 1955:26.8, 1956:27.2, 1957:28.1, 1958:28.9, 1959:29.1, 1960:29.6, 1961:29.9, 1962:30.2, 1963:30.6, 1964:31, 1965:31.5, 1966:32.4, 1967:33.4, 1968:34.8, 1969:36.7, 1970:38.8, 1971:40.5, 1972:41.8, 1973:44.4, 1974:49.3, 1975:53.8, 1976:56.9, 1977:60.6, 1978:65.2, 1979:72.6, 1980:82.4, 1981:90.9, 1982:96.5, 1983:99.6, 1984:103.9, 1985:107.6, 1986:109.6, 1987:113.6, 1988:118.3, 1989:124, 1990:130.7, 1991:136.2, 1992:140.3, 1993:144.5, 1994:148.2, 1995:152.4, 1996:156.9, 1997:160.5, 1998:163, 1999:166.6, 2000:172.2, 2001:177.1, 2002:179.9, 2003:184, 2004:188.9, 2005:195.3, 2006:201.6, 2007:207.3, 2008:215.303, 2009:214.537, 2010:218.056, 2011:224.939, 2012:229.594, 2013:232.957, 2014:236.736, 2015:237.017, 2016:240.007} # normalizing the gross income using CPI data import math movie_gross_normalized = [] for i in range(0, len(movieData)): one_movie_year = movieData['title_year'].iloc[i] # there are NaN-s in title year information if (math.isnan(one_movie_year)): one_movie_gross_normalized = np.NaN else: # using the dictionary of data to get the CPI data of the title year movie_cpi = cpi[one_movie_year] # normalizing the gross income of movie using the CPI of the year one_movie_gross_normalized = movieData['gross'].iloc[i] / movie_cpi # adding the calculated normalization result to the recording list movie_gross_normalized.append(one_movie_gross_normalized) x = movieData.movie_facebook_likes
y = movie_gross_normalized c = movieData.num_voted_users fig = plt.scatter(x, y, alpha = 0.5, c = c, cmap = "autumn_r") plt.xlabel("Movie Facebook likes", fontsize = 15) plt.ylabel("Movie gross", fontsize = 15) plt.title("Relationship between movie Facebook likes and gross", fontsize = 20) plt.colorbar(fig) ###Output _____no_output_____ ###Markdown Is a one-face poster the key to success for a movie?Hypothesis: movies whose posters show only one face are more successful, in terms of gross income. Explore the data to find out if it is true. ###Code # importing a plotting package called seaborn import seaborn as sns # resetting the graph size plt.rcParams['figure.figsize'] = (16, 6) # using seaborn to plot a box plot sns.boxplot(data = movieData, x = 'facenumber_in_poster', y = 'movie_facebook_likes', palette = 'Set3') # setting label for x-axis plt.xlabel("Number of faces in poster", fontsize = 15) # setting label for y-axis plt.ylabel("Movie Facebook likes", fontsize = 15) # setting title for the whole figure plt.title("Relationship between number of faces in poster and movie Facebook likes", fontsize = 20) ###Output _____no_output_____ ###Markdown We can't really see anything because the boxes are "squished" to the bottom. Why so? That is possibly because almost half of the movies have 0 likes, as we discovered above. So let's try again, removing movies with 0 likes. ###Code # using seaborn to plot a box plot sns.boxplot(data = movieDataNonzero, x = 'facenumber_in_poster', y = 'movie_facebook_likes', palette = 'Set3') # setting label for x-axis plt.xlabel("Number of faces in poster", fontsize = 15) # setting label for y-axis plt.ylabel("Movie Facebook likes, non-zero", fontsize = 15) # setting title for the whole figure plt.title("Relationship between number of faces in poster and movie Facebook likes", fontsize = 20) # counting the appearance of each possible value facenumber_freq = movieDataNonzero.facenumber_in_poster.value_counts() print(facenumber_freq) # finding the index - possible values - from the frequency table above facenumber_index = facenumber_freq.index # plotting a bar plot using seaborn sns.barplot(x = facenumber_index, y = facenumber_freq, palette = 'Set3') plt.xlabel("Number of faces in movie poster", fontsize = 15) plt.ylabel("Frequency", fontsize = 15) plt.title("Distribution of number of faces in movie poster", fontsize = 20) ###Output _____no_output_____
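###Markdown To back the plots above with numbers, a minimal sketch summarizing likes by face count on the non-zero subset defined earlier: ###Code # median Facebook likes for each face count in the poster movieDataNonzero.groupby('facenumber_in_poster')['movie_facebook_likes'].median() ###Output _____no_output_____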
project_wrangle_act.ipynb
###Markdown Project: Twitter Archive of WeRateDogs Chloe Xue &nbsp; &nbsp; June, 2019 Table of ContentsIntroductionData Wrangling Part I. Gathering Data Part II. Assessing Data Data Quality and Tidiness Part III. Cleaning Data Part IV. Storing Data Exploratory Data Analysis Part V. Analyzing and Visualizations Conclusions Introduction > In this project, the dataset I analyze is the tweet archive of Twitter user [@dog_rates](https://twitter.com/dog_rates), also known as [WeRateDogs](https://en.wikipedia.org/wiki/WeRateDogs). This Twitter account rates people's dogs with ratings that have a denominator of 10, along with comments. This archive contains basic tweet data (tweet ID, timestamp, text, etc.) for all 5,000+ tweets through August 1, 2017. > The objective of this project is wrangling the WeRateDogs Twitter data to draw interesting and trustworthy analyses and visualizations. The main focus of my project is performing data wrangling from a variety of sources and in a variety of formats, in the process of gathering, assessing and cleaning using Python and its libraries. > Introducing three datasets: - Twitter Archive. Originally the enhanced Twitter archive contains 5,000+ tweets with basic tweet data. It has been filtered to tweets with ratings only, along with information like dog name, stage, etc. - Image Prediction. This dataset is created by running every image in the WeRateDogs Twitter Archive through a neural network that classifies the top three predictions (corresponding to the most confident predictions) on breeds of dogs, alongside each tweet ID, image URL and the image number.- Retweets and Favorites. Queried from Twitter's API, this dataset provides the retweet count and favorite count of each tweet ID in the WeRateDogs Twitter Archive. ###Code import pandas as pd import numpy as np import requests import tweepy import json import re import matplotlib.pyplot as plt import seaborn as sb import statsmodels.api as sm from scipy.stats.stats import pearsonr %matplotlib inline ###Output /opt/conda/lib/python3.6/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead. from pandas.core import datetools ###Markdown Data Wrangling Part I. Gathering Data 1.a archive: WeRateDogs Twitter Archive dataset, which is provided on hand. Use 'read_csv' to read the data. 1.b image: Image Predictions dataset, which is hosted on Udacity's servers. Download it programmatically using the requests library.1.c tweets_data: Retweet count and favorite count dataset, which is generated by querying Twitter's API. ###Code # 1.a Read WeRateDogs archive data. archive = pd.read_csv('twitter-archive-enhanced.csv') # 1.b Use the requests library to read image prediction data. 
response = requests.get('https://d17h27t6h515a5.cloudfront.net/topher/2017/August/599fd2ad_image-predictions/image-predictions.tsv') with open('image_predictions.tsv', mode = 'wb') as file: file.write(response.content) # Read tsv data image = pd.read_csv('image_predictions.tsv', delimiter = '\t') #1.c Use Tweepy to query API consumer_key = '' consumer_secret = '' access_token = '' access_secret = '' auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_secret) api = tweepy.API(auth, wait_on_rate_limit = True,wait_on_rate_limit_notify = True) # Write JSON data into tweet_json.txt with open('tweet_json.txt', 'a', encoding = 'utf8') as f: for tweet_id in archive['tweet_id']: try: tweet = api.get_status(tweet_id, tweet_mode = 'extended') json.dump(tweet._json, f) f.write('\n') except: continue # Create an empty list and append each tweet's info into it tweets_data = [] tweet_json = open('tweet_json.txt', 'r') for line in tweet_json: try: tweet = json.loads(line) tweets_data.append(tweet) except: continue tweet_json.close() print(tweets_data[0]) tweets = pd.DataFrame() tweets['id'] = list(map(lambda tweet: tweet['id'],tweets_data)) tweets['retweet_count'] = list(map(lambda tweet: tweet['retweet_count'],tweets_data)) tweets['favorite_count'] = list(map(lambda tweet: tweet['favorite_count'],tweets_data)) tweets.head() ###Output _____no_output_____ ###Markdown Part II. Assessing Data Properties of data include Quality and Tidiness.Quality (issues with content) Dimensions:- Completeness- Validity- Accuracy- ConsistencyTidiness (issues with structure) Dimensions:- Each variable forms a column- Each observation forms a row- Each type of observational unit forms a table ###Code # check information, head and tail of archive. archive.info() archive.head(10) archive.tail(10) # check ratings on denominator and numerator. archive.rating_denominator.value_counts() archive.rating_numerator.value_counts() # Retweets in archive are not considered, so check the number of retweets. archive.retweeted_status_id.isna().value_counts() archive.in_reply_to_status_id.isna().value_counts() # Check if there are missing values in expanded_urls, which would mean the tweet does not include an image. archive.expanded_urls.isnull().value_counts() # check information, head of image. image.info() image.head(10) # Check if every tweet_id has an image. image.img_num.isnull().value_counts() # Check information and head of tweets. tweets.info() tweets.head() # Check if there are duplicates in the three datasets. print(archive.duplicated().value_counts()) print(image.duplicated().value_counts()) print(tweets.duplicated().value_counts()) # Check the length of the three datasets to see if the tweet counts match. print('archive counts = {}'.format(len(archive))) print('image counts = {}'.format(len(image))) print('tweets data count = {}'.format(len(tweets_data))) ###Output archive counts = 2356 image counts = 2075 tweets data count = 2335 ###Markdown Quality:- archive: Includes 181 retweets and 78 replies that are unnecessary. (1)- archive: 'expanded_urls' has missing values, meaning those tweets do not include an image. Ratings will not be considered without images. (2)- archive: 'rating_denominator' column has values not equal to 10. (3)- archive: 'rating_numerator' column has unexpected values. (4)- archive: Data type for timestamp is not correct. (5)- archive: 'source' column is not clean. (6)- image: Dog breed names have delimiters that need to be cleaned. Names should be capitalized. 
(8)- tweets: Data type for retweet_count and favorite_count should be integer. (9)- Counts of tweets are inconsistent across the three datasets. (10) Tidiness:- archive: 'doggo','floofer','pupper','puppo' columns refer to four categories of dog stage, which should be combined under one column: 'dog_stage'. (7)- image: Aggregate p1, p2 and p3 to classify each image into dog_type (Dog, Might Dog and Not Dog) and dog_breed (with the best confidence). (8)- tweets: This can be merged into the archive data. The three datasets should be merged into one final dataset. (10) > Note: The following Cleaning process follows a logical order instead of the order of the issue statements above. Some cleaning steps solve multiple issues at once, which makes the process easier to follow. The final dataset will reflect all issues being resolved. Part III. Cleaning Data ###Code # Make copies of original dataset. archive_clean = archive.copy() image_clean = image.copy() tweets_clean = tweets.copy() ###Output _____no_output_____ ###Markdown (1) Define Archive: Retweets are not considered. Delete the 181 retweets and 78 replies in the archive. Code ###Code archive_clean = archive_clean[archive_clean.retweeted_status_id.isna()] archive_clean = archive_clean[archive_clean.in_reply_to_status_id.isna()] # Drop in_reply_to_status_id and in_reply_to_user_id columns. # Drop retweet status columns. archive_clean.drop(columns = ['in_reply_to_status_id', 'in_reply_to_user_id','retweeted_status_id', 'retweeted_status_user_id','retweeted_status_timestamp'], inplace = True) ###Output _____no_output_____ ###Markdown Test ###Code archive_clean.info() ###Output <class 'pandas.core.frame.DataFrame'> Int64Index: 2097 entries, 0 to 2355 Data columns (total 12 columns): tweet_id 2097 non-null int64 timestamp 2097 non-null object source 2097 non-null object text 2097 non-null object expanded_urls 2094 non-null object rating_numerator 2097 non-null int64 rating_denominator 2097 non-null int64 name 2097 non-null object doggo 2097 non-null object floofer 2097 non-null object pupper 2097 non-null object puppo 2097 non-null object dtypes: int64(3), object(9) memory usage: 213.0+ KB ###Markdown (2) Define Archive: Missing values in 'expanded_urls' mean those tweets do not include images, which are not considered. Delete rows with missing values in this column. Code ###Code archive_clean = archive_clean[archive_clean.expanded_urls.notnull()] ###Output _____no_output_____ ###Markdown Test ###Code print(archive_clean.expanded_urls.isnull().value_counts()) # Drop expanded_urls column. archive_clean.drop(columns = ['expanded_urls'], inplace = True) archive_clean.info() ###Output <class 'pandas.core.frame.DataFrame'> Int64Index: 2094 entries, 0 to 2355 Data columns (total 11 columns): tweet_id 2094 non-null int64 timestamp 2094 non-null object source 2094 non-null object text 2094 non-null object rating_numerator 2094 non-null int64 rating_denominator 2094 non-null int64 name 2094 non-null object doggo 2094 non-null object floofer 2094 non-null object pupper 2094 non-null object puppo 2094 non-null object dtypes: int64(3), object(8) memory usage: 196.3+ KB ###Markdown (3) Define Archive: Ratings always have a denominator of 10. Clean rating_denominator rows with values not equal to 10. Code ###Code archive_clean.rating_denominator.value_counts() # Remove the urls in text: archive_clean['text'] = archive_clean['text'].str.split('http').str[0] # Create a sub-dataset with denominator not equal to 10. 
df1 = archive_clean[archive_clean.rating_denominator != 10] # Shrink df1 to columns only containing comments and ratings. df1 = df1[['tweet_id','text','rating_numerator','rating_denominator']] pd.options.display.max_rows pd.set_option('display.max_colwidth', -1) df1 # Reading directly from the text, we first fix some obvious errors in the ratings. archive_clean.loc[archive_clean.tweet_id == 740373189193256964, ['rating_numerator','rating_denominator']] = [14,10] archive_clean.loc[archive_clean.tweet_id == 722974582966214656, ['rating_numerator','rating_denominator']] = [13,10] archive_clean.loc[archive_clean.tweet_id == 716439118184652801, ['rating_numerator','rating_denominator']] = [11,10] archive_clean.loc[archive_clean.tweet_id == 682962037429899265, ['rating_numerator','rating_denominator']] = [10,10] archive_clean.loc[archive_clean.tweet_id == 666287406224695296, ['rating_numerator','rating_denominator']] = [9,10] # Work on the remaining abnormal ratings. df1 = archive_clean[archive_clean.rating_denominator != 10] df1 = df1[['tweet_id','text','rating_numerator','rating_denominator']] pd.options.display.max_rows pd.set_option('display.max_colwidth', -1) df1 # Calculate the ratio for each of these ratings and convert it to an equivalent rating with a denominator of 10. df1['rating_score'] = df1['rating_numerator'] / df1['rating_denominator'] df1['new_rating_numerator'] = df1['rating_score'] * 10 df1 # Change the ratings, rounding to the nearest integer. archive_clean.loc[archive_clean.tweet_id == 820690176645140481, ['rating_numerator','rating_denominator']] = [12,10] # There is no actual rating for tweet 810984652412424192, so we change it to 10/10 archive_clean.loc[archive_clean.tweet_id == 810984652412424192, ['rating_numerator','rating_denominator']] = [10,10] archive_clean.loc[archive_clean.tweet_id == 758467244762497024, ['rating_numerator','rating_denominator']] = [11,10] archive_clean.loc[archive_clean.tweet_id == 731156023742988288, ['rating_numerator','rating_denominator']] = [12,10] archive_clean.loc[archive_clean.tweet_id == 713900603437621249, ['rating_numerator','rating_denominator']] = [11,10] archive_clean.loc[archive_clean.tweet_id == 710658690886586372, ['rating_numerator','rating_denominator']] = [10,10] archive_clean.loc[archive_clean.tweet_id == 709198395643068416, ['rating_numerator','rating_denominator']] = [9,10] archive_clean.loc[archive_clean.tweet_id == 704054845121142784, ['rating_numerator','rating_denominator']] = [12,10] archive_clean.loc[archive_clean.tweet_id == 697463031882764288, ['rating_numerator','rating_denominator']] = [11,10] archive_clean.loc[archive_clean.tweet_id == 684222868335505415, ['rating_numerator','rating_denominator']] = [11,10] archive_clean.loc[archive_clean.tweet_id == 677716515794329600, ['rating_numerator','rating_denominator']] = [12,10] archive_clean.loc[archive_clean.tweet_id == 675853064436391936, ['rating_numerator','rating_denominator']] = [11,10] ###Output _____no_output_____ ###Markdown Test ###Code archive_clean.rating_denominator.value_counts() ###Output _____no_output_____ ###Markdown (4) Define Archive: rating_numerator has unexpected values. Clean rating_numerator values that are unexpectedly large or small. Code ###Code archive_clean.rating_numerator.value_counts() # Check the unexpected numbers that are greater than 14. 
df2 = archive_clean[archive_clean.rating_numerator > 14] df2 = df2[['tweet_id','text','rating_numerator','rating_denominator']] pd.options.display.max_rows pd.set_option('display.max_colwidth', -1) df2 # We will round 9.75/10, 11.27/10 and 11.26/10 to the closest integer. archive_clean.loc[archive_clean.tweet_id == 786709082849828864, 'rating_numerator'] = 10 archive_clean.loc[archive_clean.tweet_id == 778027034220126208, 'rating_numerator'] = 11 archive_clean.loc[archive_clean.tweet_id == 680494726643068929, 'rating_numerator'] = 11 # For tweets with ratings 1776/10 and 420/10, we can't get accurate information, so we leave them as 10/10 archive_clean.loc[archive_clean.tweet_id == 749981277374128128, 'rating_numerator'] = 10 archive_clean.loc[archive_clean.tweet_id == 670842764863651840, 'rating_numerator'] = 10 # Check the unexpected numbers that are less than 10. df3 = archive_clean[archive_clean.rating_numerator < 10] df3 = df3[['tweet_id','text','rating_numerator','rating_denominator']] pd.options.display.max_rows pd.set_option('display.max_colwidth', -1) df3 # Change the first rating with an obvious error. archive_clean.loc[archive_clean.tweet_id == 883482846933004288, ['rating_numerator','rating_denominator']] = [14,10] ###Output _____no_output_____ ###Markdown The numerator should normally be greater than 10. After reading the comments, however, I find that the lower ratings are real; some ratings are low because the pictures are not dogs. So I decide to keep the low ratings, but I would like to dig deeper into numerators that are lower than 5. ###Code df4 = archive_clean[archive_clean.rating_numerator < 5] df4 = df4[['tweet_id','text','rating_numerator','rating_denominator']] pd.options.display.max_rows pd.set_option('display.max_colwidth', -1) df4 # Change the first rating with an obvious error. archive_clean.loc[archive_clean.tweet_id == 695064344191721472, ['rating_numerator','rating_denominator']] = [13,10] ###Output _____no_output_____ ###Markdown Test ###Code archive_clean.rating_numerator.value_counts() ###Output _____no_output_____ ###Markdown (5) Define Archive: Data type for 'timestamp' is not correct. It should be datetime. Code ###Code archive_clean['timestamp'] = archive_clean['timestamp'].str.split('+').str[0] archive_clean.timestamp = pd.to_datetime(archive_clean.timestamp) ###Output _____no_output_____ ###Markdown Test ###Code archive_clean.info() archive_clean.timestamp.head(2) ###Output _____no_output_____ ###Markdown (6) Define Archive: The source column is messy and needs to be cleaned. Extract only the source name from the strings. Code ###Code archive_clean.source.value_counts() archive_clean['source'] = archive_clean['source'].apply(lambda x: re.search('rel="nofollow">(.*)</a>', x).group(1)) # Change the data type of source to categorical. 
archive_clean['source'] = pd.Categorical(archive_clean['source']) ###Output _____no_output_____ ###Markdown Test ###Code print(archive_clean.source.value_counts()) archive_clean.info() archive_clean.head(2) ###Output Twitter for iPhone 1962 Vine - Make a Scene 91 Twitter Web Client 30 TweetDeck 11 Name: source, dtype: int64 <class 'pandas.core.frame.DataFrame'> Int64Index: 2094 entries, 0 to 2355 Data columns (total 11 columns): tweet_id 2094 non-null int64 timestamp 2094 non-null datetime64[ns] source 2094 non-null category text 2094 non-null object rating_numerator 2094 non-null int64 rating_denominator 2094 non-null int64 name 2094 non-null object doggo 2094 non-null object floofer 2094 non-null object pupper 2094 non-null object puppo 2094 non-null object dtypes: category(1), datetime64[ns](1), int64(3), object(6) memory usage: 182.2+ KB ###Markdown (7) Define Archive: 'doggo','floofer','pupper','puppo' columns refer to four categories of dog stage, which should be combined under one column: 'dog_stage'. Code ###Code archive_clean.columns # Count how many tweets have no reference to any of the four dog stages. archive_clean[(archive_clean.doggo != 'None') + (archive_clean.floofer!='None') + (archive_clean.pupper!='None') + (archive_clean.puppo!='None') == 0].shape # Count how many tweets reference more than one dog stage. archive_clean[(archive_clean.doggo != 'None') + (archive_clean.floofer!='None') + (archive_clean.pupper!='None') + (archive_clean.puppo!='None') > 1].shape # Create a sub dataset with no reference to dog stages (all four columns are 'None'), creating a new column 'dog_stage'. # Assign its values to 'None'. all_none = archive_clean[(archive_clean.doggo != 'None') + (archive_clean.floofer!='None') + (archive_clean.pupper!='None') + (archive_clean.puppo!='None') == 0] all_none = all_none.assign(dog_stage = 'None') all_none = all_none.drop(['doggo', 'floofer','pupper', 'puppo'],axis = 1) # Use the melt function to unpivot the four columns under a new column 'dog_stage', keeping values that are not 'None'. archive_clean = pd.melt(archive_clean, id_vars = ['tweet_id', 'timestamp', 'source', 'text','rating_numerator', 'rating_denominator', 'name'], value_vars = ['doggo', 'floofer','pupper', 'puppo'],var_name = 'dog_stage', value_name = 'Bool') archive_clean = archive_clean[archive_clean.Bool != 'None'] del archive_clean['Bool'] # Append the all_none sub dataset to archive_clean. archive_clean = archive_clean.append(all_none) archive_clean = archive_clean.reset_index(drop = True) # Check if there are duplicates. archive_clean[archive_clean.tweet_id.duplicated()] # Clean the duplicates. Check the length to see if it matches with before. archive_clean = archive_clean.drop_duplicates('tweet_id') archive_clean.shape ###Output _____no_output_____ ###Markdown Test ###Code archive_clean['dog_stage'] = pd.Categorical(archive_clean['dog_stage']) archive_clean.info() ###Output <class 'pandas.core.frame.DataFrame'> Int64Index: 2094 entries, 0 to 2104 Data columns (total 8 columns): tweet_id 2094 non-null int64 timestamp 2094 non-null datetime64[ns] source 2094 non-null category text 2094 non-null object rating_numerator 2094 non-null int64 rating_denominator 2094 non-null int64 name 2094 non-null object dog_stage 2094 non-null category dtypes: category(2), datetime64[ns](1), int64(3), object(2) memory usage: 119.0+ KB ###Markdown (8) Define image: Aggregate p1, p2 and p3 to classify each image into dog_type (Dog, Might Dog and Not Dog) and dog_breed (with the best confidence). 
Code ###Code image_clean = image_clean.drop(columns = ['jpg_url','img_num']) image_clean.shape image_clean.sample(5) ###Output _____no_output_____ ###Markdown My approach for classifying predictions:1. If all top three predictions are 'true' for dog, dog_type is Dog. The breed is the p1 prediction, which has the best confidence.2. If all top three predictions are 'false' for dog, dog_type is Not Dog. The breed is 'Unknown' with p1 confidence.3. For mixed 'true' and 'false' predictions, compare the sums of the true and false confidences and identify a threshold such that if the true confidence is higher than that value, dog_type is Dog. Otherwise, dog_type is Might Dog. ###Code # Sum all true p_conf for each tweet. image_clean['true_conf'] = (image_clean['p1_conf'].where(image_clean['p1_dog'] == True, 0) + image_clean['p2_conf'].where(image_clean['p2_dog'] == True, 0) + image_clean['p3_conf'].where(image_clean['p3_dog'] == True, 0)) # Sum all false p_conf separately for each tweet. image_clean['false_conf'] = (image_clean['p1_conf'].where(image_clean['p1_dog'] == False, 0) + image_clean['p2_conf'].where(image_clean['p2_dog'] == False, 0) + image_clean['p3_conf'].where(image_clean['p3_dog'] == False, 0)) image_clean.sample(5) image_clean.true_conf.describe() image_clean.false_conf.describe() # Create a sub dataset where all predictions are true for dog. all_true = image_clean[image_clean.false_conf == 0] # Create three new columns: dog_type, dog_breed and confidence. Assign values. all_true = all_true.assign(dog_type = 'Dog') all_true = all_true.assign(dog_breed = all_true.p1) all_true = all_true.assign(confidence = all_true.p1_conf) # Create a sub dataset where all predictions are false for dog. all_false = image_clean[image_clean.true_conf == 0] # Create three new columns: dog_type, dog_breed and confidence. Assign values. all_false = all_false.assign(dog_type = 'Not Dog') all_false = all_false.assign(dog_breed = 'Unknown') all_false = all_false.assign(confidence = all_false.p1_conf) # Create a sub dataset for mixed true and false. mix = image_clean.loc[(image_clean['true_conf'] != 0) & (image_clean['false_conf'] != 0)] # Confirm the combined length of all three sub datasets matches the original image dataset. print(len(all_true) + len(all_false) + len(mix)) print(len(image_clean)) # Check the statistics for true confidence. mix.true_conf.describe() # Check the statistics for false confidence. mix.false_conf.describe() # Using the false_conf Q3 as the threshold, check the range of false_conf and make sure those values fall below it. mix[mix.true_conf > 0.464286].false_conf.describe() # Create a sub dataset mix_true where true_conf is greater than the threshold. Assign values. mix_true = mix[mix.true_conf > 0.464286] mix_true = mix_true.assign(dog_type = 'Dog') mix_true = mix_true.assign(dog_breed = mix_true.p1) mix_true = mix_true.assign(confidence = mix_true.p1_conf) # Create a sub dataset mix_might where true_conf is at most the threshold. Assign values. mix_might = mix[mix.true_conf <= 0.464286] mix_might = mix_might.assign(dog_type = 'Might Dog') mix_might = mix_might.assign(dog_breed = 'Unknown') mix_might = mix_might.assign(confidence = mix_might.false_conf) # Append all sub datasets. image_clean = all_true.append([mix_true,mix_might,all_false]) # Change data type for dog_type and dog_breed to categorical. image_clean['dog_type'] = pd.Categorical(image_clean['dog_type']) image_clean['dog_breed'] = pd.Categorical(image_clean['dog_breed']) # Make sure the length is not changed. 
image_clean.shape # Round confidence to four decimal places. image_clean = image_clean.round({ 'confidence': 4}) # Clean the delimiters in dog breed names. image_clean['bre'] = image_clean['dog_breed'].astype(str).str.split('_') image_clean['dog_breed'] = image_clean['bre'].apply(' '.join) image_clean['bre'] = image_clean['dog_breed'].astype(str).str.split('-') image_clean['dog_breed'] = image_clean['bre'].apply(' '.join) # Capitalize each word of the dog breed name. image_clean['dog_breed'] = image_clean['dog_breed'].str.title() # Drop the p1, p2 and p3 columns. image_clean = image_clean.drop(columns = ['p1', 'p1_conf', 'p1_dog', 'p2', 'p2_conf','p2_dog', 'p3','p3_conf', 'p3_dog', 'true_conf', 'false_conf', 'bre']) ###Output _____no_output_____ ###Markdown Test ###Code image_clean.info() image_clean.sample(5) ###Output _____no_output_____ ###Markdown (9) Define tweets: Data type for retweet_count and favorite_count should be integer. Code ###Code tweets = tweets.astype({"retweet_count": int, "favorite_count": int}) ###Output _____no_output_____ ###Markdown Test ###Code tweets.info() ###Output <class 'pandas.core.frame.DataFrame'> RangeIndex: 2335 entries, 0 to 2334 Data columns (total 3 columns): id 2335 non-null int64 retweet_count 2335 non-null int64 favorite_count 2335 non-null int64 dtypes: int64(3) memory usage: 54.8 KB ###Markdown (10) Define Combine the three dataframes together to get the final table. Code ###Code archive_clean.info() image_clean.info() tweets.info() tweets = tweets.rename(columns = {'id':'tweet_id'}) tweets.info() # Check the length of the three datasets. 
ratedogs.to_csv('twitter_archive_master.csv', encoding = 'utf-8', index = False) ratedogs_clean = pd.read_csv('twitter_archive_master.csv') ratedogs_clean.head() ###Output _____no_output_____ ###Markdown Explanatory Data Analysis Part V. Analyzing and Visulization ###Code def rating_distribution(): """ First plot I would like to look at the distribution of WeRateDogs rating scores. Since denominator is always 10, I use rating_denominator as variable measuring rating scores. """ # data setup df = pd.read_csv('twitter_archive_master.csv') # plotting plt.figure(figsize = [8,6]) bin_edges = np.arange(0, df['rating_numerator'].max()+1,1); plt.hist(data = df, x = 'rating_numerator', bins = bin_edges); plt.xlabel('Rating Score') plt.ylabel('Tweet Count') plt.title('WeRateDogs Rating Distribution') rating_string = ['I use basic histogram plotting the rating score distribution.', ' As you can see, the rating score has a long-tailed distribution with few scores below 10, is right-skewed.', ' Large proportion of rating score takes on range 10 to 13.', ' Scores can be lower to 0 which could be considered as unique cases, such as the picture is not a dog.'] print((''.join(rating_string))) def correlation_plot(): """ For this plot, I first want to find out the bivariate correlation between retweet count and favorite count. """ # data setup df = pd.read_csv('twitter_archive_master.csv') # plotting plt.figure(figsize = [8,6]) sb.set(color_codes = True) sb.regplot(x = "retweet_count", y = "favorite_count", data = df) plt.xlabel('retweet count') plt.ylabel('favorite count') plt.title('Correlation between Retweet and Favorite') r = pearsonr(np.array(df.retweet_count),np.array(df.favorite_count))[0] correlation_string = ['I use basic scatter plot to visualize the correlation between retweet count and favorite count.', ' As you can see, retweet numbers and favortie count are strongly positively correlated with a Pearson Correlation Coefficient = {:.4f}'.format(r), '. This means, with a larger count of retweet, favorite count increases.', ' Most tweets retweeted and favorited for 20,000 and 50,000 times.', ' The slope of fitted line interprets that people tend to favorite a twitter than retweet it.'] print((''.join(correlation_string))) def rating_prediction1(): """ I would like to find if two predictor variables, retweet and favorite are significant to predict rating score using linear regression model. """ # data setup df = pd.read_csv('twitter_archive_master.csv') # Use linear regression model to identify if retweet and favorite count could significantly predict rating score. df['intercept'] = 1 # Downgrade scale of retweet and favorite to 1000 to match with scale of rating score. df[['retweet_count','favorite_count']] /= 1000 lm = sm.OLS(df['rating_numerator'], df[['intercept','retweet_count','favorite_count']]) results = lm.fit() print(results.summary()) prediction_string1 = ['Reading from model results, p-values for two predictor variables, retweet and favorite are zero.', ' It can be interpreted as retweet and favorite count are significant to predict rating score.', ' Coefficient of favorite count is 0.1375, which means for every 137.5 additional favorite count,', ' I would expect rating score to increase by an average of 0.1375.', ' I was surprised the coefficient for retweet is negative.'] print((''.join(prediction_string1))) def rating_prediction2(): """ I would like to get an intuitively visual on relationships between retweet/favorite and rating. 
""" # data setup df = pd.read_csv('twitter_archive_master.csv') # plotting plt.figure(figsize = [10,5]) df.groupby('rating_numerator')[['retweet_count','favorite_count']].mean().plot() plt.xlabel('Rating Score'); plt.ylabel('Count Numbers'); plt.legend(['retweet', 'favorite']); plt.title('Average Retweet and Favorite Count for Rating Scores'); plt.show() prediction_string2 = ['Two interesting findings from the plot: One is when the rating score is zero,', ' favorite count reaches about 23000 which is rare.', ' Second finding is, with rating score is 9 and above, favorite and retweet count are increasing,', ' as rating score is increasing.', ' Users tend to favorite a tweet rather than retweet.'] print((''.join(prediction_string2))) def dogstage_plot(): """ Analyzing on dog stage data, I would like to see the comparisons on ratings, retweet and favorite counts for four different dog stages. I use bar chart for this plot. """ # data setup df = pd.read_csv('twitter_archive_master.csv') df.loc[df.dog_stage == 'None', 'dog_stage'] = None dogstage = df.groupby('dog_stage')[['retweet_count','favorite_count','rating_numerator']].mean() dogstage = dogstage.reset_index() dogstage.rename(columns={"retweet_count": "retweet", "favorite_count": "favorite","rating_numerator":"rating"}, inplace = True) # scale down 1000 times for retweet and favorite count numbers to get them match with rating score. dogstage[['retweet','favorite']] /= 1000 # Reorder the dog stage from young to age. floofer is a reference of fur amounts so I leave it at the last. dogstage.dog_stage = pd.Categorical(dogstage.dog_stage,categories = ["pupper","puppo","doggo","floofer"], ordered = True) dogstage.sort_values('dog_stage', inplace = True) dogstage = pd.melt(dogstage, id_vars = 'dog_stage', value_vars = ['retweet','favorite','rating'], value_name = 'Numbers') # plotting plt.figure(figsize = [10,5]) ax = sb.barplot(data = dogstage, x = 'dog_stage',y = 'Numbers', hue = 'variable') ax.set_xlabel('Dog Stages'); ax.set_ylabel('Average Numbers'); ax.set_title('Average Retweet, Favorite and Rating Count for Different Dog Stages'); stage_string = ['I use bar plot for this multivariate exploration.', ' pupper is the youngest dog stage and doggo is the oldest stage.', ' As you can see, teenager dog (puppo) is most popular from all three dimensions. ', ' The popularity towards furry dog (floofer) is neutral.'] print((''.join(stage_string))) def source_plot(): """ Analyzing on source data, I would like to see proportions of four different sources. I use pie chart for this plot. """ # data setup df = pd.read_csv('twitter_archive_master.csv') source = df['source'].value_counts() x = np.array(source.index) y = np.array(source) percent = 100.*y/y.sum() # plotting patches, texts = plt.pie(y, startangle = 90) labels = ['{0} - {1:1.2f} %'.format(i,j) for i,j in zip(x, percent)] sort_legend = True plt.legend(patches, labels, loc = 'center left', bbox_to_anchor = (-0.1, 1.), fontsize = 8) plt.show() source_string = ['I use pie chart for this source exploration.',' Largest proportion for source is Twitter from iPhone to a large ratio.', ' Other three sources: Vine, Web Client and TweetDeck share a small proportions on WeRateDogs tweet sources.'] print((''.join(source_string))) def breed_ranking(): """ Given the prediction of dog breed, I would like to see the ranking of dog breed in the perspectives of rating and tweet count. I use a bar chart for this plot. 
""" # data setup df = pd.read_csv('twitter_archive_master.csv') df.loc[df.dog_breed == 'Unknown', 'dog_breed'] = None most_count_breed = df.dog_breed.value_counts().nlargest(20) most_rating_breed = df.groupby('dog_breed')['rating_numerator'].mean().nlargest(20) # plotting plt.figure(figsize = [12,20]) plt.subplot(2,1,1) most_count_breed.plot(kind = 'barh',color = (0.2, 0.4, 0.6, 0.6)) plt.xlabel('Number of Tweet Count') plt.ylabel('Dog Breed') plt.title('WeRateDogs Top 20 Tweeted Dog Breeds') plt.gca().invert_yaxis() plt.subplot(2,1,2) most_rating_breed.plot(kind = 'barh',color = (0.2, 0.4, 0.6, 0.6)) plt.xlabel('Average Rating on Dog Breed') plt.ylabel('Dog Breed') plt.title('WeRateDogs Top 20 Rated Dog Breeds') plt.gca().invert_yaxis() breed_string = ['I use bar chart for this breed ranking.', ' As you can see, Golden Retriever, Pembroke are stars in dog breed as they appear in both ranking.'] print((''.join(breed_string))) def prediction_ranking(): """ For this plot, I would like to find out the most confidently predicted dog breed ranking. And the least confidently predicted dog breed ranking.""" # data setup df = pd.read_csv('twitter_archive_master.csv') df_high10 = df.groupby('dog_breed').confidence.mean().sort_values(ascending=True).iloc[-10:] df_low10 = df.groupby('dog_breed').confidence.mean().sort_values(ascending=False).iloc[-10:] # plotting plt.figure(figsize = [12,12]) plt.subplot(2,1,1) df_high10.plot(kind = 'barh', color = (0.2, 0.4, 0.6, 0.6)) plt.xlabel('Confidence Level') plt.ylabel('Dog Breed') plt.title('Top 10 Most Confidently Predicted Dog Breed') plt.subplot(2,1,2) df_low10.plot(kind = 'barh', color = (0.2, 0.4, 0.6, 0.6)) plt.xlabel('Confidence Level') plt.ylabel('Dog Breed') plt.title('10 Least Confidently Predicted Dog Breed') plt.show() pre_rank_string = ['Breeds with "Terrier" are predicted with a low confidence level. '] print((''.join(pre_rank_string))) ###Output _____no_output_____ ###Markdown Plot 1. Rating Distrition: How does 'WeRateDogs' rate dogs on scores? ###Code rating_distribution() ###Output I use basic histogram plotting the rating score distribution. As you can see, the rating score has a long-tailed distribution with few scores below 10, is right-skewed. Large proportion of rating score takes on range 10 to 13. Scores can be lower to 0 which could be considered as unique cases, such as the picture is not a dog. ###Markdown Plot 2. Correlation Plot: If a tweet is retweetd a lot, is it being favorited a lot meantime? ###Code correlation_plot() ###Output I use basic scatter plot to visualize the correlation between retweet count and favorite count. As you can see, retweet numbers and favortie count are strongly positively correlated with a Pearson Correlation Coefficient = 0.9272. This means, with a larger count of retweet, favorite count increases. Most tweets retweeted and favorited for 20,000 and 50,000 times. The slope of fitted line interprets that people tend to favorite a twitter than retweet it. ###Markdown Analysis on Retweet and Favorite in Predicting Rating Score Using Linear Regression: If a tweet being retweeted and favorite a lot, does it mean it has a high rating? ###Code rating_prediction1() ###Output OLS Regression Results ============================================================================== Dep. Variable: rating_numerator R-squared: 0.180 Model: OLS Adj. R-squared: 0.179 Method: Least Squares F-statistic: 229.8 Date: Sun, 23 Jun 2019 Prob (F-statistic): 6.05e-91 Time: 02:28:49 Log-Likelihood: -4356.8 No. 
Observations: 2094 AIC: 8720. Df Residuals: 2091 BIC: 8737. Df Model: 2 Covariance Type: nonrobust ================================================================================== coef std err t P>|t| [0.025 0.975] ---------------------------------------------------------------------------------- intercept 9.9793 0.052 191.861 0.000 9.877 10.081 retweet_count -0.2055 0.024 -8.462 0.000 -0.253 -0.158 favorite_count 0.1375 0.009 15.224 0.000 0.120 0.155 ============================================================================== Omnibus: 722.533 Durbin-Watson: 1.872 Prob(Omnibus): 0.000 Jarque-Bera (JB): 2626.560 Skew: -1.690 Prob(JB): 0.00 Kurtosis: 7.323 Cond. No. 19.8 ============================================================================== Warnings: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified. Reading from model results, p-values for two predictor variables, retweet and favorite are zero. It can be interpreted as retweet and favorite count are significant to predict rating score. Coefficient of favorite count is 0.1375, which means for every 137.5 additional favorite count, I would expect rating score to increase by an average of 0.1375. I was surprised the coefficient for retweet is negative. ###Markdown Plot 3. Retweet/Favorite on Rating Score Plot: Will highly rated dogs cause big trend on retweet and favorite? ###Code rating_prediction2() ###Output _____no_output_____ ###Markdown Plot 4. Popularity on Three Dimensions(Retweet, Favorite and Rating) on dog stage: People like young dog better or old dog better? ###Code dogstage_plot() ###Output I use bar plot for this multivariate exploration. pupper is the youngest dog stage and doggo is the oldest stage. As you can see, teenager dog (puppo) is most popular from all three dimensions. The popularity towards furry dog (floofer) is neutral. ###Markdown Plot 5. Where are tweets coming from? ###Code source_plot() ###Output _____no_output_____ ###Markdown Plot 6. What are the dog breeds that are tweeted most and rated highest on WeRateDogs? ###Code breed_ranking() ###Output I use bar chart for this breed ranking. As you can see, Golden Retriever, Pembroke are stars in dog breed as they appear in both ranking. ###Markdown Plot 7. What kinds of dog breed are confidently to predict? ###Code prediction_ranking() ###Output _____no_output_____
Practical Data Science/Practical Data Science - Duke University/4_pandas/Series.ipynb
###Markdown Exercise 1Use the code below to get started: ###Code import pandas as pd gdppercap = pd.Series([34605, 34493, 12393, 44200, 10041, 58138, 4709, 49284, 10109, 42536], index=['Bahrain', 'Belgium', 'Bulgaria', 'Ireland', 'Macedonia', 'Norway', 'Paraguay', 'Singapore', 'South Africa', 'Switzerland'] ) ###Output _____no_output_____ ###Markdown Exercise 2Find the mean, median, minimum and maximum values of GDP per capita in this data. ###Code gdppercap.describe() gdppercap.median() # (another option to get the median) import numpy as np np.percentile(gdppercap, 50) ###Output _____no_output_____ ###Markdown Exercise 3Programmatically, determine which country in our data has the highest income per capita, and which has the lowest income per capita. Hint: Country names form the index for this Series, so to get country names you’ll need to access the index. ###Code gdppercap.sort_values() # Lowest GDP gdppercap.sort_values().iloc[:1] # Highest GDP gdppercap.sort_values().iloc[-1:] ###Output _____no_output_____ ###Markdown Exercise 4Get Python to print out the names of all the countries that have GDP per capitas less than $20,000. ###Code cond = gdppercap < 20000 gdppercap[cond].sort_values() ###Output _____no_output_____ ###Markdown Exercise 5Get Python to print out the GDP per capita of Switzerland ###Code gdppercap['Switzerland'] ###Output _____no_output_____ ###Markdown Exercise 6 Calculate the Gini coefficient for our income data. (Formula provided in the exercise page)**HINT 1**: Be careful with 0-indexing! Python counts from 0, but mathematical formulas count from 1!**HINT 2**: I’m gonna make you calculate Gini coefficients again later, so maybe you should write a function to do this and make life easier later? ![image.png](attachment:e06ebada-d2d7-4e0c-9eee-98fa6d030735.png) ###Code def calculate_gini(input_series): input_copy = input_series.copy() # Prepare the data input_df = input_copy.sort_values().reset_index().reset_index() n = len(input_copy) input_df.columns = ['i','country','y'] input_df['i'] += 1 # Calculate Gini parte1_num = 2*np.sum(input_df['i']*input_df['y']) parte1_den = n*np.sum(input_df['y']) part2 = (n+1)/n gini = parte1_num/parte1_den - part2 return gini gini = calculate_gini(gdppercap) gini ###Output _____no_output_____ ###Markdown Exercise 7Using this data on average growth rates in GDP per capita, and assuming growth rates from 2000 to 2018 continue into the future, estimate what our Gini Coefficient may look like in 2025 (remembering that income in our data is from 2008, so we’re extrapolating ahead 17 years)? ###Code avg_growth = pd.Series([-0.29768835, 0.980299584, 4.52991925, 3.686556736, 2.621416804, 0.775132075, 2.015489468, 3.345793635, 1.349993318, 0.982775018], index=['Bahrain', 'Belgium', 'Bulgaria', 'Ireland', 'Macedonia', 'Norway', 'Paraguay', 'Singapore', 'South Africa', 'Switzerland'] ) ###Output _____no_output_____ ###Markdown ![image.png](attachment:b8836b5b-f7b9-4776-8d2a-38a002db53d7.png) ###Code future_gdppercap = gdppercap * (1 + avg_growth/100)**17 future_gdppercap future_gini = calculate_gini(future_gdppercap) future_gini # EXTRA: Using the same growth numbers, what would the next 50 years look like? gini_values = [] for i in range(50): gini_values.append(calculate_gini(gdppercap * (1 + avg_growth/100)**i)) pd.Series(gini_values).plot() ###Output _____no_output_____
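###Markdown As a cross-check on calculate_gini, a minimal vectorized sketch of the same formula using NumPy: ###Code import numpy as np def gini_np(values): # same formula as above, without building a DataFrame y = np.sort(np.asarray(values, dtype=float)) n = len(y) i = np.arange(1, n + 1) return 2 * np.sum(i * y) / (n * np.sum(y)) - (n + 1) / n gini_np(gdppercap) # should match calculate_gini(gdppercap) ###Output _____no_output_____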
yucheng_ner/tplinker_ner/Evaluation.ipynb
###Markdown Load Data
###Code
test_data_dict = {}
for file_name, path in test_data_path_dict.items():
    test_data_dict[file_name] = json.load(open(path, "r", encoding = "utf-8"))
###Output
_____no_output_____
###Markdown Split
###Code
# init tokenizers
if use_bert:
    bert_tokenizer = BertTokenizerFast.from_pretrained(bert_config["path"], add_special_tokens = False, do_lower_case = False)
word2idx = json.load(open(word2idx_path, "r", encoding = "utf-8"))
word_tokenizer = WordTokenizer(word2idx)

# preprocessor
tokenizer4preprocess = bert_tokenizer if use_bert else word_tokenizer
preprocessor = Preprocessor(tokenizer4preprocess, use_bert)

def split(data, max_seq_len, sliding_len, data_name = "train"):
    '''
    split into short texts
    '''
    max_tok_num = 0
    for sample in tqdm(data, "calculating the max token number of {}".format(data_name)):
        text = sample["text"]
        tokens = preprocessor.tokenize(text)
        max_tok_num = max(max_tok_num, len(tokens))
    print("max token number of {}: {}".format(data_name, max_tok_num))

    if max_tok_num > max_seq_len:
        print("max token number of {} is greater than the setting, need to split!".format(data_name, data_name, max_seq_len))
        short_data = preprocessor.split_into_short_samples(data, max_seq_len, sliding_len = sliding_len, data_type = "test")
    else:
        short_data = data
        max_seq_len = max_tok_num
        print("max token number of {} is less than the setting, no need to split!".format(data_name, data_name, max_tok_num))
    return short_data, max_seq_len

# all_data = []
# for data in list(test_data_dict.values()):
#     all_data.extend(data)

# max_tok_num = 0
# for sample in tqdm(all_data, desc = "Calculate the max token number"):
#     tokens = tokenize(sample["text"])
#     max_tok_num = max(len(tokens), max_tok_num)

# split_test_data = False
# if max_tok_num > config["max_test_seq_len"]:
#     split_test_data = True
#     print("max_tok_num: {}, lagger than max_test_seq_len: {}, test data will be split!".format(max_tok_num, config["max_test_seq_len"]))
# else:
#     print("max_tok_num: {}, less than or equal to max_test_seq_len: {}, no need to split!".format(max_tok_num, config["max_test_seq_len"]))
# max_seq_len = min(max_tok_num, config["max_test_seq_len"])

# if config["force_split"]:
#     split_test_data = True
#     print("force to split the test dataset!")

ori_test_data_dict = copy.deepcopy(test_data_dict)
test_data_dict = {}
max_seq_len_all_data = []
for file_name, data in ori_test_data_dict.items():
    split_data, max_seq_len_this_data = split(data, max_seq_len, sliding_len, file_name)
    max_seq_len_all_data.append(max_seq_len_this_data)
    test_data_dict[file_name] = split_data
max_seq_len = max(max_seq_len_all_data)
print("final max_seq_len is {}".format(max_seq_len))

for filename, short_data in test_data_dict.items():
    print("example number of {}: {}".format(filename, len(short_data)))
###Output
_____no_output_____
###Markdown Decoder (Tagger)
###Code
meta = json.load(open(meta_path, "r", encoding = "utf-8"))
tags = meta["tags"]
if meta["visual_field_rec"] > handshaking_kernel_config["visual_field"]:
    handshaking_kernel_config["visual_field"] = meta["visual_field_rec"]
    print("Recommended visual_field is greater than current visual_field, reset to rec val: {}".format(handshaking_kernel_config["visual_field"]))

handshaking_tagger = HandshakingTaggingScheme(tags, max_seq_len, handshaking_kernel_config["visual_field"])
###Output
_____no_output_____
###Markdown Character indexing
###Code
char2idx = json.load(open(char2idx_path, "r", encoding = "utf-8"))

def text2char_indices(text, max_seq_len = -1):
    char_ids = []
    chars = list(text)
    for c in chars:
        if c not in char2idx:
            char_ids.append(char2idx['<UNK>'])
        else:
            char_ids.append(char2idx[c])
    if len(char_ids) < max_seq_len:
        char_ids.extend([char2idx['<PAD>']] * (max_seq_len - len(char_ids)))
    if max_seq_len != -1:
        char_ids = torch.tensor(char_ids[:max_seq_len]).long()
    return char_ids
###Output
_____no_output_____
###Markdown Dataset
###Code
class MyDataset(Dataset):
    def __init__(self, data):
        self.data = data

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)

# max word num, max subword num, max char num
def cal_max_tok_num(data, tokenizer):
    max_tok_num = 0
    for example in data:
        text = example["text"]
        max_tok_num = max(max_tok_num, len(tokenizer.tokenize(text)))
    return max_tok_num

all_data = []
for data in list(test_data_dict.values()):
    all_data.extend(data)

max_word_num = cal_max_tok_num(all_data, word_tokenizer)
print("max_word_num: {}".format(max_word_num))
if use_bert:
    max_subword_num = cal_max_tok_num(all_data, bert_tokenizer)
    print("max_subword_num: {}".format(max_subword_num))

subword_tokenizer = bert_tokenizer if use_bert else None
data_maker = DataMaker(handshaking_tagger,
                       word_tokenizer,
                       subword_tokenizer,
                       text2char_indices,
                       max_word_num,
                       max_subword_num,
                       max_char_num_in_tok)
###Output
_____no_output_____
###Markdown Model
###Code
if char_encoder_config is not None:
    char_encoder_config["char_size"] = len(char2idx)
if word_encoder_config is not None:
    word_encoder_config["word2idx"] = word2idx

ent_extractor = TPLinkerNER(char_encoder_config,
                            word_encoder_config,
                            flair_config,
                            handshaking_kernel_config,
                            enc_hidden_size,
                            activate_enc_fc,
                            len(tags),
                            bert_config,
                            )
ent_extractor = ent_extractor.to(device)
###Output
_____no_output_____
###Markdown Metrics
###Code
metrics = Metrics(handshaking_tagger)
###Output
_____no_output_____
###Markdown Prediction
###Code
# get model state paths
model_state_dir = config["model_state_dict_dir"]
target_run_ids = set(config["run_ids"])
run_id2model_state_paths = {}
for root, dirs, files in os.walk(model_state_dir):
    for file_name in files:
        run_id = root.split("-")[-1]
        if re.match(".*model_state.*\.pt", file_name) and run_id in target_run_ids:
            if run_id not in run_id2model_state_paths:
                run_id2model_state_paths[run_id] = []
            model_state_path = os.path.join(root, file_name)
            run_id2model_state_paths[run_id].append(model_state_path)

def get_last_k_paths(path_list, k):
    path_list = sorted(path_list, key = lambda x: int(re.search("(\d+)", x.split("/")[-1]).group(1)))
    # pprint(path_list)
    return path_list[-k:]

# only last k models
k = config["last_k_model"]
for run_id, path_list in run_id2model_state_paths.items():
    run_id2model_state_paths[run_id] = get_last_k_paths(path_list, k)

print("Following model states will be loaded: ")
pprint(run_id2model_state_paths)

def filter_duplicates(ent_list):
    ent_memory_set = set()
    filtered_ent_list = []
    for ent in ent_list:
        ent_memory = "{}\u2E80{}\u2E80{}".format(ent["tok_span"][0], ent["tok_span"][1], ent["type"])
        if ent_memory not in ent_memory_set:
            filtered_ent_list.append(ent)
            ent_memory_set.add(ent_memory)
    return filtered_ent_list

def predict(test_dataloader, ori_test_data):
    '''
    test_data: if split, it would be samples with subtext
    ori_test_data: the original data has not been split, used to get original text here
    '''
    pred_sample_list = []
    for batch_test_data in tqdm(test_dataloader, desc = "Predicting"):
        sample_list = batch_test_data["sample_list"]
        tok2char_span_list = batch_test_data["tok2char_span_list"]
        del batch_test_data["sample_list"]
        del batch_test_data["tok2char_span_list"]

        for k, v in batch_test_data.items():
            if k not in {"padded_sents"}:
                batch_test_data[k] = v.to(device)

        with torch.no_grad():
            batch_pred_shaking_outputs = ent_extractor(**batch_test_data)
        batch_pred_shaking_tag = (batch_pred_shaking_outputs > 0.).long()

        for ind in range(len(sample_list)):
            sample = sample_list[ind]
            text = sample["text"]
            text_id = sample["id"]
            tok2char_span = tok2char_span_list[ind]
            pred_shaking_tag = batch_pred_shaking_tag[ind]
            tok_offset, char_offset = (sample["tok_offset"], sample["char_offset"]) if "char_offset" in sample else (0, 0)
            ent_list = handshaking_tagger.decode_ent(text, pred_shaking_tag, tok2char_span, tok_offset = tok_offset, char_offset = char_offset)
            pred_sample_list.append({
                "text": text,
                "id": text_id,
                "entity_list": ent_list,
            })

    # merge
    text_id2ent_list = {}
    for sample in pred_sample_list:
        text_id = sample["id"]
        if text_id not in text_id2ent_list:
            text_id2ent_list[text_id] = sample["entity_list"]
        else:
            text_id2ent_list[text_id].extend(sample["entity_list"])

    text_id2text = {sample["id"]:sample["text"] for sample in ori_test_data}
    merged_pred_sample_list = []
    for text_id, ent_list in text_id2ent_list.items():
        merged_pred_sample_list.append({
            "id": text_id,
            "text": text_id2text[text_id],
            "entity_list": filter_duplicates(ent_list),
        })
    return merged_pred_sample_list

def get_test_prf(pred_sample_list, gold_test_data, pattern = "only_head"):
    text_id2gold_n_pred = {}
    for sample in gold_test_data:
        text_id = sample["id"]
        text_id2gold_n_pred[text_id] = {
            "gold_entity_list": sample["entity_list"],
        }
    for sample in pred_sample_list:
        text_id = sample["id"]
        text_id2gold_n_pred[text_id]["pred_entity_list"] = sample["entity_list"]

    correct_num, pred_num, gold_num = 0, 0, 0
    for gold_n_pred in text_id2gold_n_pred.values():
        gold_ent_list = gold_n_pred["gold_entity_list"]
        pred_ent_list = gold_n_pred["pred_entity_list"] if "pred_entity_list" in gold_n_pred else []
        if pattern == "only_head_index":
            gold_ent_set = set(["{}\u2E80{}".format(ent["char_span"][0], ent["type"]) for ent in gold_ent_list])
            pred_ent_set = set(["{}\u2E80{}".format(ent["char_span"][0], ent["type"]) for ent in pred_ent_list])
        elif pattern == "whole_span":
            gold_ent_set = set(["{}\u2E80{}\u2E80{}".format(ent["char_span"][0], ent["char_span"][1], ent["type"]) for ent in gold_ent_list])
            pred_ent_set = set(["{}\u2E80{}\u2E80{}".format(ent["char_span"][0], ent["char_span"][1], ent["type"]) for ent in pred_ent_list])
        elif pattern == "whole_text":
            gold_ent_set = set(["{}\u2E80{}".format(ent["text"], ent["type"]) for ent in gold_ent_list])
            pred_ent_set = set(["{}\u2E80{}".format(ent["text"], ent["type"]) for ent in pred_ent_list])

        for ent_str in pred_ent_set:
            if ent_str in gold_ent_set:
                correct_num += 1

        pred_num += len(pred_ent_set)
        gold_num += len(gold_ent_set)
    # print((correct_num, pred_num, gold_num))
    prf = metrics.get_scores(correct_num, pred_num, gold_num)
    return prf

# predict
res_dict = {}
predict_statistics = {}
for file_name, short_data in test_data_dict.items():
    ori_test_data = ori_test_data_dict[file_name]
    indexed_test_data = data_maker.get_indexed_data(short_data, data_type = "test")
    test_dataloader = DataLoader(MyDataset(indexed_test_data),
                                 batch_size = batch_size,
                                 shuffle = False,
                                 num_workers = 6,
                                 drop_last = False,
                                 collate_fn = lambda data_batch: data_maker.generate_batch(data_batch, data_type = "test"),
                                )

    # iter all model state dicts
    for run_id, model_path_list in run_id2model_state_paths.items():
        save_dir4run = os.path.join(save_res_dir, run_id)
        if config["save_res"] and not os.path.exists(save_dir4run):
            os.makedirs(save_dir4run)

        for model_state_path in model_path_list:
            res_num = re.search("(\d+)", model_state_path.split("/")[-1]).group(1)
            save_path = os.path.join(save_dir4run, "{}_res_{}.json".format(file_name, res_num))

            if os.path.exists(save_path):
                pred_sample_list = [json.loads(line) for line in open(save_path, "r", encoding = "utf-8")]
                print("{} already exists, load it directly!".format(save_path))
            else:
                # load model state
                model_state_dict = torch.load(model_state_path)

                # if parallel training was used, need to rm prefix "module."
                new_model_state_dict = OrderedDict()
                for key, v in model_state_dict.items():
                    key = re.sub("module\.", "", key)
                    new_model_state_dict[key] = v
                ent_extractor.load_state_dict(new_model_state_dict)
                ent_extractor.eval()
                print("run_id: {}, model state {} loaded".format(run_id, model_state_path.split("/")[-1]))

                # predict
                pred_sample_list = predict(test_dataloader, ori_test_data)

            res_dict[save_path] = pred_sample_list
            predict_statistics[save_path] = len([s for s in pred_sample_list if len(s["entity_list"]) > 0])
pprint(predict_statistics)

# score
if config["score"]:
    filepath2scores = {}
    for file_path, pred_samples in res_dict.items():
        file_name = re.match("(.*?)_res_\d+.json", file_path.split("/")[-1]).group(1)
        gold_test_data = ori_test_data_dict[file_name]
        prf = get_test_prf(pred_samples, gold_test_data, pattern = config["correct"])
        filepath2scores[file_path] = prf
    print("---------------- Results -----------------------")
    pprint(filepath2scores)

# check char span
for path, res in res_dict.items():
    for sample in tqdm(res, "check character level span"):
        text = sample["text"]
        for ent in sample["entity_list"]:
            assert ent["text"] == text[ent["char_span"][0]:ent["char_span"][1]]

# save
if config["save_res"]:
    for path, res in res_dict.items():
        with open(path, "w", encoding = "utf-8") as file_out:
            for sample in tqdm(res, desc = "Output"):
                if len(sample["entity_list"]) == 0:
                    continue
                json_line = json.dumps(sample, ensure_ascii = False)
                file_out.write("{}\n".format(json_line))
###Output
_____no_output_____
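###Markdown Added note, not from the original notebook: `get_test_prf` above hands its three counts to `metrics.get_scores`, whose implementation is not shown here. A minimal sketch of the standard micro-averaged precision/recall/F1 one would expect it to compute from those counts (an assumption about the `Metrics` class, not its actual source):
###Code
def prf_from_counts(correct_num, pred_num, gold_num, eps=1e-12):
    # Standard micro-averaged scores; eps guards against division by zero.
    precision = correct_num / (pred_num + eps)
    recall = correct_num / (gold_num + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    return precision, recall, f1
###Output
_____no_output_____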
Sampling_effort_by_MPA.ipynb
###Markdown Sampling effort by MPA
Here I've tried to capture the number of times target MPAs have been sampled through time, and what the sample size was, across projects and data types.

**Resources:**
https://docs.google.com/spreadsheets/d/1SIb_n9VoAS-GFKKpfBRCVKRseAXQT-4_dgC_rsWK-84/edit#gid=0
###Code
## Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
import sys
sys.executable
# os.getcwd()

## CCFRP
ccfrp = pd.read_csv('CCFRP\\CCFRP_derived_effort_table.csv')
print(ccfrp.shape)
ccfrp

# Load site table
ccfrp_site = pd.read_csv('CCFRP\\CCFRP_location_table.csv')
print(ccfrp_site.shape)
ccfrp_site

# ----- Calculate the number of years each MPA was sampled
ccfrp_years_per_MPA = ccfrp.groupby(['Area', 'MPA_Status'], as_index=False)['Year'].nunique()
# Sort
ccfrp_years_per_MPA.sort_values(['Area', 'MPA_Status'], inplace=True)
# Map area names to MPA names
names = pd.DataFrame({'Area':ccfrp['Area'], 'CA_MPA_name_short':ccfrp['CA_MPA_name_short']})
names.dropna(inplace=True)
name_dict = dict(zip(names['Area'], names['CA_MPA_name_short']))
# Replace
ccfrp_years_per_MPA['Area'].replace(name_dict, inplace=True)
# Save
ccfrp_years_per_MPA.to_csv('CCFRP\\ccfrp_years_per_MPA.csv', index=False)

# ----- Calculate the number of grid cells sampled in each MPA by year
ccfrp_grid_cells_per_year = ccfrp.groupby(['Area', 'MPA_Status', 'Year'], as_index=False)['ID_Cell_per_Trip'].nunique()
# Find min and max
ccfrp_grid_cells_per_year = ccfrp_grid_cells_per_year.groupby(['Area', 'MPA_Status'], as_index=False).agg({'ID_Cell_per_Trip':[min, max]})
ccfrp_grid_cells_per_year.columns = ['Area', 'MPA_Status', 'Min_samples', 'Max_samples']
# Sort
ccfrp_grid_cells_per_year.sort_values(['Area', 'MPA_Status'], inplace=True)
# Replace
ccfrp_grid_cells_per_year['Area'].replace(name_dict, inplace=True)
# Save
ccfrp_grid_cells_per_year.to_csv('CCFRP\\ccfrp_grid_cells_per_year.csv', index=False)

ccfrp_grid_cells_per_year = ccfrp.groupby(['Area', 'MPA_Status', 'Year'], as_index=False)['ID_Cell_per_Trip'].nunique()
ccfrp_grid_cells_per_year

test = ccfrp_grid_cells_per_year.groupby(['Area', 'MPA_Status'], as_index=False).agg({'ID_Cell_per_Trip':[min, max]})
test
test.columns = ['a', 'b', 'c', 'd']
test

test = pd.DataFrame({'Area':ccfrp['Area'], 'CA_MPA_name_short':ccfrp['CA_MPA_name_short']})
test.drop_duplicates(inplace=True)
test.dropna(inplace=True)
test.loc[test['Area'] == 'Anacapa Island', 'CA_MPA_name_short'] = 'Anacapa Island SMR or Anacapa Island SMCA'
test.drop_duplicates(inplace=True)

test = pd.DataFrame({'Area':ccfrp['Area'], 'CA_MPA_name_short':ccfrp['CA_MPA_name_short']})
test.dropna(inplace=True)
dict(zip(test['Area'], test['CA_MPA_name_short']))
###Output
_____no_output_____
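###Markdown Added note (illustration only, not part of the original analysis): `agg({'ID_Cell_per_Trip': [min, max]})` above returns a two-level column index, which is why the columns are flattened by direct assignment. A toy example of the same pattern on a made-up frame:
###Code
# Toy frame standing in for the CCFRP effort table.
toy = pd.DataFrame({'Area': ['A', 'A', 'B', 'B'],
                    'Year': [2019, 2020, 2019, 2020],
                    'Cells': [3, 5, 2, 4]})
toy_agg = toy.groupby('Area', as_index=False).agg({'Cells': [min, max]})
toy_agg.columns = ['Area', 'Min_cells', 'Max_cells']  # flatten the MultiIndex
###Output
_____no_output_____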
tutorial/06 - Aerodynamics/03 - Interfaces to External Aerodynamics Tools/02 - XFoil.ipynb
###Markdown XFOIL Overview
XFOIL is a design and analysis tool for subsonic airfoils developed by Mark Drela at MIT. The [XFOIL website](https://web.mit.edu/drela/Public/web/xfoil/) contains more info.
Setup
As with the previous AVL tutorial, a copy of the XFOIL executable must be somewhere on your computer in order to use it with AeroSandbox. Download a copy of the executable from the [XFOIL website](https://web.mit.edu/drela/Public/web/xfoil/) for your operating system. Place it anywhere on your computer, and remember the filepath to the executable.
Running XFOIL from AeroSandbox
First, we'll do some imports. We'll also do some stuff that is only necessary for this tutorial to run correctly in a browser - ignore this following code block. (Basically, it's to make unit testing of tutorials happy.)
###Code
import aerosandbox as asb
import aerosandbox.numpy as np

from shutil import which
xfoil_is_present = which('xfoil') is not None
###Output
_____no_output_____
###Markdown Next, we'll define an airfoil to analyze:
###Code
airfoil = asb.Airfoil("dae51")  # Geometry will be automatically pulled from UIUC database (local).
###Output
_____no_output_____
###Markdown And draw it:
###Code
from aerosandbox.tools.pretty_plots import plt, show_plot, set_ticks  # sets some nice defaults

fig, ax = plt.subplots()
airfoil.draw(show=False)
set_ticks(0.1, 0.05, 0.1, 0.05)
show_plot()
###Output
_____no_output_____
###Markdown Now, let's analyze it:
###Code
if xfoil_is_present:  # Ignore this; just for tutorial purposes.

    analysis = asb.XFoil(
        airfoil=airfoil,
        Re=3e5,
        xfoil_command="xfoil",  # If XFOIL is not on your PATH, then set xfoil_command to the filepath to your XFOIL executable.
    )

    point_analysis = analysis.alpha(
        alpha=3
    )
    from pprint import pprint
    print("\nPoint analysis:")
    pprint(point_analysis)

    sweep_analysis = analysis.alpha(
        alpha=np.linspace(0, 15, 6)
    )
    print("\nSweep analysis:")
    pprint(sweep_analysis)

    cl_analysis = analysis.cl(
        cl=1.2
    )
    print("\nFixed-CL analysis:")
    pprint(cl_analysis)
###Output

Point analysis:
{'Bot_Xtr': array([1.]),
 'CD': array([0.00873]),
 'CDp': array([0.00318]),
 'CL': array([0.8065]),
 'CM': array([-0.1026]),
 'Top_Xtr': array([0.6764]),
 'alpha': array([3.])}

Sweep analysis:
{'Bot_Xtr': array([1., 1., 1., 1., 1., 1.]),
 'CD': array([0.008  , 0.00873, 0.01112, 0.02211, 0.04114, 0.08114]),
 'CDp': array([0.00319, 0.00318, 0.00522, 0.01426, 0.03433, 0.07667]),
 'CL': array([0.4719, 0.8065, 1.1205, 1.3277, 1.4138, 1.3851]),
 'CM': array([-0.1039, -0.1026, -0.0987, -0.0819, -0.0568, -0.0459]),
 'Top_Xtr': array([0.7891, 0.6764, 0.4618, 0.0438, 0.0213, 0.017 ]),
 'alpha': array([ 0.,  3.,  6.,  9., 12., 15.])}

Fixed-CL analysis:
{'Bot_Xtr': array([1.]),
 'CD': array([0.01303]),
 'CDp': array([0.00666]),
 'CL': array([1.2]),
 'CM': array([-0.0957]),
 'Top_Xtr': array([0.3296]),
 'alpha': array([6.902])}
###Markdown We can use this to plot polars:
###Code
if xfoil_is_present:  # Ignore this; just for tutorial purposes.

    fig, ax = plt.subplots(2, 2, figsize=(8, 8))

    Re = 250e3
    alpha_inputs = np.linspace(-15, 15, 150)

    xf_run = asb.XFoil(airfoil, Re=Re, max_iter=20, timeout=None).alpha(alpha_inputs)
    xa = xf_run["alpha"]
    xCL = xf_run["CL"]
    xCD = xf_run["CD"]

    plt.sca(ax[0, 0])
    plt.plot(xa, xCL, ".-")
    plt.xlabel(r"Angle of Attack $\alpha$ [deg]")
    plt.ylabel(r"Lift Coefficient $C_L$ [-]")

    plt.sca(ax[0, 1])
    plt.plot(xa, xCD, ".-")
    plt.xlabel(r"Angle of Attack $\alpha$ [deg]")
    plt.ylabel(r"Drag Coefficient $C_D$ [-]")
    plt.ylim(0, 0.05)

    plt.sca(ax[1, 0])
    plt.plot(xCD, xCL, ".-")
    plt.xlabel(r"Drag Coefficient $C_D$ [-]")
    plt.ylabel(r"Lift Coefficient $C_L$ [-]")
    plt.xlim(0, 0.05)

    plt.sca(ax[1, 1])
    plt.plot(xa, xCL / xCD, ".-")
    plt.xlabel(r"Angle of Attack $\alpha$ [deg]")
    plt.ylabel(r"Aerodynamic Efficiency $C_L / C_D$ [-]")

    from aerosandbox.tools.string_formatting import eng_string
    show_plot(f"Aerodynamic Performance of Airfoil '{airfoil.name}' at $\\mathrm{{Re}}={eng_string(Re)}$, from XFoil")
###Output
_____no_output_____
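###Markdown Added aside, not part of the tutorial: a minimal sketch showing how the best lift-to-drag point can be pulled straight from a sweep, using the same dictionary keys (`"alpha"`, `"CL"`, `"CD"`) that appear in the sweep output above.
###Code
if xfoil_is_present:
    # Re-run a coarse sweep and find the angle of attack with maximum L/D.
    sweep = asb.XFoil(airfoil, Re=3e5).alpha(np.linspace(0, 15, 6))
    LD = sweep["CL"] / sweep["CD"]        # lift-to-drag ratio at each point
    i_best = np.argmax(LD)                # index of the best L/D
    alpha_best = sweep["alpha"][i_best]   # angle of attack at max L/D
###Output
_____no_output_____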
Plot_tool/散布図.ipynb
###Markdown Scatter plot
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
from matplotlib import font_manager
font_manager._rebuild()
# os.getcwd()
counter = 0

df = pd.read_csv("csv/high_voltage.csv", encoding="UTF-8")
df.head()

x1 = df["d"]
y1 = df["vk"]
x2 = df["pin_d"]
y2 = df["vc"]
x3 = df["plate_d"]
y3 = df["plate_v"]
# ycos = df["cosx"]

# plt.rcParams["font.family"] = "Source Han Code JP"
# plt.rcParams['font.family'] ='sans-serif', "IPAexGothic"  # font to use
plt.rcParams["font.sans-serif"] = "IPAexGothic"
plt.rcParams['xtick.direction'] = 'in'  # x-axis tick marks point inward ('in'), outward ('out'), or both ('inout')
plt.rcParams['ytick.direction'] = 'in'  # y-axis tick marks point inward ('in'), outward ('out'), or both ('inout')
plt.rcParams['xtick.major.width'] = 1.0  # line width of the x-axis major tick marks
plt.rcParams['ytick.major.width'] = 1.0  # line width of the y-axis major tick marks
plt.rcParams['font.size'] = 12  # font size
plt.rcParams['axes.linewidth'] = 1.0  # axis edge linewidth (thickness of the frame)
plt.figure(figsize=(5.5,5))

# axis ticks
plt.gca().yaxis.set_tick_params(which='both', direction='in',bottom=True, top=False, left=True, right=False)
# plt.gca().yaxis.set_major_formatter(plt.FormatStrFormatter('%.3f'))  # show 3 decimal places on the y-axis
# plt.gca().xaxis.get_major_formatter().set_useOffset(False)  # display axis numbers without an offset (e.g. +1.05e9)
# plt.rcParams["xtick.labelsize"] = 12
# plt.rcParams["ytick.labelsize"] = 12
# plt.rcParams["legend.fontsize"] = 12

# plt.plot(x,y2,"-",label="Theoretical value")
plt.scatter(x1,y1,label="Charging voltage (theoretical)",marker=".")
plt.scatter(x1,y2,label="Charging voltage (measured)" ,marker=".")
# plt.scatter(x3,y3,label="Plate-plate electrodes" ,marker=".")
plt.plot(x1,y1,"-",linestyle="dotted")
plt.plot(x1,y2,"-",linestyle="dotted")
# plt.plot(x3,y3,"-",linestyle="dotted")
# plt.plot(x1,y2,"-",linestyle="dotted")
# plt.plot(x3,y3,"-",label="Plate-plate electrodes")
# plt.plot(x1,y2,"-",label="Corrected discharge voltage Vk",linestyle='dashed')
# plt.plot(x3,y3,"-",label="m=12,ΔT=0.0025",linestyle='dashdot')
# plt.plot(x,y3,"s-",label="3 term")
# plt.plot(x,ycos,"+-",label="cos x")
plt.xlabel("Gap length [mm]")
plt.ylabel("Charging voltage [kV]")
# plt.xlim(0,1)
# plt.ylabel("angle[rad]")
plt.legend()
plt.tight_layout()  # keeps the plot inside the configured figure size without overlaps

# plt.savefig('figname.pdf', transparent=True)
# plt.savefig('cos_taylor.png', transparent=True, dpi=300)
plt.savefig('output_' + str(counter) + '.png',dpi=300)
print("output_" + str(counter) + ".png")
counter += 1
###Output
output_0.png
###Markdown Dual-axis plot
###Code
plt.rcParams['font.family'] ='sans-serif'  # font to use
plt.rcParams['xtick.direction'] = 'in'  # x-axis tick marks point inward ('in'), outward ('out'), or both ('inout')
plt.rcParams['ytick.direction'] = 'in'  # y-axis tick marks point inward ('in'), outward ('out'), or both ('inout')
plt.rcParams['xtick.major.width'] = 1.0  # line width of the x-axis major tick marks
plt.rcParams['ytick.major.width'] = 1.0  # line width of the y-axis major tick marks
plt.rcParams['font.size'] = 12  # font size
plt.rcParams['axes.linewidth'] = 1.0  # axis edge linewidth (thickness of the frame)
fig = plt.figure(figsize=(5.8,5.5))
# fig = plt.figure(figsize=(5.8,5))

# axis ticks
plt.gca().yaxis.set_tick_params(which='both', direction='in',bottom=True, top=False, left=True, right=False)

# first axis
ax1 = fig.add_subplot(111)
ln1 = ax1.plot(x,y1,"-",label="Theta Euler method")

# second axis
ax2 = ax1.twinx()
ln2 = ax2.plot(x,y2,"-",label="Omega Euler method",color="orange")

# legend
h1, l1 = ax1.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
ax1.legend(h1+h2, l1+l2,bbox_to_anchor=(0, 1), loc='lower left')
# ax1.legend(h1+h2, l1+l2)

# axis labels
ax1.set_xlabel("time")
ax1.set_ylabel("angle[rad]")
ax2.set_ylabel("angular velocity[rad/s]")

plt.tight_layout()  # keeps the plot inside the configured figure size without overlaps
plt.savefig('output_' + str(counter) + '.png',dpi=300)
print("output_" + str(counter) + ".png")
counter += 1
###Output
/home/hiroya/Documents/Jupyter-Notebook/.venv/lib/python3.6/site-packages/ipykernel_launcher.py:15: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance.  In a future version, a new instance will always be created and returned.  Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
  from ipykernel import kernelapp as app
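###Markdown Added aside (a sketch only, not part of the original notebook): the save-and-increment pattern that ends both cells above could be factored into a small helper, so the counter logic lives in one place.
###Code
def save_numbered(prefix="output", dpi=300):
    # Hypothetical helper wrapping the plt.savefig / print / counter pattern used above.
    global counter
    filename = "{}_{}.png".format(prefix, counter)
    plt.savefig(filename, dpi=dpi)
    print(filename)
    counter += 1
###Output
_____no_output_____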
Chapter 7 - Ensemble Learning and Random Forests .ipynb
###Markdown Bagging: a method where sampling with replacement is used to train the same (or different) predictors on subsets of the data, and an aggregate of their predictions (mode for classification, average for regression) is used
###Code
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier

bag_clf = BaggingClassifier(
    DecisionTreeClassifier(), n_estimators=500,
    max_samples=100, bootstrap=True, n_jobs=-1)
# n_jobs tells sklearn how many CPU cores to use, -1 being all cores
bag_clf.fit(X_train, y_train)
y_pred = bag_clf.predict(X_test)

from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, y_pred))

tree_clf = DecisionTreeClassifier(random_state=42)
tree_clf.fit(X_train, y_train)
y_pred_tree = tree_clf.predict(X_test)
print(accuracy_score(y_test, y_pred_tree))

import numpy as np
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt

mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)

from matplotlib.colors import ListedColormap

def plot_decision_boundary(clf, X, y, axes=[-1.5, 2.45, -1, 1.5], alpha=0.5, contour=True):
    x1s = np.linspace(axes[0], axes[1], 100)
    x2s = np.linspace(axes[2], axes[3], 100)
    x1, x2 = np.meshgrid(x1s, x2s)
    X_new = np.c_[x1.ravel(), x2.ravel()]
    y_pred = clf.predict(X_new).reshape(x1.shape)
    custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])
    plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap)
    if contour:
        custom_cmap2 = ListedColormap(['#7d7d58','#4c4c7f','#507d50'])
        plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)
    plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo", alpha=alpha)
    plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs", alpha=alpha)
    plt.axis(axes)
    plt.xlabel(r"$x_1$", fontsize=18)
    plt.ylabel(r"$x_2$", fontsize=18, rotation=0)

fig, axes = plt.subplots(ncols=2, figsize=(10,4), sharey=True)
plt.sca(axes[0])
plot_decision_boundary(tree_clf, X, y)
plt.title("Decision Tree", fontsize=14)
plt.sca(axes[1])
plot_decision_boundary(bag_clf, X, y)
plt.title("Decision Trees with Bagging", fontsize=14)
plt.ylabel("")
plt.show()
###Output
_____no_output_____
###Markdown Out of Bag (OOB) evaluation
###Code
bag_clf = BaggingClassifier(
    DecisionTreeClassifier(), n_estimators=500,
    bootstrap=True, n_jobs=-1, oob_score=True)
# oob_score=True ensures the availability of the OOB score
bag_clf.fit(X_train, y_train)

# this is the score achieved on the validation set that was created due to sampling with replacement
bag_clf.oob_score_

y_pred = bag_clf.predict(X_test)
accuracy_score(y_test, y_pred)

# to check the oob evaluation of each instance
bag_clf.oob_decision_function_
###Output
_____no_output_____
###Markdown Random Forest
###Code
from sklearn.ensemble import RandomForestClassifier

rnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, n_jobs=-1)
rnd_clf.fit(X_train, y_train)
y_pred_rf = rnd_clf.predict(X_test)
accuracy_score(y_test, y_pred_rf)

# A similar setup with bagging would look like this
bag_clf = BaggingClassifier(
    DecisionTreeClassifier(splitter='random', max_leaf_nodes=16),
    n_estimators=500, max_samples=1.0, bootstrap=True, n_jobs=-1)
bag_clf.fit(X_train, y_train)
y_pred = bag_clf.predict(X_test)
accuracy_score(y_test, y_pred)

# Extra-Trees are also a good option with randomized thresholds and may perform better than RandomForest at times (see the sketch at the end of this notebook)
###Output
_____no_output_____
###Markdown Feature Importance
###Code
from sklearn.datasets import load_iris
iris = load_iris()
rnd_clf = RandomForestClassifier(n_estimators=500, n_jobs=-1)
rnd_clf.fit(iris['data'], iris['target'])
for name, score in zip(iris['feature_names'], rnd_clf.feature_importances_):
    print(name, score)
###Output
sepal length (cm) 0.10496305024303643
sepal width (cm) 0.021002451182639954
petal length (cm) 0.41921921153180375
petal width (cm) 0.45481528704251983
###Markdown AdaBoost
###Code
from sklearn.ensemble import AdaBoostClassifier

ada_clf = AdaBoostClassifier(
    DecisionTreeClassifier(max_depth=1), n_estimators=200,
    algorithm='SAMME.R', learning_rate=0.5)
# SAMME is sklearn's multiclass version of AdaBoost; SAMME.R additionally uses predicted class probabilities
ada_clf.fit(X_train, y_train)
y_ada_pred = ada_clf.predict(X_test)
accuracy_score(y_test, y_ada_pred)
###Output
_____no_output_____
###Markdown Gradient Boosting
###Code
# Let's create a basic quadratic dataset with some noise
np.random.seed(42)
X = np.random.rand(100, 1) - 0.5
y = 3*X[:, 0]**2 + 0.05 * np.random.randn(100)

from sklearn.tree import DecisionTreeRegressor

tree_reg1 = DecisionTreeRegressor(max_depth=2)
tree_reg1.fit(X, y)
y

# Let's train a second regressor on the residual errors
y2 = y - tree_reg1.predict(X)
tree_reg2 = DecisionTreeRegressor(max_depth=2)
tree_reg2.fit(X, y2)
y2

# Let's train a third predictor the same way
y3 = y2 - tree_reg2.predict(X)
tree_reg3 = DecisionTreeRegressor(max_depth=2)
tree_reg3.fit(X, y3)
y3

X_new = np.array([[0.8]])

# Now let's add up the three trees to make predictions
y_pred = sum(tree.predict(X_new) for tree in (tree_reg1, tree_reg2, tree_reg3))
y_pred

from sklearn.ensemble import GradientBoostingRegressor

gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=3, learning_rate=1.0)
gbrt.fit(X, y)
y_gbrt_pred = gbrt.predict(X_new)
y_gbrt_pred

# Let's plot how these two models fit the training data at each stage
def plot_predictions(regressors, X, y, axes, label=None, style="r-", data_style="b.", data_label=None):
    x1 = np.linspace(axes[0], axes[1], 500)
    y_pred = sum(regressor.predict(x1.reshape(-1, 1)) for regressor in regressors)
    plt.plot(X[:, 0], y, data_style, label=data_label)
    plt.plot(x1, y_pred, style, linewidth=2, label=label)
    if label or data_label:
        plt.legend(loc="upper center", fontsize=16)
    plt.axis(axes)

plt.figure(figsize=(15,15))

plt.subplot(321)
plot_predictions([tree_reg1], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h_1(x_1)$", style="g-", data_label="Training set")
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.title("Residuals and tree predictions", fontsize=16)

plt.subplot(322)
plot_predictions([tree_reg1], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h(x_1) = h_1(x_1)$", data_label="Training set")
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.title("Ensemble predictions", fontsize=16)

plt.subplot(323)
plot_predictions([tree_reg2], X, y2, axes=[-0.5, 0.5, -0.5, 0.5], label="$h_2(x_1)$", style="g-", data_style="k+", data_label="Residuals")
plt.ylabel("$y - h_1(x_1)$", fontsize=16)

plt.subplot(324)
plot_predictions([tree_reg1, tree_reg2], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h(x_1) = h_1(x_1) + h_2(x_1)$")
plt.ylabel("$y$", fontsize=16, rotation=0)

plt.subplot(325)
plot_predictions([tree_reg3], X, y3, axes=[-0.5, 0.5, -0.5, 0.5], label="$h_3(x_1)$", style="g-", data_style="k+")
plt.ylabel("$y - h_1(x_1) - h_2(x_1)$", fontsize=16)
plt.xlabel("$x_1$", fontsize=16)

plt.subplot(326)
plot_predictions([tree_reg1, tree_reg2, tree_reg3], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h(x_1) = h_1(x_1) + h_2(x_1) + h_3(x_1)$")
plt.xlabel("$x_1$", fontsize=16)
plt.ylabel("$y$", fontsize=16, rotation=0)

plt.show()
###Output
_____no_output_____
###Markdown Example of how we can use early stopping to find the optimal number of trees to use so that we don't overfit the model
###Code
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

X_train, X_val, y_train, y_val = train_test_split(X, y)

gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=120)
gbrt.fit(X_train, y_train)

errors = [mean_squared_error(y_val, y_pred) for y_pred in gbrt.staged_predict(X_val)]
# staged_predict returns an iterator over the predictions made by the ensemble at each stage (tree) of the training
bst_n_estimators = np.argmin(errors) + 1

gbrt_best = GradientBoostingRegressor(max_depth=2, n_estimators=bst_n_estimators)
gbrt_best.fit(X_train, y_train)

# Let's visualize the validation error and the best model's fit
min_error = np.min(errors)

plt.figure(figsize=(10, 4))

plt.subplot(121)
plt.plot(errors, "b.-")
plt.plot([bst_n_estimators, bst_n_estimators], [0, min_error], "k--")
plt.plot([0, 120], [min_error, min_error], "k--")
plt.plot(bst_n_estimators, min_error, "ko")
plt.text(bst_n_estimators, min_error*1.2, "Minimum", ha="center", fontsize=14)
plt.axis([0, 120, 0, 0.01])
plt.xlabel("Number of trees")
plt.ylabel("Error", fontsize=16)
plt.title("Validation error", fontsize=14)

plt.subplot(122)
plot_predictions([gbrt_best], X, y, axes=[-0.5, 0.5, -0.1, 0.8])
plt.title("Best model (%d trees)" % bst_n_estimators, fontsize=14)
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.xlabel("$x_1$", fontsize=16)

plt.show()

# Let's train another time using early stopping, stopping automatically once we find the minimum error
gbrt = GradientBoostingRegressor(max_depth=2, warm_start=True)

min_val_error = float('inf')
error_going_up = 0
for n_estimators in range(1, 120):
    gbrt.n_estimators = n_estimators
    gbrt.fit(X_train, y_train)
    y_pred = gbrt.predict(X_val)
    val_error = mean_squared_error(y_val, y_pred)
    if val_error < min_val_error:
        min_val_error = val_error
        error_going_up = 0
    else:
        error_going_up += 1
        if error_going_up == 5:
            break  # early stopping

gbrt
###Output
_____no_output_____
###Markdown XGBoost
###Code
import xgboost

xgb_reg = xgboost.XGBRegressor()
xgb_reg.fit(X_train, y_train)
y_xgb_pred = xgb_reg.predict(X_val)
y_xgb_pred
y_val

# Early stopping is easy with XGBoost
xgb_reg.fit(X_train, y_train,
            eval_set=[(X_val, y_val)], early_stopping_rounds=2)
y_pred = xgb_reg.predict(X_val)
y_pred
###Output
[0]	validation_0-rmse:0.22709
[1]	validation_0-rmse:0.17088
[2]	validation_0-rmse:0.13266
[3]	validation_0-rmse:0.10603
[4]	validation_0-rmse:0.08793
[5]	validation_0-rmse:0.07539
[6]	validation_0-rmse:0.06511
[7]	validation_0-rmse:0.05926
[8]	validation_0-rmse:0.05582
[9]	validation_0-rmse:0.05352
[10]	validation_0-rmse:0.05235
[11]	validation_0-rmse:0.05179
[12]	validation_0-rmse:0.05150
[13]	validation_0-rmse:0.05110
[14]	validation_0-rmse:0.05083
[15]	validation_0-rmse:0.05115
[16]	validation_0-rmse:0.05068
[17]	validation_0-rmse:0.05079
[18]	validation_0-rmse:0.05063
[19]	validation_0-rmse:0.05078
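###Markdown Added illustration: the bagging section above mentions Extra-Trees as an alternative to Random Forests but shows no code. A minimal sketch of the equivalent classifier (same hyperparameters as the RandomForestClassifier above; this is not from the book's own notebook):
###Code
from sklearn.ensemble import ExtraTreesClassifier

# Extra-Trees split on random thresholds instead of searching for the best
# threshold per feature, trading a bit more bias for lower variance and
# faster training.
ext_clf = ExtraTreesClassifier(n_estimators=500, max_leaf_nodes=16, n_jobs=-1)
###Output
_____no_output_____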
NLP/Learn_by_deeplearning.ai/Course 1 - Classification and Vector Spaces/Labs/Week 1/C1-W1-L2-Building and Visualizing word frequencies.ipynb
###Markdown Building and Visualizing word frequencies
In this lab, we will focus on the `build_freqs()` helper function and visualizing a dataset fed into it. In our goal of tweet sentiment analysis, this function will build a dictionary where we can look up how many times a word appears in the lists of positive or negative tweets. This will be very helpful when extracting the features of the dataset in the week's programming assignment. Let's see how this function is implemented under the hood in this notebook.
Setup
Let's import the required libraries for this lab:
###Code
import nltk
from nltk.corpus import twitter_samples
import matplotlib.pyplot as plt
import numpy as np

%matplotlib inline
%config InlineBackend.figure_format='svg'
###Output
_____no_output_____
###Markdown Import some helper functions that we provided in the utils.py file:
* `process_tweet()`: Cleans the text, tokenizes it into separate words, removes stopwords, and converts words to stems.
* `build_freqs()`: This counts how often a word in the 'corpus' (the entire set of tweets) was associated with a positive label `1` or a negative label `0`. It then builds the `freqs` dictionary, where each key is a `(word, label)` tuple, and the value is the count of its frequency within the corpus of tweets.
###Code
# download the stopwords for the process_tweet function
nltk.download('stopwords')

# import our convenience functions
from utils import process_tweet
###Output
[nltk_data] Error loading stopwords: <urlopen error [SSL:
[nltk_data]     CERTIFICATE_VERIFY_FAILED] certificate verify failed:
[nltk_data]     unable to get local issuer certificate (_ssl.c:1076)>
###Markdown Load the NLTK sample dataset
As in the previous lab, we will be using the [Twitter dataset from NLTK](http://www.nltk.org/howto/twitter.html#Using-a-Tweet-Corpus).
###Code
# select the lists of positive and negative tweets
all_positive_tweets = twitter_samples.strings('positive_tweets.json')
all_negative_tweets = twitter_samples.strings('negative_tweets.json')

# concatenate the lists, 1st part is the positive tweets followed by the negative
tweets = all_positive_tweets + all_negative_tweets

# let's see how many tweets we have
print("Number of tweets: ", len(tweets))
###Output
Number of tweets:  10000
###Markdown Next, we will build a labels array that matches the sentiments of our tweets. This data type works pretty much like a regular list but is optimized for computations and manipulation. The `labels` array will be composed of 10000 elements. The first 5000 will be filled with `1` labels denoting positive sentiments, and the next 5000 will be `0` labels denoting the opposite. We can do this easily with a series of operations provided by the `numpy` library:
* `np.ones()` - create an array of 1's
* `np.zeros()` - create an array of 0's
* `np.append()` - concatenate arrays
###Code
labels = np.append(np.ones((len(all_positive_tweets))), np.zeros((len(all_negative_tweets))))
###Output
_____no_output_____
###Markdown Dictionaries
In Python, a dictionary is a mutable and indexed collection. It stores items as key-value pairs and uses [hash tables](https://en.wikipedia.org/wiki/Hash_table) underneath to allow practically constant time lookups. In NLP, dictionaries are essential because they enable fast retrieval of items or containment checks even with thousands of entries in the collection.
Word frequency dictionary
Now that we know the building blocks, let's finally take a look at the **build_freqs()** function in **utils.py**.
This is the function that creates the dictionary containing the word counts from each corpus. ###Code def build_freqs(tweets,ys): yslist = np.squeeze(ys).tolist() freqs={} for y,tweet in zip(yslist,tweets): for word in process_tweet(tweet): pair=(word,y) if pair in freqs: freqs[pair]+=1 else: freqs[pair]=1 return freqs ###Output _____no_output_____ ###Markdown Now, it is time to use the dictionary returned by the `build_freqs()` function. First, let us feed our `tweets` and `labels` lists then print a basic report: ###Code # create frequency dictionary freqs = build_freqs(tweets, labels) # check data type print(f'type(freqs) = {type(freqs)}') # check length of the dictionary print(f'len(freqs) = {len(freqs)}') # Now print the frequency of each word depending on its class. print(freqs) ###Output {('followfriday', 1.0): 25, ('top', 1.0): 32, ('engag', 1.0): 7, ('member', 1.0): 16, ('commun', 1.0): 33, ('week', 1.0): 83, (':)', 1.0): 3568, ('hey', 1.0): 76, ('jame', 1.0): 7, ('odd', 1.0): 2, (':/', 1.0): 5, ('pleas', 1.0): 97, ('call', 1.0): 37, ('contact', 1.0): 7, ('centr', 1.0): 2, ('02392441234', 1.0): 1, ('abl', 1.0): 8, ('assist', 1.0): 1, ('mani', 1.0): 33, ('thank', 1.0): 620, ('listen', 1.0): 16, ('last', 1.0): 47, ('night', 1.0): 68, ('bleed', 1.0): 2, ('amaz', 1.0): 51, ('track', 1.0): 5, ('scotland', 1.0): 2, ('congrat', 1.0): 21, ('yeaaah', 1.0): 1, ('yipppi', 1.0): 1, ('accnt', 1.0): 2, ('verifi', 1.0): 2, ('rqst', 1.0): 1, ('succeed', 1.0): 1, ('got', 1.0): 69, ('blue', 1.0): 9, ('tick', 1.0): 1, ('mark', 1.0): 1, ('fb', 1.0): 6, ('profil', 1.0): 2, ('15', 1.0): 5, ('day', 1.0): 246, ('one', 1.0): 129, ('irresist', 1.0): 2, ('flipkartfashionfriday', 1.0): 17, ('like', 1.0): 233, ('keep', 1.0): 68, ('love', 1.0): 400, ('custom', 1.0): 4, ('wait', 1.0): 70, ('long', 1.0): 36, ('hope', 1.0): 141, ('enjoy', 1.0): 75, ('happi', 1.0): 211, ('friday', 1.0): 116, ('lwwf', 1.0): 1, ('second', 1.0): 10, ('thought', 1.0): 29, ('’', 1.0): 21, ('enough', 1.0): 18, ('time', 1.0): 127, ('dd', 1.0): 1, ('new', 1.0): 143, ('short', 1.0): 7, ('enter', 1.0): 9, ('system', 1.0): 2, ('sheep', 1.0): 1, ('must', 1.0): 18, ('buy', 1.0): 11, ('jgh', 1.0): 4, ('go', 1.0): 148, ('bayan', 1.0): 1, (':D', 1.0): 629, ('bye', 1.0): 7, ('act', 1.0): 8, ('mischiev', 1.0): 1, ('etl', 1.0): 1, ('layer', 1.0): 1, ('in-hous', 1.0): 1, ('wareh', 1.0): 1, ('app', 1.0): 16, ('katamari', 1.0): 1, ('well', 1.0): 81, ('…', 1.0): 38, ('name', 1.0): 18, ('impli', 1.0): 1, (':p', 1.0): 137, ('influenc', 1.0): 18, ('big', 1.0): 33, ('...', 1.0): 289, ('juici', 1.0): 3, ('selfi', 1.0): 12, ('follow', 1.0): 381, ('perfect', 1.0): 24, ('alreadi', 1.0): 28, ('know', 1.0): 145, ("what'", 1.0): 17, ('great', 1.0): 171, ('opportun', 1.0): 23, ('junior', 1.0): 2, ('triathlet', 1.0): 1, ('age', 1.0): 2, ('12', 1.0): 5, ('13', 1.0): 6, ('gatorad', 1.0): 1, ('seri', 1.0): 5, ('get', 1.0): 206, ('entri', 1.0): 4, ('lay', 1.0): 4, ('greet', 1.0): 5, ('card', 1.0): 8, ('rang', 1.0): 3, ('print', 1.0): 3, ('today', 1.0): 108, ('job', 1.0): 41, (':-)', 1.0): 692, ("friend'", 1.0): 3, ('lunch', 1.0): 5, ('yummm', 1.0): 1, ('nostalgia', 1.0): 1, ('tb', 1.0): 2, ('ku', 1.0): 1, ('id', 1.0): 8, ('conflict', 1.0): 1, ('help', 1.0): 41, ("here'", 1.0): 25, ('screenshot', 1.0): 3, ('work', 1.0): 110, ('hi', 1.0): 173, ('liv', 1.0): 2, ('hello', 1.0): 59, ('need', 1.0): 78, ('someth', 1.0): 28, ('u', 1.0): 175, ('fm', 1.0): 2, ('twitter', 1.0): 29, ('—', 1.0): 27, ('sure', 1.0): 58, ('thing', 1.0): 69, ('dm', 1.0): 39, ('x', 1.0): 72, ("i'v", 
1.0): 35, ('heard', 1.0): 9, ('four', 1.0): 5, ('season', 1.0): 9, ('pretti', 1.0): 20, ('dope', 1.0): 2, ('penthous', 1.0): 1, ('obv', 1.0): 1, ('gobigorgohom', 1.0): 1, ('fun', 1.0): 58, ("y'all", 1.0): 3, ('yeah', 1.0): 47, ('suppos', 1.0): 7, ('lol', 1.0): 64, ('chat', 1.0): 13, ('bit', 1.0): 20, ('youth', 1.0): 19, ('💅', 1.0): 1, ('🏽', 1.0): 2, ('💋', 1.0): 2, ('seen', 1.0): 10, ('year', 1.0): 43, ('rest', 1.0): 12, ('goe', 1.0): 7, ('quickli', 1.0): 3, ('bed', 1.0): 16, ('music', 1.0): 21, ('fix', 1.0): 10, ('dream', 1.0): 20, ('spiritu', 1.0): 1, ('ritual', 1.0): 1, ('festiv', 1.0): 8, ('népal', 1.0): 1, ('begin', 1.0): 4, ('line-up', 1.0): 4, ('left', 1.0): 13, ('see', 1.0): 184, ('sarah', 1.0): 4, ('send', 1.0): 22, ('us', 1.0): 109, ('email', 1.0): 26, ('[email protected]', 1.0): 1, ("we'll", 1.0): 20, ('asap', 1.0): 5, ('kik', 1.0): 22, ('hatessuc', 1.0): 1, ('32429', 1.0): 1, ('kikm', 1.0): 1, ('lgbt', 1.0): 2, ('tinder', 1.0): 1, ('nsfw', 1.0): 1, ('akua', 1.0): 1, ('cumshot', 1.0): 1, ('come', 1.0): 70, ('hous', 1.0): 7, ('nsn_supplement', 1.0): 1, ('effect', 1.0): 4, ('press', 1.0): 1, ('releas', 1.0): 11, ('distribut', 1.0): 1, ('result', 1.0): 2, ('link', 1.0): 18, ('remov', 1.0): 3, ('pressreleas', 1.0): 1, ('newsdistribut', 1.0): 1, ('bam', 1.0): 44, ('bestfriend', 1.0): 50, ('lot', 1.0): 87, ('warsaw', 1.0): 44, ('<3', 1.0): 134, ('x46', 1.0): 1, ('everyon', 1.0): 58, ('watch', 1.0): 46, ('documentari', 1.0): 1, ('earthl', 1.0): 2, ('youtub', 1.0): 13, ('support', 1.0): 27, ('buuut', 1.0): 1, ('oh', 1.0): 53, ('look', 1.0): 137, ('forward', 1.0): 29, ('visit', 1.0): 30, ('next', 1.0): 48, ('letsgetmessi', 1.0): 1, ('jo', 1.0): 1, ('make', 1.0): 99, ('feel', 1.0): 46, ('better', 1.0): 52, ('never', 1.0): 36, ('anyon', 1.0): 11, ('kpop', 1.0): 1, ('flesh', 1.0): 1, ('good', 1.0): 238, ('girl', 1.0): 44, ('best', 1.0): 65, ('wish', 1.0): 37, ('reason', 1.0): 13, ('epic', 1.0): 2, ('soundtrack', 1.0): 1, ('shout', 1.0): 12, ('ad', 1.0): 14, ('video', 1.0): 34, ('playlist', 1.0): 5, ('would', 1.0): 84, ('dear', 1.0): 17, ('jordan', 1.0): 1, ('okay', 1.0): 39, ('fake', 1.0): 2, ('gameplay', 1.0): 2, (';)', 1.0): 27, ('haha', 1.0): 53, ('im', 1.0): 51, ('kid', 1.0): 18, ('stuff', 1.0): 13, ('exactli', 1.0): 6, ('product', 1.0): 12, ('line', 1.0): 6, ('etsi', 1.0): 1, ('shop', 1.0): 16, ('check', 1.0): 52, ('vacat', 1.0): 6, ('recharg', 1.0): 1, ('normal', 1.0): 6, ('charger', 1.0): 2, ('asleep', 1.0): 9, ('talk', 1.0): 45, ('sooo', 1.0): 6, ('someon', 1.0): 34, ('text', 1.0): 18, ('ye', 1.0): 77, ('bet', 1.0): 6, ("he'll", 1.0): 4, ('fit', 1.0): 3, ('hear', 1.0): 33, ('speech', 1.0): 1, ('piti', 1.0): 3, ('green', 1.0): 3, ('garden', 1.0): 7, ('midnight', 1.0): 1, ('sun', 1.0): 6, ('beauti', 1.0): 50, ('canal', 1.0): 1, ('dasvidaniya', 1.0): 1, ('till', 1.0): 18, ('scout', 1.0): 1, ('sg', 1.0): 1, ('futur', 1.0): 13, ('wlan', 1.0): 1, ('pro', 1.0): 5, ('confer', 1.0): 1, ('asia', 1.0): 1, ('chang', 1.0): 24, ('lollipop', 1.0): 1, ('🍭', 1.0): 1, ('nez', 1.0): 1, ('agnezmo', 1.0): 1, ('oley', 1.0): 1, ('mama', 1.0): 1, ('stand', 1.0): 8, ('stronger', 1.0): 1, ('god', 1.0): 20, ('misti', 1.0): 1, ('babi', 1.0): 20, ('cute', 1.0): 26, ('woohoo', 1.0): 3, ("can't", 1.0): 43, ('sign', 1.0): 11, ('yet', 1.0): 13, ('still', 1.0): 48, ('think', 1.0): 63, ('mka', 1.0): 5, ('liam', 1.0): 8, ('access', 1.0): 3, ('welcom', 1.0): 73, ('stat', 1.0): 60, ('arriv', 1.0): 67, ('1', 1.0): 75, ('unfollow', 1.0): 63, ('via', 1.0): 69, ('surpris', 1.0): 10, ('figur', 1.0): 5, 
('happybirthdayemilybett', 1.0): 1, ('sweet', 1.0): 19, ('talent', 1.0): 5, ('2', 1.0): 58, ('plan', 1.0): 27, ('drain', 1.0): 1, ('gotta', 1.0): 5, ('timezon', 1.0): 1, ('parent', 1.0): 5, ('proud', 1.0): 12, ('least', 1.0): 16, ('mayb', 1.0): 18, ('sometim', 1.0): 13, ('grade', 1.0): 4, ('al', 1.0): 4, ('grand', 1.0): 4, ('manila_bro', 1.0): 2, ('chosen', 1.0): 1, ('let', 1.0): 68, ('around', 1.0): 17, ('..', 1.0): 128, ('side', 1.0): 15, ('world', 1.0): 27, ('eh', 1.0): 2, ('take', 1.0): 43, ('care', 1.0): 18, ('final', 1.0): 30, ('fuck', 1.0): 26, ('weekend', 1.0): 75, ('real', 1.0): 21, ('x45', 1.0): 1, ('join', 1.0): 23, ('hushedcallwithfraydo', 1.0): 1, ('gift', 1.0): 8, ('yeahhh', 1.0): 1, ('hushedpinwithsammi', 1.0): 2, ('event', 1.0): 8, ('might', 1.0): 27, ('luv', 1.0): 6, ('realli', 1.0): 79, ('appreci', 1.0): 31, ('share', 1.0): 46, ('wow', 1.0): 22, ('tom', 1.0): 5, ('gym', 1.0): 4, ('monday', 1.0): 9, ('invit', 1.0): 17, ('scope', 1.0): 5, ('friend', 1.0): 61, ('nude', 1.0): 2, ('sleep', 1.0): 45, ('birthday', 1.0): 74, ('want', 1.0): 96, ('t-shirt', 1.0): 3, ('cool', 1.0): 38, ('haw', 1.0): 1, ('phela', 1.0): 1, ('mom', 1.0): 10, ('obvious', 1.0): 2, ('princ', 1.0): 1, ('charm', 1.0): 1, ('stage', 1.0): 2, ('luck', 1.0): 30, ('tyler', 1.0): 2, ('hipster', 1.0): 1, ('glass', 1.0): 5, ('marti', 1.0): 2, ('glad', 1.0): 43, ('done', 1.0): 54, ('afternoon', 1.0): 10, ('read', 1.0): 34, ('kahfi', 1.0): 1, ('finish', 1.0): 17, ('ohmyg', 1.0): 1, ('yaya', 1.0): 3, ('dub', 1.0): 2, ('stalk', 1.0): 2, ('ig', 1.0): 3, ('gondooo', 1.0): 1, ('moo', 1.0): 2, ('tologooo', 1.0): 1, ('becom', 1.0): 10, ('detail', 1.0): 10, ('zzz', 1.0): 1, ('xx', 1.0): 42, ('physiotherapi', 1.0): 1, ('hashtag', 1.0): 5, ('💪', 1.0): 1, ('monica', 1.0): 1, ('miss', 1.0): 27, ('sound', 1.0): 23, ('morn', 1.0): 101, ("that'", 1.0): 67, ('x43', 1.0): 1, ('definit', 1.0): 23, ('tri', 1.0): 44, ('tonight', 1.0): 20, ('took', 1.0): 8, ('advic', 1.0): 6, ('treviso', 1.0): 1, ('concert', 1.0): 24, ('citi', 1.0): 27, ('countri', 1.0): 23, ("i'll", 1.0): 90, ('start', 1.0): 61, ('fine', 1.0): 10, ('gorgeou', 1.0): 12, ('xo', 1.0): 2, ('oven', 1.0): 3, ('roast', 1.0): 2, ('garlic', 1.0): 1, ('oliv', 1.0): 1, ('oil', 1.0): 4, ('dri', 1.0): 5, ('tomato', 1.0): 1, ('basil', 1.0): 1, ('centuri', 1.0): 1, ('tuna', 1.0): 1, ('right', 1.0): 47, ('back', 1.0): 98, ('atchya', 1.0): 1, ('even', 1.0): 35, ('almost', 1.0): 10, ('chanc', 1.0): 6, ('cheer', 1.0): 20, ('po', 1.0): 4, ('ice', 1.0): 6, ('cream', 1.0): 6, ('agre', 1.0): 16, ('100', 1.0): 8, ('heheheh', 1.0): 2, ('that', 1.0): 13, ('point', 1.0): 13, ('stay', 1.0): 25, ('home', 1.0): 31, ('soon', 1.0): 47, ('promis', 1.0): 6, ('web', 1.0): 4, ('whatsapp', 1.0): 5, ('volta', 1.0): 1, ('funcionar', 1.0): 1, ('com', 1.0): 2, ('iphon', 1.0): 7, ('jailbroken', 1.0): 1, ('later', 1.0): 16, ('34', 1.0): 3, ('min', 1.0): 9, ('leia', 1.0): 1, ('appear', 1.0): 3, ('hologram', 1.0): 1, ('r2d2', 1.0): 1, ('w', 1.0): 18, ('messag', 1.0): 10, ('obi', 1.0): 1, ('wan', 1.0): 3, ('sit', 1.0): 8, ('luke', 1.0): 6, ('inter', 1.0): 1, ('3', 1.0): 32, ('ucl', 1.0): 1, ('arsen', 1.0): 2, ('small', 1.0): 4, ('team', 1.0): 29, ('pass', 1.0): 12, ('🚂', 1.0): 1, ('dewsburi', 1.0): 2, ('railway', 1.0): 1, ('station', 1.0): 4, ('dew', 1.0): 1, ('west', 1.0): 3, ('yorkshir', 1.0): 2, ('430', 1.0): 1, ('smh', 1.0): 2, ('9:25', 1.0): 1, ('live', 1.0): 26, ('strang', 1.0): 4, ('imagin', 1.0): 5, ('megan', 1.0): 1, ('masaantoday', 1.0): 6, ('a4', 1.0): 3, ('shweta', 1.0): 1, ('tripathi', 1.0): 1, ('5', 
1.0): 17, ('20', 1.0): 6, ('kurta', 1.0): 3, ('half', 1.0): 7, ('number', 1.0): 13, ('wsalelov', 1.0): 16, ('ah', 1.0): 13, ('larri', 1.0): 3, ('anyway', 1.0): 16, ('kinda', 1.0): 13, ('goood', 1.0): 4, ('life', 1.0): 49, ('enn', 1.0): 1, ('could', 1.0): 32, ('warmup', 1.0): 1, ('15th', 1.0): 2, ('bath', 1.0): 7, ('dum', 1.0): 2, ('andar', 1.0): 1, ('ram', 1.0): 1, ('sampath', 1.0): 1, ('sona', 1.0): 1, ('mohapatra', 1.0): 1, ('samantha', 1.0): 1, ('edward', 1.0): 1, ('mein', 1.0): 1, ('tulan', 1.0): 1, ('razi', 1.0): 2, ('wah', 1.0): 2, ('josh', 1.0): 1, ('alway', 1.0): 67, ('smile', 1.0): 62, ('pictur', 1.0): 12, ('16.20', 1.0): 1, ('giveitup', 1.0): 1, ('given', 1.0): 3, ('ga', 1.0): 3, ('subsidi', 1.0): 1, ('initi', 1.0): 4, ('propos', 1.0): 3, ('delight', 1.0): 7, ('yesterday', 1.0): 7, ('x42', 1.0): 1, ('lmaoo', 1.0): 2, ('song', 1.0): 22, ('ever', 1.0): 23, ('shall', 1.0): 6, ('littl', 1.0): 31, ('throwback', 1.0): 3, ('outli', 1.0): 1, ('island', 1.0): 5, ('cheung', 1.0): 1, ('chau', 1.0): 1, ('mui', 1.0): 1, ('wo', 1.0): 1, ('total', 1.0): 9, ('differ', 1.0): 11, ('kfckitchentour', 1.0): 2, ('kitchen', 1.0): 4, ('clean', 1.0): 1, ("i'm", 1.0): 183, ('cusp', 1.0): 1, ('test', 1.0): 7, ('water', 1.0): 8, ('reward', 1.0): 1, ('arummzz', 1.0): 2, ("let'", 1.0): 23, ('drive', 1.0): 11, ('travel', 1.0): 20, ('yogyakarta', 1.0): 3, ('jeep', 1.0): 3, ('indonesia', 1.0): 4, ('instamood', 1.0): 3, ('wanna', 1.0): 30, ('skype', 1.0): 3, ('may', 1.0): 22, ('nice', 1.0): 98, ('friendli', 1.0): 2, ('pretend', 1.0): 2, ('film', 1.0): 9, ('congratul', 1.0): 15, ('winner', 1.0): 4, ('cheesydelight', 1.0): 1, ('contest', 1.0): 6, ('address', 1.0): 10, ('guy', 1.0): 60, ('market', 1.0): 5, ('24/7', 1.0): 1, ('14', 1.0): 1, ('hour', 1.0): 27, ('leav', 1.0): 12, ('without', 1.0): 12, ('delay', 1.0): 2, ('actual', 1.0): 19, ('easi', 1.0): 9, ('guess', 1.0): 14, ('train', 1.0): 10, ('wd', 1.0): 1, ('shift', 1.0): 5, ('engin', 1.0): 2, ('etc', 1.0): 2, ('sunburn', 1.0): 1, ('peel', 1.0): 2, ('blog', 1.0): 31, ('huge', 1.0): 11, ('warm', 1.0): 6, ('☆', 1.0): 3, ('complet', 1.0): 11, ('triangl', 1.0): 2, ('northern', 1.0): 1, ('ireland', 1.0): 2, ('sight', 1.0): 1, ('smthng', 1.0): 2, ('fr', 1.0): 3, ('hug', 1.0): 13, ('xoxo', 1.0): 3, ('uu', 1.0): 1, ('jaann', 1.0): 1, ('topnewfollow', 1.0): 2, ('connect', 1.0): 14, ('wonder', 1.0): 35, ('made', 1.0): 53, ('fluffi', 1.0): 1, ('insid', 1.0): 8, ('pirouett', 1.0): 1, ('moos', 1.0): 1, ('trip', 1.0): 14, ('philli', 1.0): 1, ('decemb', 1.0): 3, ("i'd", 1.0): 20, ('dude', 1.0): 6, ('x41', 1.0): 1, ('question', 1.0): 17, ('flaw', 1.0): 1, ('pain', 1.0): 9, ('negat', 1.0): 1, ('strength', 1.0): 3, ('went', 1.0): 12, ('solo', 1.0): 4, ('move', 1.0): 12, ('fav', 1.0): 13, ('nirvana', 1.0): 1, ('smell', 1.0): 2, ('teen', 1.0): 3, ('spirit', 1.0): 3, ('rip', 1.0): 3, ('ami', 1.0): 4, ('winehous', 1.0): 1, ('coupl', 1.0): 9, ('tomhiddleston', 1.0): 1, ('elizabetholsen', 1.0): 1, ('yaytheylookgreat', 1.0): 1, ('goodnight', 1.0): 24, ('vid', 1.0): 11, ('wake', 1.0): 12, ('gonna', 1.0): 21, ('shoot', 1.0): 6, ('itti', 1.0): 2, ('bitti', 1.0): 2, ('teeni', 1.0): 2, ('bikini', 1.0): 3, ('much', 1.0): 89, ('4th', 1.0): 4, ('togeth', 1.0): 7, ('end', 1.0): 20, ('xfile', 1.0): 1, ('content', 1.0): 4, ('rain', 1.0): 21, ('fabul', 1.0): 5, ('fantast', 1.0): 13, ('♡', 1.0): 20, ('jb', 1.0): 1, ('forev', 1.0): 5, ('belieb', 1.0): 3, ('nighti', 1.0): 1, ('bug', 1.0): 3, ('bite', 1.0): 1, ('bracelet', 1.0): 2, ('idea', 1.0): 26, ('foundri', 1.0): 1, ('game', 1.0): 27, ('sens', 
1.0): 7, ('pic', 1.0): 27, ('ef', 1.0): 1, ('phone', 1.0): 19, ('woot', 1.0): 2, ('derek', 1.0): 1, ('use', 1.0): 44, ('parkshar', 1.0): 1, ('gloucestershir', 1.0): 1, ('aaaahhh', 1.0): 1, ('man', 1.0): 23, ('traffic', 1.0): 2, ('stress', 1.0): 8, ('reliev', 1.0): 1, ("how'r", 1.0): 1, ('arbeloa', 1.0): 1, ('turn', 1.0): 15, ('17', 1.0): 4, ('omg', 1.0): 15, ('say', 1.0): 61, ('europ', 1.0): 1, ('rise', 1.0): 2, ('find', 1.0): 23, ('hard', 1.0): 12, ('believ', 1.0): 9, ('uncount', 1.0): 1, ('coz', 1.0): 3, ('unlimit', 1.0): 1, ('cours', 1.0): 18, ('teamposit', 1.0): 1, ('aldub', 1.0): 2, ('☕', 1.0): 3, ('rita', 1.0): 2, ('info', 1.0): 13, ("we'd", 1.0): 4, ('way', 1.0): 46, ('boy', 1.0): 21, ('x40', 1.0): 1, ('true', 1.0): 22, ('sethi', 1.0): 2, ('high', 1.0): 7, ('exe', 1.0): 1, ('skeem', 1.0): 1, ('saam', 1.0): 1, ('peopl', 1.0): 48, ('polit', 1.0): 2, ('izzat', 1.0): 1, ('wese', 1.0): 1, ('trust', 1.0): 9, ('khawateen', 1.0): 1, ('k', 1.0): 9, ('sath', 1.0): 2, ('mana', 1.0): 1, ('kar', 1.0): 1, ('deya', 1.0): 1, ('sort', 1.0): 9, ('smart', 1.0): 5, ('hair', 1.0): 12, ('tbh', 1.0): 5, ('jacob', 1.0): 2, ('g', 1.0): 10, ('upgrad', 1.0): 6, ('tee', 1.0): 2, ('famili', 1.0): 19, ('person', 1.0): 19, ('two', 1.0): 22, ('convers', 1.0): 6, ('onlin', 1.0): 7, ('mclaren', 1.0): 1, ('fridayfeel', 1.0): 5, ('tgif', 1.0): 10, ('squar', 1.0): 1, ('enix', 1.0): 1, ('bissmillah', 1.0): 1, ('ya', 1.0): 23, ('allah', 1.0): 3, ("we'r", 1.0): 29, ('socent', 1.0): 1, ('startup', 1.0): 2, ('drop', 1.0): 9, ('your', 1.0): 3, ('arnd', 1.0): 1, ('town', 1.0): 5, ('basic', 1.0): 4, ('piss', 1.0): 3, ('cup', 1.0): 4, ('also', 1.0): 35, ('terribl', 1.0): 2, ('complic', 1.0): 1, ('discuss', 1.0): 3, ('snapchat', 1.0): 36, ('lynettelow', 1.0): 1, ('kikmenow', 1.0): 3, ('snapm', 1.0): 2, ('hot', 1.0): 24, ('amazon', 1.0): 1, ('kikmeguy', 1.0): 3, ('defin', 1.0): 2, ('grow', 1.0): 7, ('sport', 1.0): 4, ('rt', 1.0): 12, ('rakyat', 1.0): 1, ('write', 1.0): 13, ('sinc', 1.0): 15, ('mention', 1.0): 24, ('fli', 1.0): 5, ('fish', 1.0): 3, ('promot', 1.0): 5, ('post', 1.0): 21, ('cyber', 1.0): 1, ('ourdaughtersourprid', 1.0): 5, ('mypapamyprid', 1.0): 2, ('papa', 1.0): 2, ('coach', 1.0): 2, ('posit', 1.0): 8, ('kha', 1.0): 1, ('atleast', 1.0): 2, ('x39', 1.0): 1, ('mango', 1.0): 1, ("lassi'", 1.0): 1, ("monty'", 1.0): 1, ('marvel', 1.0): 2, ('though', 1.0): 19, ('suspect', 1.0): 3, ('meant', 1.0): 3, ('24', 1.0): 4, ('hr', 1.0): 2, ('touch', 1.0): 15, ('kepler', 1.0): 4, ('452b', 1.0): 5, ('chalna', 1.0): 1, ('hai', 1.0): 11, ('thankyou', 1.0): 14, ('hazel', 1.0): 1, ('food', 1.0): 6, ('brooklyn', 1.0): 1, ('pta', 1.0): 2, ('awak', 1.0): 10, ('okayi', 1.0): 2, ('awww', 1.0): 15, ('ha', 1.0): 23, ('doc', 1.0): 1, ('splendid', 1.0): 1, ('spam', 1.0): 1, ('folder', 1.0): 1, ('amount', 1.0): 1, ('nigeria', 1.0): 1, ('claim', 1.0): 1, ('rted', 1.0): 1, ('leg', 1.0): 5, ('hurt', 1.0): 8, ('bad', 1.0): 18, ('mine', 1.0): 14, ('saturday', 1.0): 8, ('thaaank', 1.0): 1, ('puhon', 1.0): 1, ('happinesss', 1.0): 1, ('tnc', 1.0): 1, ('prior', 1.0): 1, ('notif', 1.0): 2, ('fat', 1.0): 1, ('co', 1.0): 1, ('probabl', 1.0): 9, ('ate', 1.0): 4, ('yuna', 1.0): 2, ('tamesid', 1.0): 1, ('´', 1.0): 3, ('googl', 1.0): 6, ('account', 1.0): 19, ('scouser', 1.0): 1, ('everyth', 1.0): 13, ('zoe', 1.0): 2, ('mate', 1.0): 7, ('liter', 1.0): 6, ("they'r", 1.0): 12, ('samee', 1.0): 1, ('edgar', 1.0): 1, ('updat', 1.0): 13, ('log', 1.0): 4, ('bring', 1.0): 17, ('abe', 1.0): 1, ('meet', 1.0): 34, ('x38', 1.0): 1, ('sigh', 1.0): 3, ('dreamili', 1.0): 1, 
{('pout', 1.0): 1, ('eye', 1.0): 14, ('quacketyquack', 1.0): 7, ('funni', 1.0): 19, ('happen', 1.0): 16, ...}
[Output truncated: a frequency dictionary mapping (stemmed token, class label) pairs to their occurrence counts in the tweet corpus. Every key shown carries the label 1.0; the tokens include Porter-stemmed words, hashtags, emoticons, and emoji.]
('peter', 1.0): 1, ('expel', 1.0): 2, ('boughi', 1.0): 1, ('kernel', 1.0): 1, ('paralysi', 1.0): 1, ('liza', 1.0): 1, ('lol.hook', 1.0): 1, ('vampir', 1.0): 2, ('diari', 1.0): 3, ('twice', 1.0): 1, ('thanq', 1.0): 2, ('goodwil', 1.0): 1, ('vandr', 1.0): 1, ('ash', 1.0): 1, ('debat', 1.0): 3, ('solar', 1.0): 1, ('6-5', 1.0): 1, ('shown', 1.0): 1, ('ek', 1.0): 1, ('taco', 1.0): 2, ('mexico', 1.0): 2, ('viva', 1.0): 1, ('méxico', 1.0): 1, ('burger', 1.0): 3, ('thebestangkapuso', 1.0): 1, ('lighter', 1.0): 1, ('tooth', 1.0): 2, ('korean', 1.0): 2, ('netizen', 1.0): 1, ('crueler', 1.0): 1, ('eleph', 1.0): 1, ('marula', 1.0): 1, ('tdif', 1.0): 1, ('shoutout', 1.0): 1, ('shortli', 1.0): 1, ('itsamarvelth', 1.0): 1, ("japan'", 1.0): 1, ('artist', 1.0): 1, ('homework', 1.0): 1, ('marco', 1.0): 1, ('herb', 1.0): 1, ('pm', 1.0): 3, ('self', 1.0): 1, ('esteem', 1.0): 1, ('patienc', 1.0): 1, ('sobtian', 1.0): 1, ('cowork', 1.0): 1, ('deathli', 1.0): 1, ('hallow', 1.0): 1, ('supernatur', 1.0): 1, ('consult', 1.0): 1, ('himach', 1.0): 1, ('2.25', 1.0): 1, ('asham', 1.0): 1, ('where.do.i.start', 1.0): 1, ('moviemarathon', 1.0): 1, ('skill', 1.0): 4, ('shadow', 1.0): 1, ('own', 1.0): 1, ('pair', 1.0): 3, ("it'll", 1.0): 6, ('cortez', 1.0): 1, ('superstar', 1.0): 1, ('tthank', 1.0): 1, ('colin', 1.0): 1, ('luxuou', 1.0): 1, ('tarryn', 1.0): 1, ('hbdme', 1.0): 1, ('yeeeyyy', 1.0): 1, ('barsostay', 1.0): 1, ('males', 1.0): 1, ('independ', 1.0): 1, ('sum', 1.0): 1, ('debacl', 1.0): 1, ('perfectli', 1.0): 1, ('longer', 1.0): 2, ('amyjackson', 1.0): 1, ('omegl', 1.0): 2, ('countrymus', 1.0): 1, ('five', 1.0): 2, ("night'", 1.0): 2, ("freddy'", 1.0): 2, ('demo', 1.0): 2, ('pump', 1.0): 2, ('fanboy', 1.0): 1, ('thegrandad', 1.0): 1, ('sidni', 1.0): 1, ('remarriag', 1.0): 1, ('occas', 1.0): 1, ('languag', 1.0): 1, ('java', 1.0): 1, ("php'", 1.0): 1, ('notion', 1.0): 1, ('refer', 1.0): 1, ('confus', 1.0): 3, ('ohioan', 1.0): 1, ('stick', 1.0): 2, ('doctor', 1.0): 3, ('offlin', 1.0): 1, ('thesim', 1.0): 1, ('mb', 1.0): 1, ('meaningless', 1.0): 1, ('common', 1.0): 1, ('celebr', 1.0): 9, ('muertosatfring', 1.0): 1, ('emul', 1.0): 1, ('brought', 1.0): 1, ('enemi', 1.0): 2, ('relax', 1.0): 3, ('ou', 1.0): 1, ('pink', 1.0): 2, ('cc', 1.0): 2, ('meooowww', 1.0): 1, ('barkkkiiidee', 1.0): 1, ('bark', 1.0): 1, ('x11', 1.0): 1, ('routin', 1.0): 4, ('alek', 1.0): 1, ('awh', 1.0): 2, ('kumpul', 1.0): 1, ('cantik', 1.0): 1, ('ganteng', 1.0): 1, ('kresna', 1.0): 1, ('jelli', 1.0): 1, ('simon', 1.0): 1, ('lesley', 1.0): 3, ('blood', 1.0): 2, ('panti', 1.0): 1, ('lion', 1.0): 1, ('artworkbyli', 1.0): 1, ('judo', 1.0): 1, ('daredevil', 1.0): 2, ('despond', 1.0): 1, ('re-watch', 1.0): 1, ('welcoma.hav', 1.0): 1, ('favor', 1.0): 5, ('tridon', 1.0): 1, ('21pic', 1.0): 1, ('master', 1.0): 3, ('nim', 1.0): 1, ("there'r", 1.0): 1, ('22pic', 1.0): 1, ('kebun', 1.0): 1, ('ubud', 1.0): 1, ('ladyposs', 1.0): 1, ('xoxoxo', 1.0): 1, ('sneak', 1.0): 3, ('peek', 1.0): 2, ('inbox', 1.0): 1, ('happyweekend', 1.0): 1, ('therealgolden', 1.0): 1, ('47', 1.0): 1, ('girlfriendsmya', 1.0): 1, ('ppl', 1.0): 2, ('closest', 1.0): 1, ('njoy', 1.0): 1, ('followingg', 1.0): 1, ('privat', 1.0): 1, ('pusher', 1.0): 1, ('stun', 1.0): 4, ('wooohooo', 1.0): 1, ('cuss', 1.0): 1, ('teenag', 1.0): 1, ('ace', 1.0): 1, ('sauc', 1.0): 3, ('livi', 1.0): 1, ('fowl', 1.0): 1, ('oliviafowl', 1.0): 1, ('891', 1.0): 1, ('burnout', 1.0): 1, ('johnforceo', 1.0): 1, ('matthew', 1.0): 1, ('provok', 1.0): 1, ('indiankultur', 1.0): 1, ('oppos', 1.0): 1, ('biker', 1.0): 1, ('lyk', 
1.0): 1, ('gud', 1.0): 4, ('weight', 1.0): 6, ('bcu', 1.0): 1, ('rubbish', 1.0): 1, ('veggi', 1.0): 2, ('steph', 1.0): 1, ('nj', 1.0): 1, ('x10', 1.0): 1, ('cohes', 1.0): 1, ('gossip', 1.0): 2, ('alex', 1.0): 3, ('heswifi', 1.0): 1, ('7am', 1.0): 1, ('wub', 1.0): 1, ('cerbchan', 1.0): 1, ('jarraaa', 1.0): 1, ('morrrn', 1.0): 1, ('snooz', 1.0): 1, ('clicksco', 1.0): 1, ('gay', 1.0): 4, ('lesbian', 1.0): 2, ('rigid', 1.0): 1, ('theocrat', 1.0): 1, ('wing', 1.0): 1, ('fundamentalist', 1.0): 1, ('islamist', 1.0): 1, ('brianaaa', 1.0): 1, ('brianazabrocki', 1.0): 1, ('sky', 1.0): 2, ('batb', 1.0): 1, ('clap', 1.0): 3, ('whilst', 1.0): 1, ('aki', 1.0): 1, ('thencerest', 1.0): 2, ('547', 1.0): 2, ('indiemus', 1.0): 5, ('sexyjudi', 1.0): 3, ('pussi', 1.0): 4, ('sexo', 1.0): 3, ('humid', 1.0): 1, ('87', 1.0): 1, ('sloppi', 1.0): 1, ("second'", 1.0): 1, ('stock', 1.0): 3, ('marmit', 1.0): 2, ('x9', 1.0): 1, ('nic', 1.0): 3, ('taft', 1.0): 1, ('finalist', 1.0): 1, ('lotteri', 1.0): 1, ('award', 1.0): 3, ('usagi', 1.0): 1, ('looov', 1.0): 1, ('wowww', 1.0): 2, ('💙', 1.0): 8, ('💚', 1.0): 8, ('💕', 1.0): 12, ('lepa', 1.0): 1, ('sembuh', 1.0): 1, ('sibuk', 1.0): 1, ('balik', 1.0): 1, ('kin', 1.0): 1, ('gotham', 1.0): 1, ('sunnyday', 1.0): 1, ('dudett', 1.0): 1, ('cost', 1.0): 1, ('flippin', 1.0): 1, ('fortun', 1.0): 1, ('divinediscont', 1.0): 1, (';}', 1.0): 1, ('amnot', 1.0): 1, ('autofollow', 1.0): 3, ('teamfollowback', 1.0): 4, ('geer', 1.0): 1, ('bat', 1.0): 2, ('mz', 1.0): 1, ('yang', 1.0): 2, ('deennya', 1.0): 1, ('jehwan', 1.0): 1, ('11:00', 1.0): 1, ('ashton', 1.0): 1, ('✧', 1.0): 12, ('。', 1.0): 4, ('chelni', 1.0): 2, ('datz', 1.0): 1, ('jeremi', 1.0): 1, ('fmt', 1.0): 1, ('dat', 1.0): 3, ('heartbeat', 1.0): 1, ('clutch', 1.0): 1, ('🐢', 1.0): 2, ('besteverdoctorwhoepisod', 1.0): 1, ('relev', 1.0): 1, ('puke', 1.0): 1, ('proper', 1.0): 1, ('x8', 1.0): 1, ('sublimin', 1.0): 1, ('eatmeat', 1.0): 1, ('brewproject', 1.0): 1, ('lovenafianna', 1.0): 1, ('mr', 1.0): 7, ('lewi', 1.0): 1, ('clock', 1.0): 1, ('3:02', 1.0): 2, ('muslim', 1.0): 1, ('prophet', 1.0): 1, ('غردلي', 1.0): 4, ('is.h', 1.0): 1, ('mistak', 1.0): 4, ('understood', 1.0): 1, ('politician', 1.0): 1, ('argu', 1.0): 1, ('intellect', 1.0): 1, ('shiva', 1.0): 1, ('mp3', 1.0): 1, ('standrew', 1.0): 1, ('sandcastl', 1.0): 1, ('ewok', 1.0): 1, ('nate', 1.0): 2, ('brawl', 1.0): 1, ('rear', 1.0): 1, ('nake', 1.0): 1, ('choke', 1.0): 1, ('heck', 1.0): 1, ('gun', 1.0): 2, ('associ', 1.0): 1, ('um', 1.0): 1, ('endow', 1.0): 1, ('ai', 1.0): 1, ('sikandar', 1.0): 1, ('pti', 1.0): 1, ('standwdik', 1.0): 1, ('westandwithik', 1.0): 1, ('starbuck', 1.0): 2, ('logo', 1.0): 2, ('renew', 1.0): 1, ('chariti', 1.0): 1, ('جمعة_مباركة', 1.0): 1, ('hoki', 1.0): 1, ('biz', 1.0): 1, ('non', 1.0): 1, ('america', 1.0): 1, ('california', 1.0): 1, ('01:16', 1.0): 1, ('45gameplay', 1.0): 2, ('ilovey', 1.0): 2, ('vex', 1.0): 1, ('iger', 1.0): 1, ('leicaq', 1.0): 1, ('leica', 1.0): 1, ('dudee', 1.0): 1, ('persona', 1.0): 1, ('yepp', 1.0): 1, ('5878e503', 1.0): 1, ('x7', 1.0): 1, ('greg', 1.0): 1, ('posey', 1.0): 1, ('miami', 1.0): 1, ('james_yammouni', 1.0): 1, ('breakdown', 1.0): 1, ('materi', 1.0): 2, ('thorin', 1.0): 1, ('hunt', 1.0): 1, ('choroo', 1.0): 1, ('nahi', 1.0): 2, ('aztec', 1.0): 1, ('princess', 1.0): 2, ('raini', 1.0): 1, ('kingfish', 1.0): 1, ('chinua', 1.0): 1, ('acheb', 1.0): 1, ('intellectu', 1.0): 2, ('liquid', 1.0): 1, ('melbournetrip', 1.0): 1, ('taxikitchen', 1.0): 1, ('nooow', 1.0): 2, ('mcdo', 1.0): 1, ('everywher', 1.0): 2, ('dreamer', 1.0): 1, 
('tanisha', 1.0): 1, ('1nonli', 1.0): 1, ('attitud', 1.0): 1, ('kindl', 1.0): 2, ('flame', 1.0): 1, ('convict', 1.0): 1, ('bar', 1.0): 1, ('repath', 1.0): 2, ('adi', 1.0): 1, ('stefani', 1.0): 1, ('sg1', 1.0): 1, ('lightbox', 1.0): 1, ('ran', 1.0): 2, ('incorrect', 1.0): 1, ('apologist', 1.0): 1, ('x6', 1.0): 1, ('vuli', 1.0): 1, ('01:15', 1.0): 1, ('batman', 1.0): 1, ('pearson', 1.0): 1, ('reput', 1.0): 2, ('nikkei', 1.0): 1, ('woodford', 1.0): 1, ('vscocam', 1.0): 1, ('vscoph', 1.0): 1, ('vscogood', 1.0): 1, ('vscophil', 1.0): 1, ('vscocousin', 1.0): 1, ('yaap', 1.0): 1, ('urwelc', 1.0): 1, ('neon', 1.0): 1, ('pant', 1.0): 1, ('haaa', 1.0): 1, ('will', 1.0): 2, ('auspost', 1.0): 1, ('openfollow', 1.0): 1, ('rp', 1.0): 2, ('eng', 1.0): 1, ('yūjō-cosplay', 1.0): 1, ('luxembourg', 1.0): 1, ('bunni', 1.0): 1, ('broadcast', 1.0): 1, ('needa', 1.0): 1, ('gal', 1.0): 3, ('bend', 1.0): 3, ('heaven', 1.0): 2, ('score', 1.0): 2, ('januari', 1.0): 1, ('hanabutl', 1.0): 1, ('kikhorni', 1.0): 1, ('interraci', 1.0): 1, ('makeup', 1.0): 1, ('chu', 1.0): 1, ("weekend'", 1.0): 1, ('punt', 1.0): 1, ('horserac', 1.0): 1, ('hors', 1.0): 2, ('horseracingtip', 1.0): 1, ('guitar', 1.0): 1, ('cocoar', 1.0): 1, ('brief', 1.0): 1, ('introduct', 1.0): 1, ('earliest', 1.0): 1, ('indian', 1.0): 1, ('subcontin', 1.0): 1, ('bfr', 1.0): 1, ('maurya', 1.0): 1, ('jordanian', 1.0): 1, ('00962778381', 1.0): 1, ('838', 1.0): 1, ('tenyai', 1.0): 1, ('hee', 1.0): 2, ('ss', 1.0): 1, ('semi', 1.0): 1, ('atp', 1.0): 2, ('wimbledon', 1.0): 2, ('feder', 1.0): 1, ('nadal', 1.0): 1, ('monfil', 1.0): 1, ('handsom', 1.0): 2, ('cilic', 1.0): 3, ('firm', 1.0): 1, ('potenti', 1.0): 3, ('nyc', 1.0): 1, ('chillin', 1.0): 2, ('tail', 1.0): 2, ('kitten', 1.0): 1, ('garret', 1.0): 1, ('baz', 1.0): 1, ('leo', 1.0): 2, ('xst', 1.0): 1, ('centrifug', 1.0): 1, ('etern', 1.0): 3, ('forgiv', 1.0): 2, ('kangin', 1.0): 1, ('بندر', 1.0): 1, ('العنزي', 1.0): 1, ('kristin', 1.0): 1, ('cass', 1.0): 1, ('surajettan', 1.0): 1, ('kashi', 1.0): 1, ('ashwathi', 1.0): 1, ('mommi', 1.0): 2, ('tirth', 1.0): 1, ('brambhatt', 1.0): 1, ('snooker', 1.0): 1, ('compens', 1.0): 1, ('theoper', 1.0): 1, ('479', 1.0): 1, ('premiostumundo', 1.0): 2, ('philosoph', 1.0): 1, ('x5', 1.0): 1, ('graphic', 1.0): 2, ('level', 1.0): 1, ('aug', 1.0): 3, ('excl', 1.0): 1, ('raw', 1.0): 1, ('weeni', 1.0): 1, ('annoyingbabi', 1.0): 1, ('lazi', 1.0): 2, ('cosi', 1.0): 1, ('client_amends_edit', 1.0): 1, ('_5_final_final_fin', 1.0): 1, ('pdf', 1.0): 1, ('mauliat', 1.0): 1, ('ito', 1.0): 2, ('okkay', 1.0): 1, ('knock', 1.0): 3, ("soloist'", 1.0): 1, ('ryu', 1.0): 1, ('saera', 1.0): 1, ('pinkeu', 1.0): 1, ('angri', 1.0): 3, ('screencap', 1.0): 1, ('jonghyun', 1.0): 1, ('seungyeon', 1.0): 1, ('cnblue', 1.0): 1, ('mbc', 1.0): 1, ('wgm', 1.0): 1, ('masa', 1.0): 2, ('entrepreneurship', 1.0): 1, ('empow', 1.0): 1, ('limpopo', 1.0): 1, ('pict', 1.0): 1, ('norapowel', 1.0): 1, ('hornykik', 1.0): 2, ('livesex', 1.0): 1, ('pumpkin', 1.0): 1, ('thrice', 1.0): 1, ('patron', 1.0): 1, ('ventur', 1.0): 1, ('deathcur', 1.0): 1, ('boob', 1.0): 1, ('blame', 1.0): 1, ('dine', 1.0): 1, ('modern', 1.0): 1, ('grill', 1.0): 1, ('disk', 1.0): 1, ('nt4', 1.0): 1, ('iirc', 1.0): 1, ('ux', 1.0): 1, ('refin', 1.0): 1, ('zdp', 1.0): 1, ('didnt', 1.0): 2, ('justic', 1.0): 1, ('daw', 1.0): 1, ('tine', 1.0): 1, ('gensan', 1.0): 1, ('frightl', 1.0): 1, ('undead', 1.0): 1, ('plush', 1.0): 1, ('cushion', 1.0): 1, ('nba', 1.0): 3, ('2k15', 1.0): 3, ('mypark', 1.0): 3, ('chronicl', 1.0): 4, ('gryph', 1.0): 3, ('volum', 1.0): 
3, ('ellen', 1.0): 1, ('degener', 1.0): 1, ('shirt', 1.0): 1, ('mint', 1.0): 1, ('superdri', 1.0): 1, ('berangkaat', 1.0): 1, ('lagiii', 1.0): 1, ('siguro', 1.0): 1, ('un', 1.0): 1, ('kesa', 1.0): 1, ('lotsa', 1.0): 2, ('organis', 1.0): 2, ('4am', 1.0): 1, ('fingers-cross', 1.0): 1, ('deep', 1.0): 1, ('htaccess', 1.0): 1, ('file', 1.0): 2, ('adf', 1.0): 1, ('womad', 1.0): 1, ('gran', 1.0): 1, ('canaria', 1.0): 1, ('gig', 1.0): 1, ('twist', 1.0): 1, ('youv', 1.0): 1, ('teamnatur', 1.0): 1, ('huni', 1.0): 1, ('yayayayay', 1.0): 1, ('yt', 1.0): 2, ('convent', 1.0): 1, ('brighton', 1.0): 1, ('slay', 1.0): 1, ('nicknam', 1.0): 1, ('babygirl', 1.0): 1, ('regard', 1.0): 2, ('himmat', 1.0): 1, ('karain', 1.0): 2, ('baat', 1.0): 1, ('meri', 1.0): 1, ('hotee-mi', 1.0): 1, ('uncl', 1.0): 1, ('tongu', 1.0): 1, ('pronounc', 1.0): 1, ('nativ', 1.0): 1, ('american', 1.0): 2, ('proverb', 1.0): 1, ('lovabl', 1.0): 1, ('yesha', 1.0): 1, ('montoya', 1.0): 1, ('eagerli', 1.0): 1, ('payment', 1.0): 1, ('suprem', 1.0): 1, ('leon', 1.0): 1, ('ks', 1.0): 2, ('randi', 1.0): 1, ('9bi', 1.0): 1, ('physiqu', 1.0): 1, ('shave', 1.0): 1, ('uncut', 1.0): 1, ('boi', 1.0): 1, ('cheapest', 1.0): 1, ('regular', 1.0): 3, ('printer', 1.0): 3, ('nz', 1.0): 1, ('larg', 1.0): 4, ('format', 1.0): 1, ('10/10', 1.0): 1, ('senior', 1.0): 1, ('raid', 1.0): 2, ('conserv', 1.0): 1, ('batteri', 1.0): 1, ('comfort', 1.0): 2, ('swt', 1.0): 1, ('[email protected]', 1.0): 1, ('localgaragederbi', 1.0): 1, ('campu', 1.0): 1, ('subgam', 1.0): 1, ('faceit', 1.0): 1, ('snpcaht', 1.0): 1, ('hakhakhak', 1.0): 1, ('t___t', 1.0): 1, ("kyungsoo'", 1.0): 1, ('3d', 1.0): 2, ('properti', 1.0): 2, ('agent', 1.0): 1, ('accur', 1.0): 1, ('descript', 1.0): 1, ('theori', 1.0): 1, ('x4', 1.0): 1, ('15.90', 1.0): 1, ('yvett', 1.0): 1, ('author', 1.0): 2, ('mwf', 1.0): 1, ('programm', 1.0): 1, ('taal', 1.0): 1, ('lake', 1.0): 1, ('2emt', 1.0): 1, ('«', 1.0): 2, ('scurri', 1.0): 1, ('agil', 1.0): 1, ('solut', 1.0): 1, ('sme', 1.0): 1, ('omar', 1.0): 1, ('biggest', 1.0): 5, ('kamaal', 1.0): 1, ('amm', 1.0): 1, ('3am', 1.0): 1, ('hopehousekid', 1.0): 1, ('pitmantrain', 1.0): 1, ('walkersmithway', 1.0): 1, ('keepitloc', 1.0): 2, ('sehun', 1.0): 1, ('se100lead', 1.0): 1, ('unev', 1.0): 1, ('sofa', 1.0): 1, ('surf', 1.0): 1, ('cunt', 1.0): 1, ('rescoop', 1.0): 1, ('multiraci', 1.0): 1, ('fk', 1.0): 1, ('narrow', 1.0): 1, ('warlock', 1.0): 1, ('balloon', 1.0): 3, ('mj', 1.0): 1, ('madison', 1.0): 1, ('beonknockknock', 1.0): 1, ('con-gradu', 1.0): 1, ('gent', 1.0): 1, ('bitchfac', 1.0): 1, ('😒', 1.0): 1, ('organ', 1.0): 1, ('12pm', 1.0): 2, ('york', 1.0): 2, ('nearest', 1.0): 1, ('lendal', 1.0): 1, ('pikami', 1.0): 1, ('captur', 1.0): 1, ('fulton', 1.0): 1, ('sheen', 1.0): 1, ('baloney', 1.0): 1, ('unvarnish', 1.0): 1, ('laid', 1.0): 2, ('thick', 1.0): 1, ('blarney', 1.0): 1, ('flatteri', 1.0): 1, ('thin', 1.0): 1, ('sachin', 1.0): 1, ('unimport', 1.0): 1, ('context', 1.0): 1, ('dampen', 1.0): 1, ('yu', 1.0): 1, ('rocket', 1.0): 1, ('narendra', 1.0): 1, ('modi', 1.0): 1, ('aaaand', 1.0): 1, ("team'", 1.0): 1, ('macauley', 1.0): 1, ('howev', 1.0): 3, ('x3', 1.0): 1, ('wheeen', 1.0): 1, ('heechul', 1.0): 1, ('toast', 1.0): 2, ('coffee-weekday', 1.0): 1, ('9-11', 1.0): 1, ('sail', 1.0): 1, ("friday'", 1.0): 1, ('commerci', 1.0): 1, ('insur', 1.0): 1, ('requir', 1.0): 2, ('lookfortheo', 1.0): 1, ('cl', 1.0): 1, ('thou', 1.0): 1, ('april', 1.0): 2, ('airforc', 1.0): 1, ('clark', 1.0): 1, ('field', 1.0): 1, ('pampanga', 1.0): 1, ('troll', 1.0): 1, ('⚡', 1.0): 1, ('brow', 
1.0): 1, ('oili', 1.0): 1, ('maricarljanah', 1.0): 1, ('6:15', 1.0): 1, ('degre', 1.0): 3, ('fahrenheit', 1.0): 1, ('🍸', 1.0): 7, ('╲', 1.0): 4, ('─', 1.0): 8, ('╱', 1.0): 5, ('🍤', 1.0): 4, ('╭', 1.0): 4, ('╮', 1.0): 4, ('┓', 1.0): 2, ('┳', 1.0): 1, ('┣', 1.0): 1, ('╰', 1.0): 3, ('╯', 1.0): 3, ('┗', 1.0): 2, ('┻', 1.0): 1, ('stool', 1.0): 1, ('toppl', 1.0): 1, ('findyourfit', 1.0): 1, ('prefer', 1.0): 2, ('whomosexu', 1.0): 1, ('stack', 1.0): 1, ('pandora', 1.0): 3, ('digitalexet', 1.0): 1, ('digitalmarket', 1.0): 1, ('sociamedia', 1.0): 1, ('nb', 1.0): 1, ('bom', 1.0): 1, ('dia', 1.0): 1, ('todo', 1.0): 1, ('forklift', 1.0): 1, ('warehous', 1.0): 1, ('worker', 1.0): 1, ('lsceen', 1.0): 1, ('immatur', 1.0): 1, ('gandhi', 1.0): 1, ('grassi', 1.0): 1, ('feetblog', 1.0): 2, ('daughter', 1.0): 3, ('4yr', 1.0): 1, ('old-porridg', 1.0): 1, ('fiend', 1.0): 1, ('2nite', 1.0): 1, ('comp', 1.0): 1, ('vike', 1.0): 1, ('t20blast', 1.0): 1, ('np', 1.0): 1, ('tax', 1.0): 1, ('ooohh', 1.0): 1, ('petjam', 1.0): 1, ('virtual', 1.0): 2, ('pounc', 1.0): 1, ('bentek', 1.0): 1, ('agn', 1.0): 1, ('[email protected]', 1.0): 1, ('sam', 1.0): 3, ('fruiti', 1.0): 1, ('vodka', 1.0): 2, ('sellyourcarin', 1.0): 2, ('5word', 1.0): 2, ('chaloniklo', 1.0): 2, ('pic.twitter.com/jxz2lbv6o', 1.0): 1, ("paperwhite'", 1.0): 1, ('laser-lik', 1.0): 1, ('focu', 1.0): 1, ('ghost', 1.0): 3, ('tagsforlikesapp', 1.0): 2, ('instagood', 1.0): 2, ('tbt', 1.0): 1, ('socket', 1.0): 1, ('spanner', 1.0): 1, ('😴', 1.0): 1, ('pglcsgo', 1.0): 1, ('x2', 1.0): 1, ('tend', 1.0): 1, ('crave', 1.0): 1, ('slower', 1.0): 1, ('sjw', 1.0): 1, ('cakehamp', 1.0): 1, ('glow', 1.0): 2, ('yayyy', 1.0): 1, ('merced', 1.0): 1, ('hood', 1.0): 1, ('badg', 1.0): 1, ('host', 1.0): 1, ('drone', 1.0): 1, ('blow', 1.0): 1, ('ignor', 1.0): 1, ('retali', 1.0): 1, ('bolling', 1.0): 1, ("where'", 1.0): 1, ('denmark', 1.0): 1, ('whitey', 1.0): 1, ('cultur', 1.0): 2, ('course', 1.0): 1, ('intro', 1.0): 2, ('graphicdesign', 1.0): 1, ('videograph', 1.0): 1, ('space', 1.0): 2, ("ted'", 1.0): 1, ('bogu', 1.0): 1, ('1000', 1.0): 1, ('hahahaaah', 1.0): 1, ('owli', 1.0): 1, ('afternon', 1.0): 1, ('whangarei', 1.0): 1, ('kati', 1.0): 2, ('paulin', 1.0): 1, ('traffick', 1.0): 1, ('wors', 1.0): 3, ('henc', 1.0): 1, ('express', 1.0): 1, ('wot', 1.0): 1, ('hand-lett', 1.0): 1, ('roof', 1.0): 1, ('eas', 1.0): 1, ('2/2', 1.0): 1, ('sour', 1.0): 1, ('dough', 1.0): 1, ('egypt', 1.0): 1, ('hubbi', 1.0): 2, ('sakin', 1.0): 1, ('six', 1.0): 1, ('christma', 1.0): 2, ('avril', 1.0): 1, ('n04j', 1.0): 1, ('25', 1.0): 1, ('prosecco', 1.0): 1, ('pech', 1.0): 1, ('micro', 1.0): 1, ('catspj', 1.0): 1, ('4:15', 1.0): 1, ('lazyweekend', 1.0): 1, ('overdu', 1.0): 1, ('mice', 1.0): 1, ('💃', 1.0): 3, ('jurass', 1.0): 1, ('ding', 1.0): 1, ('nila', 1.0): 1, ('8)', 1.0): 1, ('cooki', 1.0): 1, ('shir', 1.0): 1, ('0', 1.0): 3, ('hale', 1.0): 1, ('cheshir', 1.0): 1, ('decor', 1.0): 1, ('lemm', 1.0): 2, ('rec', 1.0): 1, ('ingat', 1.0): 1, ('din', 1.0): 2, ('mono', 1.0): 1, ('kathryn', 1.0): 1, ('jr', 1.0): 1, ('hsr', 1.0): 1, ('base', 1.0): 3, ('major', 1.0): 1, ('sugarrush', 1.0): 1, ('knit', 1.0): 1, ('partli', 1.0): 1, ('homegirl', 1.0): 1, ('nanci', 1.0): 1, ('fenja', 1.0): 1, ('aapk', 1.0): 1, ('benchmark', 1.0): 1, ('ke', 1.0): 1, ('hisaab', 1.0): 1, ('ho', 1.0): 1, ('gaya', 1.0): 1, ('ofc', 1.0): 1, ('rtss', 1.0): 1, ('hwait', 1.0): 1, ('titanfal', 1.0): 1, ('xbox', 1.0): 2, ('ultim', 1.0): 2, ('gastronomi', 1.0): 1, ('newblogpost', 1.0): 1, ('foodiefriday', 1.0): 1, ('foodi', 1.0): 1, ('yoghurt', 
1.0): 1, ('pancak', 1.0): 2, ('sabah', 1.0): 3, ('kapima', 1.0): 1, ('gelen', 1.0): 1, ('guzel', 1.0): 1, ('bir', 1.0): 1, ('hediy', 1.0): 1, ('thanx', 1.0): 1, ('💞', 1.0): 2, ('visa', 1.0): 1, ('parisa', 1.0): 1, ('epiphani', 1.0): 1, ('lit', 1.0): 1, ('em-con', 1.0): 1, ('swore', 1.0): 1, ('0330 333 7234', 1.0): 1, ('kianweareproud', 1.0): 1, ('distract', 1.0): 1, ('dayofarch', 1.0): 1, ('10-20', 1.0): 1, ('bapu', 1.0): 1, ('ivypowel', 1.0): 1, ('newmus', 1.0): 1, ('sexchat', 1.0): 1, ('🍅', 1.0): 1, ('pathway', 1.0): 1, ('balkan', 1.0): 1, ('gypsi', 1.0): 1, ('mayhem', 1.0): 1, ('burek', 1.0): 1, ('meat', 1.0): 1, ('gibanica', 1.0): 1, ('pie', 1.0): 1, ('surrey', 1.0): 1, ('afterward', 1.0): 1, ('10.30', 1.0): 1, ('tempor', 1.0): 1, ('void', 1.0): 1, ('stem', 1.0): 1, ('sf', 1.0): 1, ('ykr', 1.0): 1, ('sparki', 1.0): 1, ('40mm', 1.0): 1, ('3.5', 1.0): 1, ('gr', 1.0): 1, ('rockfish', 1.0): 1, ('topwat', 1.0): 1, ('twitlong', 1.0): 1, ('me.so', 1.0): 1, ('jummah', 1.0): 3, ('durood', 1.0): 1, ('pak', 1.0): 1, ('cjradacomateada', 1.0): 2, ('supris', 1.0): 1, ('debut', 1.0): 1, ('shipper', 1.0): 1, ('asid', 1.0): 1, ('housem', 1.0): 1, ('737bigatingconcert', 1.0): 1, ('jedzjabłka', 1.0): 1, ('pijjabłka', 1.0): 1, ('polish', 1.0): 1, ('cider', 1.0): 1, ('mustread', 1.0): 1, ('cricket', 1.0): 1, ('5pm', 1.0): 1, ('queri', 1.0): 2, ('abbi', 1.0): 1, ('sumedh', 1.0): 1, ('sunnah', 1.0): 2, ('عن', 1.0): 2, ('quad', 1.0): 1, ('bike', 1.0): 1, ('carri', 1.0): 2, ('proprieti', 1.0): 1, ('chronic', 1.0): 1, ('superday', 1.0): 1, ('chocolatey', 1.0): 1, ('yasu', 1.0): 1, ('ooooh', 1.0): 1, ('hallo', 1.0): 2, ('dylan', 1.0): 2, ('laura', 1.0): 1, ('patric', 1.0): 2, ('keepin', 1.0): 1, ('mohr', 1.0): 1, ('guest', 1.0): 1, ("o'neal", 1.0): 1, ('tk', 1.0): 1, ('lua', 1.0): 1, ('stone', 1.0): 2, ('quicker', 1.0): 1, ('diet', 1.0): 1, ('sosweet', 1.0): 1, ('nominier', 1.0): 1, ('und', 1.0): 1, ('hardcor', 1.0): 1, ('😌', 1.0): 1, ('ff__special', 1.0): 1, ('acha', 1.0): 2, ('banda', 1.0): 1, ('✌', 1.0): 2, ('bhi', 1.0): 2, ('krta', 1.0): 1, ('beautifully-craft', 1.0): 1, ('mockingbird', 1.0): 1, ('diploma', 1.0): 1, ('blend', 1.0): 3, ('numbero', 1.0): 1, ('lolz', 1.0): 1, ('ambros', 1.0): 1, ('gwinett', 1.0): 1, ('bierc', 1.0): 1, ('ravag', 1.0): 1, ('illadvis', 1.0): 1, ('marriag', 1.0): 1, ('stare', 1.0): 1, ('cynic', 1.0): 2, ('yahuda', 1.0): 1, ('nosmet', 1.0): 1, ('poni', 1.0): 1, ('cuuut', 1.0): 1, ("f'ing", 1.0): 1, ('vacant', 1.0): 1, ('hauc', 1.0): 1, ('lovesss', 1.0): 1, ('hiss', 1.0): 1, ('overnight', 1.0): 1, ('cornish', 1.0): 1, ('all-clear', 1.0): 1, ('raincoat', 1.0): 1, ('measur', 1.0): 1, ('wealth', 1.0): 1, ('invest', 1.0): 2, ('garbi', 1.0): 1, ('wash', 1.0): 2, ('refuel', 1.0): 1, ('dunedin', 1.0): 1, ('kall', 1.0): 1, ('rakhi', 1.0): 1, ('12th', 1.0): 2, ('repres', 1.0): 3, ('slovenia', 1.0): 1, ('fridg', 1.0): 2, ('ludlow', 1.0): 1, ('28th', 1.0): 1, ('selway', 1.0): 1, ('submit', 1.0): 1, ('spanish', 1.0): 2, ('90210', 1.0): 1, ('oitnb', 1.0): 1, ('prepar', 1.0): 3, ('condit', 1.0): 1, ('msged', 1.0): 1, ('chiquito', 1.0): 1, ('ohaha', 1.0): 1, ('delhi', 1.0): 1, ('95', 1.0): 1, ('webtogsaward', 1.0): 1, ('grace', 1.0): 2, ('sheffield', 1.0): 1, ('tramlin', 1.0): 1, ('tl', 1.0): 2, ('hack', 1.0): 1, ('lad', 1.0): 1, ('beeepin', 1.0): 1, ('duper', 1.0): 1, ('handl', 1.0): 1, ('critiqu', 1.0): 1, ('contectu', 1.0): 1, ('ultor', 1.0): 2, ('mamaya', 1.0): 1, ('loiyal', 1.0): 1, ('para', 1.0): 1, ('truthfulwordsof', 1.0): 1, ('beanatividad', 1.0): 1, ('nknkkpagpapakumbaba', 1.0): 1, 
('birthdaypres', 1.0): 1, ('compliment', 1.0): 1, ('swerv', 1.0): 1, ('goodtim', 1.0): 1, ('sinist', 1.0): 1, ('scare', 1.0): 1, ('tryna', 1.0): 1, ('anonym', 1.0): 1, ('dipsatch', 1.0): 1, ('aunt', 1.0): 1, ('dagga', 1.0): 1, ('burket', 1.0): 1, ('2am', 1.0): 1, ('twine', 1.0): 1, ("diane'", 1.0): 1, ('happybirthday', 1.0): 1, ('thanksss', 1.0): 1, ('randomli', 1.0): 1, ('buckinghampalac', 1.0): 1, ('chibi', 1.0): 1, ('maker', 1.0): 1, ('timog', 1.0): 1, ('18th', 1.0): 1, ('otw', 1.0): 1, ('kami', 1.0): 1, ('feelinggood', 1.0): 1, ('demand', 1.0): 2, ('naman', 1.0): 1, ('barkin', 1.0): 1, ('yeap', 1.0): 2, ('onkey', 1.0): 1, ('umma', 1.0): 1, ('pervert', 1.0): 1, ('onyu', 1.0): 1, ('appa', 1.0): 1, ('luci', 1.0): 1, ('horribl', 1.0): 1, ('quantum', 1.0): 1, ('greater', 1.0): 1, ('blockchain', 1.0): 1, ('nowplay', 1.0): 1, ('loftey', 1.0): 1, ('routt', 1.0): 1, ('assia', 1.0): 1, ('.\n.\n.', 1.0): 1, ('joint', 1.0): 1, ('futurereleas', 1.0): 1, ("look'", 1.0): 1, ('scari', 1.0): 1, ('murder', 1.0): 1, ('mysteri', 1.0): 1, ('comma', 1.0): 1, ("j'", 1.0): 1, ('hunni', 1.0): 2, ('diva', 1.0): 1, ('emili', 1.0): 3, ('nathan', 1.0): 1, ('medit', 1.0): 1, ('alumni', 1.0): 1, ('mba', 1.0): 1, ('foto', 1.0): 1, ('what-is-your-fashion', 1.0): 1, ('lorenangel', 1.0): 1, ('kw', 1.0): 2, ('tellanoldjokeday', 1.0): 1, ('reqd', 1.0): 1, ('specul', 1.0): 1, ('consist', 1.0): 4, ('tropic', 1.0): 1, ('startupph', 1.0): 1, ('zodiac', 1.0): 1, ('rapunzel', 1.0): 1, ('therver', 1.0): 1, ('85552', 1.0): 1, ('bestoftheday', 1.0): 1, ('oralsex', 1.0): 1, ('carli', 1.0): 1, ('happili', 1.0): 1, ('contract', 1.0): 1, ('matsu_bouzu', 1.0): 1, ('sonic', 1.0): 2, ('videogam', 1.0): 1, ('harana', 1.0): 1, ('belfast', 1.0): 1, ('danni', 1.0): 1, ('rare', 1.0): 1, ('sponsorship', 1.0): 1, ('aswel', 1.0): 1, ('gigi', 1.0): 1, ('nick', 1.0): 1, ('austin', 1.0): 1, ('youll', 1.0): 1, ('weak', 1.0): 4, ('10,000', 1.0): 1, ('bravo', 1.0): 1, ('iamamonst', 1.0): 1, ('rxthedailysurveyvot', 1.0): 1, ('broke', 1.0): 1, ('ass', 1.0): 1, ('roux', 1.0): 1, ('walkin', 1.0): 1, ('audienc', 1.0): 2, ('pfb', 1.0): 1, ('jute', 1.0): 1, ('walangmakakapigilsakin', 1.0): 1, ('lori', 1.0): 1, ('ehm', 1.0): 1, ('trick', 1.0): 1, ('baekhyun', 1.0): 1, ('eyesmil', 1.0): 1, ('borrow', 1.0): 1, ('knive', 1.0): 1, ('thek', 1.0): 1, ('eventu', 1.0): 1, ('reaapear', 1.0): 1, ('kno', 1.0): 1, ('whet', 1.0): 1, ('gratti', 1.0): 1, ('shorter', 1.0): 1, ('tweetin', 1.0): 1, ('inshallah', 1.0): 1, ('banana', 1.0): 1, ('raspberri', 1.0): 2, ('healthylifestyl', 1.0): 1, ('aint', 1.0): 2, ('skate', 1.0): 1, ('analyz', 1.0): 1, ('varieti', 1.0): 1, ('4:13', 1.0): 1, ('insomnia', 1.0): 1, ('medic', 1.0): 1, ('opposit', 1.0): 1, ('everlast', 1.0): 1, ('yoga', 1.0): 1, ('massag', 1.0): 2, ('osteopath', 1.0): 1, ('trainer', 1.0): 1, ('sharm', 1.0): 1, ('al_master_band', 1.0): 1, ('tbc', 1.0): 1, ('unives', 1.0): 1, ('architectur', 1.0): 1, ('random', 1.0): 1, ('isnt', 1.0): 1, ('typo', 1.0): 1, ('snark', 1.0): 1, ('lession', 1.0): 1, ('drunk', 1.0): 1, ('bruuh', 1.0): 1, ('2week', 1.0): 1, ('50europ', 1.0): 1, ('🇫', 1.0): 4, ('🇷', 1.0): 4, ('iov', 1.0): 1, ('accord', 1.0): 1, ('mne', 1.0): 1, ('pchelok', 1.0): 1, ('ja', 1.0): 1, ('=:', 1.0): 2, ('sweetest', 1.0): 1, ('comet', 1.0): 1, ('ahah', 1.0): 1, ('candi', 1.0): 2, ('axio', 1.0): 1, ('rabbit', 1.0): 2, ('nutshel', 1.0): 1, ('taken', 1.0): 1, ('letshavecocktailsafternuclai', 1.0): 1, ('malik', 1.0): 1, ('umair', 1.0): 1, ('canon', 1.0): 1, ('gang', 1.0): 1, ('grind', 1.0): 1, ('thoracicbridg', 1.0): 1, 
('5minut', 1.0): 1, ('nonscript', 1.0): 1, ('password', 1.0): 1, ('shoshannavassil', 1.0): 1, ('addmeonsnapchat', 1.0): 1, ('dmme', 1.0): 1, ('mpoint', 1.0): 2, ('soph', 1.0): 1, ('anot', 1.0): 1, ('liao', 1.0): 2, ('ord', 1.0): 1, ('lor', 1.0): 1, ('sibei', 1.0): 1, ('xialan', 1.0): 1, ('thnx', 1.0): 1, ('malfunct', 1.0): 1, ('clown', 1.0): 1, ('joker', 1.0): 1, ('\U000fec00', 1.0): 1, ('nigth', 1.0): 1, ('estoy', 1.0): 1, ('escuchando', 1.0): 1, ('elsewher', 1.0): 1, ('bipolar', 1.0): 1, ('hahahahahahahahahahahahahaha', 1.0): 1, ('yoohoo', 1.0): 1, ('bajrangibhaijaanstorm', 1.0): 1, ('superhappi', 1.0): 1, ('doll', 1.0): 1, ('energi', 1.0): 1, ('f', 1.0): 3, ("m'dear", 1.0): 1, ('emma', 1.0): 2, ('alrd', 1.0): 1, ('dhan', 1.0): 2, ('satguru', 1.0): 1, ('tera', 1.0): 1, ('aasra', 1.0): 1, ('pita', 1.0): 1, ('keeo', 1.0): 1, ('darl', 1.0): 2, ('akarshan', 1.0): 1, ('sweetpea', 1.0): 1, ('gluten', 1.0): 1, ('pastri', 1.0): 2, ('highfiv', 1.0): 1, ('artsi', 1.0): 1, ('verbal', 1.0): 1, ('kaaa', 1.0): 1, ('oxford', 1.0): 2, ('wahoo', 1.0): 1, ('anchor', 1.0): 1, ('partnership', 1.0): 1, ('robbenisland', 1.0): 1, ('whale', 1.0): 1, ('aquat', 1.0): 1, ('safari', 1.0): 1, ('garru', 1.0): 1, ('liara', 1.0): 1, ('appoint', 1.0): 1, ('burnley', 1.0): 1, ('453', 1.0): 1, ('110', 1.0): 2, ('49', 1.0): 1, ('footbal', 1.0): 1, ('fm15', 1.0): 1, ('fmfamili', 1.0): 1, ('aamir', 1.0): 1, ('difficult', 1.0): 1, ('medium', 1.0): 1, ('nva', 1.0): 1, ('minuet', 1.0): 1, ('gamec', 1.0): 1, ('headrest', 1.0): 1, ('pit', 1.0): 1, ('spoken', 1.0): 1, ('advis', 1.0): 1, ('paypoint', 1.0): 1, ('deepthroat', 1.0): 1, ('truli', 1.0): 3, ('bee', 1.0): 2, ('upward', 1.0): 1, ('bound', 1.0): 1, ('movingonup', 1.0): 1, ('aitor', 1.0): 1, ('sn', 1.0): 1, ('ps4', 1.0): 2, ('jawad', 1.0): 1, ('presal', 1.0): 1, ('betcha', 1.0): 1, ('dumb', 1.0): 2, ('butt', 1.0): 1, ('qualki', 1.0): 1, ('808', 1.0): 1, ('milf', 1.0): 1, ('4like', 1.0): 1, ('sexysaturday', 1.0): 1, ('vw', 1.0): 1, ('umpfff', 1.0): 1, ('ca', 1.0): 1, ('domg', 1.0): 1, ('nanti', 1.0): 1, ('difollow', 1.0): 1, ('stubborn', 1.0): 1, ('nothavingit', 1.0): 1, ('klee', 1.0): 1, ('hem', 1.0): 1, ('congrad', 1.0): 1, ('accomplish', 1.0): 1, ('kfcroleplay', 1.0): 3, ('tregaron', 1.0): 1, ('boar', 1.0): 1, ('sweati', 1.0): 1, ('glyon', 1.0): 1, ('🚮', 1.0): 1, ("tee'", 1.0): 1, ('johnni', 1.0): 1, ('utub', 1.0): 1, ("video'", 1.0): 1, ('loss', 1.0): 1, ('combin', 1.0): 2, ('pigeon', 1.0): 1, ('fingerscross', 1.0): 1, ('photobomb', 1.0): 1, ('90', 1.0): 1, ('23', 1.0): 1, ('gimm', 1.0): 1, ('definetli', 1.0): 1, ('exit', 1.0): 1, ('bom-dia', 1.0): 1, ('apod', 1.0): 1, ('ultraviolet', 1.0): 1, ('m31', 1.0): 1, ('jul', 1.0): 1, ('oooh', 1.0): 1, ('yawn', 1.0): 1, ('ftw', 1.0): 1, ('maman', 1.0): 1, ('afterznoon', 1.0): 1, ('tweeep', 1.0): 1, ('abp', 1.0): 2, ('kiya', 1.0): 1, ('van', 1.0): 1, ('olymp', 1.0): 1, ('😷', 1.0): 1, ('classi', 1.0): 1, ('attach', 1.0): 1, ('equip', 1.0): 1, ('bobbl', 1.0): 1, ('anu', 1.0): 1, ('mh3', 1.0): 1, ('patch', 1.0): 1, ('psp', 1.0): 1, ('huffpost', 1.0): 1, ('tribut', 1.0): 1, ('h_eartshapedbox', 1.0): 1, ('magictrikband', 1.0): 1, ('magictrik', 1.0): 2, ('roommat', 1.0): 1, ('tami', 1.0): 1, ('b3dk', 1.0): 1, ('7an', 1.0): 1, ('ank', 1.0): 1, ('purpos', 1.0): 1, ('struggl', 1.0): 1, ('eagl', 1.0): 1, ('oceana', 1.0): 1, ('idk', 1.0): 3, ('med', 1.0): 1, ('fridayfauxpa', 1.0): 1, ('subtl', 1.0): 1, ('hint', 1.0): 1, ('prim', 1.0): 1, ('algorithm', 1.0): 1, ('iii', 1.0): 1, ('rosa', 1.0): 1, ('yvw', 1.0): 1, ('here', 1.0): 1, ('boost', 
1.0): 1, ('unforgett', 1.0): 1, ('humor', 1.0): 1, ("mum'", 1.0): 1, ('hahahhaah', 1.0): 1, ('sombrero', 1.0): 1, ('lost', 1.0): 2, ('spammer', 1.0): 1, ('proceed', 1.0): 1, ('entertain', 1.0): 1, ('100k', 1.0): 1, ('mileston', 1.0): 1, ('judith', 1.0): 1, ('district', 1.0): 1, ('council', 1.0): 1, ('midar', 1.0): 1, ('gender', 1.0): 1, ('ilysm', 1.0): 1, ('zen', 1.0): 1, ('neat', 1.0): 1, ('rider', 1.0): 1, ('fyi', 1.0): 1, ('dig', 1.0): 2, ('👱', 1.0): 1, ('👽', 1.0): 1, ('🌳', 1.0): 1, ('suspici', 1.0): 1, ('calori', 1.0): 1, ('harder', 1.0): 1, ('jessica', 1.0): 1, ('carina', 1.0): 1, ('francisco', 1.0): 1, ('teret', 1.0): 1, ('potassium', 1.0): 1, ('rehydr', 1.0): 1, ('drinkitallup', 1.0): 1, ('thirstquench', 1.0): 1, ('tapir', 1.0): 1, ('calf', 1.0): 1, ('mealtim', 1.0): 1, ('uhc', 1.0): 1, ('scale', 1.0): 1, ('network', 1.0): 1, ('areal', 1.0): 1, ('extremesport', 1.0): 1, ('quadbik', 1.0): 1, ('bloggersrequir', 1.0): 1, ('bloggersw', 1.0): 1, ('brainer', 1.0): 1, ('mse', 1.0): 1, ('fund', 1.0): 1, ('nooowww', 1.0): 1, ('lile', 1.0): 1, ('tid', 1.0): 1, ('tmi', 1.0): 1, ('deploy', 1.0): 1, ('jule', 1.0): 1, ('betti', 1.0): 1, ('hddc', 1.0): 1, ('salman', 1.0): 1, ('pthht', 1.0): 1, ('lfc', 1.0): 3, ('tope', 1.0): 1, ('xxoo', 1.0): 2, ('russia', 1.0): 2, ('silver-wash', 1.0): 1, ('fritillari', 1.0): 1, ('moon', 1.0): 1, ('ap', 1.0): 2, ('trash', 1.0): 2, ('clever', 1.0): 1, ("thank'", 1.0): 1, ('keven', 1.0): 1, ('pastim', 1.0): 1, ('ashramcal', 1.0): 1, ('ontrack', 1.0): 1, ('german', 1.0): 1, ('subtitl', 1.0): 1, ('pinter', 1.0): 1, ('morninggg', 1.0): 1, ('🐶', 1.0): 1, ('pete', 1.0): 1, ('awesome-o', 1.0): 1, ('multipl', 1.0): 1, ('cya', 1.0): 1, ('harrog', 1.0): 1, ('jet', 1.0): 1, ('supplier', 1.0): 1, ('req', 1.0): 1, ('fridayloug', 1.0): 1, ('4thstreetmus', 1.0): 1, ('hawaii', 1.0): 1, ('kick', 1.0): 1, ('deepli', 1.0): 1, ('[email protected]', 1.0): 1, ('thousand', 1.0): 2, ('newspap', 1.0): 1, ('lew', 1.0): 1, ('nah', 1.0): 1, ('fallout', 1.0): 2, ('technic', 1.0): 1, ('gunderson', 1.0): 1, ('europa', 1.0): 1, ('thoroughli', 1.0): 1, ('script', 1.0): 1, ('overtak', 1.0): 1, ('motorway', 1.0): 1, ('thu', 1.0): 1, ('niteflirt', 1.0): 1, ('hbu', 1.0): 2, ('bowl', 1.0): 1, ('chri', 1.0): 2, ('niall', 1.0): 2, ('94', 1.0): 1, ('ik', 1.0): 1, ('stydia', 1.0): 1, ('nawazuddin', 1.0): 1, ('siddiqu', 1.0): 1, ('nomnomnom', 1.0): 1, ('dukefreebiefriday', 1.0): 1, ('z', 1.0): 1, ('insyaallah', 1.0): 1, ('ham', 1.0): 1, ('villa', 1.0): 1, ('brum', 1.0): 1, ('deni', 1.0): 1, ('vagina', 1.0): 1, ('rli', 1.0): 1, ('izzi', 1.0): 1, ('mitch', 1.0): 1, ('minn', 1.0): 1, ('recently.websit', 1.0): 1, ('coolingtow', 1.0): 1, ('soon.thank', 1.0): 1, ('showinginterest', 1.0): 1, ('multicolor', 1.0): 1, ('wid', 1.0): 1, ('wedg', 1.0): 1, ('motiv', 1.0): 1, ('nnnnot', 1.0): 1, ("gf'", 1.0): 1, ('bluesidemenxix', 1.0): 1, ('ardent', 1.0): 1, ('mooorn', 1.0): 1, ('wuppert', 1.0): 1, ('fridayfunday', 1.0): 1, ('re-sign', 1.0): 1, ('chalkhil', 1.0): 1, ('midday', 1.0): 1, ('carter', 1.0): 1, ('remedi', 1.0): 1, ('atrack', 1.0): 1, ('christ', 1.0): 1, ('badminton', 1.0): 1, ("littl'un", 1.0): 1, ('ikprideofpak', 1.0): 1, ('janjua', 1.0): 1, ('pimpl', 1.0): 1, ('forehead', 1.0): 1, ('volcano', 1.0): 1, ('mag', 1.0): 1, ('miryenda', 1.0): 1, ("technology'", 1.0): 1, ('touchétoday', 1.0): 1, ('idownload', 1.0): 1, ('25ish', 1.0): 1, ('snowbal', 1.0): 1, ('nd', 1.0): 1, ('expir', 1.0): 1, ('6gb', 1.0): 1, ('loveu', 1.0): 1, ('morefuninthephilippin', 1.0): 1, ('laho', 1.0): 1, ('caramoan', 1.0): 1, ('kareem', 
1.0): 1, ('surah', 1.0): 1, ('kahaf', 1.0): 1, ('melani', 1.0): 1, ('bosch', 1.0): 1, ('machin', 1.0): 1, ("week'", 1.0): 1, ('refollow', 1.0): 1, ('😎', 1.0): 1, ('💁', 1.0): 1, ('relaps', 1.0): 1, ('prada', 1.0): 2, ('punjabiswillgetit', 1.0): 1, ('hitter', 1.0): 1, ('mass', 1.0): 2, ('shoud', 1.0): 1, ('1:12', 1.0): 1, ('ughtm', 1.0): 1, ('545', 1.0): 1, ('kissm', 1.0): 1, ('likeforfollow', 1.0): 1, ('overwhelm', 1.0): 1, ('groupmat', 1.0): 1, ('75', 1.0): 2, ('kyunk', 1.0): 1, ('aitchison', 1.0): 1, ('curvi', 1.0): 1, ('mont', 1.0): 1, ('doa', 1.0): 1, ('header', 1.0): 1, ('speaker', 1.0): 3, ('avoid', 1.0): 1, ('laboratori', 1.0): 1, ('idc', 1.0): 1, ('fuckin', 1.0): 2, ('wooo', 1.0): 2, ('neobyt', 1.0): 1, ('pirat', 1.0): 1, ('takedown', 1.0): 1, ('indirag', 1.0): 1, ('judiciari', 1.0): 1, ('commit', 1.0): 4, ('govt', 1.0): 1, ('polici', 1.0): 1, ('rbi', 1.0): 1, ('similar', 1.0): 1, ("thought'", 1.0): 1, ('progress', 1.0): 1, ('transfer', 1.0): 1, ('gg', 1.0): 1, ('defenit', 1.0): 1, ('nofx', 1.0): 1, ('friskyfiday', 1.0): 1, ('yipee', 1.0): 1, ('shed', 1.0): 1, ('incent', 1.0): 1, ('vege', 1.0): 1, ('marin', 1.0): 1, ('gz', 1.0): 1, ('rajeev', 1.0): 1, ('hvng', 1.0): 1, ('funfil', 1.0): 1, ('friday.it', 1.0): 1, ('ws', 1.0): 1, ('reali', 1.0): 1, ('diff', 1.0): 1, ('kabir.fel', 1.0): 1, ('dresden', 1.0): 1, ('germani', 1.0): 1, ('plot', 1.0): 1, ('tdf', 1.0): 1, ('🍷', 1.0): 2, ('☀', 1.0): 2, ('🚲', 1.0): 2, ('minion', 1.0): 2, ('slot', 1.0): 1, ("b'day", 1.0): 1, ('isabella', 1.0): 1, ('okeyyy', 1.0): 1, ('vddd', 1.0): 1, (');', 1.0): 1, ('selfee', 1.0): 1, ('insta', 1.0): 1, ('🙆', 1.0): 1, ('🙌', 1.0): 1, ('😛', 1.0): 1, ('🐒', 1.0): 1, ('😝', 1.0): 1, ('hhahhaaa', 1.0): 1, ('jeez', 1.0): 1, ('teamcannib', 1.0): 1, ('teamspacewhalingisthebest', 1.0): 1, ('fitfa', 1.0): 1, ('identifi', 1.0): 1, ('pharmaci', 1.0): 1, ('verylaterealis', 1.0): 1, ('iwishiknewbett', 1.0): 1, ('satisfi', 1.0): 1, ('ess-aych-eye-te', 1.0): 1, ('supposedli', 1.0): 1, ('👍', 1.0): 1, ('immedi', 1.0): 1, ("foxy'", 1.0): 1, ('instrument', 1.0): 1, ('alon', 1.0): 2, ('goldcoast', 1.0): 1, ('lelomustfal', 1.0): 1, ('meal', 1.0): 1, ('5g', 1.0): 1, ('liker', 1.0): 1, ('newdress', 1.0): 1, ('resist', 1.0): 1, ('fot', 1.0): 1, ('troy', 1.0): 1, ('twitterfollowerswhatsup', 1.0): 1, ('happyfriedday', 1.0): 1, ('keepsafealway', 1.0): 1, ('loveyeah', 1.0): 1, ('emojasp_her', 1.0): 1, ('vanilla', 1.0): 1, ('sidemen', 1.0): 1, ('yaaayyy', 1.0): 1, ('friendaaa', 1.0): 1, ('bulb', 1.0): 5, ('corn', 1.0): 6, ('1tbps4', 1.0): 1, ('divin', 1.0): 1, ('wheeli', 1.0): 1, ('bin', 1.0): 1, ('ubericecream', 1.0): 1, ('messengerforaday', 1.0): 1, ('kyli', 1.0): 1, ('toilet', 1.0): 1, ('ikaw', 1.0): 1, ('musta', 1.0): 1, ('cheatmat', 1.0): 1, ('kyuhyun', 1.0): 1, ('ghanton', 1.0): 1, ('easy.get', 1.0): 1, ('5:30', 1.0): 1, ('therein', 1.0): 1, ('majalah', 1.0): 1, ('dominiqu', 1.0): 1, ('lamp', 1.0): 1, ('a-foot', 1.0): 1, ('revamp', 1.0): 1, ('brainchild', 1.0): 1, ('confid', 1.0): 1, ('confin', 1.0): 1, ('colorado', 1.0): 1, ('goodyear', 1.0): 1, ('upto', 1.0): 1, ('cashback', 1.0): 1, ('yourewelcom', 1.0): 1, ('nightli', 1.0): 1, ('simpin', 1.0): 1, ('sketchbook', 1.0): 1, ('4wild', 1.0): 1, ('colorpencil', 1.0): 1, ('cray', 1.0): 1, ('6:30', 1.0): 1, ('imma', 1.0): 3, ('ob', 1.0): 1, ('11h', 1.0): 1, ('kino', 1.0): 1, ('adult', 1.0): 1, ('kardamena', 1.0): 1, ('samo', 1.0): 1, ('greec', 1.0): 1, ('caesar', 1.0): 1, ('salad', 1.0): 1, ('tad', 1.0): 1, ('bland', 1.0): 1, ('respond', 1.0): 1, ('okk', 1.0): 1, ('den', 1.0): 1, ('allov', 
1.0): 1, ('hangout', 1.0): 1, ('whoever', 1.0): 1, ('tourist', 1.0): 1, ('♌', 1.0): 1, ('kutiyapanti', 1.0): 1, ('profession', 1.0): 1, ('boomshot', 1.0): 1, ('fuh', 1.0): 1, ('yeeey', 1.0): 1, ('donot', 1.0): 1, ('expos', 1.0): 1, ('lipstick', 1.0): 1, ('cran', 1.0): 1, ('prayr', 1.0): 1, ('හෙල', 1.0): 1, ('හවුල', 1.0): 1, ('onemochaonelov', 1.0): 1, ('southpaw', 1.0): 1, ('geniu', 1.0): 1, ('stroma', 1.0): 1, ('🔴', 1.0): 1, ('younow', 1.0): 1, ('jonah', 1.0): 1, ('jareddd', 1.0): 1, ('postcod', 1.0): 1, ('talkmobil', 1.0): 1, ('huha', 1.0): 1, ('transform', 1.0): 1, ('sword', 1.0): 3, ('misread', 1.0): 1, ('richard', 1.0): 1, ('ibiza', 1.0): 1, ('birthdaymoneyforjesusjuic', 1.0): 1, ('ytb', 1.0): 1, ('tutori', 1.0): 1, ('construct', 1.0): 2, ('critic', 1.0): 1, ('ganesha', 1.0): 1, ('textur', 1.0): 1, ('photographi', 1.0): 1, ('hinduism', 1.0): 1, ('hindugod', 1.0): 1, ('elephantgod', 1.0): 1, ('selfish', 1.0): 1, ('bboy', 1.0): 1, ('cardgam', 1.0): 1, ('pixelart', 1.0): 1, ('gamedesign', 1.0): 1, ('indiedev', 1.0): 1, ('pixel_daili', 1.0): 1, ('plateau', 1.0): 1, ('laguna', 1.0): 1, ('tha', 1.0): 4, ('bahot', 1.0): 1, ('baje', 1.0): 1, ('raat', 1.0): 1, ('liya', 1.0): 1, ('hath', 1.0): 1, ('ghant', 1.0): 1, ('itna', 1.0): 2, ('bana', 1.0): 1, ('paya', 1.0): 1, ('uta', 1.0): 1, ('manga', 1.0): 1, ('jamuna', 1.0): 1, ('\\:', 1.0): 1, ('swiftma', 1.0): 1, ('trion', 1.0): 1, ('forum', 1.0): 1, ('b-day', 1.0): 1, ('disgust', 1.0): 1, ('commodor', 1.0): 1, ('annabel', 1.0): 1, ('bridg', 1.0): 1, ('quest', 1.0): 1, ('borderland', 1.0): 1, ('wanderrook', 1.0): 1, ('gm', 1.0): 1, ('preciou', 1.0): 2, ('mizz', 1.0): 1, ('bleedgreen', 1.0): 1, ('sophia', 1.0): 1, ('chicago', 1.0): 1, ('honeymoon', 1.0): 1, ("da'esh", 1.0): 1, ('co-ord', 1.0): 1, ('fsa', 1.0): 1, ('estat', 1.0): 1, ("when'", 1.0): 1, ('dusti', 1.0): 1, ('tunisia', 1.0): 2, ("class'", 1.0): 1, ('irrit', 1.0): 1, ('fiverr', 1.0): 1, ('gina', 1.0): 1, ('soproud', 1.0): 1, ('enought', 1.0): 1, ('hole', 1.0): 1, ('melbourneburg', 1.0): 1, ('arianna', 1.0): 1, ('esai', 1.0): 1, ('rotterdam', 1.0): 1, ('jordi', 1.0): 1, ('clasi', 1.0): 1, ('horni', 1.0): 1, ('salon', 1.0): 1, ('bleach', 1.0): 1, ('olaplex', 1.0): 1, ('damag', 1.0): 1, ('teamwork', 1.0): 1, ('zitecofficestori', 1.0): 1, ('다쇼', 1.0): 1, ('colleagu', 1.0): 1, ('eb', 1.0): 1, ("t'would", 1.0): 1, ('tweetup', 1.0): 1, ('detect', 1.0): 1, ('jonathancreek', 1.0): 1, ('dvr', 1.0): 1, ('kat', 1.0): 1, ('rarer', 1.0): 1, ('okkk', 1.0): 1, ('frend', 1.0): 1, ('milt', 1.0): 1, ('mario', 1.0): 1, ('rewatch', 1.0): 1, ('1600', 1.0): 1, ('sige', 1.0): 1, ('punta', 1.0): 1, ('kayo', 1.0): 1, ('nooo', 1.0): 1, ('prompt', 1.0): 1, ('t-mobil', 1.0): 1, ('orang', 1.0): 1, ('ee', 1.0): 1, ('teapot', 1.0): 1, ('hotter', 1.0): 1, ('»', 1.0): 1, ('londoutrad', 1.0): 1, ('kal', 1.0): 1, ('wayward', 1.0): 1, ('pine', 1.0): 1, ('muscl', 1.0): 1, ('ilikeit', 1.0): 1, ('belong', 1.0): 1, ('watford', 1.0): 1, ('enterpris', 1.0): 1, ('cube', 1.0): 1, ('particp', 1.0): 1, ('saudi', 1.0): 1, ('arabia', 1.0): 1, ('recogn', 1.0): 1, ('fanbas', 1.0): 3, ('bailona', 1.0): 3, ('responsibilti', 1.0): 1, ('sunlight', 1.0): 1, ('tiger', 1.0): 1, ('elev', 1.0): 1, ('horror', 1.0): 1, ('bitchesss', 1.0): 1, ('shitti', 1.0): 1, ('squash', 1.0): 1, ('becca', 1.0): 1, ('delta', 1.0): 1, ('nut', 1.0): 1, ('yun', 1.0): 1, ('joe', 1.0): 1, ('dirt', 1.0): 1, ('sharon', 1.0): 1, ('medicin', 1.0): 1, ('ttyl', 1.0): 1, ('gav', 1.0): 1, ('linda', 1.0): 1, ('3hr', 1.0): 1, ('tym', 1.0): 2, ('dieback', 1.0): 1, ('endit', 1.0): 
1, ('minecon', 1.0): 1, ('sere', 1.0): 1, ('joerin', 1.0): 1, ('joshan', 1.0): 1, ('tandem', 1.0): 1, ('ligao', 1.0): 1, ('albay', 1.0): 1, ('bcyc', 1.0): 1, ('lnh', 1.0): 1, ('sat', 1.0): 1, ('honorari', 1.0): 1, ('alac', 1.0): 1, ('skelo_ghost', 1.0): 1, ('madadagdagan', 1.0): 1, ('bmc', 1.0): 1, ('11:11', 1.0): 2, ('embarrass', 1.0): 1, ('entropi', 1.0): 1, ('evolut', 1.0): 2, ('loop', 1.0): 1, ('eva', 1.0): 1, ('camden', 1.0): 1, ('uhh', 1.0): 1, ('scoup', 1.0): 1, ('jren', 1.0): 1, ('nuest', 1.0): 1, ('lovelayyy', 1.0): 1, ('kidney', 1.0): 1, ('neuer', 1.0): 1, ('spray', 1.0): 1, ('[email protected]', 1.0): 1, ('uni', 1.0): 1, ('uff', 1.0): 1, ('karhi', 1.0): 1, ('thi', 1.0): 1, ('juaquin', 1.0): 1, ('v3nzor99', 1.0): 1, ('shell', 1.0): 1, ('heyi', 1.0): 1, ('flavor', 1.0): 1, ('thakyou', 1.0): 1, ('beatriz', 1.0): 1, ('cancel', 1.0): 1, ('puff', 1.0): 1, ('egg', 1.0): 2, ('tart', 1.0): 1, ('chai', 1.0): 1, ('mtr', 1.0): 1, ('alyssa', 1.0): 1, ('rub', 1.0): 1, ('tummi', 1.0): 1, ('zelda', 1.0): 1, ('ive', 1.0): 1, ('🎂', 1.0): 1, ('jiva', 1.0): 1, ('🍹', 1.0): 1, ('🍻', 1.0): 1, ('mubbarak', 1.0): 1, ('deborah', 1.0): 1, ('coupon', 1.0): 1, ('colourdeb', 1.0): 1, ('purpl', 1.0): 1, ("chippy'", 1.0): 1, ('vessel', 1.0): 1, ('ps', 1.0): 2, ('vintag', 1.0): 1, ('✫', 1.0): 4, ('˚', 1.0): 4, ('·', 1.0): 4, ('✵', 1.0): 4, ('⊹', 1.0): 4, ('1710', 1.0): 1, ('gooffeanotter', 1.0): 1, ('kiksex', 1.0): 1, ('mugshot', 1.0): 1, ('token', 1.0): 1, ('maritimen', 1.0): 1, ('rh', 1.0): 1, ('tatton', 1.0): 1, ('jump_julia', 1.0): 1, ('malema', 1.0): 1, ('fren', 1.0): 1, ('nuf', 1.0): 1, ('teas', 1.0): 1, ('alien', 1.0): 2, ('closer', 1.0): 1, ('monitor', 1.0): 1, ('kimmi', 1.0): 1, ("channel'", 1.0): 1, ('planetbollywoodnew', 1.0): 1, ('epi', 1.0): 1, ('tricki', 1.0): 1, ('be-shak', 1.0): 1, ('chenoweth', 1.0): 1, ('oodl', 1.0): 1, ('hailey', 1.0): 1, ('craźi', 1.0): 1, ('sęxxxÿ', 1.0): 1, ('cøôl', 1.0): 1, ('runway', 1.0): 1, ('gooodnight', 1.0): 1, ('iv', 1.0): 1, ('ri', 1.0): 1, ('jayci', 1.0): 1, ('karaok', 1.0): 1, ('ltsw', 1.0): 1, ('giant', 1.0): 1, ('1709', 1.0): 1, ('refus', 1.0): 1, ('collagen', 1.0): 1, ('2win', 1.0): 1, ('hopetowin', 1.0): 1, ('inventori', 1.0): 1, ('loveforfood', 1.0): 1, ('foodforthought', 1.0): 1, ('thoughtfortheday', 1.0): 1, ('carp', 1.0): 1, ('diem', 1.0): 1, ('nath', 1.0): 1, ('ning', 1.0): 1, ('although', 1.0): 1, ('harm', 1.0): 1, ('stormi', 1.0): 1, ('sync', 1.0): 1, ('devic', 1.0): 1, ('mess', 1.0): 1, ('nylon', 1.0): 1, ('gvb', 1.0): 1, ('cd', 1.0): 1, ('mountain.titl', 1.0): 1, ('unto', 1.0): 1, ('theworldwouldchang', 1.0): 1, ('categori', 1.0): 1, ('mah', 1.0): 1, ('panel', 1.0): 1, ("i'am", 1.0): 1, ('80-1', 1.0): 1, ('1708', 1.0): 1, ('neenkin', 1.0): 1, ('masterpiec', 1.0): 1, ('debit', 1.0): 1, ('beagl', 1.0): 1, ('♫', 1.0): 1, ('feat', 1.0): 1, ('charli', 1.0): 1, ('puth', 1.0): 1, ('wiz', 1.0): 1, ('khalifa', 1.0): 1, ('svu', 1.0): 1, ('darker', 1.0): 1, ('berni', 1.0): 1, ('henri', 1.0): 1, ('trap', 1.0): 1, ('tommi', 1.0): 1, ("vivian'", 1.0): 1, ('transpar', 1.0): 1, ('bitcoin', 1.0): 1, ('insight', 1.0): 1, ('ping', 1.0): 1, ('masquerad', 1.0): 1, ('zorroreturm', 1.0): 1, ('1707', 1.0): 1, ('pk', 1.0): 1, ('hay', 1.0): 1, ('jacquelin', 1.0): 1, ('passion', 1.0): 1, ('full-fledg', 1.0): 1, ('workplac', 1.0): 1, ('venu', 1.0): 1, ('lago', 1.0): 1, ('luxord', 1.0): 1, ('potato', 1.0): 1, ('hundr', 1.0): 1, ('cite', 1.0): 1, ('academ', 1.0): 1, ('pokiri', 1.0): 1, ('1nenokkadin', 1.0): 1, ('heritag', 1.0): 1, ('wood', 1.0): 1, ('beleaf', 1.0): 1, 
('spnfamili', 1.0): 1, ('spn', 1.0): 1, ('alwayskeepfight', 1.0): 1, ('jaredpadalecki', 1.0): 1, ('jensenackl', 1.0): 1, ('peasant', 1.0): 2, ('ahahha', 1.0): 1, ('distant', 1.0): 1, ('shout-out', 1.0): 1, ('adulthood', 1.0): 1, ('hopeless', 0.0): 2, ('tmr', 0.0): 3, (':(', 0.0): 4571, ('everyth', 0.0): 17, ('kid', 0.0): 20, ('section', 0.0): 3, ('ikea', 0.0): 1, ('cute', 0.0): 43, ('shame', 0.0): 19, ("i'm", 0.0): 343, ('nearli', 0.0): 3, ('19', 0.0): 8, ('2', 0.0): 41, ('month', 0.0): 23, ('heart', 0.0): 27, ('slide', 0.0): 1, ('wast', 0.0): 5, ('basket', 0.0): 1, ('“', 0.0): 15, ('hate', 0.0): 57, ('japanes', 0.0): 4, ('call', 0.0): 29, ('bani', 0.0): 2, ('”', 0.0): 11, ('dang', 0.0): 2, ('start', 0.0): 44, ('next', 0.0): 40, ('week', 0.0): 56, ('work', 0.0): 133, ('oh', 0.0): 92, ('god', 0.0): 15, ('babi', 0.0): 47, ('face', 0.0): 20, ('make', 0.0): 102, ('smile', 0.0): 10, ('neighbour', 0.0): 1, ('motor', 0.0): 1, ('ask', 0.0): 29, ('said', 0.0): 33, ('updat', 0.0): 11, ('search', 0.0): 3, ('sialan', 0.0): 1, ('athabasca', 0.0): 2, ('glacier', 0.0): 2, ('1948', 0.0): 1, (':-(', 0.0): 493, ('jasper', 0.0): 1, ('jaspernationalpark', 0.0): 1, ('alberta', 0.0): 1, ('explorealberta', 0.0): 1, ('…', 0.0): 16, ('realli', 0.0): 131, ('good', 0.0): 101, ('g', 0.0): 8, ('idea', 0.0): 10, ('never', 0.0): 57, ('go', 0.0): 224, ('meet', 0.0): 31, ('mare', 0.0): 1, ('ivan', 0.0): 1, ('happi', 0.0): 25, ('trip', 0.0): 11, ('keep', 0.0): 34, ('safe', 0.0): 5, ('see', 0.0): 124, ('soon', 0.0): 45, ('tire', 0.0): 50, ('hahahah', 0.0): 3, ('knee', 0.0): 2, ('replac', 0.0): 4, ('get', 0.0): 232, ('day', 0.0): 149, ('ouch', 0.0): 3, ('relat', 0.0): 2, ('sweet', 0.0): 7, ('n', 0.0): 21, ('sour', 0.0): 2, ('kind', 0.0): 11, ('bi-polar', 0.0): 1, ('peopl', 0.0): 75, ('life', 0.0): 33, ('...', 0.0): 331, ('cuz', 0.0): 4, ('full', 0.0): 16, ('pleass', 0.0): 2, ('im', 0.0): 129, ('sure', 0.0): 31, ('tho', 0.0): 28, ('feel', 0.0): 158, ('stupid', 0.0): 8, ("can't", 0.0): 180, ('seem', 0.0): 15, ('grasp', 0.0): 1, ('basic', 0.0): 2, ('digit', 0.0): 8, ('paint', 0.0): 3, ('noth', 0.0): 26, ("i'v", 0.0): 77, ('research', 0.0): 1, ('help', 0.0): 54, ('lord', 0.0): 2, ('lone', 0.0): 9, ('someon', 0.0): 57, ('talk', 0.0): 45, ('guy', 0.0): 62, ('girl', 0.0): 28, ('assign', 0.0): 5, ('project', 0.0): 3, ('😩', 0.0): 14, ('want', 0.0): 246, ('play', 0.0): 48, ('video', 0.0): 23, ('game', 0.0): 28, ('watch', 0.0): 77, ('movi', 0.0): 24, ('choreograph', 0.0): 1, ('hard', 0.0): 35, ('email', 0.0): 10, ('link', 0.0): 12, ('still', 0.0): 124, ('say', 0.0): 63, ('longer', 0.0): 12, ('avail', 0.0): 13, ('cri', 0.0): 46, ('bc', 0.0): 50, ('miss', 0.0): 301, ('mingm', 0.0): 1, ('much', 0.0): 139, ('sorri', 0.0): 148, ('mom', 0.0): 13, ('far', 0.0): 18, ('away', 0.0): 28, ("we'r", 0.0): 30, ('truli', 0.0): 5, ('flight', 0.0): 6, ('friend', 0.0): 39, ('happen', 0.0): 51, ('sad', 0.0): 123, ('dog', 0.0): 17, ('pee', 0.0): 2, ('’', 0.0): 27, ('bag', 0.0): 8, ('take', 0.0): 49, ('newwin', 0.0): 1, ('15', 0.0): 10, ('doushit', 0.0): 1, ('late', 0.0): 27, ('suck', 0.0): 23, ('sick', 0.0): 43, ('plan', 0.0): 17, ('first', 0.0): 27, ('gundam', 0.0): 1, ('night', 0.0): 46, ('nope', 0.0): 6, ('dollar', 0.0): 1, ('😭', 0.0): 29, ('listen', 0.0): 18, ('back', 0.0): 122, ('old', 0.0): 16, ('show', 0.0): 26, ('know', 0.0): 131, ('weird', 0.0): 10, ('got', 0.0): 104, ('u', 0.0): 193, ('leav', 0.0): 42, ('might', 0.0): 11, ('give', 0.0): 36, ('pale', 0.0): 2, ('imit', 0.0): 1, ('went', 0.0): 32, ('sea', 0.0): 1, ('massiv', 0.0): 4, ('fuck', 0.0): 
58, ('rash', 0.0): 1, ('bodi', 0.0): 12, ('pain', 0.0): 21, ('thing', 0.0): 52, ('ever', 0.0): 30, ('home', 0.0): 63, ('hi', 0.0): 34, ('absent', 0.0): 1, ('gran', 0.0): 2, ('knew', 0.0): 6, ('care', 0.0): 20, ('tell', 0.0): 26, ('love', 0.0): 152, ('wish', 0.0): 91, ('would', 0.0): 70, ('sequel', 0.0): 1, ('busi', 0.0): 28, ('sa', 0.0): 15, ('school', 0.0): 32, ('time', 0.0): 166, ('yah', 0.0): 3, ('xx', 0.0): 18, ('ouucchhh', 0.0): 1, ('one', 0.0): 148, ('wisdom', 0.0): 2, ('teeth', 0.0): 6, ('come', 0.0): 91, ('frighten', 0.0): 1, ('case', 0.0): 6, ('pret', 0.0): 1, ('wkwkw', 0.0): 1, ('verfi', 0.0): 1, ('activ', 0.0): 6, ('forget', 0.0): 8, ('follow', 0.0): 262, ('member', 0.0): 6, ('thank', 0.0): 107, ('join', 0.0): 8, ('goodby', 0.0): 14, ('´', 0.0): 4, ('chain', 0.0): 1, ('—', 0.0): 26, ('sentir-s', 0.0): 1, ('incompleta', 0.0): 1, ('okay', 0.0): 38, ('..', 0.0): 108, ('wednesday', 0.0): 5, ('marvel', 0.0): 1, ('thwart', 0.0): 1, ('awh', 0.0): 3, ("what'", 0.0): 15, ('chanc', 0.0): 16, ('zant', 0.0): 1, ('need', 0.0): 106, ('someth', 0.0): 28, ('x', 0.0): 39, ("when'", 0.0): 1, ('birthday', 0.0): 23, ('worst', 0.0): 14, ('part', 0.0): 11, ('bad', 0.0): 73, ('audraesar', 0.0): 1, ('sushi', 0.0): 3, ('pic', 0.0): 15, ('tl', 0.0): 8, ('drive', 0.0): 16, ('craaazzyy', 0.0): 2, ('pop', 0.0): 3, ('like', 0.0): 228, ('helium', 0.0): 1, ('balloon', 0.0): 1, ('climatechang', 0.0): 5, ('cc', 0.0): 6, ("california'", 0.0): 1, ('power', 0.0): 6, ('influenti', 0.0): 1, ('air', 0.0): 3, ('pollut', 0.0): 1, ('watchdog', 0.0): 1, ('califor', 0.0): 1, ('elhaida', 0.0): 1, ('rob', 0.0): 2, ('juri', 0.0): 1, ('came', 0.0): 16, ('10th', 0.0): 1, ('televot', 0.0): 1, ('idaho', 0.0): 2, ('restrict', 0.0): 2, ('fish', 0.0): 2, ('despit', 0.0): 2, ('region', 0.0): 2, ('drought-link', 0.0): 1, ('die-of', 0.0): 1, ('abrupt', 0.0): 1, ('climat', 0.0): 1, ('chang', 0.0): 27, ('may', 0.0): 16, ('doom', 0.0): 2, ('mammoth', 0.0): 1, ('megafauna', 0.0): 1, ('sc', 0.0): 3, ("australia'", 0.0): 1, ('dirtiest', 0.0): 2, ('station', 0.0): 3, ('consid', 0.0): 5, ('clean', 0.0): 6, ('energi', 0.0): 3, ('biomass', 0.0): 1, ("ain't", 0.0): 5, ('easi', 0.0): 6, ('green', 0.0): 7, ('golf', 0.0): 1, ('cours', 0.0): 7, ('california', 0.0): 1, ('ulti', 0.0): 1, ('well', 0.0): 56, ('mine', 0.0): 12, ('gonna', 0.0): 51, ('sexi', 0.0): 14, ('prexi', 0.0): 1, ('kindergarten', 0.0): 1, ('hungri', 0.0): 19, ('cant', 0.0): 47, ('find', 0.0): 53, ('book', 0.0): 20, ('sane', 0.0): 1, ('liter', 0.0): 15, ('three', 0.0): 7, ('loung', 0.0): 1, ('event', 0.0): 4, ('turn', 0.0): 17, ('boss', 0.0): 5, ('hozier', 0.0): 1, ("that'", 0.0): 61, ('true', 0.0): 22, ('soooner', 0.0): 1, ('ahh', 0.0): 7, ('fam', 0.0): 3, ('respectlost', 0.0): 1, ('hypercholesteloremia', 0.0): 1, ('ok', 0.0): 33, ('look', 0.0): 100, ('gift', 0.0): 11, ('calibraska', 0.0): 1, ('actual', 0.0): 24, ('genuin', 0.0): 2, ('contend', 0.0): 1, ('head', 0.0): 23, ('alway', 0.0): 56, ('hurt', 0.0): 41, ('stay', 0.0): 24, ('lmao', 0.0): 13, ('older', 0.0): 5, ('sound', 0.0): 19, ('upset', 0.0): 11, ('infinit', 0.0): 10, ('ao', 0.0): 1, ('stick', 0.0): 1, ('8th', 0.0): 1, ('either', 0.0): 13, ('seriou', 0.0): 8, ('yun', 0.0): 1, ('eh', 0.0): 4, ('room', 0.0): 11, ('way', 0.0): 42, ('hot', 0.0): 15, ('havent', 0.0): 11, ('found', 0.0): 11, ('handsom', 0.0): 2, ('jack', 0.0): 3, ('draw', 0.0): 2, ('shit', 0.0): 36, ('cut', 0.0): 14, ('encor', 0.0): 4, ('4thwin', 0.0): 4, ('baymax', 0.0): 1, ('french', 0.0): 4, ('mixer', 0.0): 1, ('💜', 0.0): 6, ('wft', 0.0): 1, ('awesom', 0.0): 5, 
('replay', 0.0): 1, ('parti', 0.0): 15, ('promot', 0.0): 3, ('music', 0.0): 16, ('bank', 0.0): 9, ('short', 0.0): 11, ...}
[Output truncated: the dictionary continues with several thousand more (stemmed token, 0.0) -> count entries covering words, emoticons, emoji, hashtags, and multilingual tokens from the negative-labelled (0.0) tweets; the largest counts visible include ('pleas', 0.0): 275, ('today', 0.0): 108, and ('hope', 0.0): 102.]
('cologn', 0.0): 1, ('duo', 0.0): 3, ('ballad', 0.0): 2, ('bish', 0.0): 2, ('intern', 0.0): 2, ('brought', 0.0): 1, ('yumyum', 0.0): 1, ("cathy'", 0.0): 1, ('missyou', 0.0): 1, ('rubi', 0.0): 2, ('rose', 0.0): 2, ('tou', 0.0): 1, ('main', 0.0): 1, ('pora', 0.0): 1, ('stalk', 0.0): 3, ('karlia', 0.0): 1, ('khatam', 0.0): 2, ('bandi', 0.0): 1, ('👑', 0.0): 1, ('pyaari', 0.0): 1, ('gawd', 0.0): 1, ('understood', 0.0): 1, ('review', 0.0): 3, ('massi', 0.0): 1, ('thatselfiethough', 0.0): 1, ('loop', 0.0): 1, ('ofc', 0.0): 1, ('pict', 0.0): 1, ('caught', 0.0): 1, ('aishhh', 0.0): 1, ('viewer', 0.0): 1, ('exam', 0.0): 5, ('sighsss', 0.0): 1, ('burnt', 0.0): 2, ('toffe', 0.0): 2, ('honesti', 0.0): 1, ('cheatday', 0.0): 1, ('protein', 0.0): 1, ('sissi', 0.0): 1, ('tote', 0.0): 1, ('slowli', 0.0): 1, ('church', 0.0): 2, ('pll', 0.0): 1, ('sel', 0.0): 1, ('beth', 0.0): 2, ('serbia', 0.0): 1, ('serbian', 0.0): 1, ('selen', 0.0): 1, ('motav', 0.0): 1, ('💋', 0.0): 2, ('zayyyn', 0.0): 1, ('momma', 0.0): 1, ('happend', 0.0): 1, ('imper', 0.0): 1, ('trmdhesit', 0.0): 1, ('pana', 0.0): 1, ('quickest', 0.0): 2, ('blood', 0.0): 5, ('sake', 0.0): 1, ('hamstr', 0.0): 1, ('rodwel', 0.0): 1, ('trace', 0.0): 1, ('artist', 0.0): 4, ('tp', 0.0): 1, ('powder', 0.0): 1, ('wider', 0.0): 1, ('honestli', 0.0): 4, ('comfort', 0.0): 3, ('bruno', 0.0): 1, ('1.8', 0.0): 1, ('ed', 0.0): 7, ('croke', 0.0): 2, ('deal', 0.0): 6, ('toll', 0.0): 1, ('packag', 0.0): 1, ('shape', 0.0): 1, ('unluckiest', 0.0): 1, ('bettor', 0.0): 1, ('nstp', 0.0): 1, ('sem', 0.0): 2, ('chipotl', 0.0): 1, ('chick-fil-a', 0.0): 1, ('stole', 0.0): 3, ('evet', 0.0): 1, ('ramadhan', 0.0): 1, ('eid', 0.0): 4, ('stexpert', 0.0): 1, ('ripstegi', 0.0): 1, ('nickyyi', 0.0): 1, ('¿', 0.0): 1, ('centralis', 0.0): 1, ('discontinu', 0.0): 1, ('sniff', 0.0): 1, ("i't", 0.0): 1, ('glad', 0.0): 2, ('fab', 0.0): 2, ('theres', 0.0): 1, ('cred', 0.0): 1, ('t_t', 0.0): 1, ('elimin', 0.0): 1, ('teamzip', 0.0): 1, ('smtm', 0.0): 1, ('assingn', 0.0): 1, ('editi', 0.0): 1, ('nakaka', 0.0): 1, ('beastmod', 0.0): 1, ('gaaawd', 0.0): 1, ('jane', 0.0): 1, ('mango', 0.0): 1, ('colombia', 0.0): 1, ('yot', 0.0): 1, ('labyo', 0.0): 1, ('pano', 0.0): 1, ('nalamannn', 0.0): 1, ('hardhead', 0.0): 1, ('cell', 0.0): 1, ("zach'", 0.0): 1, ('burger', 0.0): 2, ('xpress', 0.0): 1, ('hopkin', 0.0): 1, ('melatonin', 0.0): 1, ('2-4', 0.0): 1, ('nap', 0.0): 2, ('wide', 0.0): 2, ('task', 0.0): 1, ('9pm', 0.0): 1, ('hahaah', 0.0): 1, ('frequent', 0.0): 1, ('jail', 0.0): 2, ('weirddd', 0.0): 1, ('donghyuk', 0.0): 1, ('stan', 0.0): 1, ('bek', 0.0): 1, ('13', 0.0): 4, ('reynoldsgrl', 0.0): 1, ('ole', 0.0): 1, ('beardi', 0.0): 1, ('kaussi', 0.0): 1, ('bummer', 0.0): 3, ('fightingmciren', 0.0): 1, ("michael'", 0.0): 1, ('�', 0.0): 21, ('miser', 0.0): 2, ('💦', 0.0): 1, ('yoga', 0.0): 2, ('🌞', 0.0): 1, ('💃', 0.0): 1, ('🏽', 0.0): 1, ('shouldv', 0.0): 1, ('saffron', 0.0): 1, ('peasant', 0.0): 1, ('wouldv', 0.0): 1, ('nfinit', 0.0): 1, ('admin_myung', 0.0): 1, ('slp', 0.0): 1, ('saddest', 0.0): 2, ('laomma', 0.0): 2, ('kebaya', 0.0): 1, ('bandung', 0.0): 1, ('indonesia', 0.0): 1, ('7df89150', 0.0): 1, ('whatsapp', 0.0): 2, ('62', 0.0): 1, ('08962464174', 0.0): 1, ('laomma_coutur', 0.0): 1, ('haizzz', 0.0): 1, ('urghhh', 0.0): 1, ('working-on-a-tight-schedul', 0.0): 1, ('ganbarimasu', 0.0): 1, ('livid', 0.0): 1, ('whammi', 0.0): 1, ('quuuee', 0.0): 1, ('friooo', 0.0): 1, ('ladi', 0.0): 4, ('stereo', 0.0): 1, ('chwang', 0.0): 1, ('lorm', 0.0): 1, ('823', 0.0): 1, ('rp', 0.0): 1, ('indiemus', 0.0): 10, 
('unhappi', 0.0): 2, ('jennyjean', 0.0): 1, ('elfindelmundo', 0.0): 2, ('lolzz', 0.0): 1, ('dat', 0.0): 4, ('corey', 0.0): 1, ('appreci', 0.0): 2, ('weekli', 0.0): 2, ('mahirap', 0.0): 1, ('nash', 0.0): 1, ('gosh', 0.0): 6, ('noodl', 0.0): 1, ('veeerri', 0.0): 1, ('rted', 0.0): 2, ('orig', 0.0): 1, ('starholicxx', 0.0): 1, ('07:17', 0.0): 2, ('@the', 0.0): 1, ('notr', 0.0): 1, ('hwi', 0.0): 1, ('niall', 0.0): 5, ('fraud', 0.0): 1, ('diplomaci', 0.0): 1, ('fittest', 0.0): 1, ('zero', 0.0): 1, ('toler', 0.0): 2, ('gurl', 0.0): 1, ('notion', 0.0): 1, ('pier', 0.0): 1, ('approach', 0.0): 1, ('rattl', 0.0): 1, ('robe', 0.0): 1, ('emphasi', 0.0): 1, ('vocal', 0.0): 1, ('chose', 0.0): 1, ('erm', 0.0): 1, ('abby.can', 0.0): 1, ('persuad', 0.0): 1, ('lyric', 0.0): 1, ("emily'", 0.0): 1, ('odd', 0.0): 3, ('possibl', 0.0): 8, ('elect', 0.0): 2, ('kamiss', 0.0): 1, ('mwa', 0.0): 1, ('mommi', 0.0): 3, ('scream', 0.0): 1, ('fight', 0.0): 2, ('cafe', 0.0): 2, ('melbourn', 0.0): 1, ('anyonnee', 0.0): 1, ('loner', 0.0): 1, ('fricken', 0.0): 2, ('rito', 0.0): 1, ('friendzon', 0.0): 1, ('panel', 0.0): 1, ('repeat', 0.0): 2, ('audienc', 0.0): 1, ('hsm', 0.0): 1, ('canario', 0.0): 1, ('hotel', 0.0): 8, ('ukiss', 0.0): 1, ('faith', 0.0): 2, ('kurt', 0.0): 1, ("fatma'm", 0.0): 1, ('alex', 0.0): 4, ('swag', 0.0): 1, ('lmfao', 0.0): 2, ('flapjack', 0.0): 1, ('countthecost', 0.0): 1, ('ihop', 0.0): 1, ('infra', 0.0): 1, ('lq', 0.0): 1, ('knive', 0.0): 1, ('sotir', 0.0): 1, ('mybrainneedstoshutoff', 0.0): 1, ('macci', 0.0): 1, ('chees', 0.0): 7, ('25', 0.0): 2, ('tend', 0.0): 1, ('510', 0.0): 1, ('silicon', 0.0): 1, ('cover', 0.0): 2, ('kbye', 0.0): 1, ('ini', 0.0): 1, ('anytim', 0.0): 1, ('citizen', 0.0): 1, ('compar', 0.0): 2, ('rank', 0.0): 1, ('mcountdown', 0.0): 2, ('5h', 0.0): 1, ('thapelo', 0.0): 1, ('op', 0.0): 1, ('civ', 0.0): 1, ('wooden', 0.0): 1, ('mic', 0.0): 1, ('embarrass', 0.0): 2, ('translat', 0.0): 3, ('daili', 0.0): 3, ('mecha-totem', 0.0): 1, ('nak', 0.0): 1, ('tgk', 0.0): 1, ('townsss', 0.0): 1, ('jokid', 0.0): 1, ('rent', 0.0): 2, ('degre', 0.0): 1, ('inconsider', 0.0): 2, ('softbal', 0.0): 1, ('appli', 0.0): 1, ('tomcat', 0.0): 1, ('chel', 0.0): 1, ('jemma', 0.0): 1, ('detail', 0.0): 4, ('list', 0.0): 4, ('matchi', 0.0): 2, ('elsa', 0.0): 1, ('postpon', 0.0): 1, ('karin', 0.0): 1, ('honey', 0.0): 2, ('vist', 0.0): 1, ('unhealthi', 0.0): 1, ('propa', 0.0): 1, ('knockin', 0.0): 1, ('bacon', 0.0): 1, ('market', 0.0): 2, ('pre-holiday', 0.0): 1, ('diet', 0.0): 1, ('meani', 0.0): 1, ('deathbybaconsmel', 0.0): 1, ('init', 0.0): 2, ('destin', 0.0): 1, ('victoria', 0.0): 2, ('luna', 0.0): 1, ('krystal', 0.0): 1, ('sarajevo', 0.0): 1, ('haix', 0.0): 2, ('sp', 0.0): 1, ('student', 0.0): 4, ('wii', 0.0): 2, ('bayonetta', 0.0): 1, ('101', 0.0): 1, ('doabl', 0.0): 1, ('drove', 0.0): 1, ('agenc', 0.0): 1, ('story.miss', 0.0): 1, ('everon', 0.0): 1, ('jp', 0.0): 1, ('mamabear', 0.0): 1, ('imintoh', 0.0): 1, ('underr', 0.0): 1, ("slovakia'", 0.0): 1, ('D:', 0.0): 6, ('saklap', 0.0): 1, ('grade', 0.0): 2, ('rizal', 0.0): 1, ('lib', 0.0): 1, ('discuss', 0.0): 1, ('advisori', 0.0): 1, ('period', 0.0): 2, ('dit', 0.0): 1, ('du', 0.0): 1, ('harsh', 0.0): 2, ('ohgod', 0.0): 1, ('abligaverin', 0.0): 2, ('photooftheday', 0.0): 2, ('sexygirlbypreciouslemmi', 0.0): 3, ('ripsandrabland', 0.0): 1, ('edel', 0.0): 1, ('salam', 0.0): 1, ('mubark', 0.0): 1, ('dong', 0.0): 3, ('tammirossm', 0.0): 4, ('speck', 0.0): 1, ('abbymil', 0.0): 2, ('18', 0.0): 8, ('ion', 0.0): 1, ('5min', 0.0): 1, ('hse', 0.0): 1, ('noob', 0.0): 1, 
('nxt', 0.0): 1, ('2week', 0.0): 1, ('300', 0.0): 3, ('fck', 0.0): 2, ('nae', 0.0): 2, ('deep', 0.0): 3, ('human', 0.0): 3, ('whit', 0.0): 1, ('van', 0.0): 4, ('bristol', 0.0): 1, ('subserv', 0.0): 1, ('si', 0.0): 4, ('oo', 0.0): 1, ('tub', 0.0): 1, ('penyfan', 0.0): 1, ('forecast', 0.0): 2, ('breconbeacon', 0.0): 1, ('tittheir', 0.0): 1, ('42', 0.0): 1, ('hotti', 0.0): 3, ('uu', 0.0): 2, ('rough', 0.0): 1, ('fuzzi', 0.0): 1, ('san', 0.0): 3, ('antonio', 0.0): 1, ('kang', 0.0): 1, ('junhe', 0.0): 1, ('couldv', 0.0): 1, ('pz', 0.0): 1, ('somerset', 0.0): 1, ('given', 0.0): 2, ('sunburnt', 0.0): 1, ('safer', 0.0): 1, ('k3g', 0.0): 1, ('input', 0.0): 1, ('gamestomp', 0.0): 1, ('desc', 0.0): 1, ("angelo'", 0.0): 1, ('yna', 0.0): 1, ('psygustokita', 0.0): 2, ('fiver', 0.0): 1, ('toward', 0.0): 1, ('sakho', 0.0): 1, ('threat', 0.0): 1, ('goalscor', 0.0): 1, ('10:59', 0.0): 1, ('11.00', 0.0): 1, ('sham', 0.0): 1, ('tricki', 0.0): 1, ('baao', 0.0): 1, ('nisrina', 0.0): 1, ('crazi', 0.0): 8, ('ladygaga', 0.0): 1, ("you'", 0.0): 2, ('pari', 0.0): 2, ('marrish', 0.0): 1, ("otp'", 0.0): 1, ('6:15', 0.0): 1, ('edomnt', 0.0): 1, ('qih', 0.0): 1, ('shxb', 0.0): 1, ('1000', 0.0): 1, ('chilton', 0.0): 1, ('mother', 0.0): 2, ('obsess', 0.0): 1, ('creepi', 0.0): 2, ('josh', 0.0): 1, ('boohoo', 0.0): 1, ('fellow', 0.0): 2, ('tweep', 0.0): 1, ('roar', 0.0): 1, ('victori', 0.0): 1, ('tweepsmatchout', 0.0): 1, ('nein', 0.0): 3, ('404', 0.0): 1, ('midnight', 0.0): 2, ('willlow', 0.0): 1, ('hbd', 0.0): 1, ('sowwi', 0.0): 1, ('3000', 0.0): 1, ('grind', 0.0): 1, ('gear', 0.0): 1, ('0.001', 0.0): 1, ('meant', 0.0): 6, ('portrait', 0.0): 1, ('mode', 0.0): 2, ('fact', 0.0): 4, ('11:11', 0.0): 4, ('shanzay', 0.0): 1, ('salabrati', 0.0): 1, ('journo', 0.0): 1, ('lure', 0.0): 1, ('gang', 0.0): 1, ('twist', 0.0): 1, ('mashaket', 0.0): 1, ('pet', 0.0): 2, ('bapak', 0.0): 1, ('royal', 0.0): 2, ('prima', 0.0): 1, ('mune', 0.0): 1, ('874', 0.0): 1, ('plisss', 0.0): 1, ('elf', 0.0): 1, ('teenchoic', 0.0): 5, ('choiceinternationalartist', 0.0): 5, ('superjunior', 0.0): 5, ("he'll", 0.0): 1, ('sunway', 0.0): 1, ('petal', 0.0): 1, ('jaya', 0.0): 1, ('selangor', 0.0): 1, ('glow', 0.0): 1, ('huhuu', 0.0): 1, ('congratul', 0.0): 2, ('margo', 0.0): 1, ('konga', 0.0): 1, ('ni', 0.0): 4, ('wa', 0.0): 2, ('ode', 0.0): 1, ('disvirgin', 0.0): 1, ('bride', 0.0): 3, ('yulin', 0.0): 1, ('meat', 0.0): 1, ('festiv', 0.0): 2, ('imma', 0.0): 2, ('syawal', 0.0): 1, ('lapar', 0.0): 1, ('foundat', 0.0): 1, ('clash', 0.0): 2, ('facil', 0.0): 1, ('dh', 0.0): 2, ('chalet', 0.0): 1, ('suay', 0.0): 1, ('anot', 0.0): 1, ('bugger', 0.0): 1, ('एक', 0.0): 1, ('बार', 0.0): 1, ('फिर', 0.0): 1, ('सेँ', 0.0): 1, ('धोखा', 0.0): 1, ('chandauli', 0.0): 1, ('majhwar', 0.0): 1, ('railway', 0.0): 1, ('tito', 0.0): 2, ('tita', 0.0): 1, ('cousin', 0.0): 3, ('critic', 0.0): 1, ('condit', 0.0): 1, ('steal', 0.0): 1, ('narco', 0.0): 1, ('regen', 0.0): 1, ('unfav', 0.0): 2, ('benadryl', 0.0): 1, ('offlin', 0.0): 1, ('arent', 0.0): 1, ('msg', 0.0): 1, ('yg', 0.0): 1, ('gg', 0.0): 3, ('sxrew', 0.0): 1, ('dissappear', 0.0): 1, ('swap', 0.0): 1, ('bleed', 0.0): 1, ('ishal', 0.0): 1, ('mi', 0.0): 2, ('thaank', 0.0): 1, ('jhezz', 0.0): 1, ('sneak', 0.0): 3, ('soft', 0.0): 1, ('defenc', 0.0): 1, ('defens', 0.0): 1, ('nrltigersroost', 0.0): 1, ('indiana', 0.0): 2, ('hibb', 0.0): 1, ('biblethump', 0.0): 1, ('rlyyi', 0.0): 1, ('septum', 0.0): 1, ('pierc', 0.0): 2, ('goood', 0.0): 1, ('hiya', 0.0): 1, ('fire', 0.0): 1, ('venom', 0.0): 1, ('carriag', 0.0): 1, ('pink', 0.0): 1, 
('fur-trim', 0.0): 1, ('stetson', 0.0): 1, ('error', 0.0): 4, ('59', 0.0): 1, ('xue', 0.0): 1, ('midori', 0.0): 1, ('sakit', 0.0): 2, ('mateo', 0.0): 1, ('hawk', 0.0): 2, ('bartend', 0.0): 1, ('surf', 0.0): 1, ('despair', 0.0): 1, ('insta', 0.0): 1, ('promo', 0.0): 1, ('iwantin', 0.0): 1, ('___', 0.0): 2, ('fault', 0.0): 3, ('goodluck', 0.0): 1, ('pocket', 0.0): 1, ('[email protected]', 0.0): 1, ('benedictervent', 0.0): 1, ('content', 0.0): 1, ('221b', 0.0): 1, ('popcorn', 0.0): 3, ('joyc', 0.0): 1, ('ooop', 0.0): 1, ('spotifi', 0.0): 1, ('paalam', 0.0): 1, ('sazbal', 0.0): 1, ('incid', 0.0): 1, ('aaahh', 0.0): 1, ('gooo', 0.0): 1, ("stomach'", 0.0): 1, ('growl', 0.0): 1, ('beard', 0.0): 1, ('nooop', 0.0): 1, ('🎉', 0.0): 3, ('ding', 0.0): 3, ('hundr', 0.0): 1, ('meg', 0.0): 1, ("verity'", 0.0): 1, ('rupert', 0.0): 1, ('amin', 0.0): 1, ('studi', 0.0): 2, ('pleaaas', 0.0): 1, ('👆', 0.0): 2, ('woaah', 0.0): 1, ('solvo', 0.0): 1, ('twin', 0.0): 2, ("friday'", 0.0): 1, ('lego', 0.0): 1, ('barefoot', 0.0): 1, ('twelvyy', 0.0): 1, ('boaz', 0.0): 1, ('myhil', 0.0): 1, ('takeov', 0.0): 1, ('wba', 0.0): 1, ("taeyeon'", 0.0): 1, ('derp', 0.0): 1, ('pd', 0.0): 1, ('zoom', 0.0): 2, ("sunny'", 0.0): 1, ('besst', 0.0): 1, ('plagu', 0.0): 1, ('pit', 0.0): 1, ('rich', 0.0): 1, ('sight', 0.0): 1, ('frail', 0.0): 1, ('lotteri', 0.0): 1, ('ride', 0.0): 2, ('twurkin', 0.0): 1, ('razzist', 0.0): 1, ('tumblr', 0.0): 1, ('shek', 0.0): 1, ('609', 0.0): 1, ('mugshot', 0.0): 1, ('attend', 0.0): 3, ('plsss', 0.0): 4, ('taissa', 0.0): 1, ('farmiga', 0.0): 1, ('robert', 0.0): 1, ('qualiti', 0.0): 1, ('daniel', 0.0): 1, ('latest', 0.0): 3, ('softwar', 0.0): 1, ('restor', 0.0): 2, ('momo', 0.0): 2, ('pharma', 0.0): 1, ('immov', 0.0): 1, ('messi', 0.0): 1, ('ansh', 0.0): 1, ('f1', 0.0): 1, ('billion', 0.0): 1, ('rand', 0.0): 1, ('bein', 0.0): 1, ('tla', 0.0): 1, ('tweng', 0.0): 1, ('gene', 0.0): 1, ('up.com', 0.0): 1, ('counti', 0.0): 2, ('cooler', 0.0): 1, ('minhyuk', 0.0): 1, ('gold', 0.0): 2, ('1900', 0.0): 1, ('😪', 0.0): 3, ('yu', 0.0): 1, ('hz', 0.0): 2, ('selena', 0.0): 2, ('emta', 0.0): 1, ('hatigii', 0.0): 1, ('b2aa', 0.0): 1, ('yayyy', 0.0): 1, ('anesthesia', 0.0): 1, ('penrith', 0.0): 1, ('emu', 0.0): 1, ('plain', 0.0): 1, ('staff', 0.0): 3, ('untouch', 0.0): 1, ('brienn', 0.0): 1, ('lsh', 0.0): 1, ('gunna', 0.0): 1, ('former', 0.0): 1, ('darn', 0.0): 1, ('allah', 0.0): 4, ('pakistan', 0.0): 2, ('juudiciari', 0.0): 1, ("horton'", 0.0): 1, ('dunkin', 0.0): 1, ('socialis', 0.0): 1, ('cara', 0.0): 1, ("delevingne'", 0.0): 1, ('fear', 0.0): 1, ('drug', 0.0): 1, ('lace', 0.0): 1, ('fank', 0.0): 1, ('takfaham', 0.0): 1, ('ufff', 0.0): 1, ('sr', 0.0): 2, ('dard', 0.0): 1, ('katekyn', 0.0): 1, ('ehh', 0.0): 1, ('yeahhh', 0.0): 2, ('hacharatt', 0.0): 1, ('niwll', 0.0): 1, ('defin', 0.0): 1, ('wit', 0.0): 2, ('goa', 0.0): 1, ('lini', 0.0): 1, ('kasi', 0.0): 3, ('rhd', 0.0): 1, ('1st', 0.0): 3, ('wae', 0.0): 1, ('subsid', 0.0): 1, ('20th', 0.0): 1, ('anniversari', 0.0): 1, ('youngja', 0.0): 1, ('harumph', 0.0): 1, ('soggi', 0.0): 1, ('weed', 0.0): 1, ('ireland', 0.0): 3, ('sakura', 0.0): 1, ('flavour', 0.0): 1, ('chokki', 0.0): 1, ('🌸', 0.0): 1, ('unavail', 0.0): 2, ('richard', 0.0): 2, ('laptop', 0.0): 2, ('satya', 0.0): 1, ('aditya', 0.0): 1, ('🍜', 0.0): 3, ('vibrat', 0.0): 1, ('an', 0.0): 2, ('cu', 0.0): 1, ('dhaka', 0.0): 1, ('jam', 0.0): 1, ('shall', 0.0): 2, ('cornetto', 0.0): 3, ('noseble', 0.0): 1, ('nintendo', 0.0): 3, ('wew', 0.0): 1, ('ramo', 0.0): 1, ('ground', 0.0): 2, ('shawn', 0.0): 1, ('mend', 0.0): 1, 
('l', 0.0): 2, ('dinghi', 0.0): 1, ('skye', 0.0): 1, ('store', 0.0): 3, ('descript', 0.0): 2, ('colleagu', 0.0): 2, ('gagal', 0.0): 2, ('txt', 0.0): 1, ('sim', 0.0): 1, ('nooot', 0.0): 1, ('notch', 0.0): 1, ('tht', 0.0): 2, ('starv', 0.0): 4, ('\U000fe196', 0.0): 1, ('pyjama', 0.0): 1, ('swifti', 0.0): 1, ('sorna', 0.0): 1, ('lurgi', 0.0): 1, ('jim', 0.0): 2, ('6gb', 0.0): 1, ('fenestoscop', 0.0): 1, ('etienn', 0.0): 1, ('bandana', 0.0): 3, ('bigger', 0.0): 2, ('vagina', 0.0): 1, ('suriya', 0.0): 1, ('dangl', 0.0): 1, ('mjhe', 0.0): 2, ('aaj', 0.0): 1, ('tak', 0.0): 3, ('kisi', 0.0): 1, ('kiya', 0.0): 1, ('eyesight', 0.0): 1, ('25x30', 0.0): 1, ('aftenoon', 0.0): 1, ('booor', 0.0): 1, ('uuu', 0.0): 1, ('boyfriend', 0.0): 8, ('freebiefriday', 0.0): 1, ('garag', 0.0): 1, ('michael', 0.0): 1, ('obvious', 0.0): 1, ('denim', 0.0): 1, ('somebodi', 0.0): 1, ('ce', 0.0): 1, ('gw', 0.0): 1, ('anatomi', 0.0): 1, ('no1', 0.0): 1, ("morisette'", 0.0): 1, ('flash', 0.0): 1, ('non-trial', 0.0): 1, ('sayhernam', 0.0): 1, ('lootcrat', 0.0): 1, ('item', 0.0): 1, ('inca', 0.0): 1, ('trail', 0.0): 1, ('sandboard', 0.0): 1, ('derbi', 0.0): 1, ('coffe', 0.0): 1, ('unabl', 0.0): 3, ('signatur', 0.0): 1, ('dish', 0.0): 1, ('unfamiliar', 0.0): 1, ('kitchen', 0.0): 3, ('coldest', 0.0): 1, ("old'", 0.0): 1, ('14518344', 0.0): 1, ('61', 0.0): 1, ('thirdwheel', 0.0): 1, ('lovebird', 0.0): 1, ('nth', 0.0): 1, ('imo', 0.0): 1, ('familiar', 0.0): 1, ('@juliettemaughan', 0.0): 1, ('copi', 0.0): 1, ('sensiesha', 0.0): 1, ('eldest', 0.0): 1, ('netbal', 0.0): 1, ('😟', 0.0): 1, ('keedz', 0.0): 1, ('taybigail', 0.0): 1, ('jordan', 0.0): 1, ('tournament', 0.0): 1, ('goin', 0.0): 1, ('ps4', 0.0): 3, ('kink', 0.0): 1, ('charger', 0.0): 1, ('streak', 0.0): 1, ('scorch', 0.0): 1, ('srski', 0.0): 1, ('tdc', 0.0): 1, ('egypt', 0.0): 1, ('in-sensit', 0.0): 1, ('cooper', 0.0): 3, ('invit', 0.0): 1, ('donna', 0.0): 1, ('thurston', 0.0): 1, ('collin', 0.0): 1, ('quietli', 0.0): 2, ('kennel', 0.0): 1, ('911', 0.0): 1, ('pluckersss', 0.0): 1, ('gion', 0.0): 1, ('886', 0.0): 1, ('nsfw', 0.0): 1, ('kidschoiceaward', 0.0): 1, ('ming', 0.0): 1, ('pbr', 0.0): 1, ('shoutout', 0.0): 1, ('periscop', 0.0): 1, ('ut', 0.0): 1, ('shawti', 0.0): 1, ('naw', 0.0): 4, ("sterling'", 0.0): 1, ('9muse', 0.0): 1, ('hrryok', 0.0): 2, ('asap', 0.0): 2, ('wnt', 0.0): 1, ('9:30', 0.0): 1, ('9:48', 0.0): 1, ('9/11', 0.0): 1, ('bueno', 0.0): 1, ('receptionist', 0.0): 1, ('ella', 0.0): 2, ('goe', 0.0): 4, ('ketchup', 0.0): 1, ('tasteless', 0.0): 1, ('deantd', 0.0): 1, ('justgotkanekifi', 0.0): 1, ('notgonnabeactivefor', 0.0): 1, ('2weeksdontmissittoomuch', 0.0): 1, ('2013', 0.0): 1, ('disney', 0.0): 2, ('vlog', 0.0): 1, ('swim', 0.0): 1, ('turtl', 0.0): 2, ('cnn', 0.0): 2, ('straplin', 0.0): 1, ('theatr', 0.0): 1, ('guncontrol', 0.0): 1, ('stung', 0.0): 2, ('tweak', 0.0): 1, ("thát'", 0.0): 1, ('powerpoint', 0.0): 1, ('present', 0.0): 5, ('diner', 0.0): 1, ('no-no', 0.0): 1, ('hind', 0.0): 1, ('circuit', 0.0): 1, ('secondari', 0.0): 1, ('sodder', 0.0): 1, ('perhap', 0.0): 2, ('mobitel', 0.0): 1, ('colin', 0.0): 1, ('playstat', 0.0): 2, ('charg', 0.0): 4, ('exp', 0.0): 1, ('misspelt', 0.0): 1, ('wan', 0.0): 1, ('hyungwon', 0.0): 2, ('alarm', 0.0): 1, ('needicecreamnow', 0.0): 1, ('shake', 0.0): 1, ('repeatedli', 0.0): 1, ('nu-uh', 0.0): 1, ('jace', 0.0): 1, ('mostest', 0.0): 1, ('vip', 0.0): 1, ('urgh', 0.0): 1, ('consol', 0.0): 1, ("grigson'", 0.0): 1, ('carrot', 0.0): 1, ('>:-(', 0.0): 4, ('sunburn', 0.0): 1, ('ughh', 0.0): 2, ('enabl', 0.0): 1, ('otter', 0.0): 1, 
('protect', 0.0): 1, ('argh', 0.0): 1, ('pon', 0.0): 1, ('otl', 0.0): 2, ('sleepov', 0.0): 2, ('jess', 0.0): 2, ('bebe', 0.0): 1, ('fabina', 0.0): 1, ("barrista'", 0.0): 1, ('plant', 0.0): 3, ('pup', 0.0): 2, ('brolli', 0.0): 1, ('mere', 0.0): 2, ('nhi', 0.0): 1, ('dey', 0.0): 2, ('serv', 0.0): 1, ('kepo', 0.0): 1, ('bitin', 0.0): 1, ('pretzel', 0.0): 1, ('bb17', 0.0): 1, ('bblf', 0.0): 1, ('fuckin', 0.0): 1, ('vanilla', 0.0): 1, ('latt', 0.0): 1, ('skulker', 0.0): 1, ('thread', 0.0): 1, ('hungrrryyi', 0.0): 1, ('icloud', 0.0): 1, ('ipod', 0.0): 3, ('hallyu', 0.0): 1, ('buuut', 0.0): 1, ('über', 0.0): 1, ('oki', 0.0): 2, ('8p', 0.0): 1, ('champagn', 0.0): 1, ('harlo', 0.0): 1, ('torrentialrain', 0.0): 1, ('lloyd', 0.0): 1, ('asshol', 0.0): 1, ('clearli', 0.0): 2, ('knowww', 0.0): 2, ('runni', 0.0): 1, ('sehun', 0.0): 1, ('sweater', 0.0): 1, ('intoler', 0.0): 2, ('xenophob', 0.0): 1, ('wtfff', 0.0): 1, ('tone', 0.0): 1, ('wasnt', 0.0): 1, ('1pm', 0.0): 2, ('fantasi', 0.0): 1, ('newer', 0.0): 1, ('pish', 0.0): 1, ('comparison', 0.0): 1, ('remast', 0.0): 1, ('fe14', 0.0): 1, ('icon', 0.0): 2, ('strawberri', 0.0): 1, ('loos', 0.0): 1, ('kapatidkongpogi', 0.0): 1, ('steph', 0.0): 1, ('mel', 0.0): 1, ('longest', 0.0): 1, ('carmen', 0.0): 1, ('login', 0.0): 1, ('respons', 0.0): 3, ('00128835', 0.0): 1, ('wingstop', 0.0): 1, ('budg', 0.0): 1, ('fuq', 0.0): 1, ('ilhoon', 0.0): 1, ('ganteng', 0.0): 1, ('simpl', 0.0): 1, ('getthescoop', 0.0): 1, ('hearess', 0.0): 1, ('677', 0.0): 1, ('txt_shot', 0.0): 1, ('standbi', 0.0): 1, ('inatal', 0.0): 1, ('zenmat', 0.0): 1, ('namecheck', 0.0): 1, ('whistl', 0.0): 1, ('junmyeon', 0.0): 1, ('ddi', 0.0): 1, ('arini', 0.0): 1, ('je', 0.0): 1, ('bright', 0.0): 2, ('igbo', 0.0): 1, ('blamehoney', 0.0): 1, ('whhr', 0.0): 1, ('juan', 0.0): 1, ('snuggl', 0.0): 1, ('internship', 0.0): 1, ('usag', 0.0): 1, ('warn', 0.0): 1, ('vertigo', 0.0): 1, ('panic', 0.0): 1, ('attack', 0.0): 4, ('dual', 0.0): 1, ('carriageway', 0.0): 1, ('aragalang', 0.0): 1, ('08', 0.0): 1, ('tam', 0.0): 1, ('bose', 0.0): 1, ('theo', 0.0): 1, ('anymoree', 0.0): 1, ('rubbish', 0.0): 1, ('cactu', 0.0): 1, ('sorrri', 0.0): 1, ('bowel', 0.0): 1, ('nasti', 0.0): 2, ('tumour', 0.0): 1, ('faster', 0.0): 1, ('puffi', 0.0): 1, ('eyelid', 0.0): 1, ('musica', 0.0): 1, ('dota', 0.0): 1, ('4am', 0.0): 1, ('campsit', 0.0): 1, ('miah', 0.0): 1, ('hahay', 0.0): 1, ('churro', 0.0): 1, ('montana', 0.0): 2, ('reign', 0.0): 1, ('exampl', 0.0): 1, ('inflat', 0.0): 1, ('sic', 0.0): 1, ('reset', 0.0): 1, ('entlerbountli', 0.0): 1, ('tinder', 0.0): 3, ('dirtykik', 0.0): 2, ('sexcam', 0.0): 3, ('spray', 0.0): 1, ('industri', 0.0): 1, ('swollen', 0.0): 1, ('distanc', 0.0): 2, ('jojo', 0.0): 1, ('postcod', 0.0): 1, ('kafi', 0.0): 1, ('din', 0.0): 1, ('mene', 0.0): 1, ('aj', 0.0): 1, ('koi', 0.0): 1, ('rewert', 0.0): 1, ('bunta', 0.0): 1, ('warnaaa', 0.0): 1, ('tortur', 0.0): 2, ('field', 0.0): 1, ('wall', 0.0): 2, ('iran', 0.0): 1, ('irand', 0.0): 1, ('us-iran', 0.0): 1, ('nuclear', 0.0): 1, ("mit'", 0.0): 1, ('expert', 0.0): 1, ('sever', 0.0): 3, ('li', 0.0): 1, ('s2e12', 0.0): 1, ('rumpi', 0.0): 1, ('gallon', 0.0): 1, ('ryan', 0.0): 1, ('secret', 0.0): 2, ('dandia', 0.0): 1, ('rbi', 0.0): 1, ('cage', 0.0): 2, ('parrot', 0.0): 1, ('1li', 0.0): 1, ('commiss', 0.0): 1, ('cag', 0.0): 1, ('stripe', 0.0): 2, ('gujarat', 0.0): 1, ('tear', 0.0): 3, ('ily.melani', 0.0): 1, ('unlik', 0.0): 2, ('talent', 0.0): 2, ('deepxcap', 0.0): 1, ('doin', 0.0): 3, ('5:08', 0.0): 1, ('thesi', 0.0): 11, ('belieb', 0.0): 2, ('gtg', 0.0): 1, 
('compet', 0.0): 1, ('vv', 0.0): 1, ('respect', 0.0): 5, ('opt-out', 0.0): 1, ('vam', 0.0): 1, ('spece', 0.0): 1, ('ell', 0.0): 1, ('articl', 0.0): 1, ('sexyameli', 0.0): 1, ('fineandyu', 0.0): 1, ('gd', 0.0): 1, ('flesh', 0.0): 1, ('daft', 0.0): 1, ('imsorri', 0.0): 1, ('aku', 0.0): 1, ('chelsea', 0.0): 2, ('koe', 0.0): 1, ('emyu', 0.0): 1, ('confetti', 0.0): 1, ('bf', 0.0): 2, ('sini', 0.0): 1, ('dipoppo', 0.0): 1, ('hop', 0.0): 2, ('bestweekend', 0.0): 1, ('okay-ish', 0.0): 1, ('html', 0.0): 1, ('geneva', 0.0): 1, ('patml', 0.0): 1, ('482', 0.0): 1, ('orgasm', 0.0): 3, ('abouti', 0.0): 1, ('797', 0.0): 1, ('reaalli', 0.0): 1, ('aldub', 0.0): 1, ('nila', 0.0): 1, ('smart', 0.0): 1, ('meter', 0.0): 1, ('display', 0.0): 1, ('unansw', 0.0): 1, ('bri', 0.0): 1, ('magcon', 0.0): 1, ('sinuend', 0.0): 1, ('kak', 0.0): 1, ('laper', 0.0): 2, ('rage', 0.0): 1, ('loser', 0.0): 1, ('brendon', 0.0): 1, ("urie'", 0.0): 1, ('sumer', 0.0): 1, ('repackag', 0.0): 1, (":'d", 0.0): 1, ('matthew', 0.0): 1, ('yongb', 0.0): 1, ('sued', 0.0): 1, ('suprem', 0.0): 1, ('warm-up', 0.0): 1, ('arriv', 0.0): 4, ('brill', 0.0): 1, ('120', 0.0): 1, ('rub', 0.0): 1, ('belli', 0.0): 1, ('jannatul', 0.0): 1, ('ferdou', 0.0): 1, ('ekta', 0.0): 1, ('kharap', 0.0): 1, ('manush', 0.0): 1, ('mart', 0.0): 2, ('gua', 0.0): 1, ('can', 0.0): 1, ("khloe'", 0.0): 1, ('nhe', 0.0): 1, ('yar', 0.0): 1, ('minkyuk', 0.0): 1, ('hol', 0.0): 1, ('isol', 0.0): 1, ('hk', 0.0): 1, ('sensor', 0.0): 1, ('broker', 0.0): 1, ('wna', 0.0): 1, ('flaviana', 0.0): 1, ('chickmt', 0.0): 1, ('123', 0.0): 1, ('letsfootbal', 0.0): 2, ('atk', 0.0): 2, ('greymind', 0.0): 2, ('43', 0.0): 2, ('gayl', 0.0): 2, ('cricket', 0.0): 3, ('2-3', 0.0): 2, ('mood-dump', 0.0): 1, ('livestream', 0.0): 1, ('gotten', 0.0): 1, ('felton', 0.0): 1, ('veriti', 0.0): 1, ("standen'", 0.0): 1, ('shortli', 0.0): 1, ('😆', 0.0): 2, ('takoyaki', 0.0): 1, ('piti', 0.0): 1, ('aisyah', 0.0): 1, ('ffvi', 0.0): 1, ('youtu.be/2_gpctsojkw', 0.0): 1, ('donutsss', 0.0): 1, ('50p', 0.0): 1, ('grate', 0.0): 1, ('spars', 0.0): 1, ('dd', 0.0): 1, ('lagi', 0.0): 1, ('rider', 0.0): 1, ('pride', 0.0): 1, ('hueee', 0.0): 1, ('password', 0.0): 1, ('thingi', 0.0): 1, ('georg', 0.0): 1, ('afraid', 0.0): 2, ('chew', 0.0): 2, ('toy', 0.0): 1, ('stella', 0.0): 1, ('threw', 0.0): 2, ('theaccidentalcoupl', 0.0): 1, ('smooth', 0.0): 1, ('handov', 0.0): 1, ('spick', 0.0): 1, ('bebii', 0.0): 1, ('happenend', 0.0): 1, ('dr', 0.0): 1, ('balm', 0.0): 1, ('hmph', 0.0): 1, ('bubba', 0.0): 2, ('floor', 0.0): 3, ('georgi', 0.0): 1, ('oi', 0.0): 1, ('bengali', 0.0): 1, ('masterchef', 0.0): 1, ('whatchya', 0.0): 1, ('petrol', 0.0): 1, ('diesel', 0.0): 1, ('wardrob', 0.0): 1, ('awe', 0.0): 1, ('cock', 0.0): 1, ('nyquil', 0.0): 1, ('poootek', 0.0): 1, ('1,500', 0.0): 1, ('bobbl', 0.0): 1, ('leak', 0.0): 1, ('thermo', 0.0): 1, ('classic', 0.0): 1, ('ti5', 0.0): 1, ('12th', 0.0): 1, ('skate', 0.0): 1, ('tae', 0.0): 1, ('kita', 0.0): 4, ('ia', 0.0): 1, ('pkwalasawa', 0.0): 1, ('india', 0.0): 1, ('corrupt', 0.0): 2, ('access', 0.0): 2, ('anything.sur', 0.0): 1, ('info', 0.0): 6, ('octob', 0.0): 1, ('mubank', 0.0): 2, ('ene', 0.0): 2, ('3k', 0.0): 1, ('zehr', 0.0): 1, ('khani', 0.0): 1, ('groceri', 0.0): 1, ('hubba', 0.0): 1, ('bubbl', 0.0): 1, ('gum', 0.0): 2, ('closet', 0.0): 1, ('jhalak', 0.0): 1, ('. ..', 0.0): 2, ('bakwa', 0.0): 1, ('. 
...', 0.0): 1, ('seehiah', 0.0): 1, ('goy', 0.0): 1, ('nacho', 0.0): 1, ('braid', 0.0): 2, ('initi', 0.0): 1, ('ruth', 0.0): 1, ('boong', 0.0): 1, ('recommend', 0.0): 3, ('gta', 0.0): 1, ('cwnt', 0.0): 1, ('trivia', 0.0): 1, ('belat', 0.0): 1, ('rohingya', 0.0): 1, ('muslim', 0.0): 2, ('indict', 0.0): 1, ('traffick', 0.0): 1, ('thailand', 0.0): 1, ('asia', 0.0): 1, ('rumbl', 0.0): 1, ('kumbl', 0.0): 1, ('scold', 0.0): 1, ('phrase', 0.0): 1, ('includ', 0.0): 1, ('tag', 0.0): 2, ('melt', 0.0): 1, ('tfw', 0.0): 1, ('jest', 0.0): 1, ('offend', 0.0): 2, ('sleepingwithsiren', 0.0): 1, ('17th', 0.0): 1, ('bringmethehorizon', 0.0): 1, ('18th', 0.0): 2, ('carva', 0.0): 1, ('regularli', 0.0): 2, ('sympathi', 0.0): 1, ('revamp', 0.0): 1, ('headphon', 0.0): 1, ('cunt', 0.0): 1, ('wacha', 0.0): 1, ('niend', 0.0): 1, ('bravo', 0.0): 1, ('2hr', 0.0): 1, ('13m', 0.0): 1, ('kk', 0.0): 2, ('calibraksaep', 0.0): 2, ('darlin', 0.0): 1, ('stun', 0.0): 1, ("doedn't", 0.0): 1, ('meaning', 0.0): 1, ('horrif', 0.0): 2, ('scoup', 0.0): 2, ('paypal', 0.0): 3, ('sweedi', 0.0): 1, ('nam', 0.0): 1, ("sacconejoly'", 0.0): 1, ('bethesda', 0.0): 1, ('fallout', 0.0): 1, ('minecon', 0.0): 1, ('perfect', 0.0): 2, ('katee', 0.0): 1, ('iloveyouu', 0.0): 1, ('linux', 0.0): 1, ('nawww', 0.0): 1, ('chikka', 0.0): 1, ('ug', 0.0): 1, ('rata', 0.0): 1, ('soonest', 0.0): 1, ('mwamwa', 0.0): 1, ('faggot', 0.0): 1, ('doubt', 0.0): 2, ('fyi', 0.0): 1, ('profil', 0.0): 1, ('nicest', 0.0): 1, ('mehendi', 0.0): 1, ('dash', 0.0): 1, ('bookmark', 0.0): 1, ('whay', 0.0): 1, ('shaa', 0.0): 1, ('prami', 0.0): 1, ('😚', 0.0): 4, ('ngee', 0.0): 1, ('ann', 0.0): 1, ('crikey', 0.0): 2, ('snit', 0.0): 1, ('nathanielhinanakit', 0.0): 1, ('naya', 0.0): 1, ('spinni', 0.0): 1, ('wheel', 0.0): 2, ('albeit', 0.0): 1, ('athlet', 0.0): 1, ('gfriend', 0.0): 2, ('yung', 0.0): 2, ('fugli', 0.0): 1, ('💞', 0.0): 4, ('jongda', 0.0): 1, ('hardli', 0.0): 2, ('tlist', 0.0): 1, ('budget', 0.0): 1, ('pabebegirl', 0.0): 1, ('pabeb', 0.0): 2, ('alter', 0.0): 1, ('sandra', 0.0): 2, ('bland', 0.0): 2, ('storifi', 0.0): 1, ('abbi', 0.0): 2, ('mtvhottest', 0.0): 1, ('gaga', 0.0): 1, ('rib', 0.0): 1, ('😵', 0.0): 1, ('hulkamania', 0.0): 1, ('unlov', 0.0): 1, ('lazi', 0.0): 3, ('ihhh', 0.0): 1, ('stackar', 0.0): 1, ('basil', 0.0): 1, ('remedi', 0.0): 1, ('ov', 0.0): 2, ('raiz', 0.0): 1, ('nvr', 0.0): 1, ('gv', 0.0): 1, ('up.wt', 0.0): 1, ('wt', 0.0): 1, ('imran', 0.0): 2, ('achiev', 0.0): 1, ('thr', 0.0): 1, ('soln', 0.0): 1, ("sister'", 0.0): 1, ('hong', 0.0): 1, ('kong', 0.0): 1, ('31st', 0.0): 1, ('pipe', 0.0): 1, ('sept', 0.0): 2, ('lawn', 0.0): 1, ("cupid'", 0.0): 1, ('torn', 0.0): 1, ('retain', 0.0): 1, ('clown', 0.0): 2, ('lipstick', 0.0): 1, ('haiss', 0.0): 1, ('todayi', 0.0): 1, ('thoo', 0.0): 1, ('everday', 0.0): 1, ('hangout', 0.0): 2, ('steven', 0.0): 2, ('william', 0.0): 1, ('umboh', 0.0): 1, ('goodafternoon', 0.0): 1, ('jadin', 0.0): 1, ('thiz', 0.0): 1, ('iz', 0.0): 1, ('emeg', 0.0): 1, ('kennat', 0.0): 1, ('reunit', 0.0): 1, ('abi', 0.0): 1, ('arctic', 0.0): 1, ('chicsirif', 0.0): 1, ('structur', 0.0): 1, ('cumbia', 0.0): 1, ('correct', 0.0): 1, ('badlif', 0.0): 1, ('4-5', 0.0): 2, ('kaslkdja', 0.0): 1, ('3wk', 0.0): 1, ('flower', 0.0): 1, ('feverfew', 0.0): 1, ('weddingflow', 0.0): 1, ('diyflow', 0.0): 1, ('fitn', 0.0): 1, ('worth', 0.0): 4, ('wolverin', 0.0): 1, ('khan', 0.0): 1, ('innoc', 0.0): 1, ('🙏', 0.0): 1, ('🎂', 0.0): 2, ('memem', 0.0): 2, ('krystoria', 0.0): 1, ('snob', 0.0): 1, ('zumba', 0.0): 1, ('greekcrisi', 0.0): 1, ('remain', 0.0): 1, ('dutch', 
0.0): 1, ('legibl', 0.0): 2, ('isra', 0.0): 1, ('passport', 0.0): 1, ('froze', 0.0): 1, ('theori', 0.0): 1, ('23rd', 0.0): 1, ('24th', 0.0): 1, ('stomachach', 0.0): 1, ('slice', 0.0): 1, ('ཀ', 0.0): 1, ('again', 0.0): 1, ('otani', 0.0): 1, ('3-0', 0.0): 1, ('3rd', 0.0): 3, ('bottom', 0.0): 2, ('niaaa', 0.0): 1, ('2/4', 0.0): 1, ('scheme', 0.0): 2, ('fckin', 0.0): 1, ('hii', 0.0): 1, ('vin', 0.0): 1, ('plss', 0.0): 1, ('rpli', 0.0): 1, ('rat', 0.0): 3, ('bollywood', 0.0): 1, ('mac', 0.0): 1, ('backup', 0.0): 2, ('lune', 0.0): 1, ('robinhood', 0.0): 1, ('robinhoodi', 0.0): 1, ('🚙', 0.0): 1, ('💚', 0.0): 1, ('docopenhagen', 0.0): 1, ('setter', 0.0): 1, ('swipe', 0.0): 1, ('bbygurl', 0.0): 1, ('neil', 0.0): 1, ('caribbean', 0.0): 1, ('6yr', 0.0): 1, ('jabongatpumaurbanstamped', 0.0): 2, ('takraw', 0.0): 1, ('fersure', 0.0): 1, ('angi', 0.0): 1, ('sheriff', 0.0): 1, ('aaag', 0.0): 1, ("i'mo", 0.0): 1, ('sulk', 0.0): 1, ('selfish', 0.0): 1, ('trick', 0.0): 2, ('nonc', 0.0): 1, ('pad', 0.0): 1, ('bison', 0.0): 1, ('motiv', 0.0): 2, ("q'don", 0.0): 1, ('cheat', 0.0): 2, ('stomp', 0.0): 1, ('aaaaaaaaah', 0.0): 1, ('kany', 0.0): 1, ('mama', 0.0): 1, ('jdjdjdjd', 0.0): 1, ("jimin'", 0.0): 1, ('fancaf', 0.0): 1, ('waffl', 0.0): 1, ('87.7', 0.0): 1, ('2fm', 0.0): 1, ('himseek', 0.0): 1, ('kissm', 0.0): 1, ('akua', 0.0): 1, ('glo', 0.0): 1, ('cori', 0.0): 1, ('monteith', 0.0): 1, ('often', 0.0): 1, ('hashbrown', 0.0): 1, ('💘', 0.0): 2, ('pg', 0.0): 1, ('msc', 0.0): 1, ('hierro', 0.0): 1, ('shirleycam', 0.0): 1, ('phonesex', 0.0): 2, ('pal', 0.0): 1, ('111', 0.0): 1, ('gilet', 0.0): 1, ('cheek', 0.0): 1, ('squishi', 0.0): 1, ('lahhh', 0.0): 1, ('eon', 0.0): 1, ('sunris', 0.0): 1, ('beeti', 0.0): 1, ('697', 0.0): 1, ('kikkomansabor', 0.0): 1, ('getaway', 0.0): 1, ('crimin', 0.0): 1, ('amiibo', 0.0): 1, ('batman', 0.0): 1, ('habe', 0.0): 1, ('siannn', 0.0): 1, ('march', 0.0): 1, ('2017', 0.0): 1, ('chuckin', 0.0): 1, ('ampsha', 0.0): 1, ('nia', 0.0): 1, ('strap', 0.0): 1, ('dz9055', 0.0): 1, ('entlead', 0.0): 1, ('590', 0.0): 1, ('twice', 0.0): 5, ('07:02', 0.0): 1, ('ifsc', 0.0): 1, ('mayor', 0.0): 1, ('biodivers', 0.0): 1, ('taxonom', 0.0): 1, ('collabor', 0.0): 1, ('speci', 0.0): 1, ('discoveri', 0.0): 1, ('collar', 0.0): 1, ('3:03', 0.0): 1, ('belt', 0.0): 1, ('smith', 0.0): 2, ('eyelin', 0.0): 1, ('therefor', 0.0): 1, ('netherland', 0.0): 1, ('el', 0.0): 1, ('jeb', 0.0): 1, ('blacklivesmatt', 0.0): 1, ('slogan', 0.0): 1, ('msnbc', 0.0): 1, ('jebbush', 0.0): 1, ('famish', 0.0): 1, ('marino', 0.0): 1, ('qualifi', 0.0): 2, ('suzi', 0.0): 1, ('skirt', 0.0): 1, ('tama', 0.0): 1, ('warrior', 0.0): 2, ('wound', 0.0): 1, ('iraq', 0.0): 1, ('be', 0.0): 2, ('camara', 0.0): 1, ('coveral', 0.0): 1, ('happili', 0.0): 1, ('sneezi', 0.0): 1, ('rogerwatch', 0.0): 1, ('stalker', 0.0): 1, ('velvet', 0.0): 1, ('tradit', 0.0): 1, ("people'", 0.0): 1, ('beheaviour', 0.0): 1, ("robert'", 0.0): 1, ('.\n.', 0.0): 2, ('aaron', 0.0): 1, ('jelous', 0.0): 1, ('mtg', 0.0): 1, ('thoughtseiz', 0.0): 1, ('playabl', 0.0): 1, ('oldi', 0.0): 1, ('goodi', 0.0): 1, ('mcg', 0.0): 1, ('inspirit', 0.0): 1, ('shine', 0.0): 1, ('ise', 0.0): 1, ('assum', 0.0): 2, ('waist', 0.0): 2, ('guin', 0.0): 1, ('venu', 0.0): 1, ('evil', 0.0): 1, ('pepper', 0.0): 1, ('thessidew', 0.0): 1, ('877', 0.0): 1, ('genesi', 0.0): 1, ('mexico', 0.0): 2, ('novemb', 0.0): 1, ('mash', 0.0): 1, ('whattsap', 0.0): 1, ('inuyasha', 0.0): 2, ('outfwith', 0.0): 1, ('myungsoo', 0.0): 1, ('organis', 0.0): 1, ('satisfi', 0.0): 1, ('wah', 0.0): 1, ('challo', 0.0): 1, ('pliss', 
0.0): 1, ('juliana', 0.0): 1, ('enrol', 0.0): 1, ('darlen', 0.0): 1, ('emoji', 0.0): 2, ('brisban', 0.0): 1, ('merlin', 0.0): 1, ('nawwwe', 0.0): 1, ('hyperbulli', 0.0): 1, ('tong', 0.0): 1, ('nga', 0.0): 1, ('seatmat', 0.0): 1, ('rajud', 0.0): 1, ('barkada', 0.0): 1, ('ore', 0.0): 1, ('kayla', 0.0): 1, ('ericavan', 0.0): 1, ('jong', 0.0): 1, ('dongwoo', 0.0): 1, ('photocard', 0.0): 1, ('wh', 0.0): 1, ('dw', 0.0): 1, ('tumor', 0.0): 1, ('vivian', 0.0): 1, ('mmsmalubhangsakit', 0.0): 1, ('jillcruz', 0.0): 2, ('lgbt', 0.0): 3, ('qt', 0.0): 1, ('19th', 0.0): 1, ('toss', 0.0): 1, ('co-work', 0.0): 1, ('mia', 0.0): 1, ('push', 0.0): 4, ('dare', 0.0): 2, ('unsettl', 0.0): 1, ('gh', 0.0): 1, ('18c', 0.0): 1, ('rlli', 0.0): 2, ('hamster', 0.0): 2, ('sheeran', 0.0): 2, ('preform', 0.0): 2, ('monash', 0.0): 1, ('hitmark', 0.0): 1, ('glitch', 0.0): 1, ('safaa', 0.0): 1, ("selena'", 0.0): 1, ('galat', 0.0): 1, ('tum', 0.0): 1, ('ab', 0.0): 5, ('non', 0.0): 1, ('lrka', 0.0): 1, ('bna', 0.0): 1, ('kia', 0.0): 1, ('bhook', 0.0): 1, ('jai', 0.0): 1, ('social', 0.0): 2, ('afterschool', 0.0): 1, ('bilal', 0.0): 1, ('ashraf', 0.0): 1, ('icu', 0.0): 1, ('thanksss', 0.0): 1, ('annnd', 0.0): 1, ('winchest', 0.0): 1, ('{:', 0.0): 1, ('grepe', 0.0): 1, ('grepein', 0.0): 1, ('panem', 0.0): 1, ('lover', 0.0): 1, ('sulli', 0.0): 1, ('cpm', 0.0): 1, ('condemn', 0.0): 1, ('✔', 0.0): 1, ('occur', 0.0): 1, ('unagi', 0.0): 1, ('7elw', 0.0): 1, ('mesh', 0.0): 1, ('beyt', 0.0): 1, ('3a2ad', 0.0): 1, ('fluent', 0.0): 1, ('varsiti', 0.0): 1, ('sengenza', 0.0): 1, ('context', 0.0): 1, ('movnat', 0.0): 1, ('yield', 0.0): 1, ('nbhero', 0.0): 1, ("it'd", 0.0): 1, ('background', 0.0): 1, ('agov', 0.0): 1, ('brasileirao', 0.0): 2, ('abus', 0.0): 1, ('unpar', 0.0): 1, ('bianca', 0.0): 1, ('bun', 0.0): 1, ('dislik', 0.0): 1, ('burdensom', 0.0): 1, ('clear', 0.0): 2, ('amelia', 0.0): 1, ('melon', 0.0): 2, ('useless', 0.0): 1, ('soccer', 0.0): 2, ('interview', 0.0): 2, ('thursday', 0.0): 1, ('nevermind', 0.0): 1, ('jeon', 0.0): 1, ('claw', 0.0): 1, ('thigh', 0.0): 2, ('traction', 0.0): 1, ('damnit', 0.0): 1, ('pri', 0.0): 1, ('pv', 0.0): 2, ('reliv', 0.0): 1, ('nyc', 0.0): 2, ('klm', 0.0): 1, ('11am', 0.0): 1, ("mcd'", 0.0): 1, ('hung', 0.0): 1, ('bam', 0.0): 1, ('seventh', 0.0): 1, ('splendour', 0.0): 1, ('swedish', 0.0): 1, ('metal', 0.0): 1, ('häirførc', 0.0): 1, ('givecodpieceach', 0.0): 1, ('alic', 0.0): 3, ('stile', 0.0): 1, ('explain', 0.0): 3, ('ili', 0.0): 1, ('pragu', 0.0): 1, ('sadi', 0.0): 1, ('charact', 0.0): 1, ('915', 0.0): 1, ('hayee', 0.0): 2, ('patwari', 0.0): 1, ('mam', 0.0): 1, ("ik'", 0.0): 1, ('vision', 0.0): 2, ('ga', 0.0): 1, ('awhhh', 0.0): 1, ('nalang', 0.0): 1, ('hehe', 0.0): 1, ('albanian', 0.0): 1, ('curs', 0.0): 2, ('tava', 0.0): 1, ('chara', 0.0): 1, ('teteh', 0.0): 1, ('verri', 0.0): 1, ('shatter', 0.0): 2, ('sb', 0.0): 1, ('nawe', 0.0): 1, ('bulldog', 0.0): 1, ('macho', 0.0): 1, ('puriti', 0.0): 1, ('kwento', 0.0): 1, ('nakakapikon', 0.0): 1, ('nagbabasa', 0.0): 1, ('blog', 0.0): 2, ('cancer', 0.0): 1, (':-\\', 0.0): 1, ('jonatha', 0.0): 4, ('beti', 0.0): 4, ('sogok', 0.0): 1, ('premium', 0.0): 2, ('instrument', 0.0): 1, ('howev', 0.0): 1, ('dastardli', 0.0): 1, ('swine', 0.0): 1, ('envelop', 0.0): 1, ('pipol', 0.0): 1, ('tad', 0.0): 1, ('wiper', 0.0): 2, ('supposedli', 0.0): 1, ('kernel', 0.0): 1, ('intel', 0.0): 1, ('mega', 0.0): 1, ('bent', 0.0): 1, ('socket', 0.0): 1, ('pcgame', 0.0): 1, ('pcupgrad', 0.0): 1, ('brainwash', 0.0): 2, ('smosh', 0.0): 1, ('plawnew', 0.0): 1, ('837', 0.0): 1, ('aswel', 
0.0): 1, ('litter', 0.0): 1, ('mensch', 0.0): 1, ('sepanx', 0.0): 1, ('pci', 0.0): 1, ('caerphilli', 0.0): 1, ('omw', 0.0): 1, ('😍', 0.0): 1, ('hahdhdhshh', 0.0): 1, ('growinguppoor', 0.0): 1, ('🇺', 0.0): 2, ('🇸', 0.0): 2, ("bangtan'", 0.0): 1, ('taimoor', 0.0): 1, ('meray', 0.0): 1, ('dost', 0.0): 1, ('tya', 0.0): 1, ('refollow', 0.0): 1, ('dumb', 0.0): 2, ('butt', 0.0): 1, ('pissbabi', 0.0): 1, ('plank', 0.0): 1, ('inconsist', 0.0): 1, ('moor', 0.0): 1, ('bin', 0.0): 1, ('osx', 0.0): 1, ('chrome', 0.0): 1, ('voiceov', 0.0): 1, ('devo', 0.0): 1, ('hulkhogan', 0.0): 1, ('unpleas', 0.0): 1, ('daaamn', 0.0): 1, ('dada', 0.0): 1, ('fulli', 0.0): 1, ('spike', 0.0): 1, ("panic'", 0.0): 1, ('22nd', 0.0): 1, ('south', 0.0): 2, ('africa', 0.0): 2, ('190', 0.0): 2, ('lizardz', 0.0): 1, ('deepli', 0.0): 1, ('emerg', 0.0): 1, ('engin', 0.0): 1, ('dormtel', 0.0): 1, ('scho', 0.0): 1, ('siya', 0.0): 1, ('onee', 0.0): 1, ('carri', 0.0): 1, ('7pm', 0.0): 1, ('feta', 0.0): 1, ('blaaaz', 0.0): 1, ('nausea', 0.0): 1, ('awar', 0.0): 1, ('top-up', 0.0): 1, ('sharknado', 0.0): 1, ('erni', 0.0): 1, ('ezoo', 0.0): 1, ('lilybutl', 0.0): 1, ('seduc', 0.0): 2, ('powai', 0.0): 1, ('neighbor', 0.0): 1, ('delhi', 0.0): 1, ('unsaf', 0.0): 1, ('halo', 0.0): 1, ('fred', 0.0): 1, ('gaon', 0.0): 1, ('infnt', 0.0): 1, ('elig', 0.0): 1, ('acub', 0.0): 1, ("why'd", 0.0): 1, ('bullshit', 0.0): 2, ('hanaaa', 0.0): 1, ('jn', 0.0): 1, ('tau', 0.0): 1, ('basta', 0.0): 1, ('sext', 0.0): 1, ('addm', 0.0): 1, ('hotmusicdeloco', 0.0): 2, ('dhi', 0.0): 1, ('👉', 0.0): 1, ('8ball', 0.0): 1, ('fakmarey', 0.0): 1, ('doo', 0.0): 2, ('six', 0.0): 3, ('flag', 0.0): 1, ('fulltim', 0.0): 1, ('awkward', 0.0): 1, ('beet', 0.0): 1, ('juic', 0.0): 1, ('dci', 0.0): 1, ('granddad', 0.0): 1, ('minion', 0.0): 3, ('bucket', 0.0): 1, ('kapan', 0.0): 1, ('udah', 0.0): 1, ('dihapu', 0.0): 1, ('hilang', 0.0): 1, ('dari', 0.0): 1, ('muka', 0.0): 1, ('bumi', 0.0): 1, ('narrow', 0.0): 1, ('gona', 0.0): 2, ('chello', 0.0): 1, ('gate', 0.0): 1, ('guard', 0.0): 1, ('crepe', 0.0): 1, ('forsaken', 0.0): 1, ('kanin', 0.0): 1, ('hypixel', 0.0): 1, ('grrr', 0.0): 1, ('thestruggleisr', 0.0): 1, ('geek', 0.0): 1, ('gamer', 0.0): 2, ('afterbirth', 0.0): 1, ("apink'", 0.0): 1, ('overperhatian', 0.0): 1, ('son', 0.0): 1, ('pox', 0.0): 1, ('ahm', 0.0): 1, ('karli', 0.0): 1, ('kloss', 0.0): 1, ('goofi', 0.0): 1, ('pcd', 0.0): 1, ('antagonis', 0.0): 1, ('writer', 0.0): 1, ('nudg', 0.0): 1, ('delv', 0.0): 1, ('grandad', 0.0): 1, ("gray'", 0.0): 1, ('followk', 0.0): 1, ('suggest', 0.0): 2, ('pace', 0.0): 1, ('maker', 0.0): 1, ('molli', 0.0): 1, ('higher', 0.0): 1, ('ceremoni', 0.0): 1, ('christin', 0.0): 1, ('moodi', 0.0): 1, ('throwback', 0.0): 1, ('fav', 0.0): 3, ('barb', 0.0): 1, ('creasi', 0.0): 1, ('deputi', 0.0): 1, ('tast', 0.0): 1, ("banana'", 0.0): 1, ('saludo', 0.0): 1, ('dissapoint', 0.0): 1, ('😫', 0.0): 1, ('<--', 0.0): 1, ("bae'", 0.0): 1, ('pimpl', 0.0): 2, ('amount', 0.0): 2, ('tdi', 0.0): 1, ('pamela', 0.0): 1, ('mini', 0.0): 1, ('mast', 0.0): 1, ('intermitt', 0.0): 1, ('servic', 0.0): 3, ('janniecam', 0.0): 1, ('musicbiz', 0.0): 1, ('braxton', 0.0): 1, ('pro', 0.0): 2, ('urban', 0.0): 1, ('unpreced', 0.0): 1, ('tebow', 0.0): 1, ('okaaay', 0.0): 1, ('sayanggg', 0.0): 1, ('housework', 0.0): 1, ('bust', 0.0): 2, ('disneyland', 0.0): 1, ('thoma', 0.0): 1, ('tommyy', 0.0): 1, ('billi', 0.0): 1, ('kevin', 0.0): 1, ('clifton', 0.0): 1, ('strictli', 0.0): 1, ('nsc', 0.0): 1, ('mat', 0.0): 1, ('0', 0.0): 1, ('awhh', 0.0): 1, ('ram', 0.0): 2, ('voucher', 0.0): 1, 
('smadvow', 0.0): 1, ('544', 0.0): 1, ('acdc', 0.0): 1, ('aker', 0.0): 1, ('gmail', 0.0): 1, ('sprevelink', 0.0): 1, ('633', 0.0): 1, ('lana', 0.0): 2, ('loveyoutilltheendcart', 0.0): 1, ('sfv', 0.0): 1, ('6/7', 0.0): 1, ('winner', 0.0): 1, ('20/1', 0.0): 1, ('david', 0.0): 1, ('rosi', 0.0): 1, ('hayoung', 0.0): 1, ('nlb', 0.0): 1, ('@_', 0.0): 1, ('tayo', 0.0): 1, ('forth', 0.0): 1, ('suspect', 0.0): 1, ('mening', 0.0): 1, ('viral', 0.0): 1, ('tonsil', 0.0): 1, ('😷', 0.0): 1, ('😝', 0.0): 1, ('babyy', 0.0): 2, ('cushion', 0.0): 1, ('😿', 0.0): 1, ('💓', 0.0): 2, ('weigh', 0.0): 1, ('keen', 0.0): 1, ('petrofac', 0.0): 1, (';-)', 0.0): 1, ('wig', 0.0): 1, ("mark'", 0.0): 1, ('pathet', 0.0): 1, ('burden.say', 0.0): 1, ('itchi', 0.0): 1, ('cheaper', 0.0): 1, ('malaysia', 0.0): 1, ('130', 0.0): 1, ('snapchattimg', 0.0): 1, ('😏', 0.0): 4, ('sin', 0.0): 1, ('lor', 0.0): 1, ('dedic', 0.0): 1, ('worriedli', 0.0): 1, ('stare', 0.0): 1, ('toneadi', 0.0): 1, ('46532', 0.0): 1, ('snapdirti', 0.0): 1, ('sheskindahot', 0.0): 1, ('corps', 0.0): 1, ('taeni', 0.0): 1, ('fyeah', 0.0): 1, ('andromeda', 0.0): 1, ('yunni', 0.0): 1, ('whdjwksja', 0.0): 1, ('ziam', 0.0): 1, ('100k', 0.0): 1, ('spoil', 0.0): 1, ('curtain', 0.0): 1, ('watchabl', 0.0): 1, ('migrin', 0.0): 1, ('gdce', 0.0): 1, ('gamescom', 0.0): 1, ("do't", 0.0): 1, ('parcel', 0.0): 1, ('num', 0.0): 1, ('oooouch', 0.0): 1, ('pinki', 0.0): 1, ('👣', 0.0): 1, ('podiatrist', 0.0): 1, ('gusto', 0.0): 1, ("rodic'", 0.0): 1, ("one'", 0.0): 1, ('adoohh', 0.0): 1, ('b-butt', 0.0): 1, ('tigermilk', 0.0): 1, ('east', 0.0): 1, ('dulwich', 0.0): 1, ('intens', 0.0): 1, ('kagami', 0.0): 1, ('kuroko', 0.0): 1, ('sana', 0.0): 2, ('makita', 0.0): 1, ('spooki', 0.0): 1, ('smol', 0.0): 1, ('bean', 0.0): 1, ('fagan', 0.0): 1, ('meadowhal', 0.0): 1, ('lola', 0.0): 1, ('nadalaw', 0.0): 1, ('labyu', 0.0): 1, ('jot', 0.0): 1, ('ivypowel', 0.0): 1, ('homeslic', 0.0): 1, ('emoticon', 0.0): 2, ('eyebrow', 0.0): 1, ('prettylook', 0.0): 1, ('whitney', 0.0): 1, ('houston', 0.0): 1, ('aur', 0.0): 1, ('shamil', 0.0): 1, ('tonn', 0.0): 1, ('statu', 0.0): 1, ('→', 0.0): 1, ('suddenli', 0.0): 2, ('alli', 0.0): 2, ('wrap', 0.0): 1, ('neck', 0.0): 1, ('heartbroken', 0.0): 1, ('chover', 0.0): 1, ('cebu', 0.0): 1, ('lechon', 0.0): 1, ('kitten', 0.0): 2, ('jannygreen', 0.0): 2, ('suicid', 0.0): 2, ('forgiv', 0.0): 1, ('conno', 0.0): 1, ('brooo', 0.0): 1, ('rout', 0.0): 1, ('lovebox', 0.0): 1, ('prod', 0.0): 1, ('osad', 0.0): 1, ('scam', 0.0): 1, ('itb', 0.0): 1, ('omigod', 0.0): 1, ('ehem', 0.0): 1, ('ala', 0.0): 1, ('yeke', 0.0): 1, ('jumpa', 0.0): 1, ('😋', 0.0): 1, ('ape', 0.0): 1, ('1.2', 0.0): 1, ('map', 0.0): 1, ('namin', 0.0): 1, ('govt', 0.0): 1, ('e-petit', 0.0): 1, ('pretend', 0.0): 1, ('irk', 0.0): 1, ('ruess', 0.0): 1, ('program', 0.0): 1, ('aigoo', 0.0): 1, ('doujin', 0.0): 1, ('killua', 0.0): 1, ('ginggon', 0.0): 1, ('guys.al', 0.0): 1, ('ytd', 0.0): 1, ('pdapaghimok', 0.0): 1, ('flexibl', 0.0): 1, ('sheet', 0.0): 1, ('nanaman', 0.0): 1, ('pinay', 0.0): 1, ('pie', 0.0): 1, ('jadi', 0.0): 1, ('langsung', 0.0): 1, ('flasback', 0.0): 1, ('franc', 0.0): 1, (':|', 0.0): 1, ('lo', 0.0): 1, ('nicknam', 0.0): 1, ('involv', 0.0): 1, ('scrape', 0.0): 1, ('pile', 0.0): 1, ('sare', 0.0): 1, ('bandar', 0.0): 1, ('varg', 0.0): 1, ('hammer', 0.0): 1, ('lolo', 0.0): 1, ('xbsbabnb', 0.0): 1, ('stilll', 0.0): 1, ('apma', 0.0): 2, ('leadership', 0.0): 1, ('wakeupgop', 0.0): 1, ('mv', 0.0): 1, ('bull', 0.0): 1, ('trafficcc', 0.0): 1, ('oscar', 0.0): 1, ('pornographi', 0.0): 1, ('slutsham', 0.0): 1, 
('ect', 0.0): 1, ('poland', 0.0): 1, ('faraway', 0.0): 1, ('700', 0.0): 1, ('800', 0.0): 1, ('cgi', 0.0): 1, ('pun', 0.0): 1, ("x'", 0.0): 1, ('osaka', 0.0): 1, ('junior', 0.0): 1, ('aytona', 0.0): 1, ('hala', 0.0): 1, ('mathird', 0.0): 1, ('jkjk', 0.0): 1, ('backtrack', 0.0): 1, ('util', 0.0): 1, ('pat', 0.0): 1, ('jay', 0.0): 2, ('broh', 0.0): 1, ('calll', 0.0): 1, ('icaru', 0.0): 1, ('awn', 0.0): 1, ('bach', 0.0): 1, ('court', 0.0): 1, ('landlord', 0.0): 1, ("mp'", 0.0): 1, ('dame', 0.0): 1, ('gossip', 0.0): 1, ('purpl', 0.0): 2, ('tie', 0.0): 1, ('ishii', 0.0): 1, ('clara', 0.0): 1, ('yile', 0.0): 1, ('whatev', 0.0): 1, ('stil', 0.0): 1, ('sidharth', 0.0): 1, ('ndabenhl', 0.0): 1, ('doggi', 0.0): 1, ('antag', 0.0): 1, ('41', 0.0): 1, ('thu', 0.0): 1, ('jenner', 0.0): 1, ('troubleshoot', 0.0): 1, ("convo'", 0.0): 1, ('dem', 0.0): 1, ('tix', 0.0): 2, ('automat', 0.0): 1, ('redirect', 0.0): 1, ('gigi', 0.0): 1, ('carter', 0.0): 1, ('corn', 0.0): 2, ('chip', 0.0): 2, ('nnnooo', 0.0): 1, ('cz', 0.0): 1, ('gorilla', 0.0): 1, ('hbm', 0.0): 1, ('humid', 0.0): 1, ('admir', 0.0): 1, ('consist', 0.0): 1, ('jason', 0.0): 1, ("shackell'", 0.0): 1, ('podcast', 0.0): 1, ('envi', 0.0): 1, ('twer', 0.0): 1, ('782', 0.0): 1, ('hahaahahahaha', 0.0): 1, ('sm1', 0.0): 1, ('mutil', 0.0): 1, ('robot', 0.0): 1, ('destroy', 0.0): 1, ('freakin', 0.0): 1, ('haestarr', 0.0): 1, ('😀', 0.0): 3, ('audio', 0.0): 1, ('snippet', 0.0): 1, ('brotherhood', 0.0): 1, ('mefd', 0.0): 1, ('diana', 0.0): 1, ('master', 0.0): 1, ('led', 0.0): 1, ('award', 0.0): 1, ('meowkd', 0.0): 1, ('complic', 0.0): 1, ("c'mon", 0.0): 1, ("swimmer'", 0.0): 1, ('leh', 0.0): 1, ('corner', 0.0): 1, ('didnot', 0.0): 1, ('usanel', 0.0): 2, ('nathan', 0.0): 1, ('micha', 0.0): 1, ('fave', 0.0): 2, ('creep', 0.0): 1, ('throughout', 0.0): 1, ('whose', 0.0): 1, ('ave', 0.0): 1, ('tripl', 0.0): 1, ('lectur', 0.0): 1, ('2-5', 0.0): 1, ('jaw', 0.0): 1, ('quarter', 0.0): 1, ('soni', 0.0): 1, ('followmeaaron', 0.0): 1, ('tzelumxoxo', 0.0): 1, ('drank', 0.0): 1, ('mew', 0.0): 1, ('indic', 0.0): 1, ('ouliv', 0.0): 1, ('70748', 0.0): 1, ('viernesderolenahot', 0.0): 1, ('longmorn', 0.0): 1, ('tobermori', 0.0): 1, ('32', 0.0): 1, ('tail', 0.0): 1, ('recuerda', 0.0): 1, ('tanto', 0.0): 1, ('bath', 0.0): 1, ('muna', 0.0): 1, ('await', 0.0): 1, ('urslef', 0.0): 1, ('lime', 0.0): 1, ('truckload', 0.0): 1, ('favour', 0.0): 2, ('spectat', 0.0): 1, ('sail', 0.0): 1, ("w'end", 0.0): 1, ('bbc', 0.0): 1, ('‘', 0.0): 1, ('foil', 0.0): 1, ('ac45', 0.0): 1, ('catamaran', 0.0): 1, ('peli', 0.0): 1, ('829', 0.0): 1, ('sextaatequemfimseguesdvcomvalentino', 0.0): 1, ('befor', 0.0): 1, ('valu', 0.0): 1, ('cinnamon', 0.0): 1, ('mtap', 0.0): 1, ('peng', 0.0): 1, ('frozen', 0.0): 1, ('bagu', 0.0): 1, ('emang', 0.0): 1, ('engg', 0.0): 1, ('cmc', 0.0): 1, ('mage', 0.0): 1, ('statement', 0.0): 1, ('moodsw', 0.0): 1, ('termin', 0.0): 1, ('men', 0.0): 1, ('peep', 0.0): 1, ('multipl', 0.0): 1, ('mef', 0.0): 1, ('rebound', 0.0): 1, ('pooor', 0.0): 1, ('2am', 0.0): 1, ('perpetu', 0.0): 1, ('bitchfac', 0.0): 1, ('clever', 0.0): 1, ('iceland', 0.0): 1, ('zayn_come_back_we_miss_y', 0.0): 1, ('pmsl', 0.0): 1, ('mianh', 0.0): 1, ('milkeu', 0.0): 1, ('lrt', 0.0): 1, ('bambam', 0.0): 1, ('soda', 0.0): 1, ('payback', 0.0): 1, ('87000', 0.0): 1, ('jobe', 0.0): 1, ('muchi', 0.0): 1, ('🎈', 0.0): 1, ('bathroom', 0.0): 1, ('lagg', 0.0): 1, ('banget', 0.0): 1, ('novel', 0.0): 1, ("there'd", 0.0): 1, ('invis', 0.0): 1, ('scuttl', 0.0): 1, ('worm', 0.0): 1, ('bauuukkk', 0.0): 1, ('jessica', 0.0): 1, ('5:15', 
0.0): 1, ('argument', 0.0): 1, ('couldnt', 0.0): 2, ('yepp', 0.0): 1, ('😺', 0.0): 1, ('💒', 0.0): 1, ('💎', 0.0): 1, ('feelin', 0.0): 1, ('biscuit', 0.0): 1, ('slather', 0.0): 1, ('jsut', 0.0): 1, ('belov', 0.0): 1, ('grandmoth', 0.0): 1, ('princess', 0.0): 2, ('babee', 0.0): 1, ('demn', 0.0): 1, ('hotaisndonwyvauwjoqhsjsnaihsuswtf', 0.0): 1, ('sia', 0.0): 1, ('niram', 0.0): 1, ('geng', 0.0): 1, ('fikri', 0.0): 1, ('tirtagangga', 0.0): 1, ('char', 0.0): 1, ('font', 0.0): 2, ('riprishikeshwari', 0.0): 1, ('creamist', 0.0): 1, ('challeng', 0.0): 1, ('substitut', 0.0): 1, ('skin', 0.0): 1, ('cplt', 0.0): 1, ('cp', 0.0): 1, ('hannah', 0.0): 1, ('💙', 0.0): 1, ('opu', 0.0): 1, ('inner', 0.0): 1, ('pleasur', 0.0): 1, ('bbq', 0.0): 1, ('33', 0.0): 1, ('lolliv', 0.0): 1, ('split', 0.0): 3, ('collat', 0.0): 2, ('spilt', 0.0): 2, ('quitkarwaoyaaro', 0.0): 1, ('deacti̇v', 0.0): 1, ('2.5', 0.0): 1, ('g2a', 0.0): 1, ('sherep', 0.0): 1, ('nemen', 0.0): 1, ('behey', 0.0): 1, ('motherfuck', 0.0): 1, ('tattoo', 0.0): 1, ('reec', 0.0): 1, ('vm', 0.0): 1, ('deth', 0.0): 2, ('lest', 0.0): 1, ('gp', 0.0): 1, ('departur', 0.0): 1, ('wipe', 0.0): 1, ('yuck', 0.0): 1, ('ystrday', 0.0): 1, ('seolhyun', 0.0): 1, ('drama', 0.0): 1, ('spici', 0.0): 1, ('owl', 0.0): 1, ('mumbai', 0.0): 1, ("pj'", 0.0): 1, ('wallpap', 0.0): 1, ('cba', 0.0): 1, ('hotter', 0.0): 1, ('rec', 0.0): 1, ('gotdamn', 0.0): 1, ('baaack', 0.0): 1, ('honest', 0.0): 1, ('srw', 0.0): 1, ('mobag', 0.0): 1, ('dunno', 0.0): 1, ('stroke', 0.0): 1, ('gnr', 0.0): 1, ('backstag', 0.0): 1, ('slash', 0.0): 1, ('prolli', 0.0): 1, ('bunni', 0.0): 1, ('sooner', 0.0): 1, ('analyst', 0.0): 1, ('expedia', 0.0): 1, ('bellevu', 0.0): 1, ('prison', 0.0): 1, ('alcohol', 0.0): 1, ('huhuh', 0.0): 1, ('heartburn', 0.0): 1, ('awalmu', 0.0): 1, ('njareeem', 0.0): 1, ('maggi', 0.0): 1, ('psycho', 0.0): 1, ('wahhh', 0.0): 1, ('abudhabi', 0.0): 1, ('hiby', 0.0): 1, ('shareyoursumm', 0.0): 1, ('b8', 0.0): 1, ('must.b', 0.0): 1, ('dairi', 0.0): 1, ('produxt', 0.0): 1, ('lactos', 0.0): 2, ('midland', 0.0): 1, ('knacker', 0.0): 1, ('footag', 0.0): 1, ('lifeless', 0.0): 1, ('shell', 0.0): 1, ('44', 0.0): 1, ('7782', 0.0): 1, ('pengen', 0.0): 1, ('girlll', 0.0): 1, ('tsunami', 0.0): 1, ('indi', 0.0): 1, ('nick', 0.0): 1, ('tirad', 0.0): 1, ('stoop', 0.0): 1, ('lower', 0.0): 1, ('role', 0.0): 1, ('thunder', 0.0): 1, ('paradis', 0.0): 1, ('habit', 0.0): 1, ('facad', 0.0): 1, ('democraci', 0.0): 1, ('brat', 0.0): 1, ('tb', 0.0): 1, ("o'", 0.0): 1, ('bade', 0.0): 1, ('fursat', 0.0): 1, ('usey', 0.0): 2, ('banaya', 0.0): 1, ('uppar', 0.0): 1, ('waal', 0.0): 1, ('ney', 0.0): 1, ('afso', 0.0): 1, ('hums', 0.0): 1, ('dur', 0.0): 1, ('wo', 0.0): 1, ("who'd", 0.0): 1, ('naruhina', 0.0): 1, ('namee', 0.0): 1, ('haiqal', 0.0): 1, ('360hr', 0.0): 1, ('picc', 0.0): 1, ('instor', 0.0): 1, ('pre-vot', 0.0): 1, ('5th', 0.0): 1, ('usernam', 0.0): 1, ('minho', 0.0): 1, ('durian', 0.0): 1, ('strudel', 0.0): 1, ('tsk', 0.0): 1, ('marin', 0.0): 1, ('kailan', 0.0): 1, ('separ', 0.0): 1, ('payday', 0.0): 1, ('payhour', 0.0): 1, ('immedi', 0.0): 1, ('natur', 0.0): 1, ('pre-ord', 0.0): 1, ('fwm', 0.0): 1, ('guppi', 0.0): 1, ('poorkid', 0.0): 1, ('lack', 0.0): 1, ('misunderstood', 0.0): 1, ('cuddli', 0.0): 1, ('scratch', 0.0): 1, ('thumb', 0.0): 1, ('compens', 0.0): 1, ('kirkiri', 0.0): 1, ('phase', 0.0): 1, ('wonho', 0.0): 1, ('visual', 0.0): 1, ("='(", 0.0): 1, ('mission', 0.0): 1, ('pap', 0.0): 1, ('danzel', 0.0): 1, ('craft', 0.0): 1, ('devil', 0.0): 1, ('phil', 0.0): 1, ('sheff', 0.0): 1, ('york', 0.0): 1, 
('visa', 0.0): 1, ('gim', 0.0): 1, ('bench', 0.0): 1, ('harm', 0.0): 1, ('yolo', 0.0): 1, ('bloat', 0.0): 1, ('olli', 0.0): 1, ('alterni', 0.0): 1, ('earth', 0.0): 1, ('influenc', 0.0): 1, ('overal', 0.0): 1, ('continent', 0.0): 1, ('🔫', 0.0): 1, ('tank', 0.0): 1, ('thirsti', 0.0): 1, ('konami', 0.0): 1, ('polici', 0.0): 1, ('ranti', 0.0): 1, ('atm', 0.0): 1, ('pervers', 0.0): 1, ('bylfnnz', 0.0): 1, ('ban', 0.0): 1, ('failsatlif', 0.0): 1, ('press', 0.0): 1, ('duper', 0.0): 1, ('waaah', 0.0): 1, ('jaebum', 0.0): 1, ('ahmad', 0.0): 1, ('maslan', 0.0): 1, ('hull', 0.0): 1, ('misser', 0.0): 1} ###Markdown Table of word counts

We will select a set of words that we would like to visualize. It is better to store this temporary information in a table that is easy to reuse later. ###Code
# Select some words to appear in the report. We assume each word is unique (i.e. no duplicates).
keys = ['happi', 'merri', 'nice', 'good', 'bad', 'sad', 'mad', 'best', 'pretti', '❤', ':)', ':(', '😒', '😬', '😄', '😍', '♛', 'song', 'idea', 'power', 'play', 'magnific']

data = []
for word in keys:
    pos = 0
    neg = 0

    # Look up the positive and negative counts for the word, if present.
    if (word, 1) in freqs:
        pos = freqs[(word, 1)]
    if (word, 0) in freqs:
        neg = freqs[(word, 0)]

    data.append([word, pos, neg])
data ###Output _____no_output_____ ###Markdown We can then use a scatter plot to inspect this table visually. Instead of plotting the raw counts, we plot them on a logarithmic scale to account for the wide discrepancies between the raw counts (e.g. `:)` has 3568 counts in the positive class but only 2 in the negative). The red line marks the boundary between the positive and negative areas; words close to the red line can be classified as neutral. A small numeric version of this idea follows the plot. ###Code
import numpy as np
from matplotlib import pyplot as plt

fig, ax = plt.subplots(figsize=(8, 8))

# Convert counts to log scale, adding 1 to avoid log(0).
x = np.log([x[1] + 1 for x in data])
y = np.log([x[2] + 1 for x in data])

ax.scatter(x, y, s=10, c='b')
plt.xlabel("Log Positive count")
plt.ylabel("Log Negative count")
ax.tick_params(direction='in')

# Label each point with its word.
for i in range(0, len(data)):
    ax.annotate(data[i][0], (x[i], y[i]), fontsize=12)

ax.plot([0, 9], [0, 9], color='red')  # Plot the red line that divides the 2 areas. ###Output _____no_output_____
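###Markdown The boundary idea above can also be read off numerically. The following cell is an addition, not part of the original notebook — a minimal sketch that reuses the `data` table built above and scores each word as `log(pos + 1) - log(neg + 1)`: positive scores correspond to points below the red line (positive region), negative scores to points above it, and scores near zero to roughly neutral words. ###Code
# A minimal sketch (not in the original notebook): summarize each word's
# position relative to the red line as a single signed score.
import numpy as np

for word, pos, neg in data:
    score = np.log(pos + 1) - np.log(neg + 1)
    print(f"{word}: {score:+.2f}")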
Daily Practice/UpSampling2DPractice.ipynb
###Markdown ###Code from numpy import asarray from keras.models import Sequential from keras.layers import UpSampling2D X = asarray([[4, 7], [9, 14]]) print(X) X = X.reshape((1, 2, 2, 1)) model = Sequential() model.add(UpSampling2D(input_shape=(2, 2, 1))) model.add(UpSampling2D(size = (4,4))) model.summary() yhat = model.predict(X) yhat = yhat.reshape((16, 16)) print(yhat) from keras.models import Sequential from keras.layers import Dense from keras.layers import Reshape from keras.layers import UpSampling2D from keras.layers import Conv2D model = Sequential() model.add(Dense(128 * 5 * 5, input_dim=100)) model.add(Reshape((5, 5, 128))) model.add(UpSampling2D()) model.add(Conv2D(1, (3,3), padding='same')) model.summary() # example of using the transpose convolutional layer from numpy import asarray from keras.models import Sequential from keras.layers import Conv2DTranspose X = asarray([[21, 32], [33, 46]]) print(X) X = X.reshape((1, 2, 2, 1)) model = Sequential() model.add(Conv2DTranspose(1, (1,1), strides=(2,2), input_shape=(2, 2, 1))) model.summary() weights = [asarray([[[[1]]]]), asarray([0])] model.set_weights(weights) yhat = model.predict(X) yhat = yhat.reshape((4, 4)) print(yhat) from keras.models import Sequential from keras.layers import Dense from keras.layers import Reshape from keras.layers import Conv2DTranspose from keras.layers import Conv2D model = Sequential() model.add(Dense(128 * 5 * 5, input_dim=100)) model.add(Reshape((5, 5, 128))) model.add(Conv2DTranspose(1, (3,3), strides=(2,2), padding='same')) model.summary() ###Output Model: "sequential_12" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_1 (Dense) (None, 3200) 323200 _________________________________________________________________ reshape_1 (Reshape) (None, 5, 5, 128) 0 _________________________________________________________________ conv2d_transpose_1 (Conv2DTr (None, 10, 10, 1) 1153 ================================================================= Total params: 324,353 Trainable params: 324,353 Non-trainable params: 0 _________________________________________________________________
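###Markdown To make explicit what `UpSampling2D` is doing above, here is a minimal NumPy sketch (an addition, not from the original notebook) of its default nearest-neighbour behaviour: each value is simply repeated along both spatial axes, which is why the 2x2 input becomes a block-repeated output. ###Code
import numpy as np

def upsample_nearest(x, size=(2, 2)):
    # repeat rows, then columns; per channel this matches UpSampling2D's
    # default 'nearest' interpolation
    return np.repeat(np.repeat(x, size[0], axis=0), size[1], axis=1)

X = np.array([[4, 7], [9, 14]])
print(upsample_nearest(X))                            # 4x4 block-repeated version of X
print(upsample_nearest(upsample_nearest(X), (4, 4)))  # 16x16, matching the stacked model above
###Output _____no_output_____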
LDA_topic_modeling.ipynb
###Markdown topic modeling: how much weight does each word carry within the full set of documents? -> confirm this numerically. [SVD](https://wikidocs.net/30707) underpins latent semantic analysis. The data set is 15 years' worth of collected news headlines. ###Code
!curl -O https://raw.githubusercontent.com/franciscadias/data/master/abcnews-date-text.csv
import pandas as pd
df_data = pd.read_csv('./abcnews-date-text.csv')
df_data = df_data.head(10000)
head_text = df_data[['headline_text']]
type(head_text)
###Output _____no_output_____ ###Markdown word_tokenize ###Code
import nltk
nltk.download('punkt')
###Output [nltk_data] Downloading package punkt to /root/nltk_data... [nltk_data] Package punkt is already up-to-date! ###Markdown pandas' map()/apply() * head_text.apply(function): head_text.apply(lambda row: nltk.word_tokenize(row['headline_text']), axis=1) ###Code
head_text['title_text'] = head_text.apply(lambda row:nltk.word_tokenize(row['headline_text']),axis=1)
head_text.head(3)
from nltk.corpus import stopwords
nltk.download('stopwords')
stop = stopwords.words('english')
print(stop)
###Output ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't"] ###Markdown The previous, explicit-loop approach: ```
def stopword(x):
    result = []
    for word in x:
        if len(word) > 3:
            if word not in stop:
                result.append(word)
    return result

def callStopWord(head_text):
    titles = []
    for x in head_text['title_text']:
        titles.append(stopword(x))
    return titles
``` ###Code
# rewritten as a lambda passed to apply
head_text['title']= head_text['title_text'].apply(lambda x: [word for word in x if (len(word) > 3) if ( word not in stop) ])
head_text.head(5)
type(head_text.head(5))
head_text['title'][3]
###Output _____no_output_____ ###Markdown ```
tokens = []
for i in range(len(head_text)):
    tokens.append(head_text['title'][i])
tokens[3:5]
```
Result: [['staff', 'aust', 'strike', 'rise'], ['strike', 'affect', 'australian', 'travellers']] ###Code
tokens = []
for i in range(len(head_text)):
    tokens.append(' '.join(head_text['title'][i]))
tokens[3:5]
###Output _____no_output_____ ###Markdown TfidfVectorizer ###Code
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(max_features=1000)
X = tfidf.fit_transform(tokens)
X.shape
###Output _____no_output_____ ###Markdown The TF-IDF values reflect how words relate across the whole corpus 
###Code X[4].toarray() ###Output _____no_output_____ ###Markdown LatentDirichletAllocation ###Code from sklearn.decomposition import LatentDirichletAllocation ###Output _____no_output_____ ###Markdown max_iter=10 plays a role similar to the number of epochs ###Code
# lda_model = LatentDirichletAllocation()
lda_model = LatentDirichletAllocation(n_components=4)
lda_top = lda_model.fit_transform(X)
###Output _____no_output_____ ###Markdown components_ is a numpy array (topics x terms) holding each term's importance per topic ###Code
lda_model.components_.shape,lda_model.components_
terms = tfidf.get_feature_names()
###Output _____no_output_____ ###Markdown The headlines are now grouped into topics (4 here, since n_components=4; sklearn's default would be 10); the loop prints the top terms per topic ###Code
n = 5
for idx, topic in enumerate(lda_model.components_):
    print([(terms[i], topic[i]) for i in topic.argsort()[:-n-1:-1]])
###Output _____no_output_____
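###Markdown A natural next step is to assign each headline its dominant topic. This short sketch is an addition (not in the original notebook); it assumes `lda_top` (the documents x topics matrix returned by `fit_transform`) and `head_text` from above. ###Code
import numpy as np

dominant_topic = np.argmax(lda_top, axis=1)  # index of the highest-probability topic per headline
head_text = head_text.assign(topic=dominant_topic)
print(head_text[['headline_text', 'topic']].head(10))
###Output _____no_output_____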
Greedy Algorithms Minimum Spanning Trees and Dynamic Programming/Programming Assignment 3/DynamicProgramming.ipynb
###Markdown 1 point. 3. In this programming problem you'll code up the dynamic programming algorithm for computing a maximum-weight independent set of a path graph. Download the text file below. [mwis.txt](https://github.com/SSQ/Coursera-Stanford-Greedy-Algorithms-Minimum-Spanning-Trees-and-Dynamic-Programming/blob/master/Programming%20Assignment%203/mwis.txt) This file describes the weights of the vertices in a path graph (with the weights listed in the order in which vertices appear in the path). It has the following format: [number_of_vertices] [weight of first vertex] [weight of second vertex] ... For example, the third line of the file is "6395702," indicating that the weight of the second vertex of the graph is 6395702. Your task in this problem is to run the dynamic programming algorithm (and the reconstruction procedure) from lecture on this data set. The question is: of the vertices 1, 2, 3, 4, 17, 117, 517, and 997, which ones belong to the maximum-weight independent set? (By "vertex 1" we mean the first vertex of the graph---there is no vertex 0.) In the box below, enter an 8-bit string, where the ith bit should be 1 if the ith of these 8 vertices is in the maximum-weight independent set, and 0 otherwise. For example, if you think that the vertices 1, 4, 17, and 517 are in the maximum-weight independent set and the other four vertices are not, then you should enter the string 10011010 in the box below. ###Code
import numpy as np
# get the file path
file_path = 'mwis.txt'
# file_path = 'test 1.txt'
# load the text file into a numpy array and cast the weights to integers
path_graph_data = np.loadtxt(file_path)
int_path_graph_data = path_graph_data.astype(int)
A = {}
A[-1] = 0
A[0] = 0
A[1] = int_path_graph_data[0]
for i in range(2, len(int_path_graph_data)):
    # A[i] = max weight over the first i vertices; int() replaces the original
    # Python-2-only long(), since Python 3 ints are arbitrary precision
    A[i] = int(max(A[i-1], A[i-2] + int_path_graph_data[i-1]))
print(A[2])  # quick sanity check
S = set()
position = len(int_path_graph_data)
while position >= 1:
    if A[position - 1] >= (A[position - 2] + int_path_graph_data[position-1]):
        position = position - 1
    else:
        S.add(position)
        position = position - 2
print('S: ')
print(S)
judge_node = [1, 2, 3, 4, 17, 117, 517, 997]
for x in judge_node:
    print(1 if x in S else 0)
###Output 1 0 1 0 0 1 1 0
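###Markdown A quick sanity check (added here, not part of the assignment code): the reconstructed set `S` holds 1-indexed vertices, so an independent set on a path must never contain two consecutive indices, and its total weight can be read off directly. ###Code
# no two chosen vertices may be adjacent on the path
assert all(v + 1 not in S for v in S), "adjacent vertices selected"
# total weight of the maximum-weight independent set found
print(sum(int(int_path_graph_data[v - 1]) for v in S))
###Output _____no_output_____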
.ipynb_checkpoints/flower-classifier-checkpoint.ipynb
###Markdown The Amazing Flower Classifier! You need to know whether you're looking at chamomile, tulip, rose, sunflower or dandelion, and you need an answer fast? Then you've come to the right place. Take a pic of the flower and click 'upload' to classify it. (Important: this only handles chamomile, tulip, rose, sunflower and dandelion. It will **not** give a sensible answer for other flowers.) ###Code
# imports this checkpoint relies on (fastai v2 and ipywidgets)
from fastai.vision.all import *   # Path, load_learner, PILImage
import ipywidgets as widgets
from ipywidgets import VBox
from IPython.display import display
path = Path()
learn_inf = load_learner(path/'export.pkl', cpu=True)
btn_upload = widgets.FileUpload()
out_pl = widgets.Output()
lbl_pred = widgets.Label()
def on_click(change):
    img = PILImage.create(btn_upload.data[-1])
    out_pl.clear_output()
    with out_pl: display(img.to_thumb(128,128))
    pred,pred_idx,probs = learn_inf.predict(img)
    lbl_pred.value = f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}'
btn_upload.observe(on_click, names=['data'])
display(VBox([widgets.Label('Select your flower!'), btn_upload, out_pl, lbl_pred]))
###Output _____no_output_____
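###Markdown For use outside the notebook widgets, the same model can be called headlessly. This is a hedged sketch built only from the calls already used above; `'my_flower.jpg'` is a hypothetical local file name. ###Code
def classify(img_path):
    # headless variant of the on_click callback above
    img = PILImage.create(img_path)
    pred, pred_idx, probs = learn_inf.predict(img)
    return pred, float(probs[pred_idx])

# classify('my_flower.jpg')  # e.g. ('rose', 0.9812)
###Output _____no_output_____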
analysis/cluster_c9orf72.ipynb
###Markdown C9orf72 AnalysisThis notebook analyzes the cluster membership for c9orf72 patients from AALS ###Code %matplotlib inline from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" %load_ext autoreload %autoreload 2 from analysis_utils import * import mogp from pathlib import Path import pandas as pd import matplotlib import matplotlib.pyplot as plt import seaborn as sns sns.set(font_scale=2) sns.set_style("white") col_palette = ['#176D9C', '#DBA137','#86AF49' ,'#df473c','#ae3c60', '#82b4bb'] parameters = {'pdf.fonttype': 42} plt.rcParams.update(parameters) df_c9 = pd.read_csv('data/raw_data/aals/v_NB_IATI_ALS_Gene_Mutations.csv', index_col='SubjectUID') df_c9.index = df_c9.index + '_aals' df_c9['c9orf72'] = df_c9['c9orf72'].map({'1':1, '2':0, '.':0}) #only count positive c9orf72 results # select best MAP model exp_path = Path('data/model_data/1_alsfrsr_all') cur_proj = 'aals' cur_model_path = exp_path / 'results' / 'rbf' cur_data_path = exp_path / 'data_{}_min3_alsfrst.pkl'.format(cur_proj) cur_model_suffix = 'model_{}_min3_alsfrst'.format(cur_proj) cur_model = get_map_model(cur_model_path, cur_model_suffix, num_seeds=5) cur_data = joblib.load(cur_data_path) df_clust_memb = pd.DataFrame(zip(cur_data['SI'], cur_model.z), columns=['subj_id', 'cluster_id']).set_index('subj_id') df_clust_memb = df_clust_memb.join(df_c9) c9_freq = pd.DataFrame() c9_freq['c9pos']=df_clust_memb.groupby('cluster_id')['c9orf72'].sum() clust_size = df_clust_memb['cluster_id'].value_counts() clust_size.name = 'clust_size' c9_freq = c9_freq.join(clust_size) c9_freq['freq'] = c9_freq['c9pos'] / c9_freq['clust_size'] c9_freq.sort_values(by='freq', ascending=False, inplace=True) # Visualize clusters with highest proportions of c9orf72 patients clust_size_thresh = 10 fig, ax = plt.subplots(figsize=(10,5)) vis_clust = c9_freq[c9_freq['clust_size']>clust_size_thresh].index[0:3] disp_freq = [] for j, cur_k in enumerate(vis_clust): cur_disp_freq = '{:.2f}%'.format(c9_freq.loc[cur_k]['freq']*100) disp_freq.append(cur_disp_freq) _, num_pat = plot_mogp_by_clust(ax, cur_model, cur_data, cur_k, data_flag=False, data_col='k', model_flag=True, model_col=col_palette[j], model_alpha=0.2, gpy_pad=0.5) ax.get_legend().remove() ind_lis = np.where(np.in1d(cur_data['SI'], df_clust_memb[(df_clust_memb['cluster_id']==cur_k)&(df_clust_memb['c9orf72']==1)].index)) _ = ax.plot(cur_data['XA'][ind_lis].T[1:], cur_data['YA'][ind_lis].T[1:], 'o-', color='k', alpha=0.75) _ = format_mogp_axs(ax) # Edit legend handles, labels = ax.get_legend_handles_labels() handles = [(x, matplotlib.patches.Patch(color=x.get_color(), alpha=0.25, linewidth=0)) for x in handles] labels = disp_freq _ = ax.legend(handles=handles, labels=labels, frameon=False) _ = ax.set_ylabel('ALSFRS-R Total') _ = ax.set_xlabel('Time since Symptom Onset (Years)') c9_freq[c9_freq['clust_size']>clust_size_thresh] ###Output _____no_output_____
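###Markdown To attach a significance level to the cluster frequencies above, a per-cluster 2x2 Fisher's exact test is one option. This sketch is an addition to the notebook; it assumes `df_clust_memb` from above and, as a stated assumption, treats missing genotypes as c9orf72-negative. ###Code
from scipy.stats import fisher_exact

df_test = df_clust_memb.fillna({'c9orf72': 0})  # assumption: missing genotype -> negative
n_pos, n_tot = int(df_test['c9orf72'].sum()), len(df_test)
for k, grp in df_test.groupby('cluster_id'):
    a = int(grp['c9orf72'].sum())      # c9-positive inside the cluster
    b = len(grp) - a                   # c9-negative inside the cluster
    c = n_pos - a                      # c9-positive outside the cluster
    d = (n_tot - len(grp)) - c         # c9-negative outside the cluster
    odds, p = fisher_exact([[a, b], [c, d]])
    print(k, 'odds=%.2f' % odds, 'p=%.4f' % p)
###Output _____no_output_____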
nlp/doc2vec-sample.ipynb
###Markdown IMDB Movies Dataset Analysis using Doc2Vec Preprocessing Step ###Code import nltk from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from nltk import word_tokenize import string wordnet_lemmatizer = WordNetLemmatizer() stopwords = nltk.corpus.stopwords.words('english') def readSentenceAndPreprocess(sentence): # Remove stopwords record = unicode(sentence.lower(), "utf-8") record = word_tokenize(record) record = [wordnet_lemmatizer.lemmatize(x) for x in record if x not in string.punctuation] sentence = [w for w in record if w not in stopwords] return sentence ###Output _____no_output_____ ###Markdown Loading the model ###Code from gensim import models model = models.Doc2Vec.load('imdb-doc2vec.model') ###Output _____no_output_____ ###Markdown Keyword-Test Paragraph Similarity Matrix ###Code from gensim import models import pandas import numpy as np # get top 10 similar words to keyword from model keywords = [["lion","king"], ["batman"],["pokemon"],["matrix"],["darth","vader"]] print "Top 10 Similar words:" for keyword in keywords: print "\n" + '_'.join(keyword) print pandas.DataFrame(model.wv.most_similar(positive=keyword, topn=10), columns=['Word','Score']) ###Output Top 10 Similar words: lion_king Word Score 0 rafiki 0.330048 1 simba 0.314177 2 matata 0.284346 3 pumbaa 0.282482 4 sabella 0.282138 5 timon 0.267771 6 1½ 0.266465 7 1/2\ 0.248916 8 hakuna 0.236737 9 marge 0.235552 batman Word Score 0 batgirl 0.354142 1 penguin 0.295907 2 btas 0.272044 3 batwoman 0.271066 4 wb 0.268728 5 o'hearn 0.258377 6 gotham 0.246168 7 joker 0.243499 8 nightwing 0.242217 9 bartram 0.232965 pokemon Word Score 0 celebi 0.475934 1 suicune 0.431710 2 4ever 0.345238 3 things\ 0.265295 4 pikachu 0.231380 5 vol 0.227041 6 miramax 0.223460 7 misty 0.217513 8 brock 0.216896 9 lugia 0.216481 matrix Word Score 0 reloaded 0.282041 1 morpheus 0.237963 2 simulation 0.213727 3 kaiser 0.192410 4 iconography 0.192259 5 j.j. 0.184706 6 \special\ 0.182963 7 trickery 0.182767 8 truth\ 0.182744 9 renaissance 0.182124 darth_vader Word Score 0 leia 0.487074 1 sith 0.454924 2 palpatine 0.446418 3 yoda 0.440748 4 endor 0.413773 5 lando 0.410392 6 tatooine 0.405396 7 jedi 0.372744 8 chewbacca 0.361869 9 skywalker 0.355867
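###Markdown The same model can also score unseen text. The sketch below is an addition, not part of the original notebook: `infer_vector` and `docvecs.most_similar` belong to the gensim Doc2Vec API of this era (in gensim >= 4.0, `docvecs` was renamed `model.dv`), and the example review string is invented for illustration. ###Code
# infer a vector for a new review and look up the closest training documents
tokens = readSentenceAndPreprocess("An epic space opera with stunning visuals")
vec = model.infer_vector(tokens)
print(model.docvecs.most_similar([vec], topn=5))
###Output _____no_output_____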
.ipynb_checkpoints/FDA_Assignment-checkpoint.ipynb
###Markdown Fundamentals of Data Analysis Assignment Semester 2, October 2018, Eimear Butler This assignment focuses on the four data sets known as the Anscombe Quartet. I have been assigned questions in relation to the data set and have been asked to detail my response in this Jupyter Notebook. The four questions posed are as follows: 1. Explain the background to the dataset – who created it, when it was created, and any speculation you can find regarding how it might have been created. The Anscombe Quartet data was published in 1973 [1] by Francis Anscombe, an English statistician. Anscombe opens his article with a line that both sets the scene and summarises the objective of his research and article: > "Graphs are essential to good statistical analysis." He goes further during his introduction to challenge the thinking of the time that numbers are precise while graphs were perceived as "rough", and states that: > "A computer should make *both* calculations *and* graphs. Both sorts of output should be studied; each will contribute to understanding." > "Graphs can have various purposes such as: i) to help us perceive and appreciate some broad features of the data; ii) to let us look behind those broad features and see what else is there." The use of computers for creating graphs was not as common as it is today, and Anscombe aimed to use the four data sets he created as evidence that graphing is an essential part of investigating all data. > "Most kinds of statistical calculation rest on assumptions about the behavior of the data. Those assumptions may be false, and then the calculations may be misleading. We ought always to try to check whether the assumptions are reasonably correct; and if they are wrong we ought to be able to perceive in what ways they are wrong. Graphs are very valuable for these purposes." Anscombe proceeds to outline the four data sets which ultimately prove his point [1]. Notice how Anscombe even presents the data set highlighting how the *x* values are exactly the same for the first 3 data sets, while the fourth contains ten values of 8 with only one value of 19. The *y* values then vary, and no obvious pattern is seen between them. Anscombe goes on to explain the significance of the four data sets: when analyzing the values alone, they have very similar characteristics; however, when the values are plotted in a simple scatter plot, they are seen to be very different. In this assignment, I intend to test Anscombe's data to see if I can reproduce his results within this Jupyter Notebook. Regarding the way in which Anscombe created these data sets, this remains a mystery; however, today there are other examples available to us demonstrating how different data sets produce very similar descriptive statistics but completely different graphs. The Datasaurus Dozen [2] is a fantastic demonstration of just how different data sets can be while maintaining the same key descriptive statistics. The authors produced a video and gif file [2] as part of their publication to bring it to life, shown here: As you will have spotted, this dataset even uses Alberto Cairo's Datasaurus [3] as well as other somewhat symmetrical shapes while keeping the descriptive statistics the same to 2 decimal points. Cairo's Datasaurus is now used widely and is available as a csv file through his blog post [3] to recreate as I have done below. 
###Code
import pandas as pd #import pandas functionality
import numpy as np #import numpy functionality
import matplotlib.pyplot as plt #import matplotlib functionality
d = pd.read_csv('https://raw.githubusercontent.com/eimearbutler7/FundamentalsDA/master/z_Datasaurus_data.csv')
plt.plot(d.loc[:,'x'],d.loc[:,'y'],'o',color='black') # create a graph using all x and y data values, represented as black circles
plt.show() # show the plot
###Output _____no_output_____ ###Markdown Justin Matejka and George Fitzmaurice started with the descriptive statistics* of the Datasaurus and then created a technique where they could feed a target shape to their program and test the repeated movement of data points towards the new shape, checking that they still met the overall descriptive statistics to within two decimal points [4]. This required running the process a number of times; within his blog post, Matejka quotes the movement towards the circle shape needing 200,000 small movements [4]. \* *means, standard deviations, and correlations* They then used a system of simulated annealing to optimise the process and reduce the number of points needing to be tested. Simulated annealing optimises combinatorial searches and is described as: > "...unlikely to find the optimum solution, [however,] it can often find a very good solution, even in the presence of noisy data." [5] An example of this technique in action is the *Travelling Salesman* problem, where the salesman knows he needs to visit numerous locations scattered across an area but needs to find the shortest route to visit them all [5]. This challenge is well demonstrated in the following gif [6], where the different combinations for all 125 points are processed to find the optimal route. 2. Plot the interesting aspects of the dataset. ###Code
#Load the .csv dataset from my GitHub repository using pandas
df = pd.read_csv('https://raw.githubusercontent.com/eimearbutler7/FundamentalsDA/master/z_Anscombes.csv')
df #view the data
import seaborn as sns #import seaborn functionality
sns.pairplot(df, hue='dataset') #seaborn's pairplot function gives a quick overview of the data, separating each data set by colour
###Output _____no_output_____ ###Markdown Instantly, using the seaborn `.pairplot` function, the significant differences between the data sets can be seen. Let's go further and pull out each data set to analyse individually and see their shapes: ###Code
#create subsets of each data set's 'x' values
x = df.loc[:,'x']
x1 = df.loc[:10,'x']
x2 = df.loc[11:21,'x']
x3 = df.loc[22:32,'x']
x4 = df.loc[33:43,'x']
#can call x1, x2, x3 or x4 here to test the data is correct before proceeding
#create subsets of each data set's 'y' values
y = df.loc[:,'y']
y1 = df.loc[:10,'y']
y2 = df.loc[11:21,'y']
y3 = df.loc[22:32,'y']
y4 = df.loc[33:43,'y']
#can call y1, y2, y3 or y4 here to test the data is correct before proceeding
###Output _____no_output_____ ###Markdown Using the numpy `.polyfit` function, I can also determine the *m* and *c* values for each plot, where *m* is the slope of the line and *c* is the value of *y* where the line crosses the *y* axis (i.e. where *x* is 0). This will allow me to add a "best fit" line to the graph. 
###Code
(m1,c1) = np.polyfit(x1,y1,1) #use polyfit to determine the least squares polynomial fit, where 1 is the degree of the fitted polynomial
(m2,c2) = np.polyfit(x2,y2,1) #repeat for x2,y2
(m3,c3) = np.polyfit(x3,y3,1) #repeat for x3,y3
(m4,c4) = np.polyfit(x4,y4,1) #repeat for x4,y4
#the resulting m and c are the values in the equation of a straight line (y=mx+c)
out = [(m1,c1),(m2,c2),(m3,c3),(m4,c4)]
out
# I can now plot the data with the best fit line using the method provided by Ian McLoughlan (https://raw.githubusercontent.com/ianmcloughlin/jupyter-teaching-notebooks/master/simple-linear-regression.ipynb)
plt.plot(x1, y1, 'k.', label='Original data')
plt.plot(x1, m1 * x1 + c1, 'b-', label='Best fit line') #using m and c values determined above
plt.xlim([0,20]) #set graph limits for x axis
plt.ylim([0,20]) #set graph limits for y axis
plt.xlabel('x1') #set name for x axis
plt.ylabel('y1') #set name for y axis
plt.legend() # add a legend
plt.show() #show the plot inline
# I can repeat for x2,y2
plt.plot(x2, y2, 'k.', label='Original data')
plt.plot(x2, m2 * x2 + c2, 'b-', label='Best fit line')
plt.xlim([0,20]) #set graph limits for x axis
plt.ylim([0,20]) #set graph limits for y axis
plt.xlabel('x2') #set name for x axis
plt.ylabel('y2') #set name for y axis
plt.legend() # add a legend
plt.show() #show the plot inline
# I can repeat for x3,y3
plt.plot(x3, y3, 'k.', label='Original data')
plt.plot(x3, m3 * x3 + c3, 'b-', label='Best fit line')
plt.xlim([0,20]) #set graph limits for x axis
plt.ylim([0,20]) #set graph limits for y axis
plt.xlabel('x3') #set name for x axis
plt.ylabel('y3') #set name for y axis
plt.legend() # add a legend
plt.show() #show the plot inline
# I can repeat for x4,y4
plt.plot(x4, y4, 'k.', label='Original data')
plt.plot(x4, m4 * x4 + c4, 'b-', label='Best fit line')
plt.xlim([0,20]) #set graph limits for x axis
plt.ylim([0,20]) #set graph limits for y axis
plt.xlabel('x4') #set name for x axis
plt.ylabel('y4') #set name for y axis
plt.legend() # add a legend
plt.show() #show the plot inline
###Output _____no_output_____ ###Markdown Clearly all 4 graphs are very different even though they accept an almost identical "best fit" line. 3. Calculate the descriptive statistics of the variables in the dataset Instantly, using the `.describe` function, we confirm some of Anscombe's descriptive statistics table. ###Code
df.describe() #gives an instant overview of the high-level descriptive statistics
###Output _____no_output_____ ###Markdown Let's go further and pull out each data set and analyse it individually to see if I can confirm his descriptive statistics findings. Please note, where possible, results for all 4 datasets are presented in one list to allow easy comparison instead of being run in individual cells. - Number of observations (*n*) = 4 sets of 11 values - Mean of the x's ($\bar{x}$) = 9.0 (also confirmed below for all 4 sets separately) ###Code
x_mean = [np.mean(x1),np.mean(x2), np.mean(x3), np.mean(x4)] #show list of mean x values for each data set
print('Mean of X values is:', x_mean)
y_mean = [np.mean(y1),np.mean(y2), np.mean(y3), np.mean(y4)] #show list of mean y values for each data set
print('Mean of Y values is:', y_mean)
###Output Mean of Y values is: [7.500909090909093, 7.500909090909091, 7.500000000000001, 7.50090909090909] ###Markdown The sample variance of the data is also consistent, although Anscombe does not include this in his published table. 
###Code
x_variance = [np.var(x1,ddof=1), np.var(x2,ddof=1), np.var(x3,ddof=1), np.var(x4,ddof=1)] #show list of x variance values for each data set; ddof=1 was added using advice from https://stackoverflow.com/questions/41204400/what-is-the-difference-between-numpy-var-and-statistics-variance-in-python
print('Variance for X values are:', x_variance)
y_variance = [np.var(y1,ddof=1), np.var(y2,ddof=1), np.var(y3,ddof=1), np.var(y4,ddof=1)] #show list of y variance values for each data set (ddof=1 as above)
print('Variance for Y values are:', y_variance)
###Output Variance for Y values are: [4.127269090909091, 4.127629090909091, 4.12262, 4.12324909090909] ###Markdown Anscombe instead represented variation using the R2 coefficient, which I can also determine for each data set using the `stats.linregress` function: ###Code
from scipy import stats #import the stats functions within scipy
slope_1, intercept_1, r_value_1, p_value_1, std_err_1 = stats.linregress(x1, y1) #use the example from the scipy manual to find the r value (https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.linregress.html)
slope_2, intercept_2, r_value_2, p_value_2, std_err_2 = stats.linregress(x2, y2) #repeat for other data sets
slope_3, intercept_3, r_value_3, p_value_3, std_err_3 = stats.linregress(x3, y3) #repeat for other data sets
slope_4, intercept_4, r_value_4, p_value_4, std_err_4 = stats.linregress(x4, y4) #repeat for other data sets
print("R Squared Value for all 4 sets", r_value_1**2, r_value_2**2, r_value_3**2, r_value_4**2)
###Output R Squared Value for all 4 sets 0.666542459508775 0.6662420337274844 0.6663240410665591 0.6667072568984652 ###Markdown Again we see consistent results, with R2 agreeing to 3 decimal places, and likewise with the Standard Deviation: ###Code
x_SD = [np.std(x1), np.std(x2), np.std(x3), np.std(x4)] #show list of standard deviations for x values in each data set
print('Standard Deviation of X Values are:', x_SD)
y_SD = [np.std(y1), np.std(y2), np.std(y3), np.std(y4)] #show list of standard deviations for y values in each data set
print('Standard Deviation of Y Values are:', y_SD)
###Output Standard Deviation of Y Values are: [1.937024215108669, 1.93710869148962, 1.9359329439927313, 1.9360806451340837] ###Markdown I can also now calculate the Sum of Squares value using the calculation Anscombe gave us himself in his overview table (x - $\bar{x}$): ###Code
#Anscombe himself gives us the formula: sum(x^2) - n*mean(x)^2, which is equivalent to sum((x - mean(x))^2)
x_sum_of_squares = [np.sum((x1**2)-(np.mean(x1)**2)), np.sum((x2**2)-(np.mean(x2)**2)), np.sum((x3**2)-(np.mean(x3)**2)), np.sum((x4**2)-(np.mean(x4)**2))]
print('X value Sum of Squares:', x_sum_of_squares) #results printed in a list to allow easy comparison
###Output X value Sum of Squares: [110.0, 110.0, 110.0, 110.0] ###Markdown Residual Sum of Squares and Regression Sum of Squares Attempts **Expected Result: Residual sum of squares of y = 13.75 (9 d.f.), Regression sum of squares = 27.50 (1 d.f.)** In order to calculate these values, I must first refer to the equations used to obtain them. The following diagram [9] has been very useful for understanding them. **NOTE:** Explained Sum of Squares (ESS) is also known as Regression Sum of Squares [8]. I aim to find Total Sum of Squares (TSS), Residual Sum of Squares (RSS) and Explained Sum of Squares (ESS) below and then test whether TSS = RSS + ESS [7]. First I create a list of "predicted" 'y' values (i.e. 
what the best-fit line's 'y' value is) at the same 'x' values, to compare the two and thereby establish the Residual Sum of Squares (RSS). ###Code
y1_best = m1*(x1) + c1 #I already know m, x and c so I can use the equation of the line to find y
y2_best = m2*(x2) + c2
y3_best = m3*(x3) + c3
y4_best = m4*(x4) + c4
y1_best #this works and gives me the y values for the best fit line
plt.plot(x1,y1_best,color='black') #let's plot them and check
plt.plot(x2,y2_best,color='red')
plt.plot(x3,y3_best,color='yellow')
plt.plot(x4,y4_best,color='green')
plt.xlim([0,20])
plt.ylim([0,20])
plt.show()
# we can see that all the lines are consistent and therefore these are the predicted y values.
RSS1 = sum((y1-y1_best)**2) #Residual Sum of Squares equation from the diagram (https://i.stack.imgur.com/FOzPq.png)
RSS2 = sum((y2-y2_best)**2)
RSS3 = sum((y3-y3_best)**2)
RSS4 = sum((y4-y4_best)**2)
RSS_list = [RSS1, RSS2, RSS3, RSS4]
print('RSS of each data set is:', RSS_list)
###Output RSS of each data set is: [13.762689999999996, 13.776290909090909, 13.756191818181817, 13.742490000000005] ###Markdown This result is fairly consistent across the 4 data sets and consistent with Anscombe's result of 13.75. The least-squares "cost" function used in machine learning, which also measures how close the values are to the best-fit line, is this same sum of squared residuals (some formulations add a constant scaling factor). Calculating the cost for the first data set gives a result that is only 0.01 away from Anscombe's rounded overall result (13.75) and matches the first data set's RSS (13.76269) as expected. ###Code
cost = lambda m1,c1: np.sum([(y1[i] - m1 * x1[i] - c1)**2 for i in range(x1.size)]) #using Ian McLoughlan's formula (https://github.com/ianmcloughlin/jupyter-teaching-notebooks/raw/master/simple-linear-regression.ipynb)
print("Cost of first dataset is: %8.2f" % (cost(m1, c1)))
###Output Cost of first dataset is: 13.76 ###Markdown To confirm the statement in the diagram that TSS = RSS + ESS, I calculate TSS and ESS and test the equation ###Code
TSS1 = sum((y1-np.mean(y1))**2) #equation for Total Sum of Squares (https://i.stack.imgur.com/FOzPq.png)
TSS2 = sum((y2-np.mean(y2))**2) #repeat for other datasets
TSS3 = sum((y3-np.mean(y3))**2) #repeat for other datasets
TSS4 = sum((y4-np.mean(y4))**2) #repeat for other datasets
TSS_list = [TSS1, TSS2, TSS3, TSS4]
print('TSS of each data set is:', TSS_list)
ESS1 = sum((y1_best-np.mean(y1))**2) #Explained Sum of Squares
ESS2 = sum((y2_best-np.mean(y2))**2)
ESS3 = sum((y3_best-np.mean(y3))**2)
ESS4 = sum((y4_best-np.mean(y4))**2)
ESS_list = [ESS1, ESS2, ESS3, ESS4]
print('ESS of each data set is:', ESS_list)
Equation_success = [RSS1 + ESS1 == TSS1, RSS2 + ESS2 == TSS2, RSS3 + ESS3 == TSS3, RSS4 + ESS4 == TSS4] #test whether the results "add up"
Equation_success
###Output _____no_output_____ ###Markdown Although data sets 3 and 4 return False (floating-point rounding), they give the same overall value to at least 10 decimal places. 
###Code
RSS3 + ESS3 #this is correct to at least 10 decimal places
RSS4 + ESS4 #this is correct to at least 10 decimal places
###Output _____no_output_____ ###Markdown Summary Going back to Anscombe's original table, I have now seen the following to be true (to at least 2 decimal places):
| **Parameter** | **Anscombe's Result** | **Reproduced Here** |
| --- | --- | --- |
| Number of observations (*n*) | 11 | Yes |
| Mean of the *x*'s ($\bar{x}$) | 9.0 | Yes |
| Mean of the *y*'s ($\bar{y}$) | 7.5 | Yes |
| Regression coefficient (*b*1) of *y* on *x* | 0.5 | Yes |
| Equation of regression line *y* | 3 + 0.5 *x* | Yes |
| Sum of squares of *x* - ($\bar{x}$) | 110.0 | Yes |
| Multiple *R*2 | 0.667 | Yes |
| Residual sum of squares of *y* | 13.75 (9 d.f.) | Yes |
| Regression sum of squares | 27.50 (1 d.f.) | Yes |
No conclusive confirmation could be made of the following parameter; however, my attempt is in the back-up section of this notebook: - Estimated standard error of *b*1 = 0.118 4. Explain why the dataset is interesting, referring to the plots and statistics above. In summary, these four sets of data are fascinating as they produce almost identical descriptive statistics and even have portions of their values that are the very same (i.e. sets 1, 2 and 3 all have the same *x* values). Yet once plotted, they each create very differently shaped curves. Francis Anscombe set out to prove the worth of plotting data *as well as* analyzing it at the numerical level. I think he made a very strong statement, as these data sets and his message continue to be analysed and discussed at length today. As discussed in section 1, the research by Matejka and Fitzmaurice, only published in 2017, has ensured this subject will continue to be discussed for some time to come. This concept and warning become more and more relevant as data sets grow and data becomes even more vital to decision-making in both business and wider society. References
1. F. J. Anscombe (1973). Graphs in Statistical Analysis. The American Statistician, 27(1):17-21.
2. J. Matejka and G. Fitzmaurice (2017). Same Stats, Different Graphs: Generating Datasets with Varied Appearance and Identical Statistics through Simulated Annealing (The Datasaurus Dozen). https://dl.acm.org/citation.cfm?doid=3025453.3025912
3. A. Cairo (2016). The original Datasaurus. http://www.thefunctionalart.com/2016/08/download-datasaurus-never-trust-summary.html
4. J. Matejka (2017). Supplementary blog post on the Datasaurus Dozen. https://www.autodeskresearch.com/publications/samestats
5. http://mathworld.wolfram.com/SimulatedAnnealing.html
6. https://upload.wikimedia.org/wikipedia/commons/1/10/Travelling_salesman_problem_solved_with_simulated_annealing.gif
7. https://en.wikipedia.org/wiki/Residual_sum_of_squares
8. https://en.wikipedia.org/wiki/Explained_sum_of_squares
9. https://stats.stackexchange.com/questions/265869/confused-with-residual-sum-of-squares-and-total-sum-of-squares
Back Up Estimated Standard Error Attempt **Expected result: Estimated standard error of b1 = 0.118** The Standard Error of the *Mean* (SEM) is the standard deviation of the sample divided by the square root of n (the sample size). https://www.statsdirect.com/help/basic_descriptive_statistics/standard_deviation.html ###Code
SEM = (np.std(x1)/np.sqrt(11))
SEM
# general formula from statsdirect: SEx = s / sqrt(n); 's' and 'n' are not defined
# in this notebook, so the line is left commented out
# SEx = s / sqrt(n)
###Output _____no_output_____
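###Markdown To close the loop on the back-up section: the quantity Anscombe tabulates is the standard error of the slope b1, not the SEM. Under the usual least-squares formula SE(b1) = sqrt( (RSS / (n - 2)) / Sxx ), the values already computed above (RSS1 and the x sum of squares) reproduce his 0.118; note too that `std_err_1` returned by `stats.linregress` earlier is exactly this standard error. This cell is an addition to the original notebook. ###Code
n = len(x1)
Sxx = np.sum((x1 - np.mean(x1))**2)          # 110.0, as confirmed above
se_b1 = np.sqrt((RSS1 / (n - 2)) / Sxx)
print(se_b1)      # ~0.118, matching Anscombe's table
print(std_err_1)  # stats.linregress reports the same slope standard error
###Output _____no_output_____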
PythonIntroduction/part_1.ipynb
###Markdown Introduction to Python Programming Tobias Micklitz & Carsten Hensel ![](Slides/p1.png) ![](Slides/p3.png) ![](Slides/p4.png) ![](Slides/p6.png) Python program files * Python code is usually stored in text files with the file ending "`.py`": myprogram.py * Every line in a Python program file is assumed to be a Python statement, or part thereof. * The only exceptions are comment lines, which start with the character `#` (optionally preceded by an arbitrary number of white-space characters, i.e., tabs or spaces). Comment lines are ignored by the Python interpreter. * To run our Python program from the command line we use: $ python myprogram.py * On UNIX systems it is common to define the path to the interpreter on the first line of the program: #!/usr/bin/env python If we do, and if we additionally set the script file to be executable, we can run the program like this: $ myprogram.py Jupyter notebooks (IPython) * This file - a Jupyter notebook - does not follow the standard pattern with Python code in a text file. * Instead, a Jupyter notebook is stored as a file in the [JSON](http://en.wikipedia.org/wiki/JSON) format. * The advantage is that we can mix formatted text, Python code and code output. * It requires the Jupyter notebook server to run it though, and therefore isn't a stand-alone Python program as described above. * Other than that, there is no difference between the Python code that goes into a program file or a Jupyter notebook. ![](Slides/p7.png) ![](Slides/p8.png) Modules * Most of the functionality in Python is provided by *modules*. * The **Python Standard Library** is a large collection of modules that provides *cross-platform* implementations of common facilities such as access to the operating system, file I/O, string management, network communication, and much more. References * The Python Language Reference: http://docs.python.org/3/reference/index.html * The Python Standard Library: http://docs.python.org/3/library/ * To use a module in a Python program it first has to be imported. * A module can be imported using the `import` statement. How to use a Module? ###Code
# how to use the math module
import math
###Output _____no_output_____ ###Markdown This includes the whole module and makes it available for use later in the program. For example, we can do: ###Code
import math
x = math.cos(2 * math.pi)  # note the 'math' prefix telling the system
                           # where cos and pi are 'located'
print(x)
###Output 1.0 ###Markdown Alternatively, we can choose to import all symbols (functions and variables) in a module to the current namespace (so that we don't need to use the prefix "`math.`" every time we use something from the `math` module): ###Code
from math import *  # instead of import math
x = cos(2 * pi)  # no math prefix needed
print(x)
###Output 1.0 ###Markdown * This pattern can be very convenient. * But in large programs that include many modules it is often a good idea to keep the symbols from each module in their own **namespaces**. * This would eliminate potentially confusing problems with namespace collisions. 
As a third alternative, we can choose to import only a few selected symbols from a module by explicitly listing which ones we want to import instead of using the wildcard character `*`: ###Code
from math import cos, pi
x = cos(2 * pi)
print(x)
###Output 1.0 ###Markdown Looking at what a module contains, and its documentation Once a module is imported, we can list the symbols it provides using the `dir` function: ###Code
import math
print(dir(math))
###Output ['__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil', 'copysign', 'cos', 'cosh', 'degrees', 'e', 'erf', 'erfc', 'exp', 'expm1', 'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum', 'gamma', 'gcd', 'hypot', 'inf', 'isclose', 'isfinite', 'isinf', 'isnan', 'ldexp', 'lgamma', 'log', 'log10', 'log1p', 'log2', 'modf', 'nan', 'pi', 'pow', 'radians', 'remainder', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'tau', 'trunc'] ###Markdown And using the function `help` we can get a description of each function (almost all; not every function has a docstring, as they are technically called, but the vast majority of functions are documented this way). ###Code
help(math.log)
log(10)
log(10, 2)
###Output _____no_output_____ ###Markdown We can also use the `help` function directly on modules: Try help(math) Some very useful modules from the Python standard library are `os`, `sys`, `math`, `shutil`, `re`, `subprocess`, `multiprocessing`, `threading`. Complete lists of standard modules for Python 2 and Python 3 are available at http://docs.python.org/2/library/ and http://docs.python.org/3/library/, respectively. Easter Egg ###Code
import this
###Output The Zen of Python, by Tim Peters Beautiful is better than ugly. Explicit is better than implicit. Simple is better than complex. Complex is better than complicated. Flat is better than nested. Sparse is better than dense. Readability counts. Special cases aren't special enough to break the rules. Although practicality beats purity. Errors should never pass silently. Unless explicitly silenced. In the face of ambiguity, refuse the temptation to guess. There should be one-- and preferably only one --obvious way to do it. Although that way may not be obvious at first unless you're Dutch. Now is better than never. Although never is often better than *right* now. If the implementation is hard to explain, it's a bad idea. If the implementation is easy to explain, it may be a good idea. Namespaces are one honking great idea -- let's do more of those! ###Markdown Variables and types Symbol names * Variable names in Python can contain alphanumerical characters `a-z`, `A-Z`, `0-9` and some special characters such as `_`. * Normal variable names must start with a letter. * By convention, variable names start with a lower-case letter, and **Class** names start with a capital letter. * In addition, there are a number of Python keywords that cannot be used as variable names: `and, as, assert, break, class, continue, def, del, elif, else, except, exec, finally, for, from, global, if, import, in, is, lambda, not, or, pass, print, raise, return, try, while, with, yield` * Note: Be aware of the keyword `lambda`, which could easily be a natural variable name in a scientific program. But being a keyword, it cannot be used as a variable name. 
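###Markdown The reserved words are also available programmatically through the standard `keyword` module, which gives a quick way to check whether a name is usable; this small example is an addition to the original material. ###Code
import keyword

print(keyword.kwlist)                # the list of reserved words
print(keyword.iskeyword('lambda'))   # True: cannot be used as a variable name
print(keyword.iskeyword('lambda_'))  # False: a trailing underscore makes it usable
###Output _____no_output_____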
Assignment * The assignment operator in Python is `=`. * Python is a dynamically typed language, so we do not need to specify the type of a variable when we create one. * Assigning a value to a new variable creates the variable: ###Code
# variable assignments
x = 1.0
my_variable = 12.2
###Output _____no_output_____ ###Markdown Although not explicitly specified, a variable does have a type associated with it. The type is derived from the value that was assigned to it. ###Code
type(x)
###Output _____no_output_____ ###Markdown If we assign a new value to a variable, its type can change. ###Code
x = 1
type(x)
###Output _____no_output_____ ###Markdown If we try to use a variable that has not yet been defined we get a `NameError`: ###Code
print(y)
###Output _____no_output_____ ###Markdown Fundamental types ###Code
# integers
x = 2
type(x)
# In Python 2 there was also a separate 'long integer' type:
# w = 5l  (invalid syntax in Python 3, where int is unbounded)
# float
x = 2.0
type(x)
# boolean
b1 = True
b2 = False
type(b1)
# complex numbers: note the use of `j` to specify the imaginary part
x = 1.0 - 1.0j
type(x)
print(x)
print(x.real, x.imag)
###Output 1.0 -1.0 ###Markdown Type utility functions The module `types` contains a number of type name definitions that can be used to test if variables are of certain types: ###Code
import types
# print all types defined in the `types` module
print(dir(types))
x = 1.0
# check if the variable x is a float
type(x) is float
# check if the variable x is an int
type(x) is int
###Output _____no_output_____ ###Markdown We can also use the `isinstance` method for testing types of variables: ###Code
isinstance(x, float)
###Output _____no_output_____ ###Markdown Type casting ###Code
x = 1.5
print(x, type(x))
x = int(x)
print(x, type(x))
z = complex(x)
print(z, type(z))
x = float(z)
###Output _____no_output_____ ###Markdown Complex variables cannot be cast to floats or integers. We need to use `z.real` or `z.imag` to extract the part of the complex number we want: ###Code
print(z)
y = float(z.real)
print(z.real, " -> ", y, type(y))
y = bool(z.imag)
print(z.imag, " -> ", y, type(y))
###Output _____no_output_____ ###Markdown Operators and comparisons Most operators and comparisons in Python work as one would expect: * Arithmetic operators `+`, `-`, `*`, `/`, `//` (integer division), `**` (power) ###Code
1 + 2, 1 - 2, 1 * 2, 1 / 2
1.0 + 2.0, 1.0 - 2.0, 1.0 * 2.0, 1.0 / 2.0
# Integer division of float numbers
3.0 // 2.0
# Note! The power operator in Python isn't ^, but **
2 ** 10
###Output _____no_output_____ ###Markdown * Note: The `/` operator always performs a floating point division in Python 3.x. This is not true in Python 2.x, where the result of `/` is always an integer if the operands are integers. * To be more specific, `1/2 = 0.5` (`float`) in Python 3.x, and `1/2 = 0` (`int`) in Python 2.x (but `1.0/2 = 0.5` in Python 2.x). Note: The boolean operators are spelled out as the words `and`, `not`, `or`. ###Code
True and False
not False
True or False
###Output _____no_output_____ ###Markdown * Comparison operators `>`, `<`, `>=` (greater or equal), `<=` (less or equal), `==` equality, `is` identical. ###Code
2 > 1, 2 < 1
2 > 2, 2 < 2
2 >= 2, 2 <= 2
# equality
[1,2] == [1,2]
# objects identical?
a1 = a2 = [1,2]
print(a1 is a2)
b1 = [1, 2]
b2 = [1, 2]
# b2 = b1
print(b1, b2)
print(b1 == b2)
print(b1 is b2)
###Output [1, 2] [1, 2] True False ###Markdown Compound types: Strings, List and dictionaries Strings Strings are the variable type that is used for storing text messages. 
###Code
s = "Hello world"
type(s)
# length of the string: the number of characters
len(s)
# replace a substring in a string with something else
s2 = s.replace("world", "test")
print(s2)
print(s)
###Output Hello test Hello world ###Markdown We can index a character in a string using `[]`: ###Code
s[0]
###Output _____no_output_____ ###Markdown **Heads up MATLAB users:** Indexing starts at 0! We can extract a part of a string using the syntax `[start:stop]`, which extracts characters between index `start` and `stop` -1 (the character at index `stop` is not included): ###Code
s[0:5]
s[4:5]
###Output _____no_output_____ ###Markdown If we omit either (or both) of `start` or `stop` from `[start:stop]`, the default is the beginning and the end of the string, respectively: ###Code
s[:5]
s[6:]
s[:]
###Output _____no_output_____ ###Markdown We can also define the step size using the syntax `[start:end:step]` (the default value for `step` is 1, as we saw above): ###Code
s[::1]
s[::2]
###Output _____no_output_____ ###Markdown This technique is called *slicing*. Read more about the syntax here: http://docs.python.org/release/2.7.3/library/functions.html?highlight=slice#slice Python has a very rich set of functions for text processing. See for example http://docs.python.org/2/library/string.html for more information. String formatting examples ###Code
print("str1", "str2", "str3")  # The print statement concatenates
                               # strings with a space
print("str1", 1.0, False, -1j)  # The print statement converts all
                                # arguments to strings
print("str1" + "str2" + "str3")  # strings added with +
                                 # are concatenated without space
print("value = %f" % 1.0)  # we can use C-style string formatting
# this formatting creates a string
s2 = "value1 = %.2f. value2 = %d" % (3.1415, 1.5)
print(s2)
# alternative, more intuitive way of formatting a string
s3 = 'value1 = {0}, value2 = {1}'.format(3.1415, 1.5)
print(s3)
###Output _____no_output_____ ###Markdown Lists Lists are very similar to strings, except that each element can be of any type. The syntax for creating lists in Python is `[...]`: ###Code
l = [1,2,3,4]
print(type(l))
print(l)
###Output <class 'list'> [1, 2, 3, 4] ###Markdown We can use the same slicing techniques to manipulate lists as we could use on strings: ###Code
print(l)
print(l[1:3])
print(l[::2])
###Output [1, 2, 3, 4] [2, 3] [1, 3] ###Markdown **Heads up MATLAB users:** Indexing starts at 0! ###Code
l[0]
###Output _____no_output_____ ###Markdown Elements in a list do not all have to be of the same type: ###Code
l = [1, 'a', 1.0, 1-1j]
print(l)
print(type(l))
###Output [1, 'a', 1.0, (1-1j)] <class 'list'> ###Markdown Python lists can be inhomogeneous and arbitrarily nested: ###Code
nested_list = [1, [2, [3, [4, [5]]]]]
nested_list
###Output _____no_output_____ ###Markdown * Lists play a very important role in Python. * For example they are used in loops and other flow control structures (discussed below). * There are a number of convenient functions for generating lists of various types, for example the `range` function: ###Code
start = 10
stop = 30
step = 2
a = list(range(start, stop, step))
print(a)
# in python 3 range generates an iterable, which can be converted to a list using 'list(...)'.
# It has no effect in python 2
list(range(start, stop, step))
list(range(-10, 10))
s = "Python aula 4"
# convert a string to a list by type casting:
s2 = list(s)
s2
# sorting lists
s2.sort()
print(s2)
###Output [' ', ' ', '4', 'P', 'a', 'a', 'h', 'l', 'n', 'o', 't', 'u', 'y'] ###Markdown Adding, inserting, modifying, and removing elements from lists ###Code
# create a new empty list
l = []
# add elements using `append`
l.append("A")
l.append("d")
l.append("d")
print(l)
###Output ['A', 'd', 'd'] ###Markdown We can modify lists by assigning new values to elements in the list. In technical jargon, lists are *mutable*. ###Code
l[1] = "p"
l[2] = "p"
print(l)
l[1:3] = ["d", "d"]
print(l)
###Output ['A', 'd', 'd'] ###Markdown Insert an element at a specific index using `insert` ###Code
l.insert(0, "i")
l.insert(1, "n")
l.insert(2, "s")
l.insert(3, "e")
l.insert(4, "r")
l.insert(5, "t")
print(l)
###Output _____no_output_____ ###Markdown Remove the first element with a specific value using `remove` ###Code
l.remove("A")
print(l)
###Output _____no_output_____ ###Markdown Remove an element at a specific location using `del`: ###Code
del l[7]
del l[6]
print(l)
###Output _____no_output_____ ###Markdown See `help(list)` for more details, or read the online documentation Tuples * Tuples are like lists, except that they cannot be modified once created, that is, they are *immutable*. * In Python, tuples are created using the syntax `(..., ..., ...)`, or even `..., ...`: ###Code
point = 10, 20
print(point, type(point))
# point[0] = 3  would raise a TypeError; the immutability demo appears below
###Output _____no_output_____ ###Markdown We can **unpack** a tuple by assigning it to a comma-separated list of variables: ###Code
x, y = point
print("x =", x)
print("y =", y)
###Output x = 10 y = 20 ###Markdown If we try to assign a new value to an element in a tuple we get an error: ###Code
point[0] = 20
###Output _____no_output_____ ###Markdown Dictionaries Dictionaries are also like lists, except that each element is a key-value pair. The syntax for dictionaries is `{key1 : value1, ...}`: ###Code
params = {"parameter1" : 1.0, "parameter2" : [2.0,5], "parameter3" : 3.0}
print(type(params))
print(params)
params['parameter1']
params["parameter1"] = "A"
params["parameter2"] = "B"
# add a new entry
params["parameter4"] = "D"
print("parameter1 = " + str(params["parameter1"]))
print("parameter2 = " + str(params["parameter2"]))
print("parameter3 = " + str(params["parameter3"]))
print("parameter4 = " + str(params["parameter4"]))
###Output _____no_output_____ ###Markdown Control Flow Conditional statements: if, elif, else The Python syntax for conditional execution of code uses the keywords `if`, `elif` (else if), `else`: ###Code
statement1 = False
statement2 = False
if statement1:
    print("statement 1 is True")
elif statement2:
    print("statement 2 is True")
else:
    print("statement 1 and statement 2 are False")
###Output statement 1 and statement 2 are False ###Markdown For the first time, here we encountered a peculiar and unusual aspect of the Python programming language: Program blocks are defined by their indentation level. Compare to the equivalent C code: if (statement1) { printf("statement1 is True\n"); } else if (statement2) { printf("statement2 is True\n"); } else { printf("statement1 and statement2 are False\n"); } In C, blocks are defined by the enclosing curly brackets `{` and `}`, and the level of indentation (white space before the code statements) does not matter (it is completely optional). 
But in Python, the extent of a code block is defined by the indentation level (usually a tab or say four white spaces). This means that we have to be careful to indent our code correctly, or else we will get syntax errors. Examples: ###Code
statement1 = statement2 = True
if statement1:
    if statement2:
        print("both statement1 and statement2 are True")
# Bad indentation!
if statement1:
    if statement2:
    print("both statement1 and statement2 are True")  # this line is not
                                                      # properly indented
statement1 = False
if statement1:
    print("printed if statement1 is True")
    print("still inside the if block")
if statement1:
    print("printed if statement1 is True")
print("now outside the if block")
###Output now outside the if block ###Markdown Loops In Python, loops can be programmed in a number of different ways. The most common is the `for` loop, which is used together with iterable objects, such as lists. The basic syntax is: **`for` loops**: ###Code
for x in [5,2,3]:
    print(x)
###Output 5 2 3 ###Markdown * The `for` loop iterates over the elements of the supplied list, and executes the containing block once for each element. * Any kind of list can be used in the `for` loop. For example: ###Code
for x in range(4):  # by default range starts at 0
    print(x)
###Output 0 1 2 3 ###Markdown Note: `range(4)` does not include 4! ###Code
for x in range(-3,3):
    print(x)
for word in ["scientific", "computing", "with", "python"]:
    print(word)
###Output scientific computing with python ###Markdown To iterate over key-value pairs of a dictionary: ###Code
for key, value in params.items():
    print(key + " = " + str(value))
###Output parameter1 = 1.0 parameter2 = [2.0, 5] parameter3 = 3.0 ###Markdown Sometimes it is useful to have access to the indices of the values when iterating over a list. We can use the `enumerate` function for this: ###Code
for idx, x in enumerate(range(-3,3)):
    print(idx, x)
###Output _____no_output_____ ###Markdown List comprehensions: Creating lists using `for` loops: A convenient and compact way to initialize lists: ###Code
l1 = [x**2 for x in range(0,5)]
print(l1)
###Output [0, 1, 4, 9, 16] ###Markdown Try to avoid code like: ###Code
mylist = []
for i in range(0, 5):
    mylist.append(i**2)
print(mylist)
###Output [0, 1, 4, 9, 16] ###Markdown `while` loops: ###Code
i = 0
while i < 5:
    print(i)
    i = i + 1
print("done")
###Output _____no_output_____ ###Markdown Note that the `print("done")` statement is not part of the `while` loop body because of the difference in indentation. Functions * A function in Python is defined using the keyword `def`, followed by a function name, a signature within parentheses `()`, and a colon `:`. * The following code, with one additional level of indentation, is the function body. ###Code
def func0():
    print("test")
func0()
###Output test ###Markdown * Optionally, but highly recommended, we can define a so-called "docstring", which is a description of the function's purpose and behavior. * The docstring should follow directly after the function definition, before the code in the function body. ###Code
def func1(s):
    """ Print a string 's' and tell how many characters it has """
    print(s + " has " + str(len(s)) + " characters")
help(func1)
func1("test")
###Output test has 4 characters ###Markdown Functions that return a value use the `return` keyword: ###Code
def square(x):
    """ Return the square of x. """
    return x ** 2
square(4)
###Output _____no_output_____ ###Markdown We can return multiple values from a function using tuples (see above): ###Code
def powers(x):
    """ Return a few powers of x. """
    return x ** 2, x ** 3, x ** 4
powers(3)
x2, x3, x4 = powers(3)
print(x3)
###Output 27 ###Markdown Default argument and keyword arguments In a definition of a function, we can give default values to the arguments the function takes: ###Code
def myfunc(x, p=2, debug=False):
    if debug:
        print("evaluating myfunc for x = " + str(x) + " using exponent p = " + str(p))
    return x**p
###Output _____no_output_____ ###Markdown If we don't provide a value for the `debug` argument when calling the function `myfunc`, it defaults to the value provided in the function definition: ###Code
myfunc(5)
myfunc(5, debug=True)
###Output evaluating myfunc for x = 5 using exponent p = 2 ###Markdown * If we explicitly list the name of the arguments in the function calls, they do not need to come in the same order as in the function definition. * These are called *keyword* arguments, and they are often very useful in functions that take a lot of optional arguments. ###Code
myfunc(p=3, debug=True, x=7)
###Output _____no_output_____ ###Markdown Unnamed functions (lambda function) In Python we can also create unnamed functions, using the `lambda` keyword: ###Code
f1 = lambda x: x**2
# is equivalent to
def f2(x):
    return x**2
f1(2), f2(2)
###Output _____no_output_____ ###Markdown This technique is useful for example when we want to pass a simple function as an argument to another function, like this: ###Code
# map is a built-in python function
map(lambda x: x**2, range(-3,4))
# in python 3 we can use `list(...)` to convert the iterator to an explicit list
list(map(lambda x: x**2, range(-3,4)))
###Output _____no_output_____ ###Markdown Fun with Functions ###Code
def fun1(x):
    return x ** 1
def fun2(x):
    return x ** 2
def fun3(x):
    return x ** 3
def fun4(x):
    return x ** 4
functions = [fun1, fun2, fun3, fun4]
for f in functions:
    print(f(3))
###Output 3 9 27 81 ###Markdown Topics not Covered (but worthwhile mentioning) - Exceptions (Error Handling) - Creating `Modules` - Classes (Object Oriented Programming) - ... ![](Slides/p9.png) ![](Slides/p10.png) ![](Slides/p11.png) ![](Slides/p12.png) ![](Slides/p13.png) ![](Slides/p14.png) ![](Slides/p15.png) ![](Slides/p16.png) ![](Slides/p17.png) ![](Slides/p18.png) ![](Slides/p19.png) Further reading * http://www.python.org - The official web page of the Python programming language. * http://www.python.org/dev/peps/pep-0008 - Style guide for Python programming. Highly recommended. * http://www.greenteapress.com/thinkpython/ - A free book on Python programming. * [Python Essential Reference](http://www.amazon.com/Python-Essential-Reference-4th-Edition/dp/0672329786) - A good reference book on Python programming. Additional Information Classes * Classes are the key features of **object-oriented programming**. * A class is a structure for representing an object and the operations that can be performed on the object. * In Python a class can contain *attributes* (variables) and *methods* (functions). A class is defined almost like a function, but using the `class` keyword, and the class definition usually contains a number of class method definitions (a function in a class). * Each class method should have an argument `self` as its first argument. 
This object is a self-reference.* Some class method names have special meaning, for example: * `__init__`: The name of the method that is invoked when the object is first created. * `__str__` : A method that is invoked when a simple string representation of the class is needed, as for example when printed. * There are many more, see http://docs.python.org/2/reference/datamodel.html#special-method-names ###Code class Point: """ Simple class for representing a point in a Cartesian coordinate system. """ def __init__(self, x, y): """ Create a new Point at x, y. """ self.x = x self.y = y def translate(self, dx, dy): """ Translate the point by dx and dy in the x and y direction. """ self.x += dx self.y += dy def __str__(self): return("Point at [%f, %f]" % (self.x, self.y)) ###Output _____no_output_____ ###Markdown To create a new instance of a class: ###Code p1 = Point(0, 0) # this will invoke the __init__ method in the Point class print(p1) # this will invoke the __str__ method ###Output _____no_output_____ ###Markdown To invoke a class method on the class instance `p1`: ###Code p2 = Point(1, 1) p1.translate(0.25, 1.5) print(p1) print(p2) ###Output _____no_output_____ ###Markdown * Note that calling class methods can modify the state of that particular class instance, but does not affect other class instances or any global variables.* That is one of the nice things about object-oriented design: code such as functions and related variables are grouped in separate and independent entities. Modules * One of the most important concepts in good programming is to reuse code and avoid repetition.* The idea is to write functions and classes with a well-defined purpose and scope, and reuse these instead of repeating similar code in different parts of a program (modular programming). * The result is usually that the readability and maintainability of a program are greatly improved.* What this means in practice is that our programs have fewer bugs, and are easier to extend and debug/troubleshoot. * Python supports modular programming at different levels. Functions and classes are examples of tools for low-level modular programming. * Python modules are a higher-level modular programming construct, where we can collect related variables, functions and classes in a module. A Python module is defined in a Python file (with file ending `.py`), and it can be made accessible to other Python modules and programs using the `import` statement. * Consider the following example: the file `mymodule.py` contains simple example implementations of a variable, a function and a class: ###Code %%file mymodule.py """ Example of a python module. Contains a variable called my_variable, a function called my_function, and a class called MyClass. """ my_variable = 0 def my_function(): """ Example function """ return my_variable class MyClass: """ Example class. 
""" def __init__(self): self.variable = my_variable def set_variable(self, new_value): """ Set self.variable to a new value """ self.variable = new_value def get_variable(self): return self.variable ###Output _____no_output_____ ###Markdown We can import the module `mymodule` into our Python program using `import`: ###Code import mymodule ###Output _____no_output_____ ###Markdown Use `help(module)` to get a summary of what the module provides: ###Code help(mymodule) mymodule.my_variable mymodule.my_function() my_class = mymodule.MyClass() my_class.set_variable(10) my_class.get_variable() ###Output _____no_output_____ ###Markdown If we make changes to the code in `mymodule.py`, we need to reload it using `reload`: ###Code reload(mymodule) # works only in python 2 ###Output _____no_output_____ ###Markdown Exceptions * In Python errors are managed with a special language construct called "Exceptions". * When errors occur exceptions can be raised, which interrupts the normal program flow and fallback to somewhere else in the code where the closest try-except statement is defined. To generate an exception we can use the `raise` statement, which takes an argument that must be an instance of the class `BaseException` or a class derived from it. ###Code raise Exception("description of the error") ###Output _____no_output_____ ###Markdown A typical use of exceptions is to abort functions when some error condition occurs, for example: def my_function(arguments): if not verify(arguments): raise Exception("Invalid arguments") rest of the code goes here To gracefully catch errors that are generated by functions and class methods, or by the Python interpreter itself, use the `try` and `except` statements: try: normal code goes here except: code for error handling goes here this code is not executed unless the code above generated an errorFor example: ###Code try: print("test") # generate an error: the variable test is not defined print test except: print("Caught an exception") ###Output _____no_output_____ ###Markdown To get information about the error, we can access the `Exception` class instance that describes the exception by using for example: except Exception as e: ###Code try: print("test") # generate an error: the variable test is not defined print(test) except Exception as e: print("Caught an exception:" + str(e)) ###Output _____no_output_____
notebooks/tutorial_recurrenceNetwork.ipynb
###Markdown Tutorial for creating and manipulating a recurrence network with pyunicorn Analysis of complex networks So far, the analysis of complex networks in different scientific fields has been performed by studying the adjacency matrix. *pyunicorn* suggests a new, time-series based approach to studying complex networks: the **recurrence plot (RP)**. The aim is to bridge complex network theory and recurrence analysis. From N. Marwan: "In this letter, we demonstrate that the recurrence matrix (analogously to [31]) can be considered as the adjacency matrix of an undirected, unweighted network, allowing us to study time series using a complex network approach." Complex network statistics are helpful to characterise the local and global properties of such a network. What is a recurrence network? A state at time i (red dot) is recurrent at another time j (black dot) when the phase space trajectory visits its close neighborhood (gray circle). This is marked by the value 1 in the recurrence matrix at (i, j). States outside of this neighborhood (small red circle) are marked with 0 in the recurrence matrix. For a literature review see: https://www.researchgate.net/figure/Basic-concepts-beyond-recurrence-plots-and-the-resulting-recurrence-networks-exemplified_fig1_47557940 . ###Code import numpy as np # absolute imports (the original relative imports only work from inside the pyunicorn package) from pyunicorn.core import Network from pyunicorn.timeseries import RecurrencePlot ###Output _____no_output_____
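###Markdown To make the idea concrete before using pyunicorn, here is a minimal plain-numpy sketch (independent of the pyunicorn API): a recurrence matrix is built for a toy time series and read as the adjacency matrix of an undirected, unweighted network. The series `x` and the threshold `eps` are made up for illustration. ###Code
# Sketch: recurrence matrix of a toy series, interpreted as an adjacency matrix
x = np.sin(np.linspace(0, 8 * np.pi, 200))   # toy time series
eps = 0.1                                    # neighborhood threshold
dist = np.abs(x[:, None] - x[None, :])       # pairwise distances between states
R = (dist < eps).astype(int)                 # recurrence matrix = adjacency matrix
np.fill_diagonal(R, 0)                       # drop self-links
degree = R.sum(axis=1)                       # degree of each node (time point)
print(R.shape, degree.mean())
###Output _____no_output_____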
ClassMaterial/09 - Oracles/09 - code/09.3_WSC_Oracle_update.ipynb
###Markdown A stateless oracle (3): keeping the oracle up to date 09.3 Winter School on Smart Contracts Peter Gruber ([email protected]) 2022-02-15 * Part 3: The transactions that keep the oracle up to date * Parts 1-4 are only relevant if you want to **create** an oracle * Only parts 5-6 are needed to **use** the oracle. **Note** that these transactions will typically run at regular intervals (every 5 min) on a Linux server, using the `cron` service of Linux. You can, however, run them manually to see what happens. Setup See notebook 04.1; the lines below will always automatically load the functions in `algo_util.py`, the five accounts and the Purestake credentials ###Code # Loading shared code and credentials import sys, os codepath = '..'+os.path.sep+'..'+os.path.sep+'sharedCode' sys.path.append(codepath) from algo_util import * cred = load_credentials() # Load additional oracle accounts cred_oracle = load_credentials('credentials_oracle') Price = cred_oracle['Price'] Reserve = cred_oracle['Reserve'] oracle_id = cred_oracle['oracle_id'] from algosdk import account, mnemonic from algosdk.v2client import algod from algosdk.future import transaction from algosdk.future.transaction import Multisig from algosdk.future.transaction import PaymentTxn, MultisigTransaction from algosdk.future.transaction import AssetConfigTxn, AssetTransferTxn, AssetFreezeTxn from algosdk.future.transaction import LogicSig import algosdk.error import json import base64 import hashlib from pyteal import * # Initialize the algod client (Testnet or Mainnet) algod_client = algod.AlgodClient(algod_token='', algod_address=cred['algod_test'], headers=cred['purestake_token']) import requests import pandas as pd import numpy as np import time from pycoingecko import CoinGeckoAPI cg = CoinGeckoAPI() ###Output _____no_output_____ ###Markdown Get information about the oracle coin ###Code print('https://testnet.algoexplorer.io/asset/{}'.format(oracle_id)) ###Output https://testnet.algoexplorer.io/asset/77534697 ###Markdown Transfer coins as a function of price and holdings * This is the code that needs to be deployed on the remote server ###Code # get current price price_info = cg.get_price(ids='algorand', vs_currencies='usd') usdalgo = price_info['algorand']['usd'] print(usdalgo) # get current holdings holdings_Price = asset_holdings(algod_client, Price['public']) oracle_Price = [holding['amount'] for holding in holdings_Price if holding['unit']=='USDALGO'][0] oracle_Price = int(1e6*oracle_Price) holdings_Reserve = asset_holdings(algod_client, Reserve['public']) oracle_Reserve = [holding['amount'] for holding in holdings_Reserve if holding['unit']=='USDALGO'][0] oracle_Reserve = int(1e6*oracle_Reserve) print(usdalgo) print(oracle_Price) print(oracle_Reserve) holdings_oracle = int(usdalgo*1e6) # this is how many coins Price *should* hold # make transfers if holdings_oracle != oracle_Price: # A transaction is needed if holdings_oracle > oracle_Price: # Price does not have enough coins # Reserve needs to transfer to Price amt = int(holdings_oracle-oracle_Price) sender = Reserve receiver = Price else: # Price has too many coins # Price needs to transfer to Reserve amt = int(oracle_Price-holdings_oracle) sender = Price receiver = Reserve # === transfer TXN (in production this should be a multisig) === # Step 1: prepare sp = algod_client.suggested_params() txn = AssetTransferTxn( sender = sender['public'], sp=sp, receiver=receiver['public'], amt=amt, index=oracle_id ) # Step 2+3: Sign + send stxn = txn.sign(sender['private']) txid = algod_client.send_transaction(stxn) # Step 4: Wait for confirmation txinfo = wait_for_confirmation(algod_client, txid) ###Output Current round is 20463830. Waiting for round 20463830 to finish. Waiting for round 20463831 to finish. Transaction 4RZB4EM5LSPPJVGAB2DZIPXYZ5Z2V2HGJGMATILDRSBRFSCBYU3Q confirmed in round 20463832.
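###Markdown For scheduling, it helps to wrap the steps above in a single function. The sketch below is illustrative only: the function name `update_oracle` is hypothetical and not part of the course code, and it reuses only calls that already appear above. On the server, a crontab entry along the lines of `*/5 * * * * python3 update_oracle.py` would then run it every five minutes. ###Code
# Illustrative sketch: the rebalancing logic above, wrapped in one function
# so that it can be scheduled with cron. `update_oracle` is a hypothetical name.
def update_oracle():
    # target holdings of Price = current USD price, in micro-units
    price_info = cg.get_price(ids='algorand', vs_currencies='usd')
    target = int(1e6 * price_info['algorand']['usd'])
    # current USDALGO holdings of the Price account
    holdings = asset_holdings(algod_client, Price['public'])
    current = int(1e6 * [h['amount'] for h in holdings if h['unit'] == 'USDALGO'][0])
    if current == target:
        return                                   # nothing to do
    if target > current:
        sender, receiver, amt = Reserve, Price, target - current
    else:
        sender, receiver, amt = Price, Reserve, current - target
    sp = algod_client.suggested_params()
    txn = AssetTransferTxn(sender=sender['public'], sp=sp,
                           receiver=receiver['public'], amt=amt, index=oracle_id)
    stxn = txn.sign(sender['private'])
    txid = algod_client.send_transaction(stxn)
    wait_for_confirmation(algod_client, txid)
###Output _____no_output_____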
pumpingtest_benchmarks/slug4_dawsonville.ipynb
###Markdown 4. Slug test for confined aquifer - Dawsonville Example **This test is taken from an example in MLU.** Introduction and Conceptual Model In this notebook, we reproduce the work of Yang (2020) to check the TTim performance in analysing slug tests. We later compare the solution in TTim with the MLU model (Carlson & Randall, 2012). This slug test was reported in Cooper Jr et al. (1967), and it was performed in Dawsonville, Georgia, USA. A fully penetrating well (Ln-2) is screened in a confined aquifer located between depths of 24 and 122 m (98 m thick). The volume of the slug is 10.16 litres. Head change has been recorded at the slug well. Both the well radius and the casing radius of the slug well are 0.076 m. The conceptual model can be seen in the figure below: ###Code import matplotlib.pyplot as plt import numpy as np ##Now printing the conceptual model figure: fig = plt.figure(figsize=(14, 9)) ax = fig.add_subplot(1,1,1) #sky sky = plt.Rectangle((-5,0), width = 15, height = 10, fc = 'b', zorder=0, alpha=0.1) ax.add_patch(sky) #Aquifer: ground = plt.Rectangle((-5,-122), width = 15, height = 98, fc = np.array([209,179,127])/255, zorder=0, alpha=0.9) ax.add_patch(ground) well = plt.Rectangle((-0.5,-(122)), width = 1, height = 122, fc = np.array([200,200,200])/255, zorder=1) ax.add_patch(well) #Confining Unit conf = plt.Rectangle((-5,-24), width = 15, height = 24, fc = np.array([100,100,100])/255, zorder=0, alpha=0.9) ax.add_patch(conf) #Wellhead wellhead = plt.Rectangle((-0.6,0),width = 1.2, height = 4, fc = np.array([200,200,200])/255, zorder=2, ec='k') ax.add_patch(wellhead) #Screen for the well: screen = plt.Rectangle((-0.5,-(122)), width = 1, height = 98, fc = np.array([200,200,200])/255, alpha=1, zorder = 2, ec = "k", ls = '--') screen.set_linewidth(2) ax.add_patch(screen) #pumping_arrow = plt.Arrow(x = 1,y = 1.5, dx = 0, dy = 1, color = "#00035b") #ax.add_patch(pumping_arrow) ax.text(x = 1, y = 2.5, s = r'$ Q = 10.16 L $', fontsize = 'large' ) #last line line = plt.Line2D(xdata= [-200,1200], ydata = [0,0], color = "k") ax.add_line(line) #Water table #wt = plt.Line2D(xdata= [-200,1200], ydata = [0,0], color = "b") #ax.add_line(wt) ax.text(0.6,-35, s = "Ln-2", fontsize = 'large') #ax.text(6.9, -0.5, "Ln-3", fontsize = 'large') ax.set_xlim([-5,10]) ax.set_ylim([-122,10]) ax.set_xlabel('Distance [m]') ax.set_ylabel('Relative height [m]') ax.set_title('Conceptual Model - Dawsonville Example'); ###Output _____no_output_____ ###Markdown Step 1. Load required libraries ###Code from ttim import * import numpy as np import matplotlib.pyplot as plt import pandas as pd ###Output _____no_output_____ ###Markdown Step 2. Set basic parameters ###Code b = 98 # aquifer thickness [m] zt = -24 zb = zt - b rw = 0.076 # well radius of Ln-2 well [m] rc = 0.076 # casing radius of Ln-2 well [m] Q = 0.01016 # slug volume in m^3 ###Output _____no_output_____ ###Markdown Step 3. Load data Data for the Dawsonville test is available in a text file, where the first column is the time, in days, and the second column is the head displacement, in meters ###Code data = np.loadtxt('data/dawsonville_slug.txt') t = data[:, 0] h = data[:, 1] ###Output _____no_output_____ ###Markdown Step 4. Create First Model - single layer We begin with a single-layer model built with `ModelMaq`. Details on setting up the model can be seen in: [Confined 1 - Oude Korendijk](confined1_oude_korendijk.ipynb). The slug well is set accordingly. Details on setting up the ```Well``` object can be seen in: [Slug 1 - Pratt County](slug1_pratt_county.ipynb). 
###Code ml = ModelMaq(kaq=10, z=[zt, zb], Saq=1e-4, tmin=1e-6, tmax=1e-3, topboundary='conf') w = Well(ml, xw=0, yw=0, rw=rw, rc=rc, tsandQ=[(0, -Q)], layers=0, wbstype='slug') ml.solve() ###Output self.neq 1 solution complete ###Markdown Step 5. Model calibration The procedures for calibration can be seen in [Unconfined 1 - Vennebulten](unconfined1_vennebulten.ipynb). We calibrate hydraulic conductivity and specific storage, as in the KGS model (Hyder et al. 1994). ###Code # unknown parameters: kaq, Saq ca = Calibrate(ml) ca.set_parameter(name='kaq0', initial=10, pmin=0) ca.set_parameter(name='Saq0', initial=1e-4) ca.series(name='obs', x=0, y=0, layer=0, t=t, h=h) ca.fit(report=True) display(ca.parameters) print('rmse:', ca.rmse()) hm = ml.head(0, 0, t) plt.figure(figsize=(8, 5)) plt.semilogx(t, h, '.', label='obs') plt.semilogx(t, hm[0], label='ttim') plt.xlabel('time [d]') plt.ylabel('displacement [m]') plt.title('Model Results - Single-layer model') plt.legend(); ###Output _____no_output_____ ###Markdown In general, the single-layer model seems to be performing well, with a good visual fit between observations and the model. Step 6. Create Second Model - multi-layer model To investigate whether we need to account for the vertical flow component or not, we will create a multi-layer model. Consequently, we divide the previous aquifer into 49 layers (2 m thick each). ###Code nlay = 49 # number of layers zlayers = np.linspace(zt, zb, nlay + 1) # elevation of each layer Saq = 1e-4 * np.ones(nlay) ###Output _____no_output_____ ###Markdown Now we use the ```Model3D``` object to model the multi-layer aquifer. Details on how to set it up can be seen in the notebook: [Unconfined - 1 - Vennebulten](unconfined1_vennebulten.ipynb) ###Code ml_1 = Model3D(kaq=10, z=zlayers, Saq=Saq, tmin=1e-6, tmax=1e-3, phreatictop=False) w_1 = Well(ml_1, xw=0, yw=0, rw=rw, rc=rc, tsandQ=[(0, -Q)], layers=range(nlay), \ wbstype='slug') ml_1.solve() ###Output self.neq 49 solution complete ###Markdown Step 7. Calibration of multi-layer model ###Code ca_1 = Calibrate(ml_1) ca_1.set_parameter(name='kaq0_48', initial=10, pmin=0) ca_1.set_parameter(name='Saq0_48', initial=1e-4) ca_1.series(name='obs', x=0, y=0, layer=range(nlay), t=t, h=h) ca_1.fit(report=True) display(ca_1.parameters) print('RMSE:', ca_1.rmse()) ###Output _____no_output_____ ###Markdown The multi-layer model does not improve the calibration by much. ###Code hm_1 = ml_1.head(0, 0, t) plt.figure(figsize=(8, 5)) plt.semilogx(t, h, '.', label='obs') plt.semilogx(t, hm_1[0], label='ttim') plt.xlabel('time [d]') plt.ylabel('displacement [m]') plt.title('Model Results - Multi-layer model') plt.legend(); ###Output _____no_output_____ ###Markdown Step 8. Final Model calibration with well skin resistance Now we test if the skin resistance of the well has an impact on model calibration. We thus add the ```res``` parameter to the calibration settings. We use the same multi-layer model. 
###Code ca_2 = Calibrate(ml_1) ca_2.set_parameter(name='kaq0_48', initial=10, pmin=0) ca_2.set_parameter(name='Saq0_48', initial=1e-4, pmin = 1e-7) ca_2.set_parameter_by_reference(name='res', parameter=w_1.res, initial=0.1, pmin = 0) ca_2.series(name='obs', x=0, y=0, layer=range(nlay), t=t, h=h) ca_2.fit(report=True) display(ca_2.parameters) print('RMSE:', ca_2.rmse()) hm_2 = ml_1.head(0, 0, t) plt.figure(figsize=(8, 5)) plt.semilogx(t, h, '.', label='obs') plt.semilogx(t, hm_2[0], label='ttim') plt.xlabel('time [d]') plt.ylabel('displacement [m]') plt.title('Model Results - Multi-layer with res') plt.legend(); ###Output _____no_output_____ ###Markdown Adding resistance of the well screen does not improve the performance. Thus, res should not be applied in the conceptual model. Step 9. Analysis and comparison of simulated valuesWe now compare the values in TTim and add the results of the modelling done in MLU by Yang (2020). ###Code ta = pd.DataFrame(columns=['k [m/d]', 'Ss [1/m]'], \ index = ['MLU', 'ttim', 'ttim-multilayer', 'ttim-res']) tr = np.delete(ca_2.parameters['optimal'].values, 2) ta.loc['MLU'] = [0.4133, 1.9388E-05] ta.loc['ttim'] = ca.parameters['optimal'].values ta.loc['ttim-multilayer'] = ca_1.parameters['optimal'].values ta.loc['ttim-res'] = tr ta['RMSE'] = [0.004264, ca.rmse(), ca_1.rmse(), ca_2.rmse()] ta.style.set_caption('Comparison of parameter values and error under different models') ###Output _____no_output_____
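###Markdown As a cross-check, the error values in the comparison table can be recomputed by hand. The sketch below assumes that `ca.rmse()` is the root-mean-square error of the residuals of the fitted series; `h` are the observed heads and `hm_2[0]` the modeled heads of the last model. ###Code
# Sketch: recompute the RMSE from observed and modeled heads
# (assumes ca.rmse() is the RMSE of the residuals of the fitted series)
rmse_manual = np.sqrt(np.mean((hm_2[0] - h) ** 2))
print(rmse_manual)
###Output _____no_output_____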
PandasASS.ipynb
###Markdown Player Count Display the total number of players ###Code # total number of players sum_of_players = len(df["SN"].unique()) print(sum_of_players) ###Output 576 ###Markdown Purchasing Analysis (Total) Run basic calculations to obtain the number of unique items, average price, etc. Create a summary data frame to hold the results. Optional: give the displayed data cleaner formatting. Display the summary data frame ###Code # unique number of items # average price # number of purchases # total revenue unique_Items = len(df["Item ID"].unique()) avg_price = df["Price"].mean() Number_of_Purchases = len(df["Purchase ID"].unique()) Total_Revenue = df["Price"].sum() # sum of all purchases print("Unique Items",unique_Items) print("Average Price",avg_price) print("Number of Purchases",Number_of_Purchases) print("Total Revenue",Total_Revenue) # creating a summary DataFrame to hold the results # (use a new name so that the raw data in `df` is not overwritten) summary_df = pd.DataFrame({ "Unique Items":[unique_Items], "Average Price":["${:.2f}".format(avg_price)], "Number of Purchases":[Number_of_Purchases], "Total Revenue":["${:,.2f}".format(Total_Revenue)] }) summary_df ###Output _____no_output_____ ###Markdown Gender Demographics Percentage and Count of Male Players, Percentage and Count of Female Players, Percentage and Count of Other / Non-Disclosed ###Code #MALE Male1 = df.groupby(["Gender"]).get_group('Male') Male_Count = len(Male1["SN"].unique()) MalePercentage = round((Male_Count / sum_of_players) * 100,2) #FEMALE Female1 = df.groupby(["Gender"]).get_group('Female') Female_Count = len(Female1["SN"].unique()) FemalePercentage = round((Female_Count / sum_of_players) * 100,2) #OTHER N_Disclose = df.groupby(["Gender"]).get_group('Other / Non-Disclosed') otherNDisclosed_Count = len(N_Disclose["SN"].unique()) ONDPercentage = round((otherNDisclosed_Count / sum_of_players) * 100,2) # Gender Table Gender_Table = { 'Gender':["Male","Female","Other"], 'Total Count':[Male_Count,Female_Count,otherNDisclosed_Count], 'Percent of Players':[MalePercentage,FemalePercentage,ONDPercentage]} Gender_Table = pd.DataFrame(Gender_Table) Gender_Table = Gender_Table.set_index('Gender') Gender_Table = Gender_Table[['Total Count','Percent of Players']] Gender_Table ###Output _____no_output_____ ###Markdown Purchasing Analysis (Gender) Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. 
by gender. Create a summary data frame to hold the results. Optional: give the displayed data cleaner formatting. Display the summary data frame ###Code #PURCHASE COUNT FemalePurchaseCount = len(Female1) MalePurchaseCount = len(Male1) OtherPurchaseCount = len(N_Disclose) #AVERAGE PURCHASE PRICE FemaleAvgPrice = round(Female1["Price"].mean(),2) MaleAvgPrice = round(Male1["Price"].mean(),2) OtherAvgPrice = round(N_Disclose["Price"].mean(),2) #TOTAL PURCHASE VALUE MaleTotalPurchase = round(Male1["Price"].sum(),2) FemaleTotalPurchase = round(Female1["Price"].sum(),2) OtherTotalPurchase = round(N_Disclose["Price"].sum(),2) # Average total purchase per person, by gender NorMale = round((MaleTotalPurchase / MalePurchaseCount), 2) NorFemale = round((FemaleTotalPurchase / FemalePurchaseCount), 2) NorOther = round((OtherTotalPurchase / OtherPurchaseCount), 2) # SUMMARY TABLE (keep every column in the same Male/Female/Other order) PByGender = {"Gender":["Male","Female","Other"], "Purchase Count":[MalePurchaseCount,FemalePurchaseCount,OtherPurchaseCount], "Average Purchase Price":[MaleAvgPrice,FemaleAvgPrice,OtherAvgPrice], "Total Purchase Value":[MaleTotalPurchase,FemaleTotalPurchase,OtherTotalPurchase], "Avg Total Purchase Per Person":[NorMale,NorFemale,NorOther]} PByGender = pd.DataFrame(PByGender) PByGender = PByGender.set_index('Gender') PByGender = PByGender[['Purchase Count', 'Average Purchase Price', 'Total Purchase Value', 'Avg Total Purchase Per Person']] PByGender ###Output _____no_output_____ ###Markdown Age Demographics Establish bins for ages. Categorize the existing players using the age bins. Hint: use pd.cut(). Calculate the numbers and percentages by age group. Create a summary data frame to hold the results. Optional: round the percentage column to two decimal points. Display Age Demographics Table ###Code # Bin edges are right-inclusive, so (29, 34] captures ages 30-34, etc. bins = [0, 9, 14, 19, 24, 29, 34, 39, 99] Age_labels = ["<10","10-14","15-19","20-24","25-29","30-34","35-39","Over 40"] # replace the raw ages by their age bin, using the labels above df["Age"] = pd.cut(df["Age"], bins, labels=Age_labels) Age_group = df.groupby("Age") Total_Age_Count = Age_group["SN"].nunique() print(Total_Age_Count) Percentage_Age = round((Total_Age_Count / len(df["SN"].unique())) * 100, 2) print("Percentage_Age", Percentage_Age) AgeDem = pd.DataFrame({"Total Player Count": Total_Age_Count, "Percentage Of Players": Percentage_Age}) AgeDem ###Output _____no_output_____ ###Markdown Purchasing Analysis (Age) Bin the purchase_data data frame by age. Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. 
in the table belowCreate a summary data frame to hold the resultsOptional: give the displayed data cleaner formattingDisplay the summary data frame ###Code Pur_Count = Age_group ["Purchase ID"].count() #print("Pur_Count",Pur_Count) Age_group = df.groupby("Age") Avg_Pur_price = df.groupby("Age")["Price"].mean() #print("Avg_Pur_price",Avg_Pur_price) TOT_PurchV = Age_group["Price"].sum() #print("TOT_PurchV",TOT_PurchV) AvgTOT_PPerson = TOT_PurchV / Pur_Count #print("AvgTOT_PPerson",AvgTOT_PPerson) purchasing_analysis_pd = pd.DataFrame({"Purchase Count": Pur_Count, "Average Purchase Price": Avg_Pur_price, "Total Purchase Value": TOT_PurchV, "Avg Total Purchase per Person":AvgTOT_PPerson }) purchasing_analysis_pd purchasing_analysis_pd["Total Purchase Value"]= purchasing_analysis_pd["Total Purchase Value"].map("${:.2f}".format) purchasing_analysis_pd["Avg Total Purchase per Person"]= purchasing_analysis_pd["Avg Total Purchase per Person"].map("${:.2f}".format) purchasing_analysis_pd ###Output _____no_output_____ ###Markdown Top Spenders Run basic calculations to obtain the results in the table belowCreate a summary data frame to hold the resultsSort the total purchase value column in descending orderOptional: give the displayed data cleaner formattingDisplay a preview of the summary data frame ###Code #Top Spenders #SN SN = df.groupby(df["SN"]) ScreenName = SN["SN"].unique() #Purchase Count SNCount = SN['Age'].count() #Average Purchase Price SNAverage = round(SN['Price'].mean(),2) #Total Purchase Value SNTotal = SN['Price'].sum() TopSpend = {"SN":ScreenName,"Purchase Count":SNCount, "Average Purchase Price":SNAverage,"Total Purchase Value":SNTotal} TopSpend1= pd.DataFrame(TopSpend) TopSpend1= TopSpend1.set_index('SN') TopSpend1 = TopSpend1.sort_values("Total Purchase Value",ascending=False) TopSpend1 = TopSpend1[['Purchase Count', 'Average Purchase Price', 'Total Purchase Value']] TopSpend1.iloc[:5] ###Output _____no_output_____ ###Markdown Most Popular Items Retrieve the Item ID, Item Name, and Item Price columnsGroup by Item ID and Item Name. Perform calculations to obtain purchase count, item price, and total purchase valueCreate a summary data frame to hold the resultsSort the purchase count column in descending orderOptional: give the displayed data cleaner formattingDisplay a preview of the summary data frame ###Code #Item ID ItemId = df.groupby(df['Item ID']) Items = ItemId['Item ID'].unique() #Item Name ItemName = ItemId["Item Name"].unique() #Purchase Count ItemPurCount = ItemId['Age'].count() #Item Price ItemPrice= ItemId['Price'].unique() #Total Purchase Value ItemTotalPurchase = ItemId['Price'].sum() ItemTable = {'Item ID':Items,'Item Name':ItemName,'Item Price':ItemPrice,'Item Count':ItemPurCount,'Total Purchase':ItemTotalPurchase} ItemTable1 = pd.DataFrame(ItemTable) ItemTable1 = ItemTable1.set_index('Item ID') ItemTable1= ItemTable1.sort_values('Item Count', ascending=False) ItemTable1 = ItemTable1[['Item Name','Item Count','Item Price','Total Purchase']] ItemTable1.iloc[:5] ###Output _____no_output_____ ###Markdown Most Profitable Items Sort the above table by total purchase value in descending orderOptional: give the displayed data cleaner formattingDisplay a preview of the data frame ###Code #Most Profitable Items #Item ID #Item Name #Purchase Count #Item Price #Total Purchase Value MostProfit= ItemTable1.sort_values('Total Purchase', ascending=False) MostProfit[:5] ###Output _____no_output_____
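###Markdown As a side note (not part of the original assignment), the per-spender table above can also be produced in one step with `groupby(...).agg(...)`, which avoids building the intermediate series by hand. A sketch, assuming `df` still holds the raw purchase data: ###Code
# Alternative sketch using groupby/agg (assumes df is the raw purchase data)
top_spenders = (df.groupby("SN")["Price"]
                  .agg(["count", "mean", "sum"])
                  .rename(columns={"count": "Purchase Count",
                                   "mean": "Average Purchase Price",
                                   "sum": "Total Purchase Value"})
                  .sort_values("Total Purchase Value", ascending=False))
top_spenders.head()
###Output _____no_output_____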
tutorials/Activity Answers.ipynb
###Markdown Answers ACTIVITY 2 It looks like the continents 'Antarctica' and 'Seven seas (open ocean)' are skewing the plot a bit. Try to remove these two continents from the data that you call the "plot" method on. Hint: earlier we combined two filters (see "cell 7"); you can use the same approach with the '!=' operator. ###Code # Geopandas Activity 2 Answer countries[(countries.continent != 'Antarctica') & (countries.continent != 'Seven seas (open ocean)')].plot(figsize=(12,5), cmap='Set1', column='gdp_per_cap', legend=True) ###Output _____no_output_____ ###Markdown --- ACTIVITY 5 Using the 'mag' column, filter the earthquakes and display only the magnitudes greater than some threshold of your choice (4.5 is used below) ###Code gdf_quakes[gdf_quakes.mag > 4.5].plot(ax=ax1, color='r'); ###Output _____no_output_____
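###Markdown A slightly more compact way to express the same continent filter is `Series.isin` with negation. This is an equivalent sketch, not part of the original answer key: ###Code
# Equivalent filter using isin instead of two chained != comparisons
mask = ~countries.continent.isin(['Antarctica', 'Seven seas (open ocean)'])
countries[mask].plot(figsize=(12,5), cmap='Set1', column='gdp_per_cap', legend=True)
###Output _____no_output_____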
notebooks/export_GeoJSON_counties.ipynb
###Markdown Generate geo-json files from [US Census Bureau data](https://www.census.gov/geographies/mapping-files/time-series/geo/carto-boundary-file.html).License: Apache 2 ###Code !pip install --quiet kml2geojson import io import copy import json import urllib.request import xml.dom.minidom import zipfile import kml2geojson import lxml.etree def parse_broken_kml(contents): fixing_tree = lxml.etree.fromstring( contents, parser=lxml.etree.XMLParser(recover=True)) tree = xml.dom.minidom.parseString(lxml.etree.tostring(fixing_tree)) return kml2geojson.build_layers(tree) def read_2018_census_kml(filename): url = "https://www2.census.gov/geo/tiger/GENZ2018/kml/" + filename + ".zip" with urllib.request.urlopen(url) as infile: buffer = io.BytesIO(infile.read()) return zipfile.ZipFile(buffer).read(filename + ".kml") raw_states = parse_broken_kml(read_2018_census_kml("cb_2018_us_state_20m"))[0] states = copy.deepcopy(raw_states) for entry in states["features"]: props = entry["properties"] props.pop("ALAND") props.pop("AWATER") props["is_a_state"] = True props["state_id"] = int(entry["properties"].pop("GEOID")) props["name"] = props.pop("NAME") props.pop("STATEFP") props.pop("STATENS") props.pop("LSAD") props.pop("description") props.pop("styleUrl") raw_counties = parse_broken_kml(read_2018_census_kml("cb_2018_us_county_20m"))[0] counties = copy.deepcopy(raw_counties) for entry in counties["features"]: props = entry["properties"] props.pop("ALAND") props.pop("AWATER") props.pop("COUNTYFP") props.pop("COUNTYNS") props["fips_id"] = int(props.pop("GEOID")) props.pop("LSAD") props["name"] = props.pop("NAME") props["state_id"] = int(props.pop("STATEFP")) props.pop("description") props.pop("styleUrl") props["is_a_state"] = False state_names = {entry["properties"]["state_id"]: entry["properties"]["name"] for entry in states["features"]} with open("counties.json", "w") as outfile: json.dump(counties, outfile) with open("states.json", "w") as outfile: json.dump(states, outfile) with open("state_names.json", "w") as outfile: json.dump(state_names, outfile) !tar -czvf json_us_geography.tar.gz counties.json states.json state_names.json ###Output counties.json states.json state_names.json
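###Markdown A quick sanity check of the exported files (a sketch, not part of the original notebook): reload one of them with the already-imported `json` module and inspect the cleaned-up properties of a feature. ###Code
# Sketch: reload the exported GeoJSON and inspect one feature
with open("counties.json") as infile:
    reloaded = json.load(infile)
print(len(reloaded["features"]))
print(reloaded["features"][0]["properties"])
###Output _____no_output_____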
01-Lesson-Plans/16-Project-3-and-R/1/Activities/06_Stu_Tibble/Resources/PyCitySchools_Solution.ipynb
###Markdown PyCity Schools Analysis* As a whole, schools with higher budgets, did not yield better test results. By contrast, schools with higher spending per student actually (\$645-675) underperformed compared to schools with smaller budgets (<\$585 per student).* As a whole, smaller and medium sized schools dramatically out-performed large sized schools on passing math performances (89-91% passing vs 67%).* As a whole, charter schools out-performed the public district schools across all metrics. However, more analysis will be required to glean if the effect is due to school practices or the fact that charter schools tend to serve smaller student populations per school. --- ###Code # Dependencies and Setup import pandas as pd import numpy as np # File to Load (Remember to Change These) school_data_to_load = "raw_data/schools_complete.csv" student_data_to_load = "raw_data/students_complete.csv" # Read School and Student Data File and store into Pandas Data Frames school_data = pd.read_csv(school_data_to_load) student_data = pd.read_csv(student_data_to_load) # Combine the data into a single dataset school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"]) ###Output _____no_output_____ ###Markdown District Summary ###Code # Calculate the Totals (Schools and Students) school_count = len(school_data_complete["school_name"].unique()) student_count = school_data_complete["Student ID"].count() # Calculate the Total Budget total_budget = school_data["budget"].sum() # Calculate the Average Scores average_math_score = school_data_complete["math_score"].mean() average_reading_score = school_data_complete["reading_score"].mean() overall_passing_rate = (average_math_score + average_reading_score) / 2 # Calculate the Percentage Pass Rates passing_math_count = school_data_complete[(school_data_complete["math_score"] > 70)].count()["student_name"] passing_math_percentage = passing_math_count / float(student_count) * 100 passing_reading_count = school_data_complete[(school_data_complete["reading_score"] > 70)].count()["student_name"] passing_reading_percentage = passing_reading_count / float(student_count) * 100 # Minor Data Cleanup district_summary = pd.DataFrame({"Total Schools": [school_count], "Total Students": [student_count], "Total Budget": [total_budget], "Average Math Score": [average_math_score], "Average Reading Score": [average_reading_score], "% Passing Math": [passing_math_percentage], "% Passing Reading": [passing_reading_percentage], "% Overall Passing Rate": [overall_passing_rate]}) district_summary = district_summary[["Total Schools", "Total Students", "Total Budget", "Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing Rate"]] district_summary["Total Students"] = district_summary["Total Students"].map("{:,}".format) district_summary["Total Budget"] = district_summary["Total Budget"].map("${:,.2f}".format) # Display the data frame district_summary ###Output _____no_output_____ ###Markdown School Summary ###Code # Determine the School Type school_types = school_data.set_index(["school_name"])["type"] # Calculate the total student count per_school_counts = school_data_complete["school_name"].value_counts() # Calculate the total school budget and per capita spending per_school_budget = school_data_complete.groupby(["school_name"]).mean()["budget"] per_school_capita = per_school_budget / per_school_counts # Calculate the average test scores per_school_math = 
school_data_complete.groupby(["school_name"]).mean()["math_score"] per_school_reading = school_data_complete.groupby(["school_name"]).mean()["reading_score"] # Calculate the passing scores by creating a filtered data frame school_passing_math = school_data_complete[(school_data_complete["math_score"] > 70)] school_passing_reading = school_data_complete[(school_data_complete["reading_score"] > 70)] per_school_passing_math = school_passing_math.groupby(["school_name"]).count()["student_name"] / per_school_counts * 100 per_school_passing_reading = school_passing_reading.groupby(["school_name"]).count()["student_name"] / per_school_counts * 100 overall_passing_rate = (per_school_passing_math + per_school_passing_reading) / 2 # Convert to data frame per_school_summary = pd.DataFrame({"School Type": school_types, "Total Students": per_school_counts, "Total School Budget": per_school_budget, "Per Student Budget": per_school_capita, "Average Math Score": per_school_math, "Average Reading Score": per_school_reading, "% Passing Math": per_school_passing_math, "% Passing Reading": per_school_passing_reading, "% Overall Passing Rate": overall_passing_rate}) # Minor data wrangling: format the budget columns as currency per_school_summary = per_school_summary[["School Type", "Total Students", "Total School Budget", "Per Student Budget", "Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing Rate"]] per_school_summary["Total School Budget"] = per_school_summary["Total School Budget"].map("${:,.2f}".format) per_school_summary["Per Student Budget"] = per_school_summary["Per Student Budget"].map("${:,.2f}".format) # Display the data frame per_school_summary ###Output _____no_output_____ ###Markdown Top Performing Schools (By Passing Rate) ###Code # Sort and show top five schools top_schools = per_school_summary.sort_values(["% Overall Passing Rate"], ascending=False) top_schools.head(5) ###Output _____no_output_____ ###Markdown Bottom Performing Schools (By Passing Rate) ###Code # Sort and show bottom five schools bottom_schools = per_school_summary.sort_values(["% Overall Passing Rate"], ascending=True) bottom_schools.head(5) ###Output _____no_output_____ ###Markdown Math Scores by Grade ###Code # Create data series of scores by grade levels using conditionals nineth_graders = 
school_data_complete[(school_data_complete["grade"] == "9th")] tenth_graders = school_data_complete[(school_data_complete["grade"] == "10th")] eleventh_graders = school_data_complete[(school_data_complete["grade"] == "11th")] twelfth_graders = school_data_complete[(school_data_complete["grade"] == "12th")] # Group each by school name nineth_graders_scores = nineth_graders.groupby(["school_name"]).mean()["reading_score"] tenth_graders_scores = tenth_graders.groupby(["school_name"]).mean()["reading_score"] eleventh_graders_scores = eleventh_graders.groupby(["school_name"]).mean()["reading_score"] twelfth_graders_scores = twelfth_graders.groupby(["school_name"]).mean()["reading_score"] # Combine series into single data frame scores_by_grade = pd.DataFrame({"9th": nineth_graders_scores, "10th": tenth_graders_scores, "11th": eleventh_graders_scores, "12th": twelfth_graders_scores}) # Minor data wrangling scores_by_grade = scores_by_grade[["9th", "10th", "11th", "12th"]] scores_by_grade.index.name = None # Display the data frame scores_by_grade ###Output _____no_output_____ ###Markdown Scores by School Spending ###Code # Establish the bins spending_bins = [0, 585, 615, 645, 675] group_names = ["<$585", "$585-615", "$615-645", "$645-675"] # Categorize the spending based on the bins per_school_summary["Spending Ranges (Per Student)"] = pd.cut(per_school_capita, spending_bins, labels=group_names) spending_math_scores = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["Average Math Score"] spending_reading_scores = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["Average Reading Score"] spending_passing_math = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Math"] spending_passing_reading = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Reading"] overall_passing_rate = (spending_math_scores + spending_reading_scores) / 2 # Assemble into data frame spending_summary = pd.DataFrame({"Average Math Score" : spending_math_scores, "Average Reading Score": spending_reading_scores, "% Passing Math": spending_passing_math, "% Passing Reading": spending_passing_reading, "% Overall Passing Rate": overall_passing_rate}) # Minor data wrangling spending_summary = spending_summary[["Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing Rate"]] # Display results spending_summary ###Output _____no_output_____ ###Markdown Scores by School Size ###Code # Establish the bins size_bins = [0, 1000, 2000, 5000] group_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"] # Categorize the spending based on the bins per_school_summary["School Size"] = pd.cut(per_school_summary["Total Students"], size_bins, labels=group_names) # Calculate the scores based on bins size_math_scores = per_school_summary.groupby(["School Size"]).mean()["Average Math Score"] size_reading_scores = per_school_summary.groupby(["School Size"]).mean()["Average Reading Score"] size_passing_math = per_school_summary.groupby(["School Size"]).mean()["% Passing Math"] size_passing_reading = per_school_summary.groupby(["School Size"]).mean()["% Passing Reading"] overall_passing_rate = (size_passing_math + size_passing_reading) / 2 # Assemble into data frame size_summary = pd.DataFrame({"Average Math Score" : size_math_scores, "Average Reading Score": size_reading_scores, "% Passing Math": size_passing_math, "% Passing Reading": size_passing_reading, "% Overall Passing Rate": 
overall_passing_rate}) # Minor data wrangling size_summary = size_summary[["Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing Rate"]] # Display results size_summary ###Output _____no_output_____ ###Markdown Scores by School Type ###Code # Type | Average Math Score | Average Reading Score | % Passing Math | % Passing Reading | % Overall Passing Rate type_math_scores = per_school_summary.groupby(["School Type"]).mean()["Average Math Score"] type_reading_scores = per_school_summary.groupby(["School Type"]).mean()["Average Reading Score"] type_passing_math = per_school_summary.groupby(["School Type"]).mean()["% Passing Math"] type_passing_reading = per_school_summary.groupby(["School Type"]).mean()["% Passing Reading"] overall_passing_rate = (type_passing_math + type_passing_reading) / 2 # Assemble into data frame type_summary = pd.DataFrame({"Average Math Score" : type_math_scores, "Average Reading Score": type_reading_scores, "% Passing Math": type_passing_math, "% Passing Reading": type_passing_reading, "% Overall Passing Rate": overall_passing_rate}) # Minor data wrangling type_summary = type_summary[["Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing Rate"]] # Display results type_summary ###Output _____no_output_____
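###Markdown For readers unfamiliar with `pd.cut`, here is a tiny standalone sketch of the binning used in the last two sections; the values are made up for illustration only. ###Code
# Standalone sketch of pd.cut binning (illustrative values only)
demo = pd.Series([450, 900, 1500, 3200])
pd.cut(demo, [0, 1000, 2000, 5000],
       labels=["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"])
###Output _____no_output_____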
Course 3 - Natural Language Processing with Sequence Models/Week 4/C3_W4_Assignment.ipynb
###Markdown Assignment 4: Question duplicates Welcome to the fourth assignment of course 3. In this assignment you will explore Siamese networks applied to natural language processing. You will further explore the fundamentals of Trax and you will be able to implement a more complicated structure using it. By completing this assignment, you will learn how to implement models with different architectures. Outline- [Overview](#0)- [Part 1: Importing the Data](#1) - [1.1 Loading in the data](#1.1) - [1.2 Converting a question to a tensor](#1.2) - [1.3 Understanding the iterator](#1.3) - [Exercise 01](#ex01)- [Part 2: Defining the Siamese model](#2) - [2.1 Understanding Siamese Network](#2.1) - [Exercise 02](#ex02) - [2.2 Hard Negative Mining](#2.2) - [Exercise 03](#ex03)- [Part 3: Training](#3) - [3.1 Training the model](#3.1) - [Exercise 04](#ex04)- [Part 4: Evaluation](#4) - [4.1 Evaluating your siamese network](#4.1) - [4.2 Classify](#4.2) - [Exercise 05](#ex05)- [Part 5: Testing with your own questions](#5) - [Exercise 06](#ex06)- [On Siamese networks](#6) Overview In this assignment, concretely you will: - Learn about Siamese networks- Understand how the triplet loss works- Understand how to evaluate accuracy- Use cosine similarity between the model's outputted vectors- Use the data generator to get batches of questions- Predict using your own model By now, you are familiar with trax and know how to make use of classes to define your model. We will start this homework by asking you to preprocess the data the same way you did in the previous assignments. After processing the data you will build a classifier that will allow you to identify whether two questions are the same or not. You will process the data first and then pad it in a similar way to what you have done in the previous assignment. Your model will take in the two question embeddings, run them through an LSTM, and then compare the outputs of the two subnetworks using cosine similarity. Before taking a deep dive into the model, start by importing the data set. Part 1: Importing the Data 1.1 Loading in the data You will be using the Quora question pairs dataset to build a model that can identify similar questions. This is a useful task because you don't want to have several versions of the same question posted. Several times when teaching I end up responding to similar questions on piazza, or on other community forums. This data set has been labeled for you. Run the cell below to import some of the packages you will be using. ###Code import os import nltk import trax from trax import layers as tl from trax.supervised import training from trax.fastmath import numpy as fastnp import numpy as np import pandas as pd import random as rnd # set random seeds trax.supervised.trainer_lib.init_random_number_generators(34) rnd.seed(34) ###Output INFO:tensorflow:tokens_length=568 inputs_length=512 targets_length=114 noise_density=0.15 mean_noise_span_length=3.0 ###Markdown **Notice that for this assignment Trax's numpy is referred to as `fastnp`, while regular numpy is referred to as `np`.** You will now load in the data set. We have done some preprocessing for you. If you have taken the Deep Learning Specialization, this is a slightly different training method than the one you have seen there. If you have not, then don't worry about it, we will explain everything. ###Code data = pd.read_csv("questions.csv") N=len(data) print('Number of question pairs: ', N) data.head() ###Output Number of question pairs: 404351 ###Markdown We first split the data into a train and test set. 
The test set will be used later to evaluate our model. ###Code N_train = 300000 N_test = 10*1024 data_train = data[:N_train] data_test = data[N_train:N_train+N_test] print("Train set:", len(data_train), "Test set:", len(data_test)) del(data) # remove to free memory ###Output Train set: 300000 Test set: 10240 ###Markdown As explained in the lectures, we select only the question pairs that are duplicates to train the model. We build two batches as input for the Siamese network and we assume that question $q1_i$ (question $i$ in the first batch) is a duplicate of $q2_i$ (question $i$ in the second batch), but all other questions in the second batch are not duplicates of $q1_i$. The test set uses the original pairs of questions and the status describing if the questions are duplicates. ###Code td_index = (data_train['is_duplicate'] == 1).to_numpy() td_index = [i for i, x in enumerate(td_index) if x] print('number of duplicate questions: ', len(td_index)) print('indexes of first ten duplicate questions:', td_index[:10]) print(data_train['question1'][5]) # Example of question duplicates (first one in data) print(data_train['question2'][5]) print('is_duplicate: ', data_train['is_duplicate'][5]) Q1_train_words = np.array(data_train['question1'][td_index]) Q2_train_words = np.array(data_train['question2'][td_index]) Q1_test_words = np.array(data_test['question1']) Q2_test_words = np.array(data_test['question2']) y_test = np.array(data_test['is_duplicate']) ###Output _____no_output_____ ###Markdown Above, you have seen that you only took the duplicated questions for training the model. You did so on purpose, because the data generator will produce batches $([q1_1, q1_2, q1_3, ...]$, $[q2_1, q2_2,q2_3, ...])$ where $q1_i$ and $q2_k$ are duplicates if and only if $i = k$. Let's print to see what your data looks like. ###Code print('TRAINING QUESTIONS:\n') print('Question 1: ', Q1_train_words[0]) print('Question 2: ', Q2_train_words[0], '\n') print('Question 1: ', Q1_train_words[5]) print('Question 2: ', Q2_train_words[5], '\n') print('TESTING QUESTIONS:\n') print('Question 1: ', Q1_test_words[0]) print('Question 2: ', Q2_test_words[0], '\n') print('is_duplicate =', y_test[0], '\n') ###Output TRAINING QUESTIONS: Question 1: Astrology: I am a Capricorn Sun Cap moon and cap rising...what does that say about me? Question 2: I'm a triple Capricorn (Sun, Moon and ascendant in Capricorn) What does this say about me? Question 1: What would a Trump presidency mean for current international master’s students on an F1 visa? Question 2: How will a Trump presidency affect the students presently in US or planning to study in US? TESTING QUESTIONS: Question 1: How do I prepare for interviews for cse? Question 2: What is the best way to prepare for cse? is_duplicate = 0 ###Markdown You will now encode each word of the selected duplicate pairs with an index. Given a question, you can then just encode it as a list of numbers. First you tokenize the questions using `nltk.word_tokenize`. You need a Python default dictionary which later, during inference, assigns the value $0$ to all Out Of Vocabulary (OOV) words. 
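The out-of-vocabulary behaviour comes from `defaultdict`: looking up any missing key returns the factory value $0$. A two-line sketch (the toy words are illustrative only): ###Code
# Sketch: a defaultdict returns 0 for any word that was never added
from collections import defaultdict
demo_vocab = defaultdict(lambda: 0, {'<PAD>': 1, 'hello': 2})
print(demo_vocab['hello'], demo_vocab['never-seen-word'])   # -> 2 0
###Output _____no_output_____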
###Code #create arrays Q1_train = np.empty_like(Q1_train_words) Q2_train = np.empty_like(Q2_train_words) Q1_test = np.empty_like(Q1_test_words) Q2_test = np.empty_like(Q2_test_words) # Building the vocabulary with the train set (this might take a minute) from collections import defaultdict vocab = defaultdict(lambda: 0) vocab['<PAD>'] = 1 for idx in range(len(Q1_train_words)): Q1_train[idx] = nltk.word_tokenize(Q1_train_words[idx]) Q2_train[idx] = nltk.word_tokenize(Q2_train_words[idx]) q = Q1_train[idx] + Q2_train[idx] for word in q: if word not in vocab: vocab[word] = len(vocab) + 1 print('The length of the vocabulary is: ', len(vocab)) print(vocab['<PAD>']) print(vocab['Astrology']) print(vocab['Astronomy']) #not in vocabulary, returns 0 for idx in range(len(Q1_test_words)): Q1_test[idx] = nltk.word_tokenize(Q1_test_words[idx]) Q2_test[idx] = nltk.word_tokenize(Q2_test_words[idx]) print('Train set has reduced to: ', len(Q1_train) ) print('Test set length: ', len(Q1_test) ) ###Output Train set has reduced to: 111486 Test set length: 10240 ###Markdown 1.2 Converting a question to a tensorYou will now convert every question to a tensor, or an array of numbers, using your vocabulary built above. ###Code # Converting questions to array of integers for i in range(len(Q1_train)): Q1_train[i] = [vocab[word] for word in Q1_train[i]] Q2_train[i] = [vocab[word] for word in Q2_train[i]] for i in range(len(Q1_test)): Q1_test[i] = [vocab[word] for word in Q1_test[i]] Q2_test[i] = [vocab[word] for word in Q2_test[i]] print('first question in the train set:\n') print(Q1_train_words[0], '\n') print('encoded version:') print(Q1_train[0],'\n') print('first question in the test set:\n') print(Q1_test_words[0], '\n') print('encoded version:') print(Q1_test[0]) ###Output first question in the train set: Astrology: I am a Capricorn Sun Cap moon and cap rising...what does that say about me? encoded version: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] first question in the test set: How do I prepare for interviews for cse? encoded version: [32, 38, 4, 107, 65, 1015, 65, 11509, 21] ###Markdown You will now split your train set into a training/validation set so that you can use it to train and evaluate your Siamese model. ###Code # Splitting the data cut_off = int(len(Q1_train)*.8) train_Q1, train_Q2 = Q1_train[:cut_off], Q2_train[:cut_off] val_Q1, val_Q2 = Q1_train[cut_off: ], Q2_train[cut_off:] print('Number of duplicate questions: ', len(Q1_train)) print("The length of the training set is: ", len(train_Q1)) print("The length of the validation set is: ", len(val_Q1)) ###Output Number of duplicate questions: 111486 The length of the training set is: 89188 The length of the validation set is: 22298 ###Markdown 1.3 Understanding the iterator Most of the time in Natural Language Processing, and AI in general we use batches when training our data sets. If you were to use stochastic gradient descent with one example at a time, it will take you forever to build a model. In this example, we show you how you can build a data generator that takes in $Q1$ and $Q2$ and returns a batch of size `batch_size` in the following format $([q1_1, q1_2, q1_3, ...]$, $[q2_1, q2_2,q2_3, ...])$. The tuple consists of two arrays and each array has `batch_size` questions. Again, $q1_i$ and $q2_i$ are duplicates, but they are not duplicates with any other elements in the batch. The command ```next(data_generator)```returns the next batch. 
This iterator returns the data in a format that you can use directly in your model when computing the feed-forward pass of your algorithm: a pair of arrays of questions. Exercise 01 **Instructions:** Implement the data generator below. Here are some things you will need. - A `while True` loop.- If `idx >= len_q`, reset `idx` to $0$.- The generator should return shuffled batches of data. To achieve this without modifying the actual question lists, a list containing the indexes of the questions is created. This list can be shuffled and used to get random batches every time the index is reset.- Append elements of $Q1$ and $Q2$ to `input1` and `input2` respectively.- If `len(input1) == batch_size`, determine `max_len` as the longest question in `input1` and `input2`. Ceil `max_len` to a power of $2$ (for computation purposes) using the following command: `max_len = 2**int(np.ceil(np.log2(max_len)))`.- Pad every question with `vocab['<PAD>']` until you get the length `max_len`.- Use yield to return `input1, input2`. - Don't forget to reset `input1, input2` to empty arrays at the end (the data generator resumes from where it last left off). ###Code # UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED FUNCTION: data_generator def data_generator(Q1, Q2, batch_size, pad=1, shuffle=True): """Generator function that yields batches of data Args: Q1 (list): List of transformed (to tensor) questions. Q2 (list): List of transformed (to tensor) questions. batch_size (int): Number of elements per batch. pad (int, optional): Pad character from the vocab. Defaults to 1. shuffle (bool, optional): If the batches should be randomized or not. Defaults to True. Yields: tuple: Of the form (input1, input2) with types (numpy.ndarray, numpy.ndarray) NOTE: input1: inputs to your model [q1a, q2a, q3a, ...] i.e. (q1a,q1b) are duplicates input2: targets to your model [q1b, q2b,q3b, ...] i.e. (q1a,q2i) i!=a are not duplicates """ input1 = [] input2 = [] idx = 0 len_q = len(Q1) question_indexes = [*range(len_q)] if shuffle: rnd.shuffle(question_indexes) ### START CODE HERE (Replace instances of 'None' with your code) ### while True: if idx >= len_q: # if idx is greater than or equal to len_q, set idx accordingly # (Hint: look at the instructions above) idx = 0 # shuffle to get random batches if shuffle is set to True if shuffle: rnd.shuffle(question_indexes) # get the questions at the `question_indexes[idx]` position in Q1 and Q2 q1 = Q1[question_indexes[idx]] q2 = Q2[question_indexes[idx]] # increment idx by 1 idx += 1 # append q1 input1.append(q1) # append q2 input2.append(q2) if len(input1) == batch_size: # determine max_len as the longest question in input1 & input 2 # Hint: use the `max` function. # take max of input1 & input2 and then max out of the two of them. 
max_len = max(max([len(ques) for ques in input1]), max([len(ques) for ques in input2])) # pad to power-of-2 (Hint: look at the instructions above) max_len = 2**int(np.ceil(np.log2(max_len))) b1 = [] b2 = [] for q1, q2 in zip(input1, input2): # add [pad] to q1 until it reaches max_len q1 = q1 + [pad] * (max_len - len(q1)) # add [pad] to q2 until it reaches max_len q2 = q2 + [pad] * (max_len - len(q2)) # append q1 b1.append(q1) # append q2 b2.append(q2) # use b1 and b2 yield np.array(b1), np.array(b2) ### END CODE HERE ### # reset the batches input1, input2 = [], [] # reset the batches batch_size = 2 res1, res2 = next(data_generator(train_Q1, train_Q2, batch_size)) print("First questions : ",'\n', res1, '\n') print("Second questions : ",'\n', res2) ###Output First questions : [[ 30 87 78 134 2132 1981 28 78 594 21 1 1 1 1 1 1] [ 30 55 78 3541 1460 28 56 253 21 1 1 1 1 1 1 1]] Second questions : [[ 30 87 78 134 2132 1981 28 78 594 21 1 1 1 1 1 1] [ 30 55 78 3541 1460 28 56 253 21 1 1 1 1 1 1 1]] ###Markdown **Note**: The following expected output is valid only if you run the above test cell **_once_** (first time). The output will change on each execution.If you think your implementation is correct and it is not matching the output, make sure to restart the kernel and run all the cells from the top again. **Expected Output:**```CPPFirst questions : [[ 30 87 78 134 2132 1981 28 78 594 21 1 1 1 1 1 1] [ 30 55 78 3541 1460 28 56 253 21 1 1 1 1 1 1 1]] Second questions : [[ 30 156 78 134 2132 9508 21 1 1 1 1 1 1 1 1 1] [ 30 156 78 3541 1460 131 56 253 21 1 1 1 1 1 1 1]]```Now that you have your generator, you can just call it and it will return tensors which correspond to your questions in the Quora data set.Now you can go ahead and start building your neural network. Part 2: Defining the Siamese model 2.1 Understanding Siamese Network A Siamese network is a neural network which uses the same weights while working in tandem on two different input vectors to compute comparable output vectors.The Siamese network you are about to implement looks like this:You get the question embedding, run it through an LSTM layer, normalize $v_1$ and $v_2$, and finally use a triplet loss (explained below) to get the corresponding cosine similarity for each pair of questions. As usual, you will start by importing the data set. The triplet loss makes use of a baseline (anchor) input that is compared to a positive (truthy) input and a negative (falsy) input. The distance from the baseline (anchor) input to the positive (truthy) input is minimized, and the distance from the baseline (anchor) input to the negative (falsy) input is maximized. In math equations, you are trying to maximize the following.$$\mathcal{L}(A, P, N)=\max \left(\|\mathrm{f}(A)-\mathrm{f}(P)\|^{2}-\|\mathrm{f}(A)-\mathrm{f}(N)\|^{2}+\alpha, 0\right)$$$A$ is the anchor input, for example $q1_1$, $P$ the duplicate input, for example, $q2_1$, and $N$ the negative input (the non duplicate question), for example $q2_2$.$\alpha$ is a margin; you can think about it as a safety net, or by how much you want to push the duplicates from the non duplicates. Exercise 02**Instructions:** Implement the `Siamese` function below. You should be using all the objects explained below. To implement this model, you will be using `trax`. Concretely, you will be using the following functions.- `tl.Serial`: Combinator that applies layers serially (by function composition) allows you set up the overall structure of the feedforward. 
  [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.combinators.Serial) / [source code](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/combinators.py#L26)
    - You can pass in the layers as arguments to `Serial`, separated by commas.
    - For example: `tl.Serial(tl.Embedding(...), tl.Mean(...), tl.Dense(...), tl.LogSoftmax(...))`
- `tl.Embedding`: Maps discrete tokens to vectors. It will have shape (vocabulary length X dimension of output vectors). The dimension of output vectors (also called d_feature) is the number of elements in the word embedding. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Embedding) / [source code](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/core.py#L113)
    - `tl.Embedding(vocab_size, d_feature)`.
    - `vocab_size` is the number of unique words in the given vocabulary.
    - `d_feature` is the number of elements in the word embedding (some choices for a word embedding size range from 150 to 300, for example).
- `tl.LSTM`: The LSTM layer. It leverages another Trax layer called [`LSTMCell`](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.rnn.LSTMCell). The number of units should be specified and should match the number of elements in the word embedding. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.rnn.LSTM) / [source code](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/rnn.py#L87)
    - `tl.LSTM(n_units)` Builds an LSTM layer of n_units.
- `tl.Mean`: Computes the mean across a desired axis. Mean uses one tensor axis to form groups of values and replaces each group with the mean value of that group. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Mean) / [source code](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/core.py#L276)
    - `tl.Mean(axis=1)` mean over columns.
- `tl.Fn`: Layer with no weights that applies the function `f`, which should be specified using a lambda syntax. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.base.Fn) / [source code](https://github.com/google/trax/blob/70f5364dcaf6ec11aabbd918e5f5e4b0f5bfb995/trax/layers/base.py#L576)
    - Here it wraps the `normalize` function ($x \to x / \lVert x \rVert_2$) used for the cosine similarity.
    - `tl.Fn('Normalize', lambda x: normalize(x))` Returns a layer with no weights that applies the function `f`.
- `tl.Parallel`: It is a combinator layer (like `Serial`) that applies a list of layers in parallel to its inputs. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.combinators.Parallel) / [source code](https://github.com/google/trax/blob/37aba571a89a8ad86be76a569d0ec4a46bdd8642/trax/layers/combinators.py#L152)

###Code
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: Siamese
def Siamese(vocab_size=len(vocab), d_model=128, mode='train'):
    """Returns a Siamese model.

    Args:
        vocab_size (int, optional): Length of the vocabulary. Defaults to len(vocab).
        d_model (int, optional): Depth of the model. Defaults to 128.
        mode (str, optional): 'train', 'eval' or 'predict', predict mode is for fast inference. Defaults to 'train'.

    Returns:
        trax.layers.combinators.Parallel: A Siamese model.
""" def normalize(x): # normalizes the vectors to have L2 norm 1 return x / fastnp.sqrt(fastnp.sum(x * x, axis=-1, keepdims=True)) ### START CODE HERE (Replace instances of 'None' with your code) ### q_processor = tl.Serial( # Processor will run on Q1 and Q2. tl.Embedding(vocab_size=vocab_size, d_feature=d_model), # Embedding layer tl.LSTM(n_units=d_model), # LSTM layer tl.Mean(axis=1), # Mean over columns tl.Fn('Normalize', lambda x: normalize(x)) # Apply normalize function ) # Returns one vector of shape [batch_size, d_model]. ### END CODE HERE ### # Run on Q1 and Q2 in parallel. model = tl.Parallel(q_processor, q_processor) return model ###Output _____no_output_____ ###Markdown Setup the Siamese network model ###Code # check your model model = Siamese() print(model) ###Output Parallel_in2_out2[ Serial[ Embedding_41699_128 LSTM_128 Mean Normalize ] Serial[ Embedding_41699_128 LSTM_128 Mean Normalize ] ] ###Markdown **Expected output:** ```CPPParallel_in2_out2[ Serial[ Embedding_41699_128 LSTM_128 Mean Normalize ] Serial[ Embedding_41699_128 LSTM_128 Mean Normalize ]]``` 2.2 Hard Negative MiningYou will now implement the `TripletLoss`.As explained in the lecture, loss is composed of two terms. One term utilizes the mean of all the non duplicates, the second utilizes the *closest negative*. Our loss expression is then: \begin{align} \mathcal{Loss_1(A,P,N)} &=\max \left( -cos(A,P) + mean_{neg} +\alpha, 0\right) \\ \mathcal{Loss_2(A,P,N)} &=\max \left( -cos(A,P) + closest_{neg} +\alpha, 0\right) \\\mathcal{Loss(A,P,N)} &= mean(Loss_1 + Loss_2) \\\end{align}Further, two sets of instructions are provided. The first set provides a brief description of the task. If that set proves insufficient, a more detailed set can be displayed. Exercise 03**Instructions (Brief):** Here is a list of things you should do: - As this will be run inside trax, use `fastnp.xyz` when using any `xyz` numpy function- Use `fastnp.dot` to calculate the similarity matrix $v_1v_2^T$ of dimension `batch_size` x `batch_size`- Take the score of the duplicates on the diagonal `fastnp.diagonal`- Use the `trax` functions `fastnp.eye` and `fastnp.maximum` for the identity matrix and the maximum. More Detailed Instructions We'll describe the algorithm using a detailed example. Below, V1, V2 are the output of the normalization blocks in our model. Here we will use a batch_size of 4 and a d_model of 3. As explained in lecture, the inputs, Q1, Q2 are arranged so that corresponding inputs are duplicates while non-corresponding entries are not. The outputs will have the same pattern.This testcase arranges the outputs, v1,v2, to highlight different scenarios. Here, the first two outputs V1[0], V2[0] match exactly - so the model is generating the same vector for Q1[0] and Q2[0] inputs. The second outputs differ, circled in orange, we set, V2[1] is set to match V2[**2**], simulating a model which is generating very poor results. V1[3] and V2[3] match exactly again while V1[4] and V2[4] are set to be exactly wrong - 180 degrees from each other, circled in blue. The first step is to compute the cosine similarity matrix or `score` in the code. As explained in lecture, this is $$V_1 V_2^T$$ This is generated with `fastnp.dot`.The clever arrangement of inputs creates the data needed for positive *and* negative examples without having to run all pair-wise combinations. Because Q1[n] is a duplicate of only Q2[n], other combinations are explicitly created negative examples or *Hard Negative* examples. 
The matrix multiplication efficiently produces the cosine similarity of all positive/negative combinations as shown above on the left side of the diagram. 'Positive' are the results of duplicate examples and 'negative' are the results of explicitly created negative examples. The results for our test case are as expected: V1[0] and V2[0] match, producing '1', while our other 'positive' cases (in green) don't match well, as was arranged. The V2[2] was set to match V1[3], producing a poor match at `score[2,2]` and an undesired 'negative' case of a '1', shown in grey.

With the similarity matrix (`score`) we can begin to implement the loss equations. First, we can extract $\cos(A,P)$ by utilizing `fastnp.diagonal`. The goal is to grab all the green entries in the diagram above. This is `positive` in the code.

Next, we will create the *closest_negative*. This is the nonduplicate entry in V2 that is closest (has largest cosine similarity) to an entry in V1. Each row, n, of `score` represents all comparisons of the results of Q1[n] vs Q2[x] within a batch. A specific example in our test case is row `score[2,:]`. It has the cosine similarity of V1[2] and V2[x]. The *closest_negative*, as was arranged, is V2[2], which has a score of 1. This is the maximum value of the 'negative' entries (blue entries in the diagram).

To implement this, we need to pick the maximum entry on a row of `score`, ignoring the 'positive'/green entries. To avoid selecting the 'positive'/green entries, we can make them larger negative numbers. Multiply `fastnp.eye(batch_size)` with 2.0 and subtract it out of `scores`. The result is `negative_without_positive`. Now we can use `fastnp.max`, row by row (axis=1), to select the maximum, which is `closest_negative`.

Next, we'll create *mean_negative*. As the name suggests, this is the mean of all the 'negative'/blue values in `score` on a row by row basis. We can use `fastnp.eye(batch_size)` and a constant, this time to create a mask with zeros on the diagonal. Element-wise multiply this with `score` to get just the 'negative' values. This is `negative_zero_on_duplicate` in the code. Compute the mean by using `fastnp.sum` on `negative_zero_on_duplicate` for `axis=1` and divide it by `(batch_size - 1)`. This is `mean_negative`.

Now, we can compute the loss using the two equations above and `fastnp.maximum`. This will form `triplet_loss1` and `triplet_loss2`. `triplet_loss` is the `fastnp.mean` of the sum of the two individual losses.

Once you have this code matching the expected results, you can copy the section between START CODE HERE and END CODE HERE and insert it into `TripletLoss` below.

###Code
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: TripletLossFn
def TripletLossFn(v1, v2, margin=0.25):
    """Custom Loss function.

    Args:
        v1 (numpy.ndarray): Array with dimension (batch_size, model_dimension) associated to Q1.
        v2 (numpy.ndarray): Array with dimension (batch_size, model_dimension) associated to Q2.
        margin (float, optional): Desired margin. Defaults to 0.25.

    Returns:
        jax.interpreters.xla.DeviceArray: Triplet Loss.
""" ### START CODE HERE (Replace instances of 'None' with your code) ### # use fastnp to take the dot product of the two batches (don't forget to transpose the second argument) scores = fastnp.dot(v1, v2.T) # pairwise cosine sim # calculate new batch size batch_size = len(scores) # use fastnp to grab all postive `diagonal` entries in `scores` positive = fastnp.diagonal(scores) # the positive ones (duplicates) # multiply `fastnp.eye(batch_size)` with 2.0 and subtract it out of `scores` negative_without_positive = scores - 2.0 * fastnp.eye(batch_size) # take the row by row `max` of `negative_without_positive`. # Hint: negative_without_positive.max(axis = [?]) closest_negative = negative_without_positive.max(axis=1) # subtract `fastnp.eye(batch_size)` out of 1.0 and do element-wise multiplication with `scores` negative_zero_on_duplicate = scores * (1.0 - fastnp.eye(batch_size)) # use `fastnp.sum` on `negative_zero_on_duplicate` for `axis=1` and divide it by `(batch_size - 1)` mean_negative = np.sum(negative_zero_on_duplicate, axis=1) / (batch_size - 1) # compute `fastnp.maximum` among 0.0 and `A` # A = subtract `positive` from `margin` and add `closest_negative` triplet_loss1 = fastnp.maximum(0.0, margin - positive + closest_negative) # compute `fastnp.maximum` among 0.0 and `B` # B = subtract `positive` from `margin` and add `mean_negative` triplet_loss2 = fastnp.maximum(0.0, margin - positive + mean_negative) # add the two losses together and take the `fastnp.mean` of it triplet_loss = fastnp.mean(fastnp.add(triplet_loss1, triplet_loss2)) ### END CODE HERE ### return triplet_loss v1 = np.array([[0.26726124, 0.53452248, 0.80178373],[0.5178918 , 0.57543534, 0.63297887]]) v2 = np.array([[ 0.26726124, 0.53452248, 0.80178373],[-0.5178918 , -0.57543534, -0.63297887]]) TripletLossFn(v2,v1) print("Triplet Loss:", TripletLossFn(v2,v1)) ###Output Triplet Loss: 0.5 ###Markdown **Expected Output:**```CPPTriplet Loss: 0.5``` To make a layer out of a function with no trainable variables, use `tl.Fn`. ###Code from functools import partial def TripletLoss(margin=0.25): triplet_loss_fn = partial(TripletLossFn, margin=margin) return tl.Fn('TripletLoss', triplet_loss_fn) ###Output _____no_output_____ ###Markdown Part 3: TrainingNow you are going to train your model. As usual, you have to define the cost function and the optimizer. You also have to feed in the built model. Before, going into the training, we will use a special data set up. We will define the inputs using the data generator we built above. The lambda function acts as a seed to remember the last batch that was given. Run the cell below to get the question pairs inputs. ###Code batch_size = 256 train_generator = data_generator(train_Q1, train_Q2, batch_size, vocab['<PAD>']) val_generator = data_generator(val_Q1, val_Q2, batch_size, vocab['<PAD>']) print('train_Q1.shape ', train_Q1.shape) print('val_Q1.shape ', val_Q1.shape) ###Output train_Q1.shape (89188,) val_Q1.shape (22298,) ###Markdown 3.1 Training the modelYou will now write a function that takes in your model and trains it. To train your model you have to decide how many times you want to iterate over the entire data set; each iteration is defined as an `epoch`. For each epoch, you have to go over all the data, using your training iterator. Exercise 04**Instructions:** Implement the `train_model` below to train the neural network above. 
Here is a list of things you should do, as already shown in lecture 7:

- Create `TrainTask` and `EvalTask`
- Create the training loop `trax.supervised.training.Loop`
- Pass in the following depending on the context (train_task or eval_task):
    - `labeled_data=generator`
    - `metrics=[TripletLoss()]`,
    - `loss_layer=TripletLoss()`
    - `optimizer=trax.optimizers.Adam` with learning rate of 0.01
    - `lr_schedule=lr_schedule`,
    - `output_dir=output_dir`

You will be using your triplet loss function with the Adam optimizer. Please read the [trax](https://trax-ml.readthedocs.io/en/latest/trax.optimizers.html?highlight=adam#trax.optimizers.adam.Adam) documentation to get a full understanding. This function should return a `training.Loop` object. To read more about this, check the [docs](https://trax-ml.readthedocs.io/en/latest/trax.supervised.html?highlight=loop#trax.supervised.training.Loop).

###Code
lr_schedule = trax.lr.warmup_and_rsqrt_decay(400, 0.01)

# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: train_model
def train_model(Siamese, TripletLoss, lr_schedule, train_generator=train_generator, val_generator=val_generator, output_dir='model/'):
    """Training the Siamese Model

    Args:
        Siamese (function): Function that returns the Siamese model.
        TripletLoss (function): Function that defines the TripletLoss loss function.
        lr_schedule (function): Trax multifactor schedule function.
        train_generator (generator, optional): Training generator. Defaults to train_generator.
        val_generator (generator, optional): Validation generator. Defaults to val_generator.
        output_dir (str, optional): Path to save model to. Defaults to 'model/'.

    Returns:
        trax.supervised.training.Loop: Training loop for the model.
    """
    output_dir = os.path.expanduser(output_dir)

    ### START CODE HERE (Replace instances of 'None' with your code) ###
    train_task = training.TrainTask(
        labeled_data=train_generator,          # Use generator (train)
        loss_layer=TripletLoss(),              # Use triplet loss. Don't forget to instantiate this object
        optimizer=trax.optimizers.Adam(0.01),  # Don't forget to add the learning rate parameter
        lr_schedule=lr_schedule,               # Use Trax multifactor schedule function
    )

    eval_task = training.EvalTask(
        labeled_data=val_generator,  # Use generator (val)
        metrics=[TripletLoss()],     # Use triplet loss. Don't forget to instantiate this object
    )
    ### END CODE HERE ###

    training_loop = training.Loop(Siamese(),
                                  train_task,
                                  eval_task=eval_task,
                                  output_dir=output_dir)

    return training_loop

train_steps = 5
training_loop = train_model(Siamese, TripletLoss, lr_schedule)
training_loop.run(train_steps)
###Output
Step      1: train TripletLoss |  0.49722433
Step      1: eval  TripletLoss |  0.49733442
###Markdown
The model was only trained for 5 steps due to the constraints of this environment. For the rest of the assignment you will be using a pretrained model, but now you should understand how the training can be done using Trax.

Part 4: Evaluation

4.1 Evaluating your siamese network

In this section you will learn how to evaluate a Siamese network. You will first start by loading a pretrained model and then you will use it to predict.
###Code
# Loading in the saved model
model = Siamese()
model.init_from_file('model.pkl.gz')
###Output
_____no_output_____
###Markdown
4.2 Classify

To determine the accuracy of the model, we will utilize the test set that was configured earlier. While in training we used only positive examples, the test data, Q1_test, Q2_test and y_test, is set up as pairs of questions, some of which are duplicates and some of which are not.
This routine will run all the test question pairs through the model, compute the cosine similarity of each pair, threshold it, and compare the result to y_test, the correct response from the data set. The results are accumulated to produce an accuracy.

Exercise 05

**Instructions**
- Loop through the incoming data in batch_size chunks
- Use the data generator to load q1, q2 a batch at a time. **Don't forget to set shuffle=False!**
- copy a batch_size chunk of y into y_test
- compute v1, v2 using the model
- for each element of the batch
    - compute the cos similarity of each pair of entries, v1[j], v2[j]
    - determine if d > threshold
    - increment accuracy if that result matches the expected results (y_test[j])
- compute the final accuracy and return

Due to some limitations of this environment, running classify multiple times may result in the kernel failing. If that happens, *Restart Kernel & Clear Output* and then run from the top. During development, consider using a smaller set of data to reduce the number of calls to model().

###Code
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: classify
def classify(test_Q1, test_Q2, y, threshold, model, vocab, data_generator=data_generator, batch_size=64):
    """Function to test the accuracy of the model.

    Args:
        test_Q1 (numpy.ndarray): Array of Q1 questions.
        test_Q2 (numpy.ndarray): Array of Q2 questions.
        y (numpy.ndarray): Array of actual target.
        threshold (float): Desired threshold.
        model (trax.layers.combinators.Parallel): The Siamese model.
        vocab (collections.defaultdict): The vocabulary used.
        data_generator (function): Data generator function. Defaults to data_generator.
        batch_size (int, optional): Size of the batches. Defaults to 64.

    Returns:
        float: Accuracy of the model.
    """
    accuracy = 0
    ### START CODE HERE (Replace instances of 'None' with your code) ###
    for i in range(0, len(test_Q1), batch_size):
        # Call the data generator (built in Ex 01) with shuffle=False using next()
        # use batch_size chunks of questions as Q1 & Q2 arguments of the data generator. e.g x[i:i + batch_size]
        # Hint: use `vocab['<PAD>']` for the `pad` argument of the data generator
        q1, q2 = next(data_generator(test_Q1[i:i + batch_size], test_Q2[i:i + batch_size], batch_size, vocab['<PAD>'], shuffle=False))
        # use batch_size chunks of actual output targets (same syntax as example above)
        y_test = y[i:i + batch_size]
        # Call the model
        v1, v2 = model((q1, q2))

        for j in range(batch_size):
            # take the dot product to compute cos similarity of each pair of entries, v1[j], v2[j]
            # don't forget to transpose the second argument
            d = np.dot(v1[j], v2[j].T)
            # is d greater than the threshold?
            res = d > threshold
            # increment accuracy if y_test is equal to `res`
            accuracy += (y_test[j] == res)
    # compute accuracy using accuracy and total length of test questions
    accuracy = accuracy / len(test_Q1)
    ### END CODE HERE ###

    return accuracy

# this takes around 1 minute
accuracy = classify(Q1_test, Q2_test, y_test, 0.7, model, vocab, batch_size=512)
print("Accuracy", accuracy)
###Output
Accuracy 0.3767578125
###Markdown
**Expected Result**
Accuracy ~0.69

Part 5: Testing with your own questions

In this section you will test the model with your own questions. You will write a function `predict` which takes two questions as input and returns $1$ or $0$ depending on whether the question pair is a duplicate or not.
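But first, we build a reverse vocabulary that allows us to map encoded questions back to words. A minimal sketch (assuming `vocab` is the word-to-index mapping used throughout; `rev_vocab` and `decode` are illustrative names, not part of the graded code):
###Code
# Invert the word -> id mapping so padded question tensors can be read back.
rev_vocab = {idx: word for word, idx in vocab.items()}

def decode(encoded_question):
    # Skip pad ids (vocab['<PAD>'] is 1 in this assignment) and return the words.
    return [rev_vocab[i] for i in encoded_question if i != vocab['<PAD>']]
###Output
_____no_output_____
###Markdown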
Write a function `predict` that takes in two questions, the model, and the vocabulary and returns whether the questions are duplicates ($1$) or not duplicates ($0$) given a similarity threshold.

Exercise 06

**Instructions:**
- Tokenize your question using `nltk.word_tokenize`
- Create Q1, Q2 by encoding your questions as a list of numbers using vocab
- pad Q1, Q2 with `next(data_generator([Q1], [Q2], 1, vocab['<PAD>']))`
- use model() to create v1, v2
- compute the cosine similarity (dot product) of v1, v2
- compute res by comparing d to the threshold

###Code
# UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: predict
def predict(question1, question2, threshold, model, vocab, data_generator=data_generator, verbose=False):
    """Function for predicting if two questions are duplicates.

    Args:
        question1 (str): First question.
        question2 (str): Second question.
        threshold (float): Desired threshold.
        model (trax.layers.combinators.Parallel): The Siamese model.
        vocab (collections.defaultdict): The vocabulary used.
        data_generator (function): Data generator function. Defaults to data_generator.
        verbose (bool, optional): If the results should be printed out. Defaults to False.

    Returns:
        bool: True if the questions are duplicates, False otherwise.
    """
    ### START CODE HERE (Replace instances of 'None' with your code) ###
    # use `nltk` word tokenize function to tokenize
    q1 = nltk.word_tokenize(question1)  # tokenize
    q2 = nltk.word_tokenize(question2)  # tokenize
    Q1, Q2 = [], []
    for word in q1:  # encode q1
        # increment by checking the 'word' index in `vocab`
        Q1 += [vocab[word]]
    for word in q2:  # encode q2
        # increment by checking the 'word' index in `vocab`
        Q2 += [vocab[word]]

    # Call the data generator (built in Ex 01) using next()
    # pass [Q1] & [Q2] as Q1 & Q2 arguments of the data generator. Set batch size as 1
    # Hint: use `vocab['<PAD>']` for the `pad` argument of the data generator
    Q1, Q2 = next(data_generator([Q1], [Q2], 1, vocab['<PAD>']))
    # Call the model
    v1, v2 = model((Q1, Q2))
    # take dot product to compute cos similarity of each pair of entries, v1, v2
    # don't forget to transpose the second argument
    d = np.dot(v1[0], v2[0].T)
    # is d greater than the threshold?
    res = d > threshold

    ### END CODE HERE ###

    if(verbose):
        print("Q1  = ", Q1, "\nQ2  = ", Q2)
        print("d   = ", d)
        print("res = ", res)

    return res

# Feel free to try with your own questions
question1 = "When will I see you?"
question2 = "When can I see you again?"
# 1 means it is duplicated, 0 otherwise
predict(question1, question2, 0.7, model, vocab, verbose=True)
###Output
Q1  =  [[585  76   4  46  53  21   1   1]] 
Q2  =  [[ 585   33    4   46   53 7280   21    1]]
d   =  0.88113236
res =  True
###Markdown
Expected Output

If input is:
```CPP
question1 = "When will I see you?"
question2 = "When can I see you again?"
```

Output is (d may vary a bit):
```CPP
Q1  =  [[585  76   4  46  53  21   1   1]] 
Q2  =  [[ 585   33    4   46   53 7280   21    1]]
d   =  0.88113236
res =  True
True
```
###Code
# Feel free to try with your own questions
question1 = "Do they enjoy eating the dessert?"
question2 = "Do they like hiking in the desert?"
# 1 means it is duplicated, 0 otherwise
predict(question1, question2, 0.7, model, vocab, verbose=True)
###Output
Q1  =  [[  443  1145  3159  1169    78 29017    21     1]] 
Q2  =  [[  443  1145  3159  1169    78 29017    21     1]] 
d   =  1.0000002
res =  True
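###Markdown
A quick hand check of the `TripletLossFn` unit test above (this follows directly from the function's own definitions; write $s = \cos(v_2[0], v_1[1]) \approx 0.9535$):

$$scores = \begin{pmatrix} 1 & s \\ -s & -1 \end{pmatrix}, \qquad positive = (1, -1), \qquad closest_{neg} = mean_{neg} = (s, -s)$$

Both hinge terms stay positive, so $Loss_1 = Loss_2 = \left(0.25 - 1 + s,\; 0.25 + 1 - s\right)$ and

$$Loss = \frac{2(s - 0.75) + 2(1.25 - s)}{2} = 0.5,$$

independent of $s$, which is why the expected test value is exactly $0.5$.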
examples/kkr_plugins_test.ipynb
###Markdown
AiiDA-KKR demo

Here is a demo that runs the Voronoi code with a follow-up KKR calculation through AiiDA, in pure Python.

Some comments:
In order to run the KKR and Voronoi codes you have to set them up as codes in AiiDA.
You might source a bashrc in the prepend text of the code setup for licensing reasons.
Also, you should symlink the ElementPotential database for the Voronoi code.

If you want to test, use submit_test, which saves all files a calculation would be run with into a local submit_test folder instead of running it.

###Code
%load_ext autoreload
%autoreload 2
%matplotlib notebook

import time
import os

from aiida import load_dbenv, is_dbenv_loaded
if not is_dbenv_loaded():
    load_dbenv()
from aiida.orm import Code, load_node
from aiida.orm import DataFactory, CalculationFactory
from aiida_kkr.tools.kkrcontrol import write_kkr_inputcard_template, fill_keywords_to_inputcard, create_keyword_default_values
from pprint import pprint
from scipy import array
from aiida_kkr.calculations.kkr import KkrCalculation
from aiida_kkr.calculations.voro import VoronoiCalculation
from aiida_kkr.parsers.voro import VoronoiParser
from aiida_kkr.parsers.kkr import KkrParser

ParameterData = DataFactory('parameter')
StructureData = DataFactory('structure')

# Prepare an AiiDA StructureData as input; example: Cu
alat = 6.830000  # in a_Bohr
abohr = 0.52917721067  # Bohr radius in Angstrom, used for unit conversion

# number of atom positions in unit cell
natyp = 1

# bravais vectors
bravais = array([[0.5, 0.5, 0.0], [0.5, 0.0, 0.5], [0.0, 0.5, 0.5]])

a = 0.5*alat*abohr
Cu = StructureData(cell=[[a, a, 0.0], [a, 0.0, a], [0.0, a, a]])
Cu.append_atom(position=[0.0, 0.0, 0.0], symbols='Cu')
#Cu.store()
Cu = load_node(79546)
print(Cu)

# Now generate a ParameterData node with the key-value pairs needed by voronoi and KKR;
# we use a helper function for some defaults and set some values
keywords = create_keyword_default_values()
keywords['NATYP'][0] = natyp
keywords['ALATBASIS'][0] = alat
keywords['NSPIN'][0] = 1
keywords['LMAX'][0] = 2
# choose only coarse energy contour and k-mesh for test purposes
keywords['NPOL'][0] = 4
keywords['NPT1'][0] = 3
keywords['NPT2'][0] = 10
keywords['NPT3'][0] = 3
keywords['BZKX'][0] = 10
keywords['BZKY'][0] = 10
keywords['RCLUSTZ'][0] = 1.50
keywords['RCLUSTXY'][0] = 1.50
# for ASA
keywords['INS'] = [0, '%i']
keywords['KSHAPE'] = [0, '%i']
pprint(keywords)

# Store the node
keyw = ParameterData(dict=keywords)
#keyw.store()
keyw = load_node(79550)
print keyw

# Running a single Voronoi calculation
code = Code.get_from_string('voro@local_mac')

calc = VoronoiCalculation()
calc.label = 'Test voronoi'
calc.set_withmpi(False)
calc.set_resources({"num_machines" : 1})
calc.set_max_wallclock_seconds(300)
calc.set_computer('local_mac')
calc.use_code(code)
calc.use_structure(Cu)
calc.use_parameters(keyw)

submit_test = False
if submit_test:
    subfolder, script_filename = calc.submit_test()
    print "Test_submit for calculation (uuid='{}')".format(
        calc.uuid)
    print "Submit file in {}".format(os.path.join(
        os.path.relpath(subfolder.abspath),
        script_filename
    ))
else:
    calc.store_all()
    print "created calculation; calc=Calculation(uuid='{}') # ID={}".format(
        calc.uuid, calc.dbnode.pk)
    calc.submit()
    print "submitted calculation; calc=Calculation(uuid='{}') # ID={}".format(
        calc.uuid, calc.dbnode.pk)

!cat submit_test/20171110-00007/_aiidasubmit.sh
!ls submit_test/20171110-00001/
! cat submit_test/20171110-00007/inputcard
# On top of the Voronoi calculation we want to run a KKR calculation;
# for this we have to get some things from the Voronoi calculation

# use the calculation run before or load a voronoi calculation
calc2 = load_node(79565)

# We create a new parameter node in which we store the emin extracted from the voronoi calculation
#emin = calc1.get_outputs_dict()['output_parameters'].get_dict()['EMIN']
emin = calc2.res.EMIN
remote = calc2.get_outputs_dict()['remote_folder']
keywords2 = keywords  # note: this aliases the dict above rather than copying it
keywords2['EMIN'][0] = emin

keyw2 = ParameterData(dict=keywords2)
#keyw2.store()
keyw2 = load_node(79570)
print keyw2

# Now we create and run the KKR calculation
code = Code.get_from_string('kkr1@local_mac')  #'kkrimp@local_mac')

calc1 = KkrCalculation()
calc1.label = 'Test kkr'
calc1.set_withmpi(False)
calc1.set_resources({"num_machines" : 1})
calc1.set_max_wallclock_seconds(300)
calc1.set_computer('local_mac')
calc1.use_code(code)
#calc1.use_structure(Cu)
calc1.use_parameters(keyw2)
calc1.use_parent_folder(remote)

submit_test = False
if submit_test:
    subfolder, script_filename = calc1.submit_test()
    print "Test_submit for calculation (uuid='{}')".format(
        calc1.uuid)
    print "Submit file in {}".format(os.path.join(
        os.path.relpath(subfolder.abspath),
        script_filename
    ))
else:
    calc1.store_all()
    print "created calculation; calc=Calculation(uuid='{}') # ID={}".format(
        calc1.uuid, calc1.dbnode.pk)
    calc1.submit()
    print "submitted calculation; calc=Calculation(uuid='{}') # ID={}".format(
        calc1.uuid, calc1.dbnode.pk)

! cat submit_test/20171110-00020/_aiidasubmit.sh
! ls submit_test/20171110-00020/
! cat submit_test/20171110-00020/inputcard

# Check with the verdi shell if everything with your calculations went right

# Voronoi parser test
n = load_node(79559)
retrieved_dict = {'retrieved' : n}
voro_parser = VoronoiParser(calc)
suc, nodes = voro_parser.parse_with_retrieved(retrieved_dict)
print suc
print nodes
print nodes[0][1].get_dict()['EMIN']
###Output
_____no_output_____
7-pandas-connections.ipynb
###Markdown
Data concatenation and more with pandas

Contents
- Loading data
- Concatenating data (column-wise)
- Missing values
- Concatenating data (row-wise)
###Code
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
Read back the data saved in Chapter 5
###Code
df = pd.read_pickle("data/df_201704health.pickle")
df
df_moved = pd.read_pickle("data/df_201704moved.pickle")
df_moved
###Output
_____no_output_____
###Markdown
Concatenating data (column-wise)
###Code
df_merged = pd.concat([df, df_moved], axis=1)
df_merged
df_201705 = pd.read_csv("data/201705health.csv", encoding="utf-8",
                        index_col='日付', parse_dates=True)
###Output
_____no_output_____
###Markdown
Load the new data for May
###Code
df_201705
###Output
_____no_output_____
###Markdown
Handling missing values
###Code
df_201705.dropna()
df_201705.fillna(0)
df_201705_fill = df_201705.fillna(method='ffill')
df_201705_fill
###Output
_____no_output_____
###Markdown
Concatenating data (row-wise)
###Code
pd.concat([df_merged, df_201705_fill], axis=0)
###Output
_____no_output_____
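###Markdown
Note that the row-wise `pd.concat` above aligns on column labels: any column missing from one frame is filled with NaN in the result. A small self-contained sketch of this behavior (the column names here are illustrative, not the columns of the health data above):
###Code
left = pd.DataFrame({"weight": [68.5, 68.1]},
                    index=pd.date_range("2017-04-01", periods=2))
right = pd.DataFrame({"weight": [67.9], "steps": [8200.0]},
                     index=pd.date_range("2017-05-01", periods=1))

# Union of columns; 'steps' is NaN for the April rows.
pd.concat([left, right], axis=0)
# join='inner' keeps only the columns present in both frames.
pd.concat([left, right], axis=0, join='inner')
###Output
_____no_output_____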